Merge "Update clearkey plugin to drm HAL v1.4" into sc-dev
diff --git a/apex/mediatranscoding.rc b/apex/mediatranscoding.rc
index fa4acf8..ae9f8ba 100644
--- a/apex/mediatranscoding.rc
+++ b/apex/mediatranscoding.rc
@@ -6,5 +6,7 @@
user media
group media
ioprio rt 4
- task_profiles ProcessCapacityHigh HighPerformance
+ # Restrict to little cores only with system-background cpuset.
+ writepid /dev/cpuset/system-background/tasks
+ interface aidl media.transcoding
disabled
diff --git a/camera/Android.bp b/camera/Android.bp
index 2c01496..6878c20 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -119,6 +119,8 @@
"aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
"aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
"aidl/android/hardware/camera2/ICameraOfflineSession.aidl",
+ "aidl/android/hardware/camera2/ICameraInjectionCallback.aidl",
+ "aidl/android/hardware/camera2/ICameraInjectionSession.aidl",
],
path: "aidl",
}
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 8af704d..873d738 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -20,6 +20,8 @@
import android.hardware.ICameraClient;
import android.hardware.camera2.ICameraDeviceUser;
import android.hardware.camera2.ICameraDeviceCallbacks;
+import android.hardware.camera2.ICameraInjectionCallback;
+import android.hardware.camera2.ICameraInjectionSession;
import android.hardware.camera2.params.VendorTagDescriptor;
import android.hardware.camera2.params.VendorTagDescriptorCache;
import android.hardware.camera2.utils.ConcurrentCameraIdCombination;
@@ -161,6 +163,9 @@
boolean supportsCameraApi(String cameraId, int apiVersion);
// Determines if a cameraId is a hidden physical camera of a logical multi-camera.
boolean isHiddenPhysicalCamera(String cameraId);
+ // Inject the external camera to replace the internal camera session.
+ ICameraInjectionSession injectCamera(String packageName, String internalCamId,
+ String externalCamId, in ICameraInjectionCallback CameraInjectionCallback);
void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
@@ -174,6 +179,13 @@
oneway void notifySystemEvent(int eventId, in int[] args);
/**
+ * Notify the camera service of a display configuration change.
+ *
+ * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+ */
+ oneway void notifyDisplayConfigurationChange();
+
+ /**
* Notify the camera service of a device physical status change. May only be called from
* a privileged process.
*
diff --git a/camera/aidl/android/hardware/camera2/ICameraInjectionCallback.aidl b/camera/aidl/android/hardware/camera2/ICameraInjectionCallback.aidl
new file mode 100644
index 0000000..9791352
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraInjectionCallback.aidl
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+import android.hardware.camera2.ICameraInjectionSession;
+
+/**
+ * Binder interface used to report error states from the injected external camera;
+ * the camera service can switch back to the internal camera when the binder signals process death.
+ *
+ * @hide
+ */
+interface ICameraInjectionCallback
+{
+ // Error codes for onInjectionError
+ // To indicate all invalid error codes
+ const int ERROR_INJECTION_INVALID_ERROR = -1;
+ // To indicate the camera injection session has encountered a fatal error, such as an
+ // injection init failure, a configure failure, or an injection failure.
+ const int ERROR_INJECTION_SESSION = 0;
+ // To indicate the camera service has encountered a fatal error.
+ const int ERROR_INJECTION_SERVICE = 1;
+ // To indicate the injection camera does not support certain camera functions, such as an
+ // unsupported stream format, missing capture/record support, or missing multi-camera support.
+ // When this error occurs, the session stays in the injected state by default, and the app is
+ // notified so it can display an error message and a black screen.
+ const int ERROR_INJECTION_UNSUPPORTED = 2;
+
+ oneway void onInjectionError(int errorCode);
+}
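
The error codes above are the only values onInjectionError() delivers. Below is a minimal sketch of how a client-side handler might dispatch on them; the handler name and logging are illustrative, and the constants are copied from the AIDL declaration above rather than taken from generated code.

    #include <cstdio>

    // Constants mirroring ICameraInjectionCallback.aidl above.
    constexpr int ERROR_INJECTION_INVALID_ERROR = -1;
    constexpr int ERROR_INJECTION_SESSION = 0;
    constexpr int ERROR_INJECTION_SERVICE = 1;
    constexpr int ERROR_INJECTION_UNSUPPORTED = 2;

    // Hypothetical handler run when onInjectionError() is delivered to the client.
    void handleInjectionError(int errorCode) {
        switch (errorCode) {
            case ERROR_INJECTION_SESSION:
                // Fatal injection session error (init/configure/inject failure).
                std::printf("injection session failed\n");
                break;
            case ERROR_INJECTION_SERVICE:
                // Camera service hit a fatal error; expect a switch back to the internal camera.
                std::printf("camera service error during injection\n");
                break;
            case ERROR_INJECTION_UNSUPPORTED:
                // Injected camera lacks a required feature; stay injected, surface an error UI.
                std::printf("injected camera unsupported for current use case\n");
                break;
            default:  // includes ERROR_INJECTION_INVALID_ERROR
                std::printf("unknown injection error %d\n", errorCode);
                break;
        }
    }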
diff --git a/camera/aidl/android/hardware/camera2/ICameraInjectionSession.aidl b/camera/aidl/android/hardware/camera2/ICameraInjectionSession.aidl
new file mode 100644
index 0000000..c31c30b
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraInjectionSession.aidl
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+/** @hide */
+interface ICameraInjectionSession
+{
+ oneway void stopInjection();
+}
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 7387442..dab2fef 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -24,6 +24,28 @@
using namespace android;
+// Formats not listed in the public API, but still available to AImageReader
+// Enum value must match corresponding enum in ui/PublicFormat.h (which is not
+// available to VNDK)
+enum AIMAGE_PRIVATE_FORMATS {
+ /**
+ * Unprocessed implementation-dependent raw depth measurements, opaque with 16 bit
+ * samples.
+ */
+ AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
+
+ /**
+ * Device-specific 10-bit depth RAW image format.
+ *
+ * <p>Unprocessed implementation-dependent raw depth measurements, opaque with 10 bit samples
+ * and a device-specific bit layout.</p>
+ */
+ AIMAGE_FORMAT_RAW_DEPTH10 = 0x1003,
+};
+
/**
* ACameraMetadata Implementation
*/
@@ -290,6 +312,10 @@
format = AIMAGE_FORMAT_DEPTH_POINT_CLOUD;
} else if (format == HAL_PIXEL_FORMAT_Y16) {
format = AIMAGE_FORMAT_DEPTH16;
+ } else if (format == HAL_PIXEL_FORMAT_RAW16) {
+ format = static_cast<int32_t>(AIMAGE_FORMAT_RAW_DEPTH);
+ } else if (format == HAL_PIXEL_FORMAT_RAW10) {
+ format = static_cast<int32_t>(AIMAGE_FORMAT_RAW_DEPTH10);
}
filteredDepthStreamConfigs.push_back(format);
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 6c1cf33..2b7f040 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -61,6 +61,10 @@
*/
typedef void (*ACameraCaptureSession_stateCallback)(void* context, ACameraCaptureSession *session);
+/**
+ * Capture session state callbacks used in {@link ACameraDevice_createCaptureSession} and
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters}
+ */
typedef struct ACameraCaptureSession_stateCallbacks {
/// optional application context.
void* context;
@@ -246,6 +250,10 @@
void* context, ACameraCaptureSession* session,
ACaptureRequest* request, ACameraWindowType* window, int64_t frameNumber);
+/**
+ * ACameraCaptureSession_captureCallbacks structure used in
+ * {@link ACameraCaptureSession_capture} and {@link ACameraCaptureSession_setRepeatingRequest}.
+ */
typedef struct ACameraCaptureSession_captureCallbacks {
/// optional application context.
void* context;
@@ -413,7 +421,10 @@
*/
void ACameraCaptureSession_close(ACameraCaptureSession* session);
-struct ACameraDevice;
+/**
+ * ACameraDevice is an opaque type that provides access to a camera device.
+ * A pointer can be obtained using the {@link ACameraManager_openCamera} method.
+ */
typedef struct ACameraDevice ACameraDevice;
/**
@@ -591,6 +602,10 @@
camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
__INTRODUCED_IN(24);
+/**
+ * Opaque object for capture session output; use {@link ACaptureSessionOutput_create} or
+ * {@link ACaptureSessionSharedOutput_create} to create an instance.
+ */
typedef struct ACaptureSessionOutput ACaptureSessionOutput;
/**
@@ -604,9 +619,9 @@
*
* <p>Native windows that get removed must not be part of any active repeating or single/burst
* request or have any pending results. Consider updating repeating requests via
- * {@link ACaptureSessionOutput_setRepeatingRequest} and then wait for the last frame number
+ * {@link ACameraCaptureSession_setRepeatingRequest} and then wait for the last frame number
* when the sequence completes
- * {@link ACameraCaptureSession_captureCallback#onCaptureSequenceCompleted}.</p>
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.</p>
*
* <p>Native windows that get added must not be part of any other registered ACaptureSessionOutput
* and must be compatible. Compatible windows must have matching format, rotation and
@@ -713,7 +728,15 @@
* Same as ACameraCaptureSession_captureCallbacks
*/
void* context;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted}.
+ */
ACameraCaptureSession_captureCallback_start onCaptureStarted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureProgressed}.
+ */
ACameraCaptureSession_captureCallback_result onCaptureProgressed;
/**
@@ -751,10 +774,18 @@
ACameraCaptureSession_logicalCamera_captureCallback_failed onLogicalCameraCaptureFailed;
/**
- * Same as ACameraCaptureSession_captureCallbacks
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.
*/
ACameraCaptureSession_captureCallback_sequenceEnd onCaptureSequenceCompleted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceAborted}.
+ */
ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureBufferLost}.
+ */
ACameraCaptureSession_captureCallback_bufferLost onCaptureBufferLost;
} ACameraCaptureSession_logicalCamera_captureCallbacks;
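
The state-callback documentation added above is easiest to read next to a usage sketch. The following is a minimal, hedged example of populating ACameraCaptureSession_stateCallbacks and passing it to ACameraDevice_createCaptureSession; it assumes the device and output container were created elsewhere (for example via ACameraManager_openCamera and ACaptureSessionOutputContainer_create), and the callback function names are placeholders.

    #include <camera/NdkCameraCaptureSession.h>
    #include <camera/NdkCameraDevice.h>

    // Placeholder state callbacks; real applications would track session state here.
    static void onSessionReady(void* /*context*/, ACameraCaptureSession* /*session*/) {}
    static void onSessionActive(void* /*context*/, ACameraCaptureSession* /*session*/) {}
    static void onSessionClosed(void* /*context*/, ACameraCaptureSession* /*session*/) {}

    // Assumes `device` and `outputs` were set up by the caller.
    camera_status_t createSessionWithCallbacks(ACameraDevice* device,
                                               ACaptureSessionOutputContainer* outputs,
                                               ACameraCaptureSession** sessionOut) {
        ACameraCaptureSession_stateCallbacks stateCallbacks = {};
        stateCallbacks.context = nullptr;          // optional application context
        stateCallbacks.onReady = onSessionReady;   // no pending captures
        stateCallbacks.onActive = onSessionActive; // capture requests in flight
        stateCallbacks.onClosed = onSessionClosed; // session closed
        return ACameraDevice_createCaptureSession(device, outputs, &stateCallbacks, sessionOut);
    }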
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index f72fe8d..7be4bd3 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -124,6 +124,10 @@
*/
typedef void (*ACameraDevice_ErrorStateCallback)(void* context, ACameraDevice* device, int error);
+/**
+ * Applications' callbacks for camera device state changes; register them with
+ * {@link ACameraManager_openCamera}.
+ */
typedef struct ACameraDevice_StateCallbacks {
/// optional application context.
void* context;
@@ -198,6 +202,10 @@
*/
const char* ACameraDevice_getId(const ACameraDevice* device) __INTRODUCED_IN(24);
+/**
+ * Capture request pre-defined template types, used in {@link ACameraDevice_createCaptureRequest}
+ * and {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ */
typedef enum {
/**
* Create a request suitable for a camera preview window. Specifically, this
@@ -301,10 +309,12 @@
const ACameraDevice* device, ACameraDevice_request_template templateId,
/*out*/ACaptureRequest** request) __INTRODUCED_IN(24);
-
+/**
+ * Opaque object for the CaptureSessionOutput container; use
+ * {@link ACaptureSessionOutputContainer_create} to create an instance.
+ */
typedef struct ACaptureSessionOutputContainer ACaptureSessionOutputContainer;
-typedef struct ACaptureSessionOutput ACaptureSessionOutput;
/**
* Create a capture session output container.
@@ -844,7 +854,7 @@
/*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
/**
- * Check whether a particular {@ACaptureSessionOutputContainer} is supported by
+ * Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
* the camera device.
*
* <p>This method performs a runtime check of a given {@link
@@ -875,6 +885,7 @@
* device.</li>
* <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
* supported by the camera device.</li>
+ * </ul>
*/
camera_status_t ACameraDevice_isSessionConfigurationSupported(
const ACameraDevice* device,
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 9d77eb4..26db7f2 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,7 +40,13 @@
__BEGIN_DECLS
+/**
+ * Camera status enum types.
+ */
typedef enum {
+ /**
+ * Camera operation has succeeded.
+ */
ACAMERA_OK = 0,
ACAMERA_ERROR_BASE = -10000,
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index be32b11..729182e 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -326,7 +326,7 @@
* @see ACameraManager_registerExtendedAvailabilityCallback
*/
typedef struct ACameraManager_ExtendedAvailabilityListener {
- ///
+ /// Called when a camera becomes available or unavailable
ACameraManager_AvailabilityCallbacks availabilityCallbacks;
/// Called when there is camera access permission change
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index 0d5e6c4..b331d50 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -256,10 +256,12 @@
/**
* Return a {@link ACameraMetadata} that references the same data as
- * {@link cameraMetadata}, which is an instance of
- * {@link android.hardware.camera2.CameraMetadata} (e.g., a
- * {@link android.hardware.camera2.CameraCharacteristics} or
- * {@link android.hardware.camera2.CaptureResult}).
+ * <a href="/reference/android/hardware/camera2/CameraMetadata">
+ * android.hardware.camera2.CameraMetadata</a> from Java API. (e.g., a
+ * <a href="/reference/android/hardware/camera2/CameraCharacteristics">
+ * android.hardware.camera2.CameraCharacteristics</a>
+ * or <a href="/reference/android/hardware/camera2/CaptureResult">
+ * android.hardware.camera2.CaptureResult</a>).
*
* <p>The returned ACameraMetadata must be freed by the application by {@link ACameraMetadata_free}
* after application is done using it.</p>
@@ -269,11 +271,13 @@
* the Java metadata is garbage collected.
*
* @param env the JNI environment.
- * @param cameraMetadata the source {@link android.hardware.camera2.CameraMetadata} from which the
+ * @param cameraMetadata the source <a href="/reference/android/hardware/camera2/CameraMetadata">
+ * android.hardware.camera2.CameraMetadata</a> from which the
* returned {@link ACameraMetadata} is a view.
*
- * @return a valid ACameraMetadata pointer or NULL if {@link cameraMetadata} is null or not a valid
- * instance of {@link android.hardware.camera2.CameraMetadata}.
+ * @return a valid ACameraMetadata pointer or NULL if cameraMetadata is null or not a valid
+ * instance of <a href="/reference/android/hardware/camera2/CameraMetadata">
+ * android.hardware.camera2.CameraMetadata</a>.
*
*/
ACameraMetadata* ACameraMetadata_fromCameraMetadata(JNIEnv* env, jobject cameraMetadata)
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 70ce864..d1b4ede 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1868,7 +1868,7 @@
* <li>If the camera device has BURST_CAPTURE capability, the frame rate requirement of
* BURST_CAPTURE must still be met.</li>
* <li>All streams not larger than the maximum streaming dimension for BOKEH_STILL_CAPTURE mode
- * (queried via {@link ACAMERA_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_CAPABILITIES })
+ * (queried via {@link ACAMERA_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES })
* will have preview bokeh effect applied.</li>
* </ul>
* <p>When set to BOKEH_CONTINUOUS mode, configured streams dimension should not exceed this mode's
@@ -3502,7 +3502,7 @@
* preCorrectionActiveArraySize covers the camera device's field of view "after" zoom. See
* ACAMERA_CONTROL_ZOOM_RATIO for details.</p>
* <p>For camera devices with the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability, ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION /
* ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION must be used as the
* coordinate system for requests where ACAMERA_SENSOR_PIXEL_MODE is set to
@@ -3578,7 +3578,7 @@
* android.scaler.availableInputOutputFormatsMap.</p>
* <p>The following table describes the minimum required output stream
* configurations based on the hardware level
- * (ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL):</p>
+ * (ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL), prior to Android 12:</p>
* <p>Format | Size | Hardware Level | Notes
* :-------------:|:--------------------------------------------:|:--------------:|:--------------:
* JPEG | ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE | Any |
@@ -3589,6 +3589,21 @@
* YUV_420_888 | all output sizes available for JPEG | FULL |
* YUV_420_888 | all output sizes available for JPEG, up to the maximum video size | LIMITED |
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
+ * <p>Starting from Android 12, the camera device may not support JPEG sizes smaller than the
+ * minimum of 1080p and the camera sensor active array size. The requirements for
+ * IMPLEMENTATION_DEFINED and YUV_420_888 stay the same. The new minimum required output
+ * stream configurations are illustrated by the table below:</p>
+ * <p>Format | Size | Hardware Level | Notes
+ * :-------------:|:--------------------------------------------:|:--------------:|:--------------:
+ * JPEG | ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE | Any |
+ * JPEG | 1920x1080 (1080p) | Any | if 1080p <= activeArraySize
+ * YUV_420_888 | ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE | FULL |
+ * YUV_420_888 | 1920x1080 (1080p) | FULL | if 1080p <= activeArraySize
+ * YUV_420_888 | 1280x720 (720p) | FULL | if 720p <= activeArraySize
+ * YUV_420_888 | 640x480 (480p) | FULL | if 480p <= activeArraySize
+ * YUV_420_888 | 320x240 (240p) | FULL | if 240p <= activeArraySize
+ * YUV_420_888 | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED |
+ * IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
* mandatory stream configurations on a per-capability basis.</p>
* <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
@@ -3964,7 +3979,7 @@
* configurations which belong to this physical camera, and it will advertise and will only
* advertise the maximum supported resolutions for a particular format.</p>
* <p>If this camera device isn't a physical camera device constituting a logical camera,
- * but a standalone <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * but a standalone <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* camera, this field represents the multi-resolution input/output stream configurations of
* default mode and max resolution modes. The sizes will be the maximum resolution of a
* particular format for default mode and max resolution mode.</p>
@@ -4867,12 +4882,12 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_DEFAULT">CameraMetadata#SENSOR_PIXEL_MODE_DEFAULT</a> mode.
* When operating in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_DEFAULT">CameraMetadata#SENSOR_PIXEL_MODE_DEFAULT</a> mode, sensors
- * with <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * with <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability would typically perform pixel binning in order to improve low light
* performance, noise reduction etc. However, in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
* mode (supported only
- * by <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * by <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* sensors), sensors typically operate in unbinned mode allowing for a larger image size.
* The stream configurations supported in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
@@ -4905,7 +4920,7 @@
* </ul></p>
*
* <p>This key will only be present in devices advertisting the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability which also advertise <code>REMOSAIC_REPROCESSING</code> capability. On all other devices
* RAW targets will have a regular bayer pattern.</p>
*/
@@ -5231,7 +5246,7 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
* counterparts.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
* <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
@@ -5263,7 +5278,7 @@
* is, when ACAMERA_SENSOR_PIXEL_MODE is set to
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
*
* @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
@@ -5291,7 +5306,7 @@
* when ACAMERA_SENSOR_PIXEL_MODE is set to
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
* <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
@@ -5321,7 +5336,7 @@
* <p>This key will not be present if REMOSAIC_REPROCESSING is not supported, since RAW images
* will have a regular bayer pattern.</p>
* <p>This key will not be present for sensors which don't have the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
*/
ACAMERA_SENSOR_INFO_BINNING_FACTOR = // int32[2]
@@ -9264,13 +9279,13 @@
/**
* <p>This is the default sensor pixel mode. This is the only sensor pixel mode
* supported unless a camera device advertises
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.</p>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.</p>
*/
ACAMERA_SENSOR_PIXEL_MODE_DEFAULT = 0,
/**
* <p>This sensor pixel mode is offered by devices with capability
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.
* In this mode, sensors typically do not bin pixels, as a result can offer larger
* image sizes.</p>
*/
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index df977da..0838fba 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -50,4 +50,6 @@
typedef ANativeWindow ACameraWindowType;
#endif
+/** @} */
+
#endif //_NDK_CAMERA_WINDOW_TYPE_H
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index a4dc374..d83c5b3 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,10 +44,10 @@
__BEGIN_DECLS
-// Container for output targets
+/** Container for output targets */
typedef struct ACameraOutputTargets ACameraOutputTargets;
-// Container for a single output target
+/** Container for a single output target */
typedef struct ACameraOutputTarget ACameraOutputTarget;
/**
@@ -383,10 +383,10 @@
* Set/change a camera capture control entry with unsigned 8 bits data type for
* a physical camera backing a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_u8, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_u8, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -413,10 +413,10 @@
* Set/change a camera capture control entry with signed 32 bits data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_i32, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_i32, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -443,10 +443,10 @@
* Set/change a camera capture control entry with float data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_float, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_float, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -473,10 +473,10 @@
* Set/change a camera capture control entry with signed 64 bits data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_i64, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_i64, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -503,10 +503,10 @@
* Set/change a camera capture control entry with double data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_double, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_double, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -533,10 +533,10 @@
* Set/change a camera capture control entry with rational data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_rational, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_rational, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 988cda9..ec0b878 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -211,7 +211,7 @@
}
auto allLogs(gLogBuf.getLogs());
- LOG2BI("framework logs size %zu; plugin logs size %zu",
+ LOG2BD("framework logs size %zu; plugin logs size %zu",
allLogs.size(), pluginLogs.size());
std::copy(pluginLogs.begin(), pluginLogs.end(), std::back_inserter(allLogs));
std::sort(allLogs.begin(), allLogs.end(),
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
index a537e63..7c6d86c 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
@@ -22,7 +22,6 @@
#include <openssl/aes.h>
#include <utils/KeyedVector.h>
#include <utils/Mutex.h>
-#include <utils/RefBase.h>
namespace android {
struct ABuffer;
@@ -30,7 +29,7 @@
namespace clearkeycas {
class KeyFetcher;
-class ClearKeyCasSession : public RefBase {
+class ClearKeyCasSession {
public:
explicit ClearKeyCasSession(CasPlugin *plugin);
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
}
infoMap.clear();
+ android::Mutex::Autolock lock(mPlayPolicyLock);
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
}
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
void initProperties();
void setPlayPolicy();
- android::Mutex mPlayPolicyLock;
+ mutable android::Mutex mPlayPolicyLock;
android::KeyedVector<String8, String8> mPlayPolicy;
android::KeyedVector<String8, String8> mStringProperties;
android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index 619ece0..6c68532 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -50,7 +50,7 @@
relative_install_path: "hw",
- cflags: ["-Wall", "-Werror"],
+ cflags: ["-Wall", "-Werror", "-Wthread-safety"],
shared_libs: [
"android.hardware.drm@1.0",
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 18ceed0..b92f236 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -37,6 +37,8 @@
sp<IMemory> hidlMemory = mapMemory(base);
ALOGE_IF(hidlMemory == nullptr, "mapMemory returns nullptr");
+ std::lock_guard<std::mutex> shared_buffer_lock(mSharedBufferLock);
+
// allow mapMemory to return nullptr
mSharedBufferMap[bufferId] = hidlMemory;
return Void();
@@ -94,6 +96,7 @@
return Void();
}
+ std::unique_lock<std::mutex> shared_buffer_lock(mSharedBufferLock);
if (mSharedBufferMap.find(source.bufferId) == mSharedBufferMap.end()) {
_hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
"source decrypt buffer base not set");
@@ -142,12 +145,17 @@
base = static_cast<uint8_t *>(static_cast<void *>(destBase->getPointer()));
- if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+ totalSize = 0;
+ if (__builtin_add_overflow(destBuffer.offset, destBuffer.size, &totalSize) ||
+ totalSize > destBase->getSize()) {
+ android_errorWriteLog(0x534e4554, "176444622");
_hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
return Void();
}
- destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+ destPtr = static_cast<void*>(base + destination.nonsecureMemory.offset);
+ // release mSharedBufferLock
+ shared_buffer_lock.unlock();
// Calculate the output buffer size and determine if any subsamples are
// encrypted.
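
The destination bounds check above now uses __builtin_add_overflow so that a wrapping offset + size cannot slip past the comparison against the shared-buffer size. A standalone sketch of the same pattern, with hypothetical names:

    #include <cstddef>
    #include <cstdint>

    // Returns true only when [offset, offset + size) fits in a buffer of baseSize bytes.
    // A plain "offset + size > baseSize" check can be defeated by unsigned wraparound.
    bool rangeFitsInBuffer(size_t offset, size_t size, size_t baseSize) {
        size_t end = 0;
        if (__builtin_add_overflow(offset, size, &end)) {
            return false;  // offset + size wrapped around
        }
        return end <= baseSize;
    }

    // Example: offset = SIZE_MAX, size = 2 wraps to 1 and would pass the naive check,
    // but rangeFitsInBuffer() rejects it.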
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index 98cc1c3..4318af4 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -221,6 +221,7 @@
if (requestString.find(kOfflineLicense) != std::string::npos) {
std::string emptyResponse;
std::string keySetIdString(keySetId.begin(), keySetId.end());
+ Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.StoreLicense(keySetIdString,
DeviceFiles::kLicenseStateReleasing,
emptyResponse)) {
@@ -336,6 +337,7 @@
}
*keySetId = kKeySetIdPrefix + ByteArrayToHexString(
reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
+ Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.LicenseExists(*keySetId)) {
// collision, regenerate
ALOGV("Retry generating KeySetId");
@@ -393,6 +395,7 @@
if (status == Status::OK) {
if (isOfflineLicense) {
if (isRelease) {
+ Mutex::Autolock lock(mFileHandleLock);
mFileHandle.DeleteLicense(keySetId);
mSessionLibrary->destroySession(session);
} else {
@@ -401,6 +404,7 @@
return Void();
}
+ Mutex::Autolock lock(mFileHandleLock);
bool ok = mFileHandle.StoreLicense(
keySetId,
DeviceFiles::kLicenseStateActive,
@@ -455,6 +459,7 @@
DeviceFiles::LicenseState licenseState;
std::string offlineLicense;
Status status = Status::OK;
+ Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
&licenseState, &offlineLicense)) {
ALOGE("Failed to restore offline license");
@@ -577,7 +582,6 @@
Return<void> DrmPlugin::queryKeyStatus(
const hidl_vec<uint8_t>& sessionId,
queryKeyStatus_cb _hidl_cb) {
-
if (sessionId.size() == 0) {
// Returns empty key status KeyValue pair
_hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -587,12 +591,14 @@
std::vector<KeyValue> infoMapVec;
infoMapVec.clear();
+ mPlayPolicyLock.lock();
KeyValue keyValuePair;
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
keyValuePair.key = mPlayPolicy[i].key;
keyValuePair.value = mPlayPolicy[i].value;
infoMapVec.push_back(keyValuePair);
}
+ mPlayPolicyLock.unlock();
_hidl_cb(Status::OK, toHidlVec(infoMapVec));
return Void();
}
@@ -780,6 +786,8 @@
}
Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
+ Mutex::Autolock lock(mFileHandleLock);
+
std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
std::vector<KeySetId> keySetIds;
if (mMockError != Status_V1_2::OK) {
@@ -800,6 +808,7 @@
return toStatus_1_0(mMockError);
}
std::string licenseName(keySetId.begin(), keySetId.end());
+ Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.DeleteLicense(licenseName)) {
return Status::OK;
}
@@ -808,6 +817,8 @@
Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
getOfflineLicenseState_cb _hidl_cb) {
+ Mutex::Autolock lock(mFileHandleLock);
+
std::string licenseName(keySetId.begin(), keySetId.end());
DeviceFiles::LicenseState state;
std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index 56910be..e61db3f 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,11 +24,13 @@
}
bool MemoryFileSystem::FileExists(const std::string& fileName) const {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
return result != mMemoryFileSystem.end();
}
ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
return static_cast<ssize_t>(result->second.getFileSize());
@@ -40,6 +42,7 @@
std::vector<std::string> MemoryFileSystem::ListFiles() const {
std::vector<std::string> list;
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
for (const auto& filename : mMemoryFileSystem) {
list.push_back(filename.first);
}
@@ -48,6 +51,7 @@
size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
std::string key = GetFileName(path);
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
std::string serializedHashFile = result->second.getContent();
@@ -61,6 +65,7 @@
size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
std::string key = GetFileName(path);
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(key);
@@ -70,6 +75,7 @@
}
bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(result);
@@ -81,6 +87,7 @@
}
bool MemoryFileSystem::RemoveAllFiles() {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
mMemoryFileSystem.clear();
return mMemoryFileSystem.empty();
}
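
The MemoryFileSystem changes follow a single pattern: every accessor takes the new mutex, and the map is annotated with GUARDED_BY so the -Wthread-safety flag added in Android.bp can check it. A small self-contained sketch of that pattern follows; the class and member names here are illustrative, not the plugin's.

    #include <map>
    #include <mutex>
    #include <string>

    // Clang thread-safety annotation; android-base/thread_annotations.h provides the
    // same macro in the patch above. The fallback keeps this sketch self-contained.
    #if defined(__clang__)
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #else
    #define GUARDED_BY(x)
    #endif

    class SmallFileStore {
      public:
        bool exists(const std::string& name) const {
            std::lock_guard<std::mutex> lock(mLock);  // every accessor takes the lock
            return mFiles.find(name) != mFiles.end();
        }
        void write(const std::string& name, std::string content) {
            std::lock_guard<std::mutex> lock(mLock);
            mFiles[name] = std::move(content);
        }

      private:
        mutable std::mutex mLock;  // mutable so const readers can still lock
        std::map<std::string, std::string> mFiles GUARDED_BY(mLock);
    };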
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
index 53fe1dc..a7b2427 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
@@ -20,6 +20,8 @@
#include <android/hardware/drm/1.2/ICryptoPlugin.h>
#include <android/hidl/memory/1.0/IMemory.h>
+#include <mutex>
+
#include "ClearKeyTypes.h"
#include "Session.h"
#include "Utils.h"
@@ -93,7 +95,7 @@
const SharedBuffer& source,
uint64_t offset,
const DestinationBuffer& destination,
- decrypt_1_2_cb _hidl_cb);
+ decrypt_1_2_cb _hidl_cb) NO_THREAD_SAFETY_ANALYSIS; // use unique_lock
Return<void> setSharedBufferBase(const hidl_memory& base,
uint32_t bufferId);
@@ -105,7 +107,8 @@
private:
CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoPlugin);
- std::map<uint32_t, sp<IMemory> > mSharedBufferMap;
+ std::mutex mSharedBufferLock;
+ std::map<uint32_t, sp<IMemory>> mSharedBufferMap GUARDED_BY(mSharedBufferLock);
sp<Session> mSession;
Status mInitStatus;
};
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index cb5c9fe..5d6e3da 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -432,7 +432,8 @@
mMockError = Status_V1_2::OK;
}
- DeviceFiles mFileHandle;
+ DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
+ Mutex mFileHandleLock;
Mutex mSecureStopLock;
CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index 1d98860..a90d818 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,7 +5,9 @@
#ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
#define CLEARKEY_MEMORY_FILE_SYSTEM_H_
+#include <android-base/thread_annotations.h>
#include <map>
+#include <mutex>
#include <string>
#include "ClearKeyTypes.h"
@@ -49,10 +51,12 @@
size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
private:
+ mutable std::mutex mMemoryFileSystemLock;
+
// License file name is made up of a unique keySetId, therefore,
// the filename can be used as the key to locate licenses in the
// memory file system.
- std::map<std::string, MemoryFile> mMemoryFileSystem;
+ std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
std::string GetFileName(const std::string& path);
diff --git a/media/bufferpool/2.0/BufferPoolClient.cpp b/media/bufferpool/2.0/BufferPoolClient.cpp
index 9308b81..cda23ff 100644
--- a/media/bufferpool/2.0/BufferPoolClient.cpp
+++ b/media/bufferpool/2.0/BufferPoolClient.cpp
@@ -29,7 +29,7 @@
namespace V2_0 {
namespace implementation {
-static constexpr int64_t kReceiveTimeoutUs = 1000000; // 100ms
+static constexpr int64_t kReceiveTimeoutUs = 2000000; // 2s
static constexpr int kPostMaxRetry = 3;
static constexpr int kCacheTtlUs = 1000000; // TODO: tune
static constexpr size_t kMaxCachedBufferCount = 64;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index ea76cbb..d865ab2 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -272,8 +272,9 @@
return UNKNOWN_ERROR;
}
- if (sbrMode != -1 && aacProfile == C2Config::PROFILE_AAC_ELD) {
- if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, sbrMode)) {
+ if (sbrMode != C2Config::AAC_SBR_AUTO && aacProfile == C2Config::PROFILE_AAC_ELD) {
+ int aacSbrMode = sbrMode != C2Config::AAC_SBR_OFF;
+ if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, aacSbrMode)) {
ALOGE("Failed to set AAC encoder parameters");
return UNKNOWN_ERROR;
}
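
The AAC change above stops passing the C2Config SBR enum straight into AACENC_SBR_MODE: AAC_SBR_AUTO now leaves the encoder default untouched, and any other setting is collapsed to a 0/1 value. A hedged sketch of that mapping, using a local stand-in enum rather than the real C2Config values:

    // Local stand-in for the C2Config SBR settings referenced above; the numeric values
    // of the real enum are not reproduced here.
    enum class SbrSetting { kOff, kSingleRate, kDualRate, kAuto };

    // Returns false for "auto" (keep the encoder's own default); otherwise writes the
    // 0/1 value that AACENC_SBR_MODE expects for ELD streams.
    bool mapSbrSetting(SbrSetting setting, int* aacSbrModeOut) {
        if (setting == SbrSetting::kAuto) {
            return false;
        }
        *aacSbrModeOut = (setting != SbrSetting::kOff) ? 1 : 0;
        return true;
    }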
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 0207311..e8287f9 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -26,7 +26,6 @@
#include <SimpleC2Interface.h>
#include "C2SoftAvcDec.h"
-#include "ih264d.h"
namespace android {
@@ -391,12 +390,14 @@
}
while (true) {
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
- setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (0 == s_decode_op.u4_output_present) {
+ setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+ if (0 == ps_decode_op->u4_output_present) {
resetPlugin();
break;
}
@@ -411,8 +412,8 @@
}
status_t C2SoftAvcDec::createDecoder() {
- ivdext_create_ip_t s_create_ip;
- ivdext_create_op_t s_create_op;
+ ivdext_create_ip_t s_create_ip = {};
+ ivdext_create_op_t s_create_op = {};
s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
@@ -438,8 +439,8 @@
}
status_t C2SoftAvcDec::setNumCores() {
- ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
- ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+ ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+ ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -458,22 +459,26 @@
}
status_t C2SoftAvcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
- ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
- ivd_ctl_set_config_op_t s_set_dyn_params_op;
+ ih264d_ctl_set_config_ip_t s_h264d_set_dyn_params_ip = {};
+ ih264d_ctl_set_config_op_t s_h264d_set_dyn_params_op = {};
+ ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+ &s_h264d_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+ ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+ &s_h264d_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
- s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
- s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
- s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
- s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
- s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
- s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
- s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
- s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+ ps_set_dyn_params_ip->u4_size = sizeof(ih264d_ctl_set_config_ip_t);
+ ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+ ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+ ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+ ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+ ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+ ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+ ps_set_dyn_params_op->u4_size = sizeof(ih264d_ctl_set_config_op_t);
IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
- &s_set_dyn_params_ip,
- &s_set_dyn_params_op);
+ &s_h264d_set_dyn_params_ip,
+ &s_h264d_set_dyn_params_op);
if (status != IV_SUCCESS) {
- ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+ ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
return UNKNOWN_ERROR;
}
@@ -481,8 +486,8 @@
}
void C2SoftAvcDec::getVersion() {
- ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
- ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+ ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+ ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
UWORD8 au1_buf[512];
s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -538,7 +543,7 @@
if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
}
- ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+ ps_decode_ip->u4_size = sizeof(ih264d_video_decode_ip_t);
ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
if (inBuffer) {
ps_decode_ip->u4_ts = tsMarker;
@@ -567,14 +572,14 @@
ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
}
ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
- ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+ ps_decode_op->u4_size = sizeof(ih264d_video_decode_op_t);
return true;
}
bool C2SoftAvcDec::getVuiParams() {
- ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
- ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+ ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+ ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -622,8 +627,8 @@
}
status_t C2SoftAvcDec::setFlushMode() {
- ivd_ctl_flush_ip_t s_set_flush_ip;
- ivd_ctl_flush_op_t s_set_flush_op;
+ ivd_ctl_flush_ip_t s_set_flush_ip = {};
+ ivd_ctl_flush_op_t s_set_flush_op = {};
s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -641,8 +646,8 @@
}
status_t C2SoftAvcDec::resetDecoder() {
- ivd_ctl_reset_ip_t s_reset_ip;
- ivd_ctl_reset_op_t s_reset_op;
+ ivd_ctl_reset_ip_t s_reset_ip = {};
+ ivd_ctl_reset_op_t s_reset_op = {};
s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -671,8 +676,8 @@
status_t C2SoftAvcDec::deleteDecoder() {
if (mDecHandle) {
- ivdext_delete_ip_t s_delete_ip;
- ivdext_delete_op_t s_delete_op;
+ ivdext_delete_ip_t s_delete_ip = {};
+ ivdext_delete_op_t s_delete_op = {};
s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -837,8 +842,10 @@
return;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
{
C2GraphicView wView = mOutBlock->map().get();
if (wView.error()) {
@@ -846,7 +853,7 @@
work->result = wView.error();
return;
}
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
inOffset + inPos, inSize - inPos, workIndex)) {
mSignalledError = true;
work->workletsProcessed = 1u;
@@ -862,26 +869,27 @@
WORD32 delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
WORD32 decodeTime;
GETTIME(&mTimeEnd, nullptr);
TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
- s_decode_op.u4_num_bytes_consumed);
+ ps_decode_op->u4_num_bytes_consumed);
}
- if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("allocation failure in decoder");
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
+ (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGV("resolution changed");
drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
resetDecoder();
@@ -890,16 +898,16 @@
/* Decode header and get new dimensions */
setParams(mStride, IVD_DECODE_HEADER);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
- ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+ ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
}
- if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
- mOutputDelay = s_decode_op.i4_reorder_depth;
+ if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+ mOutputDelay = ps_decode_op->i4_reorder_depth;
ALOGV("New Output delay %d ", mOutputDelay);
C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -917,16 +925,16 @@
return;
}
}
- if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+ if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
- mStride = ALIGN32(s_decode_op.u4_pic_wd);
+ mStride = ALIGN32(ps_decode_op->u4_pic_wd);
setParams(mStride, IVD_DECODE_FRAME);
}
- if (s_decode_op.u4_pic_wd != mWidth || s_decode_op.u4_pic_ht != mHeight) {
- mWidth = s_decode_op.u4_pic_wd;
- mHeight = s_decode_op.u4_pic_ht;
- CHECK_EQ(0u, s_decode_op.u4_output_present);
+ if (ps_decode_op->u4_pic_wd != mWidth || ps_decode_op->u4_pic_ht != mHeight) {
+ mWidth = ps_decode_op->u4_pic_wd;
+ mHeight = ps_decode_op->u4_pic_ht;
+ CHECK_EQ(0u, ps_decode_op->u4_output_present);
C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -945,11 +953,11 @@
}
}
(void)getVuiParams();
- hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
}
- inPos += s_decode_op.u4_num_bytes_consumed;
+ inPos += ps_decode_op->u4_num_bytes_consumed;
}
if (eos) {
drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -987,16 +995,18 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
mSignalledError = true;
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
} else {
fillEmptyWork(work);
break;
diff --git a/media/codec2/components/avc/C2SoftAvcDec.h b/media/codec2/components/avc/C2SoftAvcDec.h
index bd84de0..5c07d29 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.h
+++ b/media/codec2/components/avc/C2SoftAvcDec.h
@@ -25,8 +25,7 @@
#include <SimpleC2Component.h>
#include "ih264_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ih264d.h"
namespace android {
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index bf9e5ff..bab651f 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -1082,29 +1082,31 @@
/* Getting MemRecords Attributes */
{
- iv_fill_mem_rec_ip_t s_fill_mem_rec_ip;
- iv_fill_mem_rec_op_t s_fill_mem_rec_op;
+ ih264e_fill_mem_rec_ip_t s_ih264e_mem_rec_ip = {};
+ ih264e_fill_mem_rec_op_t s_ih264e_mem_rec_op = {};
+ iv_fill_mem_rec_ip_t *ps_fill_mem_rec_ip = &s_ih264e_mem_rec_ip.s_ive_ip;
+ iv_fill_mem_rec_op_t *ps_fill_mem_rec_op = &s_ih264e_mem_rec_op.s_ive_op;
- s_fill_mem_rec_ip.u4_size = sizeof(iv_fill_mem_rec_ip_t);
- s_fill_mem_rec_op.u4_size = sizeof(iv_fill_mem_rec_op_t);
+ ps_fill_mem_rec_ip->u4_size = sizeof(ih264e_fill_mem_rec_ip_t);
+ ps_fill_mem_rec_op->u4_size = sizeof(ih264e_fill_mem_rec_op_t);
- s_fill_mem_rec_ip.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
- s_fill_mem_rec_ip.ps_mem_rec = mMemRecords;
- s_fill_mem_rec_ip.u4_num_mem_rec = mNumMemRecords;
- s_fill_mem_rec_ip.u4_max_wd = width;
- s_fill_mem_rec_ip.u4_max_ht = height;
- s_fill_mem_rec_ip.u4_max_level = mAVCEncLevel;
- s_fill_mem_rec_ip.e_color_format = DEFAULT_INP_COLOR_FORMAT;
- s_fill_mem_rec_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
- s_fill_mem_rec_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
- s_fill_mem_rec_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
- s_fill_mem_rec_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+ ps_fill_mem_rec_ip->e_cmd = IV_CMD_FILL_NUM_MEM_REC;
+ ps_fill_mem_rec_ip->ps_mem_rec = mMemRecords;
+ ps_fill_mem_rec_ip->u4_num_mem_rec = mNumMemRecords;
+ ps_fill_mem_rec_ip->u4_max_wd = width;
+ ps_fill_mem_rec_ip->u4_max_ht = height;
+ ps_fill_mem_rec_ip->u4_max_level = mAVCEncLevel;
+ ps_fill_mem_rec_ip->e_color_format = DEFAULT_INP_COLOR_FORMAT;
+ ps_fill_mem_rec_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+ ps_fill_mem_rec_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+ ps_fill_mem_rec_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+ ps_fill_mem_rec_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
- status = ive_api_function(nullptr, &s_fill_mem_rec_ip, &s_fill_mem_rec_op);
+ status = ive_api_function(nullptr, &s_ih264e_mem_rec_ip, &s_ih264e_mem_rec_op);
if (status != IV_SUCCESS) {
ALOGE("Fill memory records failed = 0x%x\n",
- s_fill_mem_rec_op.u4_error_code);
+ ps_fill_mem_rec_op->u4_error_code);
return C2_CORRUPTED;
}
}
@@ -1133,48 +1135,51 @@
/* Codec Instance Creation */
{
- ive_init_ip_t s_init_ip;
- ive_init_op_t s_init_op;
+ ih264e_init_ip_t s_enc_ip = {};
+ ih264e_init_op_t s_enc_op = {};
+
+ ive_init_ip_t *ps_init_ip = &s_enc_ip.s_ive_ip;
+ ive_init_op_t *ps_init_op = &s_enc_op.s_ive_op;
mCodecCtx = (iv_obj_t *)mMemRecords[0].pv_base;
mCodecCtx->u4_size = sizeof(iv_obj_t);
mCodecCtx->pv_fxns = (void *)ive_api_function;
- s_init_ip.u4_size = sizeof(ive_init_ip_t);
- s_init_op.u4_size = sizeof(ive_init_op_t);
+ ps_init_ip->u4_size = sizeof(ih264e_init_ip_t);
+ ps_init_op->u4_size = sizeof(ih264e_init_op_t);
- s_init_ip.e_cmd = IV_CMD_INIT;
- s_init_ip.u4_num_mem_rec = mNumMemRecords;
- s_init_ip.ps_mem_rec = mMemRecords;
- s_init_ip.u4_max_wd = width;
- s_init_ip.u4_max_ht = height;
- s_init_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
- s_init_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
- s_init_ip.u4_max_level = mAVCEncLevel;
- s_init_ip.e_inp_color_fmt = mIvVideoColorFormat;
+ ps_init_ip->e_cmd = IV_CMD_INIT;
+ ps_init_ip->u4_num_mem_rec = mNumMemRecords;
+ ps_init_ip->ps_mem_rec = mMemRecords;
+ ps_init_ip->u4_max_wd = width;
+ ps_init_ip->u4_max_ht = height;
+ ps_init_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+ ps_init_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+ ps_init_ip->u4_max_level = mAVCEncLevel;
+ ps_init_ip->e_inp_color_fmt = mIvVideoColorFormat;
if (mReconEnable || mPSNREnable) {
- s_init_ip.u4_enable_recon = 1;
+ ps_init_ip->u4_enable_recon = 1;
} else {
- s_init_ip.u4_enable_recon = 0;
+ ps_init_ip->u4_enable_recon = 0;
}
- s_init_ip.e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
- s_init_ip.e_rc_mode = DEFAULT_RC_MODE;
- s_init_ip.u4_max_framerate = DEFAULT_MAX_FRAMERATE;
- s_init_ip.u4_max_bitrate = DEFAULT_MAX_BITRATE;
- s_init_ip.u4_num_bframes = mBframes;
- s_init_ip.e_content_type = IV_PROGRESSIVE;
- s_init_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
- s_init_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
- s_init_ip.e_slice_mode = mSliceMode;
- s_init_ip.u4_slice_param = mSliceParam;
- s_init_ip.e_arch = mArch;
- s_init_ip.e_soc = DEFAULT_SOC;
+ ps_init_ip->e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
+ ps_init_ip->e_rc_mode = DEFAULT_RC_MODE;
+ ps_init_ip->u4_max_framerate = DEFAULT_MAX_FRAMERATE;
+ ps_init_ip->u4_max_bitrate = DEFAULT_MAX_BITRATE;
+ ps_init_ip->u4_num_bframes = mBframes;
+ ps_init_ip->e_content_type = IV_PROGRESSIVE;
+ ps_init_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+ ps_init_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+ ps_init_ip->e_slice_mode = mSliceMode;
+ ps_init_ip->u4_slice_param = mSliceParam;
+ ps_init_ip->e_arch = mArch;
+ ps_init_ip->e_soc = DEFAULT_SOC;
- status = ive_api_function(mCodecCtx, &s_init_ip, &s_init_op);
+ status = ive_api_function(mCodecCtx, &s_enc_ip, &s_enc_op);
if (status != IV_SUCCESS) {
- ALOGE("Init encoder failed = 0x%x\n", s_init_op.u4_error_code);
+ ALOGE("Init encoder failed = 0x%x\n", ps_init_op->u4_error_code);
return C2_CORRUPTED;
}
}
@@ -1502,15 +1507,17 @@
}
// while (!mSawOutputEOS && !outQueue.empty()) {
c2_status_t error;
- ive_video_encode_ip_t s_encode_ip;
- ive_video_encode_op_t s_encode_op;
- memset(&s_encode_op, 0, sizeof(s_encode_op));
+ ih264e_video_encode_ip_t s_video_encode_ip = {};
+ ih264e_video_encode_op_t s_video_encode_op = {};
+ ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+ ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+ memset(ps_encode_op, 0, sizeof(*ps_encode_op));
if (!mSpsPpsHeaderReceived) {
constexpr uint32_t kHeaderLength = MIN_STREAM_SIZE;
uint8_t header[kHeaderLength];
error = setEncodeArgs(
- &s_encode_ip, &s_encode_op, nullptr, header, kHeaderLength, workIndex);
+ ps_encode_ip, ps_encode_op, nullptr, header, kHeaderLength, workIndex);
if (error != C2_OK) {
ALOGE("setEncodeArgs failed: %d", error);
mSignalledError = true;
@@ -1518,22 +1525,22 @@
work->workletsProcessed = 1u;
return;
}
- status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ status = ive_api_function(mCodecCtx, ps_encode_ip, ps_encode_op);
if (IV_SUCCESS != status) {
ALOGE("Encode header failed = 0x%x\n",
- s_encode_op.u4_error_code);
+ ps_encode_op->u4_error_code);
work->workletsProcessed = 1u;
return;
} else {
ALOGV("Bytes Generated in header %d\n",
- s_encode_op.s_out_buf.u4_bytes);
+ ps_encode_op->s_out_buf.u4_bytes);
}
mSpsPpsHeaderReceived = true;
std::unique_ptr<C2StreamInitDataInfo::output> csd =
- C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+ C2StreamInitDataInfo::output::AllocUnique(ps_encode_op->s_out_buf.u4_bytes, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -1541,7 +1548,7 @@
work->workletsProcessed = 1u;
return;
}
- memcpy(csd->m.value, header, s_encode_op.s_out_buf.u4_bytes);
+ memcpy(csd->m.value, header, ps_encode_op->s_out_buf.u4_bytes);
work->worklets.front()->output.configUpdate.push_back(std::move(csd));
DUMP_TO_FILE(
@@ -1635,7 +1642,7 @@
}
error = setEncodeArgs(
- &s_encode_ip, &s_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
+ ps_encode_ip, ps_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
if (error != C2_OK) {
ALOGE("setEncodeArgs failed : %d", error);
mSignalledError = true;
@@ -1652,17 +1659,17 @@
        /* Compute time elapsed between end of previous encode()
         * and start of current encode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
- status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ status = ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
if (IV_SUCCESS != status) {
- if ((s_encode_op.u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
+ if ((ps_encode_op->u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
// TODO: use IVE_CMD_CTL_GETBUFINFO for proper max input size?
mOutBufferSize *= 2;
mOutBlock.reset();
continue;
}
ALOGE("Encode Frame failed = 0x%x\n",
- s_encode_op.u4_error_code);
+ ps_encode_op->u4_error_code);
mSignalledError = true;
work->result = C2_CORRUPTED;
work->workletsProcessed = 1u;
@@ -1672,7 +1679,7 @@
// Hold input buffer reference
if (inputBuffer) {
- mBuffers[s_encode_ip.s_inp_buf.apv_bufs[0]] = inputBuffer;
+ mBuffers[ps_encode_ip->s_inp_buf.apv_bufs[0]] = inputBuffer;
}
GETTIME(&mTimeEnd, nullptr);
@@ -1680,9 +1687,9 @@
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
- s_encode_op.s_out_buf.u4_bytes);
+ ps_encode_op->s_out_buf.u4_bytes);
- void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+ void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
/* If encoder frees up an input buffer, mark it as free */
if (freed != nullptr) {
if (mBuffers.count(freed) == 0u) {
@@ -1694,17 +1701,17 @@
}
}
- if (s_encode_op.output_present) {
- if (!s_encode_op.s_out_buf.u4_bytes) {
+ if (ps_encode_op->output_present) {
+ if (!ps_encode_op->s_out_buf.u4_bytes) {
ALOGE("Error: Output present but bytes generated is zero");
mSignalledError = true;
work->result = C2_CORRUPTED;
work->workletsProcessed = 1u;
return;
}
- uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
- s_encode_op.u4_timestamp_low;
- finishWork(workId, work, &s_encode_op);
+ uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+ ps_encode_op->u4_timestamp_low;
+ finishWork(workId, work, ps_encode_op);
}
if (mSawInputEOS) {
drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -1744,9 +1751,11 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ive_video_encode_ip_t s_encode_ip;
- ive_video_encode_op_t s_encode_op;
- if (C2_OK != setEncodeArgs(&s_encode_ip, &s_encode_op, nullptr,
+ ih264e_video_encode_ip_t s_video_encode_ip = {};
+ ih264e_video_encode_op_t s_video_encode_op = {};
+ ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+ ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+ if (C2_OK != setEncodeArgs(ps_encode_ip, ps_encode_op, nullptr,
wView.base(), wView.capacity(), 0)) {
ALOGE("setEncodeArgs failed for drainInternal");
mSignalledError = true;
@@ -1754,9 +1763,9 @@
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void)ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ (void)ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
- void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+ void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
/* If encoder frees up an input buffer, mark it as free */
if (freed != nullptr) {
if (mBuffers.count(freed) == 0u) {
@@ -1768,10 +1777,10 @@
}
}
- if (s_encode_op.output_present) {
- uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
- s_encode_op.u4_timestamp_low;
- finishWork(workId, work, &s_encode_op);
+ if (ps_encode_op->output_present) {
+ uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+ ps_encode_op->u4_timestamp_low;
+ finishWork(workId, work, ps_encode_op);
} else {
if (work->workletsProcessed != 1u) {
work->worklets.front()->output.flags = work->input.flags;
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.h b/media/codec2/components/avc/C2SoftAvcEnc.h
index ee6d47a..673a282 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.h
+++ b/media/codec2/components/avc/C2SoftAvcEnc.h
@@ -24,8 +24,7 @@
#include <SimpleC2Component.h>
#include "ih264_typedefs.h"
-#include "iv2.h"
-#include "ive2.h"
+#include "ih264e.h"
namespace android {
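
A recurring pattern in the decoder/encoder hunks above: the generic ivd_*/ive_* argument structs are no longer declared directly; instead the codec-specific wrapper structs (ih264d_*, ih264e_*, ihevcd_cxa_*) are zero-initialized and the code works through pointers to the embedded base structs, writing the wrapper's size into u4_size so the library knows the extended layout was filled. A minimal sketch of that size-versioned struct convention, using hypothetical names rather than the real libavc/libhevc definitions:

```cpp
#include <cstdint>

// Generic input struct every build of the library understands.
struct base_ip_t {
    uint32_t u4_size;    // caller writes sizeof() of the struct it actually filled
    uint32_t u4_flags;
};

// Codec-specific extension: the base struct is the first member,
// so a pointer to it aliases the start of the wrapper.
struct ext_ip_t {
    base_ip_t s_base;
    uint32_t  u4_extra;  // only read when u4_size says it is present
};

// The API entry point only ever takes the base type.
void api_function(base_ip_t *ps_ip) {
    if (ps_ip->u4_size >= sizeof(ext_ip_t)) {
        ext_ip_t *ps_ext = reinterpret_cast<ext_ip_t *>(ps_ip);
        (void)ps_ext->u4_extra;  // extension fields are safe to touch
    }
}

int main() {
    ext_ip_t s_ip = {};                 // zero-init, mirroring the "= {}" in the patch
    base_ip_t *ps_ip = &s_ip.s_base;    // fill fields through the base pointer
    ps_ip->u4_size = sizeof(ext_ip_t);  // advertise the extended layout
    api_function(ps_ip);
    return 0;
}
```
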
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index fb3fbd0..dfad226 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -110,17 +110,20 @@
}
case kWhatStop: {
int32_t err = thiz->onStop();
+ thiz->mOutputBlockPool.reset();
Reply(msg, &err);
break;
}
case kWhatReset: {
thiz->onReset();
+ thiz->mOutputBlockPool.reset();
mRunning = false;
Reply(msg);
break;
}
case kWhatRelease: {
thiz->onRelease();
+ thiz->mOutputBlockPool.reset();
mRunning = false;
Reply(msg);
break;
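
The three added mOutputBlockPool.reset() calls make SimpleC2Component drop its output block pool on its own handler thread as part of stop/reset/release, rather than whenever the last stray reference happens to go away. A rough sketch of the idea, with stand-in types instead of the real C2 classes:

```cpp
#include <memory>

struct BlockPool {
    ~BlockPool() { /* returns any pooled buffers to the allocator */ }
};

class Component {
  public:
    // Runs on the component's message handler thread.
    void onStopMessage() {
        onStop();                   // codec-specific teardown first
        mOutputBlockPool.reset();   // then release pooled buffers immediately
    }

  private:
    void onStop() {}
    std::shared_ptr<BlockPool> mOutputBlockPool;
};
```
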
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index a374dfa..6bcf3a2 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -26,7 +26,6 @@
#include <SimpleC2Interface.h>
#include "C2SoftHevcDec.h"
-#include "ihevcd_cxa.h"
namespace android {
@@ -380,12 +379,14 @@
}
while (true) {
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
- setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (0 == s_decode_op.u4_output_present) {
+ setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ if (0 == ps_decode_op->u4_output_present) {
resetPlugin();
break;
}
@@ -400,8 +401,8 @@
}
status_t C2SoftHevcDec::createDecoder() {
- ivdext_create_ip_t s_create_ip;
- ivdext_create_op_t s_create_op;
+ ivdext_create_ip_t s_create_ip = {};
+ ivdext_create_op_t s_create_op = {};
s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
@@ -427,8 +428,8 @@
}
status_t C2SoftHevcDec::setNumCores() {
- ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
- ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+ ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+ ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -447,22 +448,26 @@
}
status_t C2SoftHevcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
- ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
- ivd_ctl_set_config_op_t s_set_dyn_params_op;
+ ihevcd_cxa_ctl_set_config_ip_t s_hevcd_set_dyn_params_ip = {};
+ ihevcd_cxa_ctl_set_config_op_t s_hevcd_set_dyn_params_op = {};
+ ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+ &s_hevcd_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+ ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+ &s_hevcd_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
- s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
- s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
- s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
- s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
- s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
- s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
- s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
- s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+ ps_set_dyn_params_ip->u4_size = sizeof(ihevcd_cxa_ctl_set_config_ip_t);
+ ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+ ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+ ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+ ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+ ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+ ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+ ps_set_dyn_params_op->u4_size = sizeof(ihevcd_cxa_ctl_set_config_op_t);
IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
- &s_set_dyn_params_ip,
- &s_set_dyn_params_op);
+ ps_set_dyn_params_ip,
+ ps_set_dyn_params_op);
if (status != IV_SUCCESS) {
- ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+ ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
return UNKNOWN_ERROR;
}
@@ -470,8 +475,8 @@
}
status_t C2SoftHevcDec::getVersion() {
- ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
- ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+ ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+ ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
UWORD8 au1_buf[512];
s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -529,7 +534,7 @@
if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
}
- ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+ ps_decode_ip->u4_size = sizeof(ihevcd_cxa_video_decode_ip_t);
ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
if (inBuffer) {
ps_decode_ip->u4_ts = tsMarker;
@@ -558,15 +563,15 @@
ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
}
ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
- ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+ ps_decode_op->u4_size = sizeof(ihevcd_cxa_video_decode_op_t);
ps_decode_op->u4_output_present = 0;
return true;
}
bool C2SoftHevcDec::getVuiParams() {
- ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
- ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+ ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+ ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -614,8 +619,8 @@
}
status_t C2SoftHevcDec::setFlushMode() {
- ivd_ctl_flush_ip_t s_set_flush_ip;
- ivd_ctl_flush_op_t s_set_flush_op;
+ ivd_ctl_flush_ip_t s_set_flush_ip = {};
+ ivd_ctl_flush_op_t s_set_flush_op = {};
s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -633,8 +638,8 @@
}
status_t C2SoftHevcDec::resetDecoder() {
- ivd_ctl_reset_ip_t s_reset_ip;
- ivd_ctl_reset_op_t s_reset_op;
+ ivd_ctl_reset_ip_t s_reset_ip = {};
+ ivd_ctl_reset_op_t s_reset_op = {};
s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -662,8 +667,8 @@
status_t C2SoftHevcDec::deleteDecoder() {
if (mDecHandle) {
- ivdext_delete_ip_t s_delete_ip;
- ivdext_delete_op_t s_delete_op;
+ ivdext_delete_ip_t s_delete_ip = {};
+ ivdext_delete_op_t s_delete_op = {};
s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -835,9 +840,11 @@
work->result = wView.error();
return;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
inOffset + inPos, inSize - inPos, workIndex)) {
mSignalledError = true;
work->workletsProcessed = 1u;
@@ -852,26 +859,26 @@
WORD32 delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
WORD32 decodeTime;
GETTIME(&mTimeEnd, nullptr);
TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
- s_decode_op.u4_num_bytes_consumed);
- if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ ps_decode_op->u4_num_bytes_consumed);
+ if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("allocation failure in decoder");
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
} else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
- (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGV("resolution changed");
drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
resetDecoder();
@@ -880,16 +887,16 @@
/* Decode header and get new dimensions */
setParams(mStride, IVD_DECODE_HEADER);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
- ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+ ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
}
- if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
- mOutputDelay = s_decode_op.i4_reorder_depth;
+ if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+ mOutputDelay = ps_decode_op->i4_reorder_depth;
ALOGV("New Output delay %d ", mOutputDelay);
C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -907,15 +914,15 @@
return;
}
}
- if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+ if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
- setParams(ALIGN32(s_decode_op.u4_pic_wd), IVD_DECODE_FRAME);
+ setParams(ALIGN32(ps_decode_op->u4_pic_wd), IVD_DECODE_FRAME);
}
- if (s_decode_op.u4_pic_wd != mWidth || s_decode_op.u4_pic_ht != mHeight) {
- mWidth = s_decode_op.u4_pic_wd;
- mHeight = s_decode_op.u4_pic_ht;
- CHECK_EQ(0u, s_decode_op.u4_output_present);
+ if (ps_decode_op->u4_pic_wd != mWidth || ps_decode_op->u4_pic_ht != mHeight) {
+ mWidth = ps_decode_op->u4_pic_wd;
+ mHeight = ps_decode_op->u4_pic_ht;
+ CHECK_EQ(0u, ps_decode_op->u4_output_present);
C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -935,15 +942,15 @@
}
}
(void) getVuiParams();
- hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
}
- if (0 == s_decode_op.u4_num_bytes_consumed) {
+ if (0 == ps_decode_op->u4_num_bytes_consumed) {
ALOGD("Bytes consumed is zero. Ignoring remaining bytes");
break;
}
- inPos += s_decode_op.u4_num_bytes_consumed;
+ inPos += ps_decode_op->u4_num_bytes_consumed;
if (hasPicture && (inSize - inPos)) {
ALOGD("decoded frame in current access nal, ignoring further trailing bytes %d",
(int)inSize - (int)inPos);
@@ -985,16 +992,18 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
mSignalledError = true;
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
} else {
fillEmptyWork(work);
break;
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.h b/media/codec2/components/hevc/C2SoftHevcDec.h
index 600d7c1..b9b0a48 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.h
+++ b/media/codec2/components/hevc/C2SoftHevcDec.h
@@ -23,8 +23,7 @@
#include <SimpleC2Component.h>
#include "ihevc_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ihevcd_cxa.h"
namespace android {
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 31ca705..a03d4e2 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -87,7 +87,9 @@
.withFields({C2F(mPcmEncodingInfo, value).oneOf({
C2Config::PCM_16,
C2Config::PCM_8,
- C2Config::PCM_FLOAT})
+ C2Config::PCM_FLOAT,
+ C2Config::PCM_24,
+ C2Config::PCM_32})
})
.withSetter((Setter<decltype(*mPcmEncodingInfo)>::StrictValueWithNoDeps))
.build());
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index f8aa672..8ee5f33 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -394,6 +394,7 @@
_C2_PL_VP9_BASE = 0x7000,
_C2_PL_DV_BASE = 0x8000,
_C2_PL_AV1_BASE = 0x9000,
+ _C2_PL_VP8_BASE = 0xA000,
C2_PROFILE_LEVEL_VENDOR_START = 0x70000000,
};
@@ -547,6 +548,12 @@
PROFILE_AV1_0 = _C2_PL_AV1_BASE, ///< AV1 Profile 0 (4:2:0, 8 to 10 bit)
PROFILE_AV1_1, ///< AV1 Profile 1 (8 to 10 bit)
PROFILE_AV1_2, ///< AV1 Profile 2 (8 to 12 bit)
+
+ // VP8 profiles
+ PROFILE_VP8_0 = _C2_PL_VP8_BASE, ///< VP8 Profile 0
+ PROFILE_VP8_1, ///< VP8 Profile 1
+ PROFILE_VP8_2, ///< VP8 Profile 2
+ PROFILE_VP8_3, ///< VP8 Profile 3
};
enum C2Config::level_t : uint32_t {
@@ -1901,7 +1908,9 @@
C2ENUM(C2Config::pcm_encoding_t, uint32_t,
PCM_16,
PCM_8,
- PCM_FLOAT
+ PCM_FLOAT,
+ PCM_24,
+ PCM_32
)
typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2Config::pcm_encoding_t>, kParamIndexPcmEncoding>
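
With PCM_24 and PCM_32 added to pcm_encoding_t (and advertised by the raw decoder above), a client can request them through the existing C2StreamPcmEncodingInfo parameter. A hedged sketch, assuming a started android::Codec2Client::Component and the usual Codec2 client headers; everything other than the C2 types is illustrative:

```cpp
#include <memory>
#include <vector>

#include <C2Config.h>
#include <codec2/hidl/client.h>

// Ask a decoder to emit 24-bit PCM on output stream 0.
bool requestPcm24(const std::shared_ptr<android::Codec2Client::Component> &comp) {
    C2StreamPcmEncodingInfo::output pcmEncoding(0u /* stream */, C2Config::PCM_24);
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    c2_status_t err = comp->config({&pcmEncoding}, C2_MAY_BLOCK, &failures);
    // If the component rejects PCM_24 it keeps its previous encoding (e.g. PCM_16).
    return err == C2_OK && failures.empty();
}
```
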
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
new file mode 120000
index 0000000..136279c
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/.clang-format
@@ -0,0 +1 @@
+../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 3a47ae9..efc5813 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -33,14 +33,40 @@
using android::C2AllocatorIon;
#include "media_c2_hidl_test_common.h"
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kDecodeTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+struct CompToURL {
+ std::string mime;
+ std::string mURL;
+ std::string info;
+};
-// Resource directory
-static std::string sResourceDir = "";
+std::vector<CompToURL> kCompToURL = {
+ {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.info"},
+ {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac",
+ "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"},
+ {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.info"},
+ {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3",
+ "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"},
+ {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.info"},
+ {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb",
+ "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"},
+ {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.info"},
+ {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb",
+ "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"},
+ {"vorbis", "bbb_vorbis_stereo_128kbps_48000hz.vorbis",
+ "bbb_vorbis_stereo_128kbps_48000hz.info"},
+ {"opus", "bbb_opus_stereo_128kbps_48000hz.opus", "bbb_opus_stereo_128kbps_48000hz.info"},
+ {"g711-alaw", "bbb_g711alaw_1ch_8khz.raw", "bbb_g711alaw_1ch_8khz.info"},
+ {"g711-mlaw", "bbb_g711mulaw_1ch_8khz.raw", "bbb_g711mulaw_1ch_8khz.info"},
+ {"gsm", "bbb_gsm_1ch_8khz_13kbps.raw", "bbb_gsm_1ch_8khz_13kbps.info"},
+ {"raw", "bbb_raw_1ch_8khz_s32le.raw", "bbb_raw_1ch_8khz_s32le.info"},
+ {"flac", "bbb_flac_stereo_680kbps_48000hz.flac", "bbb_flac_stereo_680kbps_48000hz.info"},
+};
class LinearBuffer : public C2Buffer {
public:
@@ -76,33 +102,17 @@
mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, mBlockPoolId++);
ASSERT_NE(mLinearPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
- const StringToName kStringToName[] = {
- {"xaac", xaac}, {"mp3", mp3}, {"amrnb", amrnb},
- {"amrwb", amrwb}, {"aac", aac}, {"vorbis", vorbis},
- {"opus", opus}, {"pcm", pcm}, {"g711.alaw", g711alaw},
- {"g711.mlaw", g711mlaw}, {"gsm", gsm}, {"raw", raw},
- {"flac", flac},
- };
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE}, C2_DONT_BLOCK, &queried);
+ ASSERT_GT(queried.size(), 0);
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::input*)queried[0].get())->m.value;
+
mEos = false;
mFramesReceived = 0;
mTimestampUs = 0u;
mWorkResult = C2_OK;
mTimestampDevTest = false;
- if (mCompName == unknown_comp) mDisableTest = true;
if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
}
@@ -119,6 +129,8 @@
virtual void validateTimestampList(int32_t* bitStreamInfo);
+ void GetURLForComponent(char* mURL, char* info, size_t streamIndex = 0);
+
struct outputMetaData {
uint64_t timestampUs;
uint32_t rangeLength;
@@ -158,29 +170,12 @@
}
}
- enum standardComp {
- xaac,
- mp3,
- amrnb,
- amrwb,
- aac,
- vorbis,
- opus,
- pcm,
- g711alaw,
- g711mlaw,
- gsm,
- raw,
- flac,
- unknown_comp,
- };
-
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
bool mEos;
bool mDisableTest;
bool mTimestampDevTest;
- standardComp mCompName;
int32_t mWorkResult;
uint64_t mTimestampUs;
@@ -207,9 +202,8 @@
}
};
-class Codec2AudioDecHidlTest
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioDecHidlTest : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -217,7 +211,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2AudioDecHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
    // Validate it's a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -244,13 +238,6 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2AudioDecHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
@@ -271,7 +258,7 @@
// parsing the header of elementary stream. Client needs to collect this
// information and reconfigure
void getInputChannelInfo(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2AudioDecHidlTest::standardComp compName, int32_t* bitStreamInfo) {
+ std::string mime, int32_t* bitStreamInfo) {
// query nSampleRate and nChannels
std::initializer_list<C2Param::Index> indices{
C2StreamSampleRateInfo::output::PARAM_TYPE,
@@ -288,89 +275,29 @@
C2Param* param = inParams[i].get();
bitStreamInfo[i] = *(int32_t*)((uint8_t*)param + offset);
}
- switch (compName) {
- case Codec2AudioDecHidlTest::amrnb: {
- ASSERT_EQ(bitStreamInfo[0], 8000);
- ASSERT_EQ(bitStreamInfo[1], 1);
- break;
- }
- case Codec2AudioDecHidlTest::amrwb: {
- ASSERT_EQ(bitStreamInfo[0], 16000);
- ASSERT_EQ(bitStreamInfo[1], 1);
- break;
- }
- case Codec2AudioDecHidlTest::gsm: {
- ASSERT_EQ(bitStreamInfo[0], 8000);
- break;
- }
- default:
- break;
+ if (mime.find("3gpp") != std::string::npos) {
+ ASSERT_EQ(bitStreamInfo[0], 8000);
+ ASSERT_EQ(bitStreamInfo[1], 1);
+ } else if (mime.find("amr-wb") != std::string::npos) {
+ ASSERT_EQ(bitStreamInfo[0], 16000);
+ ASSERT_EQ(bitStreamInfo[1], 1);
+ } else if (mime.find("gsm") != std::string::npos) {
+ ASSERT_EQ(bitStreamInfo[0], 8000);
}
}
}
-// number of elementary streams per component
-#define STREAM_COUNT 2
-
// LookUpTable of clips and metadata for component testing
-void GetURLForComponent(Codec2AudioDecHidlTest::standardComp comp, char* mURL, char* info,
- size_t streamIndex = 0) {
- struct CompToURL {
- Codec2AudioDecHidlTest::standardComp comp;
- const char mURL[STREAM_COUNT][512];
- const char info[STREAM_COUNT][512];
- };
- ASSERT_TRUE(streamIndex < STREAM_COUNT);
-
- static const CompToURL kCompToURL[] = {
- {Codec2AudioDecHidlTest::standardComp::xaac,
- {"bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.aac"},
- {"bbb_aac_stereo_128kbps_48000hz.info",
- "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"}},
- {Codec2AudioDecHidlTest::standardComp::mp3,
- {"bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.mp3"},
- {"bbb_mp3_stereo_192kbps_48000hz.info",
- "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"}},
- {Codec2AudioDecHidlTest::standardComp::aac,
- {"bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.aac"},
- {"bbb_aac_stereo_128kbps_48000hz.info",
- "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"}},
- {Codec2AudioDecHidlTest::standardComp::amrnb,
- {"sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.amrnb"},
- {"sine_amrnb_1ch_12kbps_8000hz.info",
- "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"}},
- {Codec2AudioDecHidlTest::standardComp::amrwb,
- {"bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb"},
- {"bbb_amrwb_1ch_14kbps_16000hz.info",
- "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"}},
- {Codec2AudioDecHidlTest::standardComp::vorbis,
- {"bbb_vorbis_stereo_128kbps_48000hz.vorbis", ""},
- {"bbb_vorbis_stereo_128kbps_48000hz.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::opus,
- {"bbb_opus_stereo_128kbps_48000hz.opus", ""},
- {"bbb_opus_stereo_128kbps_48000hz.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::g711alaw,
- {"bbb_g711alaw_1ch_8khz.raw", ""},
- {"bbb_g711alaw_1ch_8khz.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::g711mlaw,
- {"bbb_g711mulaw_1ch_8khz.raw", ""},
- {"bbb_g711mulaw_1ch_8khz.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::gsm,
- {"bbb_gsm_1ch_8khz_13kbps.raw", ""},
- {"bbb_gsm_1ch_8khz_13kbps.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::raw,
- {"bbb_raw_1ch_8khz_s32le.raw", ""},
- {"bbb_raw_1ch_8khz_s32le.info", ""}},
- {Codec2AudioDecHidlTest::standardComp::flac,
- {"bbb_flac_stereo_680kbps_48000hz.flac", ""},
- {"bbb_flac_stereo_680kbps_48000hz.info", ""}},
- };
-
- for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
- if (kCompToURL[i].comp == comp) {
- strcat(mURL, kCompToURL[i].mURL[streamIndex]);
- strcat(info, kCompToURL[i].info[streamIndex]);
- return;
+void Codec2AudioDecHidlTestBase::GetURLForComponent(char* mURL, char* info, size_t streamIndex) {
+ int streamCount = 0;
+ for (size_t i = 0; i < kCompToURL.size(); ++i) {
+ if (mMime.find(kCompToURL[i].mime) != std::string::npos) {
+ if (streamCount == streamIndex) {
+ strcat(mURL, kCompToURL[i].mURL.c_str());
+ strcat(info, kCompToURL[i].info.c_str());
+ return;
+ }
+ streamCount++;
}
}
}
@@ -461,7 +388,7 @@
void Codec2AudioDecHidlTestBase::validateTimestampList(int32_t* bitStreamInfo) {
uint32_t samplesReceived = 0;
// Update SampleRate and ChannelCount
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
int32_t nSampleRate = bitStreamInfo[0];
int32_t nChannels = bitStreamInfo[1];
std::list<uint64_t>::iterator itIn = mTimestampUslist.begin();
@@ -486,7 +413,7 @@
TEST_P(Codec2AudioDecHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid audio component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
@@ -495,15 +422,13 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ASSERT_EQ(mComponent->start(), C2_OK);
int32_t bitStreamInfo[2] = {0};
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
setupConfigParam(mComponent, bitStreamInfo);
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-class Codec2AudioDecDecodeTest
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioDecDecodeTest : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<DecodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -514,16 +439,15 @@
description("Decodes input file");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
- uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
- ;
- bool signalEOS = !std::get<3>(GetParam()).compare("true");
+ uint32_t streamIndex = std::get<2>(GetParam());
+ bool signalEOS = std::get<3>(GetParam());
mTimestampDevTest = true;
char mURL[512], info[512];
android::Vector<FrameInfo> Info;
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info, streamIndex);
+ GetURLForComponent(mURL, info, streamIndex);
if (!strcmp(mURL, sResourceDir.c_str())) {
ALOGV("EMPTY INPUT sResourceDir.c_str() %s mURL %s ", sResourceDir.c_str(), mURL);
return;
@@ -536,11 +460,11 @@
mFramesReceived = 0;
mTimestampUs = 0;
int32_t bitStreamInfo[2] = {0};
- if (mCompName == raw) {
+ if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
} else {
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
if (!setupConfigParam(mComponent, bitStreamInfo)) {
std::cout << "[ WARN ] Test Skipped \n";
@@ -591,17 +515,17 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
int32_t numCsds = populateInfoVector(info, &Info, mTimestampDevTest, &mTimestampUslist);
ASSERT_GE(numCsds, 0) << "Error in parsing input info file: " << info;
int32_t bitStreamInfo[2] = {0};
- if (mCompName == raw) {
+ if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
} else {
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
if (!setupConfigParam(mComponent, bitStreamInfo)) {
std::cout << "[ WARN ] Test Skipped \n";
@@ -683,17 +607,17 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
int32_t numCsds = populateInfoVector(info, &Info, mTimestampDevTest, &mTimestampUslist);
ASSERT_GE(numCsds, 0) << "Error in parsing input info file: " << info;
int32_t bitStreamInfo[2] = {0};
- if (mCompName == raw) {
+ if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
} else {
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
if (!setupConfigParam(mComponent, bitStreamInfo)) {
std::cout << "[ WARN ] Test Skipped \n";
@@ -768,7 +692,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
eleInfo.open(info);
ASSERT_EQ(eleInfo.is_open(), true) << mURL << " - file not found";
@@ -798,11 +722,11 @@
}
eleInfo.close();
int32_t bitStreamInfo[2] = {0};
- if (mCompName == raw) {
+ if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
} else {
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
if (!setupConfigParam(mComponent, bitStreamInfo)) {
std::cout << "[ WARN ] Test Skipped \n";
@@ -833,9 +757,8 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-class Codec2AudioDecCsdInputTests
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2AudioDecCsdInputTests : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<CsdFlushTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -853,7 +776,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
if (!strcmp(mURL, sResourceDir.c_str())) {
ALOGV("EMPTY INPUT sResourceDir.c_str() %s mURL %s ", sResourceDir.c_str(), mURL);
return;
@@ -864,11 +787,11 @@
ASSERT_GE(numCsds, 0) << "Error in parsing input info file";
int32_t bitStreamInfo[2] = {0};
- if (mCompName == raw) {
+ if (mMime.find("raw") != std::string::npos) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
} else {
- ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ ASSERT_NO_FATAL_FAILURE(getInputChannelInfo(mComponent, mMime, bitStreamInfo));
}
if (!setupConfigParam(mComponent, bitStreamInfo)) {
std::cout << "[ WARN ] Test Skipped \n";
@@ -881,7 +804,7 @@
ASSERT_EQ(eleStream.is_open(), true);
bool signalEOS = false;
- bool flushCsd = !std::get<2>(GetParam()).compare("true");
+ bool flushCsd = std::get<2>(GetParam());
ALOGV("sending %d csd data ", numCsds);
int framesToDecode = numCsds;
ASSERT_NO_FATAL_FAILURE(decodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
@@ -937,44 +860,36 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioDecHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// DecodeTest with StreamIndex and EOS / No EOS
INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2AudioDecDecodeTest,
testing::ValuesIn(kDecodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2AudioDecCsdInputTests,
testing::ValuesIn(kCsdFlushTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
int main(int argc, char** argv) {
+ parseArgs(argc, argv);
kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_DECODER);
for (auto params : kTestParameters) {
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
- }
-
- // Set the resource directory based on command line args.
- // Test will fail to set up if the argument is not set.
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-P") == 0 && i < argc - 1) {
- sResourceDir = argv[i + 1];
- break;
- }
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
index e3a4f68..562c77f 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
@@ -35,11 +35,9 @@
#include "media_c2_hidl_test_common.h"
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kEncodeTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, int32_t>;
-// Resource directory
-static std::string sResourceDir = "";
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
class LinearBuffer : public C2Buffer {
public:
@@ -75,30 +73,17 @@
mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, mBlockPoolId++);
ASSERT_NE(mLinearPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
- const StringToName kStringToName[] = {
- {"aac", aac}, {"flac", flac}, {"opus", opus}, {"amrnb", amrnb}, {"amrwb", amrwb},
- };
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+ &queried);
+ ASSERT_GT(queried.size(), 0);
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
mEos = false;
mCsd = false;
mFramesReceived = 0;
mWorkResult = C2_OK;
mOutputSize = 0u;
- if (mCompName == unknown_comp) mDisableTest = true;
- if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
getInputMaxBufSize();
}
@@ -113,6 +98,8 @@
// Get the test parameters from GetParam call.
virtual void getParams() {}
+ void GetURLForComponent(char* mURL);
+
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
@@ -133,21 +120,13 @@
}
}
}
- enum standardComp {
- aac,
- flac,
- opus,
- amrnb,
- amrwb,
- unknown_comp,
- };
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
bool mEos;
bool mCsd;
bool mDisableTest;
- standardComp mCompName;
int32_t mWorkResult;
uint32_t mFramesReceived;
@@ -192,9 +171,8 @@
}
};
-class Codec2AudioEncHidlTest
- : public Codec2AudioEncHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioEncHidlTest : public Codec2AudioEncHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -202,7 +180,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2AudioEncHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
    // Validate it's a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -229,13 +207,6 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2AudioEncHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
@@ -253,56 +224,48 @@
}
// Get config params for a component
-bool getConfigParams(Codec2AudioEncHidlTest::standardComp compName, int32_t* nChannels,
- int32_t* nSampleRate, int32_t* samplesPerFrame) {
- switch (compName) {
- case Codec2AudioEncHidlTest::aac:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 1024;
- break;
- case Codec2AudioEncHidlTest::flac:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 1152;
- break;
- case Codec2AudioEncHidlTest::opus:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 960;
- break;
- case Codec2AudioEncHidlTest::amrnb:
- *nChannels = 1;
- *nSampleRate = 8000;
- *samplesPerFrame = 160;
- break;
- case Codec2AudioEncHidlTest::amrwb:
- *nChannels = 1;
- *nSampleRate = 16000;
- *samplesPerFrame = 160;
- break;
- default:
- return false;
- }
+bool getConfigParams(std::string mime, int32_t* nChannels, int32_t* nSampleRate,
+ int32_t* samplesPerFrame) {
+ if (mime.find("mp4a-latm") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 1024;
+ } else if (mime.find("flac") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 1152;
+ } else if (mime.find("opus") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 960;
+ } else if (mime.find("3gpp") != std::string::npos) {
+ *nChannels = 1;
+ *nSampleRate = 8000;
+ *samplesPerFrame = 160;
+ } else if (mime.find("amr-wb") != std::string::npos) {
+ *nChannels = 1;
+ *nSampleRate = 16000;
+ *samplesPerFrame = 160;
+ } else
+ return false;
+
return true;
}
// LookUpTable of clips and metadata for component testing
-void GetURLForComponent(Codec2AudioEncHidlTest::standardComp comp, char* mURL) {
+void Codec2AudioEncHidlTestBase::GetURLForComponent(char* mURL) {
struct CompToURL {
- Codec2AudioEncHidlTest::standardComp comp;
+ std::string mime;
const char* mURL;
};
static const CompToURL kCompToURL[] = {
- {Codec2AudioEncHidlTest::standardComp::aac, "bbb_raw_2ch_48khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::amrnb, "bbb_raw_1ch_8khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::amrwb, "bbb_raw_1ch_16khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::flac, "bbb_raw_2ch_48khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::opus, "bbb_raw_2ch_48khz_s16le.raw"},
+ {"mp4a-latm", "bbb_raw_2ch_48khz_s16le.raw"}, {"3gpp", "bbb_raw_1ch_8khz_s16le.raw"},
+ {"amr-wb", "bbb_raw_1ch_16khz_s16le.raw"}, {"flac", "bbb_raw_2ch_48khz_s16le.raw"},
+ {"opus", "bbb_raw_2ch_48khz_s16le.raw"},
};
for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
- if (kCompToURL[i].comp == comp) {
+ if (mMime.find(kCompToURL[i].mime) != std::string::npos) {
strcat(mURL, kCompToURL[i].mURL);
return;
}
@@ -395,14 +358,12 @@
TEST_P(Codec2AudioEncHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid audio component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
-class Codec2AudioEncEncodeTest
- : public Codec2AudioEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioEncEncodeTest : public Codec2AudioEncHidlTestBase,
+ public ::testing::WithParamInterface<EncodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -414,17 +375,17 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ GetURLForComponent(mURL);
+ bool signalEOS = std::get<2>(GetParam());
// Ratio w.r.t to mInputMaxBufSize
- int32_t inputMaxBufRatio = std::stoi(std::get<3>(GetParam()));
+ int32_t inputMaxBufRatio = std::get<3>(GetParam());
int32_t nChannels;
int32_t nSampleRate;
int32_t samplesPerFrame;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -464,11 +425,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
- if (!mCsd) {
- ALOGE("CSD buffer missing");
- ASSERT_TRUE(false);
- }
+ if ((mMime.find("flac") != std::string::npos) || (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
+ ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_EQ(mEos, true);
ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -522,15 +481,15 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
mFlushedIndices.clear();
int32_t nChannels;
int32_t nSampleRate;
int32_t samplesPerFrame;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -587,7 +546,7 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
std::ifstream eleStream;
eleStream.open(mURL, std::ifstream::binary);
@@ -600,8 +559,8 @@
int32_t numFrames = 16;
int32_t maxChannelCount = 8;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -611,7 +570,7 @@
// Looping through the maximum number of channel count supported by encoder
for (nChannels = 1; nChannels < maxChannelCount; nChannels++) {
- ALOGV("Configuring %u encoder for channel count = %d", mCompName, nChannels);
+ ALOGV("Configuring encoder %s for channel count = %d", mComponentName.c_str(), nChannels);
if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
@@ -668,7 +627,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+ if ((mMime.find("flac") != std::string::npos) ||
+ (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_TRUE(mEos);
@@ -687,7 +648,7 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
std::ifstream eleStream;
eleStream.open(mURL, std::ifstream::binary);
@@ -699,8 +660,8 @@
int32_t nChannels;
int32_t numFrames = 16;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -711,7 +672,7 @@
uint32_t prevSampleRate = 0u;
for (int32_t nSampleRate : sampleRateValues) {
- ALOGV("Configuring %u encoder for SampleRate = %d", mCompName, nSampleRate);
+ ALOGV("Configuring encoder %s for SampleRate = %d", mComponentName.c_str(), nSampleRate);
if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
@@ -772,7 +733,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+ if ((mMime.find("flac") != std::string::npos) ||
+ (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_TRUE(mEos);
@@ -786,36 +749,28 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioEncHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// EncodeTest with EOS / No EOS and inputMaxBufRatio
// inputMaxBufRatio is ratio w.r.t. mInputMaxBufSize
INSTANTIATE_TEST_SUITE_P(EncodeTest, Codec2AudioEncEncodeTest,
testing::ValuesIn(kEncodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
int main(int argc, char** argv) {
+ parseArgs(argc, argv);
kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_ENCODER);
for (auto params : kTestParameters) {
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "1"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false, 1));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "2"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false, 2));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "1"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true, 1));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "2"));
- }
-
- // Set the resource directory based on command line args.
- // Test will fail to set up if the argument is not set.
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-P") == 0 && i < argc - 1) {
- sResourceDir = argv[i + 1];
- break;
- }
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true, 2));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 0251ec2..1f1681d 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -22,6 +22,48 @@
#include <android/hardware/media/c2/1.0/IComponentStore.h>
+std::string sResourceDir = "";
+
+std::string sComponentNamePrefix = "";
+
+static constexpr struct option kArgOptions[] = {
+ {"res", required_argument, 0, 'P'},
+ {"prefix", required_argument, 0, 'p'},
+ {"help", required_argument, 0, 'h'},
+ {nullptr, 0, nullptr, 0},
+};
+
+void printUsage(char* me) {
+ std::cerr << "VTS tests to test codec2 components \n";
+ std::cerr << "Usage: " << me << " [options] \n";
+ std::cerr << "\t -P, --res: Mandatory path to a folder that contains test resources \n";
+ std::cerr << "\t -p, --prefix: Optional prefix to select component/s to be tested \n";
+ std::cerr << "\t All codecs are tested by default \n";
+ std::cerr << "\t Eg: c2.android - test codecs starting with c2.android \n";
+ std::cerr << "\t Eg: c2.android.aac.decoder - test a specific codec \n";
+ std::cerr << "\t -h, --help: Print usage \n";
+}
+
+void parseArgs(int argc, char** argv) {
+ int arg;
+ int option_index;
+ while ((arg = getopt_long(argc, argv, ":P:p:h", kArgOptions, &option_index)) != -1) {
+ switch (arg) {
+ case 'P':
+ sResourceDir = optarg;
+ break;
+ case 'p':
+ sComponentNamePrefix = optarg;
+ break;
+ case 'h':
+ printUsage(argv[0]);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
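For reference, a minimal standalone sketch of the option handling added above. The test binary name and resource path in the comment are illustrative only; they are not taken from this patch.

    // Example invocation (names and paths are made up):
    //     VtsHalMediaC2V1_0TargetAudioEncTest -P /data/local/tmp/media/ -p c2.android
    // after which sResourceDir == "/data/local/tmp/media/" and
    // sComponentNamePrefix == "c2.android".
    #include <getopt.h>
    #include <iostream>
    #include <string>

    int main(int argc, char** argv) {
        std::string resDir, prefix;  // stand-ins for sResourceDir / sComponentNamePrefix
        static const struct option kOpts[] = {
                {"res", required_argument, nullptr, 'P'},
                {"prefix", required_argument, nullptr, 'p'},
                {nullptr, 0, nullptr, 0},
        };
        int c;
        while ((c = getopt_long(argc, argv, ":P:p:", kOpts, nullptr)) != -1) {
            if (c == 'P') resDir = optarg;
            else if (c == 'p') prefix = optarg;
        }
        std::cout << "res=" << resDir << " prefix=" << prefix << "\n";
        return 0;
    }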
// Test the codecs for NullBuffer, Empty Input Buffer with(out) flags set
void testInputBuffer(const std::shared_ptr<android::Codec2Client::Component>& component,
std::mutex& queueLock, std::list<std::unique_ptr<C2Work>>& workQueue,
@@ -92,8 +134,7 @@
for (size_t i = 0; i < updates.size(); ++i) {
C2Param* param = updates[i].get();
if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
- C2StreamInitDataInfo::output* csdBuffer =
- (C2StreamInitDataInfo::output*)(param);
+ C2StreamInitDataInfo::output* csdBuffer = (C2StreamInitDataInfo::output*)(param);
size_t csdSize = csdBuffer->flexCount();
if (csdSize > 0) csd = true;
} else if ((param->index() == C2StreamSampleRateInfo::output::PARAM_TYPE) ||
@@ -118,8 +159,7 @@
typedef std::unique_lock<std::mutex> ULock;
ULock l(queueLock);
workQueue.push_back(std::move(work));
- if (!flushedIndices.empty() &&
- (frameIndexIt != flushedIndices.end())) {
+ if (!flushedIndices.empty() && (frameIndexIt != flushedIndices.end())) {
flushedIndices.erase(frameIndexIt);
}
queueCondition.notify_all();
@@ -136,15 +176,15 @@
}
// Return all test parameters, a list of tuples of <instance, component>
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters() {
+const std::vector<TestParameters>& getTestParameters() {
return getTestParameters(C2Component::DOMAIN_OTHER, C2Component::KIND_OTHER);
}
// Return all test parameters, a list of tuples of <instance, component> with matching domain and
// kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
- C2Component::domain_t domain, C2Component::kind_t kind) {
- static std::vector<std::tuple<std::string, std::string>> parameters;
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+ C2Component::kind_t kind) {
+ static std::vector<TestParameters> parameters;
auto instances = android::Codec2Client::GetServiceNames();
for (std::string instance : instances) {
@@ -157,11 +197,18 @@
(traits.domain != domain || traits.kind != kind)) {
continue;
}
-
+ if (traits.name.rfind(sComponentNamePrefix, 0) != 0) {
+ ALOGD("Skipping tests for %s. Prefix specified is %s", traits.name.c_str(),
+ sComponentNamePrefix.c_str());
+ continue;
+ }
parameters.push_back(std::make_tuple(instance, traits.name));
}
}
+ if (parameters.empty()) {
+ ALOGE("No test parameters added. Verify component prefix passed to the test");
+ }
return parameters;
}
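As a quick illustration of the prefix filter added above: rfind(prefix, 0) == 0 is the usual "starts with" idiom, so the empty default prefix matches every component. A minimal sketch with example component names (not taken from this patch):

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        const std::string prefix = "c2.android";
        const std::vector<std::string> names = {"c2.android.aac.encoder",
                                                "c2.vendor.avc.decoder"};
        for (const std::string& name : names) {
            if (name.rfind(prefix, 0) != 0) continue;    // skipped, as in the loop above
            std::cout << "would test " << name << "\n";  // only c2.android.aac.encoder
        }
        return 0;
    }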
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index 50e3ac5..e74f247 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -40,7 +40,14 @@
using namespace ::std::chrono;
-static std::vector<std::tuple<std::string, std::string>> kTestParameters;
+using TestParameters = std::tuple<std::string, std::string>;
+static std::vector<TestParameters> kTestParameters;
+
+// Resource directory
+extern std::string sResourceDir;
+
+// Component name prefix
+extern std::string sComponentNamePrefix;
struct FrameInfo {
int bytesCount;
@@ -48,6 +55,18 @@
int64_t timestamp;
};
+template <typename... T>
+static inline std::string PrintInstanceTupleNameToString(
+ const testing::TestParamInfo<std::tuple<T...>>& info) {
+ std::stringstream ss;
+ std::apply([&ss](auto&&... elems) { ((ss << elems << '_'), ...); }, info.param);
+ ss << info.index;
+ std::string param_string = ss.str();
+ auto isNotAlphaNum = [](char c) { return !std::isalnum(static_cast<unsigned char>(c)); };
+ std::replace_if(param_string.begin(), param_string.end(), isNotAlphaNum, '_');
+ return param_string;
+}
+
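To make the generated test names concrete, here is a self-contained sketch that applies the same steps as the helper above to a hypothetical (instance, component, signalEOS, inputMaxBufRatio) tuple; the values and index are examples only.

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <tuple>

    int main() {
        std::tuple<std::string, std::string, bool, int> param{
                "default", "c2.android.aac.encoder", false, 2};
        std::stringstream ss;
        std::apply([&ss](auto&&... e) { ((ss << e << '_'), ...); }, param);
        ss << 3;  // gtest supplies the parameter index; 3 is an arbitrary example
        std::string name = ss.str();
        std::replace_if(name.begin(), name.end(),
                        [](unsigned char c) { return !std::isalnum(c); }, '_');
        std::cout << name << "\n";  // prints: default_c2_android_aac_encoder_0_2_3
        return 0;
    }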
/*
* Handle Callback functions onWorkDone(), onTripped(),
* onError(), onDeath(), onFramesRendered()
@@ -105,13 +124,15 @@
std::function<void(std::list<std::unique_ptr<C2Work>>& workItems)> callBack;
};
+void parseArgs(int argc, char** argv);
+
// Return all test parameters, a list of tuples of <instance, component>.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters();
+const std::vector<TestParameters>& getTestParameters();
// Return all test parameters, a list of tuples of <instance, component> with matching domain and
// kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
- C2Component::domain_t domain, C2Component::kind_t kind);
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+ C2Component::kind_t kind);
/*
* common functions declarations
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 6122225..29acd33 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -53,9 +53,8 @@
}
namespace {
-
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kInputTestParameters;
+using InputTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<InputTestParameters> kInputTestParameters;
// google.codec2 Component test setup
class Codec2ComponentHidlTestBase : public ::testing::Test {
@@ -120,9 +119,8 @@
}
};
-class Codec2ComponentHidlTest
- : public Codec2ComponentHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2ComponentHidlTest : public Codec2ComponentHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -317,10 +315,8 @@
ASSERT_EQ(err, C2_OK);
}
-class Codec2ComponentInputTests
- : public Codec2ComponentHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2ComponentInputTests : public Codec2ComponentHidlTestBase,
+ public ::testing::WithParamInterface<InputTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -330,8 +326,8 @@
TEST_P(Codec2ComponentInputTests, InputBufferTest) {
description("Tests for different inputs");
- uint32_t flags = std::stoul(std::get<2>(GetParam()));
- bool isNullBuffer = !std::get<3>(GetParam()).compare("true");
+ uint32_t flags = std::get<2>(GetParam());
+ bool isNullBuffer = std::get<3>(GetParam());
if (isNullBuffer)
ALOGD("Testing for null input buffer with flag : %u", flags);
else
@@ -350,31 +346,28 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2ComponentHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_CASE_P(NonStdInputs, Codec2ComponentInputTests,
- testing::ValuesIn(kInputTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ testing::ValuesIn(kInputTestParameters), PrintInstanceTupleNameToString<>);
} // anonymous namespace
// TODO: Add test for Invalid work,
// TODO: Add test for Invalid states
int main(int argc, char** argv) {
+ parseArgs(argc, argv);
kTestParameters = getTestParameters();
for (auto params : kTestParameters) {
kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_END_OF_STREAM, true));
kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_END_OF_STREAM), "true"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_CODEC_CONFIG), "false"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_END_OF_STREAM), "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_CODEC_CONFIG, false));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_END_OF_STREAM, false));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index b520c17..d0a1c31 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -40,13 +40,44 @@
#include "media_c2_hidl_test_common.h"
#include "media_c2_video_hidl_test_common.h"
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kDecodeTestParameters;
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
-// Resource directory
-static std::string sResourceDir = "";
+struct CompToURL {
+ std::string mime;
+ std::string mURL;
+ std::string info;
+ std::string chksum;
+};
+std::vector<CompToURL> kCompToURL = {
+ {"avc", "bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_176x144_300kbps_60fps.info",
+ "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
+ {"avc", "bbb_avc_640x360_768kbps_30fps.h264", "bbb_avc_640x360_768kbps_30fps.info",
+ "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
+ {"hevc", "bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_176x144_176kbps_60fps.info",
+ "bbb_hevc_176x144_176kbps_60fps_chksum.md5"},
+ {"hevc", "bbb_hevc_640x360_1600kbps_30fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.info",
+ "bbb_hevc_640x360_1600kbps_30fps_chksum.md5"},
+ {"mpeg2", "bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_176x144_105kbps_25fps.info",
+ ""},
+ {"mpeg2", "bbb_mpeg2_352x288_1mbps_60fps.m2v", "bbb_mpeg2_352x288_1mbps_60fps.info", ""},
+ {"3gpp", "bbb_h263_352x288_300kbps_12fps.h263", "bbb_h263_352x288_300kbps_12fps.info", ""},
+ {"mp4v-es", "bbb_mpeg4_352x288_512kbps_30fps.m4v", "bbb_mpeg4_352x288_512kbps_30fps.info",
+ ""},
+ {"vp8", "bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_176x144_240kbps_60fps.info", ""},
+ {"vp8", "bbb_vp8_640x360_2mbps_30fps.vp8", "bbb_vp8_640x360_2mbps_30fps.info",
+ "bbb_vp8_640x360_2mbps_30fps_chksm.md5"},
+ {"vp9", "bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_176x144_285kbps_60fps.info", ""},
+ {"vp9", "bbb_vp9_640x360_1600kbps_30fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.info",
+ "bbb_vp9_640x360_1600kbps_30fps_chksm.md5"},
+ {"vp9", "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9",
+ "bbb_vp9_704x480_280kbps_24fps_altref_2.info", ""},
+ {"av01", "bbb_av1_640_360.av1", "bbb_av1_640_360.info", "bbb_av1_640_360_chksum.md5"},
+ {"av01", "bbb_av1_176_144.av1", "bbb_av1_176_144.info", "bbb_av1_176_144_chksm.md5"},
+};
class LinearBuffer : public C2Buffer {
public:
@@ -85,26 +116,11 @@
mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, mBlockPoolId++);
ASSERT_NE(mLinearPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::input::PARAM_TYPE}, C2_DONT_BLOCK, &queried);
+ ASSERT_GT(queried.size(), 0);
- const StringToName kStringToName[] = {
- {"h263", h263}, {"avc", avc}, {"mpeg2", mpeg2}, {"mpeg4", mpeg4},
- {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9}, {"av1", av1},
- };
-
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
-
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::input*)queried[0].get())->m.value;
mEos = false;
mFramesReceived = 0;
mTimestampUs = 0u;
@@ -114,11 +130,11 @@
mMd5Offset = 0;
mMd5Enable = false;
mRefMd5 = nullptr;
- if (mCompName == unknown_comp) mDisableTest = true;
C2SecureModeTuning secureModeTuning{};
mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
- if (secureModeTuning.value == C2Config::SM_READ_PROTECTED) {
+ if (secureModeTuning.value == C2Config::SM_READ_PROTECTED ||
+ secureModeTuning.value == C2Config::SM_READ_PROTECTED_WITH_ENCRYPTED) {
mDisableTest = true;
}
@@ -136,6 +152,9 @@
// Get the test parameters from GetParam call.
virtual void getParams() {}
+ void GetURLChksmForComponent(char* mURL, char* info, char* chksum, size_t streamIndex);
+ void GetURLForComponent(char* mURL, char* info, size_t streamIndex = 0);
+
/* Calculate the CKSUM for the data in inbuf */
void calc_md5_cksum(uint8_t* pu1_inbuf, uint32_t u4_stride, uint32_t u4_width,
uint32_t u4_height, uint8_t* pu1_cksum_p) {
@@ -220,8 +239,7 @@
if (!codecConfig && !work->worklets.front()->output.buffers.empty()) {
if (mReorderDepth < 0) {
C2PortReorderBufferDepthTuning::output reorderBufferDepth;
- mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK,
- nullptr);
+ mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK, nullptr);
mReorderDepth = reorderBufferDepth.value;
if (mReorderDepth > 0) {
// TODO: Add validation for reordered output
@@ -267,18 +285,7 @@
}
}
- enum standardComp {
- h263,
- avc,
- mpeg2,
- mpeg4,
- hevc,
- vp8,
- vp9,
- av1,
- unknown_comp,
- };
-
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
@@ -291,7 +298,6 @@
char* mRefMd5;
std::list<uint64_t> mTimestampUslist;
std::list<uint64_t> mFlushedIndices;
- standardComp mCompName;
int32_t mWorkResult;
int32_t mReorderDepth;
@@ -314,9 +320,8 @@
}
};
-class Codec2VideoDecHidlTest
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoDecHidlTest : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -324,7 +329,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2VideoDecHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
// Validate its a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -351,83 +356,32 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2VideoDecHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
// number of elementary streams per component
#define STREAM_COUNT 3
// LookUpTable of clips, metadata and chksum for component testing
-void GetURLChksmForComponent(Codec2VideoDecHidlTest::standardComp comp, char* mURL, char* info,
- char* chksum, size_t streamIndex = 1) {
- struct CompToURL {
- Codec2VideoDecHidlTest::standardComp comp;
- const char mURL[STREAM_COUNT][512];
- const char info[STREAM_COUNT][512];
- const char chksum[STREAM_COUNT][512];
- };
- ASSERT_TRUE(streamIndex < STREAM_COUNT);
-
- static const CompToURL kCompToURL[] = {
- {Codec2VideoDecHidlTest::standardComp::avc,
- {"bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_640x360_768kbps_30fps.h264", ""},
- {"bbb_avc_176x144_300kbps_60fps.info", "bbb_avc_640x360_768kbps_30fps.info", ""},
- {"bbb_avc_176x144_300kbps_60fps_chksum.md5",
- "bbb_avc_640x360_768kbps_30fps_chksum.md5", ""}},
- {Codec2VideoDecHidlTest::standardComp::hevc,
- {"bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.hevc", ""},
- {"bbb_hevc_176x144_176kbps_60fps.info", "bbb_hevc_640x360_1600kbps_30fps.info", ""},
- {"bbb_hevc_176x144_176kbps_60fps_chksum.md5",
- "bbb_hevc_640x360_1600kbps_30fps_chksum.md5", ""}},
- {Codec2VideoDecHidlTest::standardComp::mpeg2,
- {"bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_352x288_1mbps_60fps.m2v", ""},
- {"bbb_mpeg2_176x144_105kbps_25fps.info", "bbb_mpeg2_352x288_1mbps_60fps.info", ""},
- {"", "", ""}},
- {Codec2VideoDecHidlTest::standardComp::h263,
- {"", "bbb_h263_352x288_300kbps_12fps.h263", ""},
- {"", "bbb_h263_352x288_300kbps_12fps.info", ""},
- {"", "", ""}},
- {Codec2VideoDecHidlTest::standardComp::mpeg4,
- {"", "bbb_mpeg4_352x288_512kbps_30fps.m4v", ""},
- {"", "bbb_mpeg4_352x288_512kbps_30fps.info", ""},
- {"", "", ""}},
- {Codec2VideoDecHidlTest::standardComp::vp8,
- {"bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_640x360_2mbps_30fps.vp8", ""},
- {"bbb_vp8_176x144_240kbps_60fps.info", "bbb_vp8_640x360_2mbps_30fps.info", ""},
- {"", "bbb_vp8_640x360_2mbps_30fps_chksm.md5", ""}},
- {Codec2VideoDecHidlTest::standardComp::vp9,
- {"bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.vp9",
- "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9"},
- {"bbb_vp9_176x144_285kbps_60fps.info", "bbb_vp9_640x360_1600kbps_30fps.info",
- "bbb_vp9_704x480_280kbps_24fps_altref_2.info"},
- {"", "bbb_vp9_640x360_1600kbps_30fps_chksm.md5", ""}},
- {Codec2VideoDecHidlTest::standardComp::av1,
- {"bbb_av1_640_360.av1", "bbb_av1_176_144.av1", ""},
- {"bbb_av1_640_360.info", "bbb_av1_176_144.info", ""},
- {"bbb_av1_640_360_chksum.md5", "bbb_av1_176_144_chksm.md5", ""}},
- };
-
- for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
- if (kCompToURL[i].comp == comp) {
- strcat(mURL, kCompToURL[i].mURL[streamIndex]);
- strcat(info, kCompToURL[i].info[streamIndex]);
- strcat(chksum, kCompToURL[i].chksum[streamIndex]);
- return;
+void Codec2VideoDecHidlTestBase::GetURLChksmForComponent(char* mURL, char* info, char* chksum,
+ size_t streamIndex) {
+ int streamCount = 0;
+ for (size_t i = 0; i < kCompToURL.size(); ++i) {
+ if (mMime.find(kCompToURL[i].mime) != std::string::npos) {
+ if (streamCount == streamIndex) {
+ strcat(mURL, kCompToURL[i].mURL.c_str());
+ strcat(info, kCompToURL[i].info.c_str());
+ strcat(chksum, kCompToURL[i].chksum.c_str());
+ return;
+ }
+ streamCount++;
}
}
}
-void GetURLForComponent(Codec2VideoDecHidlTest::standardComp comp, char* mURL, char* info,
- size_t streamIndex = 1) {
+void Codec2VideoDecHidlTestBase::GetURLForComponent(char* mURL, char* info, size_t streamIndex) {
char chksum[512];
strcpy(chksum, sResourceDir.c_str());
- GetURLChksmForComponent(comp, mURL, info, chksum, streamIndex);
+ GetURLChksmForComponent(mURL, info, chksum, streamIndex);
}
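For clarity, a trimmed standalone version of the lookup above: entries whose mime substring matches are counted in table order, and streamIndex picks the n-th match. The "video/avc" value is an assumption about what an AVC decoder reports via C2PortMediaTypeSetting; the clip entries are copied from kCompToURL above.

    #include <iostream>
    #include <string>
    #include <vector>

    struct Clip { std::string mime, url, info, chksum; };

    int main() {
        const std::vector<Clip> clips = {
                {"avc", "bbb_avc_176x144_300kbps_60fps.h264",
                 "bbb_avc_176x144_300kbps_60fps.info",
                 "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
                {"avc", "bbb_avc_640x360_768kbps_30fps.h264",
                 "bbb_avc_640x360_768kbps_30fps.info",
                 "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
        };
        const std::string mime = "video/avc";  // example value of mMime
        size_t streamIndex = 1, matches = 0;
        for (const Clip& clip : clips) {
            if (mime.find(clip.mime) == std::string::npos) continue;
            if (matches++ == streamIndex) {
                std::cout << clip.url << "\n";  // bbb_avc_640x360_768kbps_30fps.h264
                break;
            }
        }
        return 0;
    }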
void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
@@ -517,7 +471,7 @@
TEST_P(Codec2VideoDecHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid video component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
@@ -573,10 +527,8 @@
return false;
}
-class Codec2VideoDecDecodeTest
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2VideoDecDecodeTest : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<DecodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -588,8 +540,8 @@
description("Decodes input file");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
- uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ uint32_t streamIndex = std::get<2>(GetParam());
+ bool signalEOS = std::get<3>(GetParam());
mTimestampDevTest = true;
char mURL[512], info[512], chksum[512];
@@ -599,7 +551,7 @@
strcpy(info, sResourceDir.c_str());
strcpy(chksum, sResourceDir.c_str());
- GetURLChksmForComponent(mCompName, mURL, info, chksum, streamIndex);
+ GetURLChksmForComponent(mURL, info, chksum, streamIndex);
if (!(strcmp(mURL, sResourceDir.c_str())) || !(strcmp(info, sResourceDir.c_str()))) {
ALOGV("Skipping Test, Stream not available");
return;
@@ -688,9 +640,11 @@
TEST_P(Codec2VideoDecHidlTest, AdaptiveDecodeTest) {
description("Adaptive Decode Test");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
- if (!(mCompName == avc || mCompName == hevc || mCompName == vp8 || mCompName == vp9 ||
- mCompName == mpeg2))
+ if (!(strcasestr(mMime.c_str(), "avc") || strcasestr(mMime.c_str(), "hevc") ||
+ strcasestr(mMime.c_str(), "vp8") || strcasestr(mMime.c_str(), "vp9") ||
+ strcasestr(mMime.c_str(), "mpeg2"))) {
return;
+ }
typedef std::unique_lock<std::mutex> ULock;
ASSERT_EQ(mComponent->start(), C2_OK);
@@ -705,7 +659,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info, i % STREAM_COUNT);
+ GetURLForComponent(mURL, info, i % STREAM_COUNT);
if (!(strcmp(mURL, sResourceDir.c_str())) || !(strcmp(info, sResourceDir.c_str()))) {
ALOGV("Stream not available, skipping this index");
continue;
@@ -801,7 +755,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
int32_t numCsds = populateInfoVector(info, &Info, mTimestampDevTest, &mTimestampUslist);
ASSERT_GE(numCsds, 0) << "Error in parsing input info file: " << info;
@@ -888,7 +842,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
mFlushedIndices.clear();
@@ -964,7 +918,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
eleInfo.open(info);
ASSERT_EQ(eleInfo.is_open(), true) << info << " - file not found";
@@ -1017,9 +971,8 @@
}
}
-class Codec2VideoDecCsdInputTests
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2VideoDecCsdInputTests : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<CsdFlushTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -1038,7 +991,7 @@
strcpy(mURL, sResourceDir.c_str());
strcpy(info, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL, info);
+ GetURLForComponent(mURL, info);
int32_t numCsds = populateInfoVector(info, &Info, mTimestampDevTest, &mTimestampUslist);
ASSERT_GE(numCsds, 0) << "Error in parsing input info file";
@@ -1052,7 +1005,7 @@
bool flushedDecoder = false;
bool signalEOS = false;
bool keyFrame = false;
- bool flushCsd = !std::get<2>(GetParam()).compare("true");
+ bool flushCsd = std::get<2>(GetParam());
ALOGV("sending %d csd data ", numCsds);
int framesToDecode = numCsds;
@@ -1122,49 +1075,41 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoDecHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// DecodeTest with StreamIndex and EOS / No EOS
INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2VideoDecDecodeTest,
testing::ValuesIn(kDecodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2VideoDecCsdInputTests,
testing::ValuesIn(kCsdFlushTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
// TODO : Video specific configuration Test
int main(int argc, char** argv) {
+ parseArgs(argc, argv);
kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_DECODER);
for (auto params : kTestParameters) {
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 2, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 2, true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
- }
-
- // Set the resource directory based on command line args.
- // Test will fail to set up if the argument is not set.
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-P") == 0 && i < argc - 1) {
- sResourceDir = argv[i + 1];
- break;
- }
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 5bcea5b..23ceff4 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -41,13 +41,11 @@
: C2Buffer({block->share(C2Rect(block->width(), block->height()), ::C2Fence())}) {}
};
-static std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>>
- kEncodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kEncodeResolutionTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, bool, bool>;
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
-// Resource directory
-static std::string sResourceDir = "";
+using EncodeResolutionTestParameters = std::tuple<std::string, std::string, int32_t, int32_t>;
+static std::vector<EncodeResolutionTestParameters> kEncodeResolutionTestParameters;
namespace {
@@ -78,26 +76,13 @@
mGraphicPool = std::make_shared<C2PooledBlockPool>(mGraphicAllocator, mBlockPoolId++);
ASSERT_NE(mGraphicPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+ &queried);
+ ASSERT_GT(queried.size(), 0);
- const StringToName kStringToName[] = {
- {"h263", h263}, {"avc", avc}, {"mpeg4", mpeg4},
- {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9},
- };
-
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
-
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
+ std::cout << "mime : " << mMime << "\n";
mEos = false;
mCsd = false;
mConfigBPictures = false;
@@ -106,11 +91,11 @@
mTimestampUs = 0u;
mOutputSize = 0u;
mTimestampDevTest = false;
- if (mCompName == unknown_comp) mDisableTest = true;
C2SecureModeTuning secureModeTuning{};
mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
- if (secureModeTuning.value == C2Config::SM_READ_PROTECTED) {
+ if (secureModeTuning.value == C2Config::SM_READ_PROTECTED ||
+ secureModeTuning.value == C2Config::SM_READ_PROTECTED_WITH_ENCRYPTED) {
mDisableTest = true;
}
@@ -187,16 +172,7 @@
}
}
- enum standardComp {
- h263,
- avc,
- mpeg4,
- hevc,
- vp8,
- vp9,
- unknown_comp,
- };
-
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
bool mEos;
@@ -204,7 +180,6 @@
bool mDisableTest;
bool mConfigBPictures;
bool mTimestampDevTest;
- standardComp mCompName;
uint32_t mFramesReceived;
uint32_t mFailedWorkReceived;
uint64_t mTimestampUs;
@@ -231,9 +206,8 @@
}
};
-class Codec2VideoEncHidlTest
- : public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoEncHidlTest : public Codec2VideoEncHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -241,7 +215,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2VideoEncHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
// Validate its a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -268,13 +242,6 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2VideoEncHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
@@ -405,14 +372,12 @@
TEST_P(Codec2VideoEncHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid video component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
-class Codec2VideoEncEncodeTest
- : public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string, std::string>> {
+class Codec2VideoEncEncodeTest : public Codec2VideoEncHidlTestBase,
+ public ::testing::WithParamInterface<EncodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -426,10 +391,10 @@
char mURL[512];
int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ bool signalEOS = std::get<3>(GetParam());
// Send an empty frame to receive CSD data from encoder.
- bool sendEmptyFirstFrame = !std::get<3>(GetParam()).compare("true");
- mConfigBPictures = !std::get<4>(GetParam()).compare("true");
+ bool sendEmptyFirstFrame = std::get<3>(GetParam());
+ mConfigBPictures = std::get<4>(GetParam());
strcpy(mURL, sResourceDir.c_str());
GetURLForComponent(mURL);
@@ -517,9 +482,9 @@
ASSERT_TRUE(false);
}
- if (mCompName == vp8 || mCompName == h263) {
+ if ((mMime.find("vp8") != std::string::npos) || (mMime.find("3gpp") != std::string::npos)) {
ASSERT_FALSE(mCsd) << "CSD Buffer not expected";
- } else if (mCompName != vp9) {
+ } else if (mMime.find("vp9") == std::string::npos) {
ASSERT_TRUE(mCsd) << "CSD Buffer not received";
}
@@ -697,8 +662,7 @@
class Codec2VideoEncResolutionTest
: public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+ public ::testing::WithParamInterface<EncodeResolutionTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -710,8 +674,8 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
std::ifstream eleStream;
- int32_t nWidth = std::stoi(std::get<2>(GetParam()));
- int32_t nHeight = std::stoi(std::get<3>(GetParam()));
+ int32_t nWidth = std::get<2>(GetParam());
+ int32_t nHeight = std::get<3>(GetParam());
ALOGD("Trying encode for width %d height %d", nWidth, nHeight);
mEos = false;
@@ -743,14 +707,16 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoEncHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(NonStdSizes, Codec2VideoEncResolutionTest,
- ::testing::ValuesIn(kEncodeResolutionTestParameters));
+ ::testing::ValuesIn(kEncodeResolutionTestParameters),
+ PrintInstanceTupleNameToString<>);
// EncodeTest with EOS / No EOS
INSTANTIATE_TEST_SUITE_P(EncodeTestwithEOS, Codec2VideoEncEncodeTest,
- ::testing::ValuesIn(kEncodeTestParameters));
+ ::testing::ValuesIn(kEncodeTestParameters),
+ PrintInstanceTupleNameToString<>);
TEST_P(Codec2VideoEncHidlTest, AdaptiveBitrateTest) {
description("Encodes input file for different bitrates");
@@ -841,38 +807,26 @@
} // anonymous namespace
int main(int argc, char** argv) {
+ parseArgs(argc, argv);
kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER);
for (auto params : kTestParameters) {
- constexpr char const* kBoolString[] = { "false", "true" };
for (size_t i = 0; i < 1 << 3; ++i) {
kEncodeTestParameters.push_back(std::make_tuple(
- std::get<0>(params), std::get<1>(params),
- kBoolString[i & 1],
- kBoolString[(i >> 1) & 1],
- kBoolString[(i >> 2) & 1]));
+ std::get<0>(params), std::get<1>(params), i & 1, (i >> 1) & 1, (i >> 2) & 1));
}
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "52", "18"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 52, 18));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "365", "365"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 365, 365));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "484", "362"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 484, 362));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "244", "488"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 244, 488));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "852", "608"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 852, 608));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1400", "442"));
- }
-
- // Set the resource directory based on command line args.
- // Test will fail to set up if the argument is not set.
- for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-P") == 0 && i < argc - 1) {
- sResourceDir = argv[i + 1];
- break;
- }
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1400, 442));
}
::testing::InitGoogleTest(&argc, argv);
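As a side note on the parameter generation above, the bit masking enumerates all eight (signalEOS, sendEmptyFirstFrame, configBPictures) combinations; a tiny sketch:

    #include <cstdio>

    int main() {
        for (unsigned i = 0; i < (1u << 3); ++i) {
            bool signalEOS = i & 1;
            bool sendEmptyFirstFrame = (i >> 1) & 1;
            bool configBPictures = (i >> 2) & 1;
            // prints every combination, least significant bit first
            std::printf("%d %d %d\n", signalEOS, sendEmptyFirstFrame, configBPictures);
        }
        return 0;
    }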
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 0296004..71857e0 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -1482,7 +1482,8 @@
c2_status_t Codec2Client::Component::setOutputSurface(
C2BlockPool::local_id_t blockPoolId,
const sp<IGraphicBufferProducer>& surface,
- uint32_t generation) {
+ uint32_t generation,
+ int maxDequeueCount) {
uint64_t bqId = 0;
sp<IGraphicBufferProducer> nullIgbp;
sp<HGraphicBufferProducer2> nullHgbp;
@@ -1496,15 +1497,15 @@
std::shared_ptr<SurfaceSyncObj> syncObj;
if (!surface) {
- mOutputBufferQueue->configure(nullIgbp, generation, 0, nullptr);
+ mOutputBufferQueue->configure(nullIgbp, generation, 0, maxDequeueCount, nullptr);
} else if (surface->getUniqueId(&bqId) != OK) {
LOG(ERROR) << "setOutputSurface -- "
"cannot obtain bufferqueue id.";
bqId = 0;
- mOutputBufferQueue->configure(nullIgbp, generation, 0, nullptr);
+ mOutputBufferQueue->configure(nullIgbp, generation, 0, maxDequeueCount, nullptr);
} else {
- mOutputBufferQueue->configure(surface, generation, bqId,
- mBase1_2 ? &syncObj : nullptr);
+ mOutputBufferQueue->configure(surface, generation, bqId, maxDequeueCount, mBase1_2 ?
+ &syncObj : nullptr);
}
ALOGD("surface generation remote change %u HAL ver: %s",
generation, syncObj ? "1.2" : "1.0");
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index eca268e..347e58a 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -384,7 +384,8 @@
c2_status_t setOutputSurface(
C2BlockPool::local_id_t blockPoolId,
const sp<IGraphicBufferProducer>& surface,
- uint32_t generation);
+ uint32_t generation,
+ int maxDequeueBufferCount);
// Extract a slot number from of the block, then call
// IGraphicBufferProducer::queueBuffer().
diff --git a/media/codec2/hidl/client/include/codec2/hidl/output.h b/media/codec2/hidl/client/include/codec2/hidl/output.h
index 0f03b36..877148a 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/output.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/output.h
@@ -47,6 +47,7 @@
bool configure(const sp<IGraphicBufferProducer>& igbp,
uint32_t generation,
uint64_t bqId,
+ int maxDequeueBufferCount,
std::shared_ptr<V1_2::SurfaceSyncObj> *syncObj);
// Render a graphic block to current surface.
diff --git a/media/codec2/hidl/client/output.cpp b/media/codec2/hidl/client/output.cpp
index 7df0da2..283ed8d 100644
--- a/media/codec2/hidl/client/output.cpp
+++ b/media/codec2/hidl/client/output.cpp
@@ -178,6 +178,7 @@
bool OutputBufferQueue::configure(const sp<IGraphicBufferProducer>& igbp,
uint32_t generation,
uint64_t bqId,
+ int maxDequeueBufferCount,
std::shared_ptr<V1_2::SurfaceSyncObj> *syncObj) {
uint64_t consumerUsage = 0;
if (igbp->getConsumerUsage(&consumerUsage) != OK) {
@@ -219,6 +220,20 @@
{
std::scoped_lock<std::mutex> l(mMutex);
if (generation == mGeneration) {
+ // case of old BlockPool destruction
+ C2SyncVariables *var = mSyncMem ? mSyncMem->mem() : nullptr;
+ if (var) {
+ *syncObj = std::make_shared<V1_2::SurfaceSyncObj>();
+ (*syncObj)->bqId = bqId;
+ (*syncObj)->syncMemory = mSyncMem->handle();
+ (*syncObj)->generationId = generation;
+ (*syncObj)->consumerUsage = consumerUsage;
+ mMaxDequeueBufferCount = maxDequeueBufferCount;
+ var->lock();
+ var->setSyncStatusLocked(C2SyncVariables::STATUS_INIT);
+ var->setInitialDequeueCountLocked(mMaxDequeueBufferCount, 0);
+ var->unlock();
+ }
return false;
}
std::shared_ptr<C2SurfaceSyncMemory> oldMem = mSyncMem;
@@ -238,6 +253,7 @@
mGeneration = generation;
mBqId = bqId;
mOwner = std::make_shared<int>(0);
+ mMaxDequeueBufferCount = maxDequeueBufferCount;
for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
if (mBqId == 0 || !mBuffers[i]) {
continue;
@@ -288,7 +304,9 @@
mPoolDatas[i] = poolDatas[i];
}
if (newSync) {
- newSync->setInitialDequeueCount(mMaxDequeueBufferCount, success);
+ newSync->lock();
+ newSync->setInitialDequeueCountLocked(mMaxDequeueBufferCount, success);
+ newSync->unlock();
}
}
ALOGD("remote graphic buffer migration %zu/%zu",
@@ -452,6 +470,7 @@
syncVar->unlock();
}
mMutex.unlock();
+ ALOGD("set max dequeue count %d from update", maxDequeueBufferCount);
}
} // namespace c2
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 0b38bc1..bed8aeb 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -19,7 +19,6 @@
#include <android-base/logging.h>
#include <set>
-#include <sstream>
#include <dlfcn.h>
@@ -383,6 +382,9 @@
// Configure the next interface with the params.
std::vector<C2Param *> configParams;
for (size_t i = 0; i < heapParams.size(); ++i) {
+ if (!heapParams[i]) {
+ continue;
+ }
if (heapParams[i]->forStream()) {
heapParams[i] = C2Param::CopyAsStream(
*heapParams[i], false /* output */, heapParams[i]->stream());
@@ -782,10 +784,7 @@
if (C2_OK != mStore->createComponent(filter.traits.name, &comp)) {
return {};
}
- if (C2_OK != mStore->createInterface(filter.traits.name, &intf)) {
- return {};
- }
- filters.push_back({comp, intf, filter.traits, filter.desc});
+ filters.push_back({comp, comp->intf(), filter.traits, filter.desc});
}
return filters;
}
@@ -869,7 +868,7 @@
}
std::vector<Component> filters = createFilters();
std::shared_ptr wrapped = std::make_shared<WrappedDecoder>(
- comp, std::move(filters), weak_from_this());
+ comp, std::vector(filters), weak_from_this());
{
std::unique_lock lock(mWrappedComponentsMutex);
std::vector<std::weak_ptr<const C2Component>> &components =
diff --git a/media/codec2/hidl/plugin/FilterWrapperStub.cpp b/media/codec2/hidl/plugin/FilterWrapperStub.cpp
index 1b94a1a..01ca596 100644
--- a/media/codec2/hidl/plugin/FilterWrapperStub.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapperStub.cpp
@@ -42,10 +42,10 @@
}
c2_status_t FilterWrapper::createBlockPool(
- C2PlatformAllocatorStore::id_t,
- std::shared_ptr<const C2Component>,
- std::shared_ptr<C2BlockPool> *) {
- return C2_OMITTED;
+ C2PlatformAllocatorStore::id_t allocatorId,
+ std::shared_ptr<const C2Component> component,
+ std::shared_ptr<C2BlockPool> *pool) {
+ return CreateCodec2BlockPool(allocatorId, component, pool);
}
} // namespace android
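A small assumed-usage sketch of the behaviour change above: the stub now hands back a real pool from the platform factory instead of returning C2_OMITTED. The allocator id and helper name below are illustrative, not part of the patch.

    #include <memory>

    #include <C2PlatformSupport.h>

    // Illustrative only: obtain a block pool for a component through the same
    // factory the stub now delegates to.
    c2_status_t makeLinearPool(const std::shared_ptr<const C2Component>& component,
                               std::shared_ptr<C2BlockPool>* pool) {
        return android::CreateCodec2BlockPool(
                android::C2PlatformAllocatorStore::ION, component, pool);
    }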
diff --git a/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
index f701987..5d0284f 100644
--- a/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
+++ b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
@@ -35,7 +35,7 @@
# on ARM is statically loaded at 0xffff 0000. See
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
# for more details.
-mremap: arg3 == 3
+mremap: arg3 == 3 || arg3 == MREMAP_MAYMOVE
munmap: 1
prctl: 1
writev: 1
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index f66dc11..c049187 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -90,6 +90,10 @@
}
}
+ void setPriority(int priority) {
+ androidSetThreadPriority(getTid(), priority);
+ }
+
protected:
bool threadLoop() override {
constexpr nsecs_t kIntervalNs = nsecs_t(10) * 1000 * 1000; // 10ms
@@ -529,4 +533,8 @@
return *mDataspace.lock();
}
+void C2OMXNode::setPriority(int priority) {
+ mQueueThread->setPriority(priority);
+}
+
} // namespace android
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
index 9c04969..6669318 100644
--- a/media/codec2/sfplugin/C2OMXNode.h
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -98,6 +98,11 @@
*/
android_dataspace getDataspace();
+ /**
+ * Sets priority of the queue thread.
+ */
+ void setPriority(int priority);
+
private:
std::weak_ptr<Codec2Client::Component> mComp;
sp<IOMXBufferSource> mBufferSource;
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 02f7cb8..0a895b0 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -397,6 +397,14 @@
// consumer usage is queried earlier.
+ // priority
+ if (mConfig.mPriority != config.mPriority) {
+ if (config.mPriority != INT_MAX) {
+ mNode->setPriority(config.mPriority);
+ }
+ mConfig.mPriority = config.mPriority;
+ }
+
if (status.str().empty()) {
ALOGD("ISConfig not changed");
} else {
@@ -944,6 +952,7 @@
}
}
config->mISConfig->mUsage = 0;
+ config->mISConfig->mPriority = INT_MAX;
}
/*
@@ -997,7 +1006,15 @@
// needed for decoders.
if (!(config->mDomain & Config::IS_ENCODER)) {
if (surface == nullptr) {
- format = flexPixelFormat.value_or(COLOR_FormatYUV420Flexible);
+ const char *prefix = "";
+ if (flexSemiPlanarPixelFormat) {
+ format = COLOR_FormatYUV420SemiPlanar;
+ prefix = "semi-";
+ } else {
+ format = COLOR_FormatYUV420Planar;
+ }
+ ALOGD("Client requested ByteBuffer mode decoder w/o color format set: "
+ "using default %splanar color format", prefix);
} else {
format = COLOR_FormatSurface;
}
@@ -1127,6 +1144,16 @@
configUpdate.push_back(std::move(qp));
}
+ int32_t background = 0;
+ if ((config->mDomain & Config::IS_VIDEO)
+ && msg->findInt32("android._background-mode", &background)
+ && background) {
+ androidSetThreadPriority(gettid(), ANDROID_PRIORITY_BACKGROUND);
+ if (config->mISConfig) {
+ config->mISConfig->mPriority = ANDROID_PRIORITY_BACKGROUND;
+ }
+ }
+
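For context, a hedged caller-side sketch of opting a decoder into the new background mode; only the "android._background-mode" key comes from this patch, while the mime, sizes, and helper name are assumed boilerplate.

    #include <media/stagefright/foundation/AMessage.h>

    using android::AMessage;
    using android::sp;

    // Hypothetical helper: builds a configure() format that requests background
    // priority for the codec's queue thread via the key checked above.
    static sp<AMessage> makeBackgroundDecoderFormat() {
        sp<AMessage> format = new AMessage;
        format->setString("mime", "video/avc");  // example mime
        format->setInt32("width", 1280);         // example size
        format->setInt32("height", 720);
        format->setInt32("android._background-mode", 1);
        return format;
    }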
err = config->setParameters(comp, configUpdate, C2_DONT_BLOCK);
if (err != OK) {
ALOGW("failed to configure c2 params");
@@ -1790,17 +1817,19 @@
}
status_t CCodec::setSurface(const sp<Surface> &surface) {
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mTunneled && config->mSidebandHandle != nullptr) {
- sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
- status_t err = native_window_set_sideband_stream(
- nativeWindow.get(),
- const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
- if (err != OK) {
- ALOGE("NativeWindow(%p) native_window_set_sideband_stream(%p) failed! (err %d).",
- nativeWindow.get(), config->mSidebandHandle->handle(), err);
- return err;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ if (config->mTunneled && config->mSidebandHandle != nullptr) {
+ sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
+ status_t err = native_window_set_sideband_stream(
+ nativeWindow.get(),
+ const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
+ if (err != OK) {
+ ALOGE("NativeWindow(%p) native_window_set_sideband_stream(%p) failed! (err %d).",
+ nativeWindow.get(), config->mSidebandHandle->handle(), err);
+ return err;
+ }
}
}
return mChannel->setSurface(surface);
@@ -1935,6 +1964,12 @@
params->removeEntryAt(params->findEntryByName(KEY_BIT_RATE));
}
+ int32_t syncId = 0;
+ if (params->findInt32("audio-hw-sync", &syncId)
+ || params->findInt32("hw-av-sync-id", &syncId)) {
+ configureTunneledVideoPlayback(comp, nullptr, params);
+ }
+
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
@@ -2135,80 +2170,92 @@
}
// handle configuration changes in work done
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- Config::Watcher<C2StreamInitDataInfo::output> initData =
- config->watch<C2StreamInitDataInfo::output>();
- if (!work->worklets.empty()
- && (work->worklets.front()->output.flags
- & C2FrameData::FLAG_DISCARD_FRAME) == 0) {
+ std::unique_ptr<C2Param> initData;
+ sp<AMessage> outputFormat = nullptr;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ Config::Watcher<C2StreamInitDataInfo::output> initDataWatcher =
+ config->watch<C2StreamInitDataInfo::output>();
+ if (!work->worklets.empty()
+ && (work->worklets.front()->output.flags
+ & C2FrameData::FLAG_DISCARD_FRAME) == 0) {
- // copy buffer info to config
- std::vector<std::unique_ptr<C2Param>> updates;
- for (const std::unique_ptr<C2Param> ¶m
- : work->worklets.front()->output.configUpdate) {
- updates.push_back(C2Param::Copy(*param));
- }
- unsigned stream = 0;
- for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
- for (const std::shared_ptr<const C2Info> &info : buf->info()) {
- // move all info into output-stream #0 domain
- updates.emplace_back(C2Param::CopyAsStream(*info, true /* output */, stream));
+ // copy buffer info to config
+ std::vector<std::unique_ptr<C2Param>> updates;
+ for (const std::unique_ptr<C2Param> ¶m
+ : work->worklets.front()->output.configUpdate) {
+ updates.push_back(C2Param::Copy(*param));
+ }
+ unsigned stream = 0;
+ std::vector<std::shared_ptr<C2Buffer>> &outputBuffers =
+ work->worklets.front()->output.buffers;
+ for (const std::shared_ptr<C2Buffer> &buf : outputBuffers) {
+ for (const std::shared_ptr<const C2Info> &info : buf->info()) {
+ // move all info into output-stream #0 domain
+ updates.emplace_back(
+ C2Param::CopyAsStream(*info, true /* output */, stream));
+ }
+
+ const std::vector<C2ConstGraphicBlock> blocks = buf->data().graphicBlocks();
+ // for now only do the first block
+ if (!blocks.empty()) {
+ // ALOGV("got output buffer with crop %u,%u+%u,%u and size %u,%u",
+ // block.crop().left, block.crop().top,
+ // block.crop().width, block.crop().height,
+ // block.width(), block.height());
+ const C2ConstGraphicBlock &block = blocks[0];
+ updates.emplace_back(new C2StreamCropRectInfo::output(
+ stream, block.crop()));
+ updates.emplace_back(new C2StreamPictureSizeInfo::output(
+ stream, block.crop().width, block.crop().height));
+ }
+ ++stream;
}
- const std::vector<C2ConstGraphicBlock> blocks = buf->data().graphicBlocks();
- // for now only do the first block
- if (!blocks.empty()) {
- // ALOGV("got output buffer with crop %u,%u+%u,%u and size %u,%u",
- // block.crop().left, block.crop().top,
- // block.crop().width, block.crop().height,
- // block.width(), block.height());
- const C2ConstGraphicBlock &block = blocks[0];
- updates.emplace_back(new C2StreamCropRectInfo::output(stream, block.crop()));
- updates.emplace_back(new C2StreamPictureSizeInfo::output(
- stream, block.crop().width, block.crop().height));
- }
- ++stream;
- }
+ sp<AMessage> oldFormat = config->mOutputFormat;
+ config->updateConfiguration(updates, config->mOutputDomain);
+ RevertOutputFormatIfNeeded(oldFormat, config->mOutputFormat);
- sp<AMessage> outputFormat = config->mOutputFormat;
- config->updateConfiguration(updates, config->mOutputDomain);
- RevertOutputFormatIfNeeded(outputFormat, config->mOutputFormat);
-
- // copy standard infos to graphic buffers if not already present (otherwise, we
- // may overwrite the actual intermediate value with a final value)
- stream = 0;
- const static C2Param::Index stdGfxInfos[] = {
- C2StreamRotationInfo::output::PARAM_TYPE,
- C2StreamColorAspectsInfo::output::PARAM_TYPE,
- C2StreamDataSpaceInfo::output::PARAM_TYPE,
- C2StreamHdrStaticInfo::output::PARAM_TYPE,
- C2StreamHdr10PlusInfo::output::PARAM_TYPE,
- C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
- C2StreamSurfaceScalingInfo::output::PARAM_TYPE
- };
- for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
- if (buf->data().graphicBlocks().size()) {
- for (C2Param::Index ix : stdGfxInfos) {
- if (!buf->hasInfo(ix)) {
- const C2Param *param =
- config->getConfigParameterValue(ix.withStream(stream));
- if (param) {
- std::shared_ptr<C2Param> info(C2Param::Copy(*param));
- buf->setInfo(std::static_pointer_cast<C2Info>(info));
+ // copy standard infos to graphic buffers if not already present (otherwise, we
+ // may overwrite the actual intermediate value with a final value)
+ stream = 0;
+ const static C2Param::Index stdGfxInfos[] = {
+ C2StreamRotationInfo::output::PARAM_TYPE,
+ C2StreamColorAspectsInfo::output::PARAM_TYPE,
+ C2StreamDataSpaceInfo::output::PARAM_TYPE,
+ C2StreamHdrStaticInfo::output::PARAM_TYPE,
+ C2StreamHdr10PlusInfo::output::PARAM_TYPE,
+ C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
+ C2StreamSurfaceScalingInfo::output::PARAM_TYPE
+ };
+ for (const std::shared_ptr<C2Buffer> &buf : outputBuffers) {
+ if (buf->data().graphicBlocks().size()) {
+ for (C2Param::Index ix : stdGfxInfos) {
+ if (!buf->hasInfo(ix)) {
+ const C2Param *param =
+ config->getConfigParameterValue(ix.withStream(stream));
+ if (param) {
+ std::shared_ptr<C2Param> info(C2Param::Copy(*param));
+ buf->setInfo(std::static_pointer_cast<C2Info>(info));
+ }
}
}
}
+ ++stream;
}
- ++stream;
}
- }
- if (config->mInputSurface) {
- config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ if (config->mInputSurface) {
+ config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ }
+ if (initDataWatcher.hasChanged()) {
+ initData = C2Param::Copy(*initDataWatcher.update().get());
+ }
+ outputFormat = config->mOutputFormat;
}
mChannel->onWorkDone(
- std::move(work), config->mOutputFormat,
- initData.hasChanged() ? initData.update().get() : nullptr);
+ std::move(work), outputFormat,
+ initData ? (C2StreamInitDataInfo::output *)initData.get() : nullptr);
break;
}
case kWhatWatch: {
@@ -2258,6 +2305,10 @@
return UNKNOWN_ERROR;
}
+ if (sidebandHandle == nullptr) {
+ return OK;
+ }
+
std::vector<std::unique_ptr<C2Param>> params;
c2err = comp->query({}, {C2PortTunnelHandleTuning::output::PARAM_TYPE}, C2_DONT_BLOCK, &params);
if (c2err == C2_OK && params.size() == 1u) {
@@ -2289,9 +2340,13 @@
pendingDeadline = true;
}
}
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mTunneled == false && name.empty()) {
+ bool tunneled = false;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ tunneled = config->mTunneled;
+ }
+ if (!tunneled && name.empty()) {
constexpr std::chrono::steady_clock::duration kWorkDurationThreshold = 3s;
std::chrono::steady_clock::duration elapsed = mChannel->elapsed();
if (elapsed >= kWorkDurationThreshold) {
@@ -2311,7 +2366,13 @@
return;
}
- ALOGW("previous call to %s exceeded timeout", name.c_str());
+ C2String compName;
+ {
+ Mutexed<State>::Locked state(mState);
+ compName = state->comp->getName();
+ }
+ ALOGW("[%s] previous call to %s exceeded timeout", compName.c_str(), name.c_str());
+
initiateRelease(false);
mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index c4f9d84..e33a5ba 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1189,9 +1189,6 @@
}
outputGeneration = output->generation;
}
- if (maxDequeueCount > 0) {
- mComponent->setOutputSurfaceMaxDequeueCount(maxDequeueCount);
- }
bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
C2BlockPool::local_id_t outputPoolId_;
@@ -1331,7 +1328,8 @@
mComponent->setOutputSurface(
outputPoolId_,
outputSurface,
- outputGeneration);
+ outputGeneration,
+ maxDequeueCount);
}
if (oStreamFormat.value == C2BufferData::LINEAR) {
@@ -1368,7 +1366,7 @@
// about buffers from the previous generation do not interfere with the
// newly initialized pipeline capacity.
- {
+ if (inputFormat || outputFormat) {
Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
watcher->inputDelay(inputDelayValue)
.pipelineDelay(pipelineDelayValue)
@@ -1468,14 +1466,14 @@
void CCodecBufferChannel::stop() {
mSync.stop();
mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
- if (mInputSurface != nullptr) {
- mInputSurface.reset();
- }
- mPipelineWatcher.lock()->flush();
}
void CCodecBufferChannel::reset() {
stop();
+ if (mInputSurface != nullptr) {
+ mInputSurface.reset();
+ }
+ mPipelineWatcher.lock()->flush();
{
Mutexed<Input>::Locked input(mInput);
input->buffers.reset(new DummyInputBuffers(""));
@@ -1503,8 +1501,10 @@
void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
ALOGV("[%s] flush", mName);
+ std::vector<uint64_t> indices;
std::list<std::unique_ptr<C2Work>> configs;
for (const std::unique_ptr<C2Work> &work : flushedWork) {
+ indices.push_back(work->input.ordinal.frameIndex.peeku());
if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
continue;
}
@@ -1517,6 +1517,7 @@
std::unique_ptr<C2Work> copy(new C2Work);
copy->input.flags = C2FrameData::flags_t(work->input.flags | C2FrameData::FLAG_DROP_FRAME);
copy->input.ordinal = work->input.ordinal;
+ copy->input.ordinal.frameIndex = mFrameIndex++;
copy->input.buffers.insert(
copy->input.buffers.begin(),
work->input.buffers.begin(),
@@ -1545,7 +1546,12 @@
output->buffers->flushStash();
}
}
- mPipelineWatcher.lock()->flush();
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ for (uint64_t index : indices) {
+ watcher->onWorkDone(index);
+ }
+ }
}
void CCodecBufferChannel::onWorkDone(
@@ -1939,10 +1945,11 @@
& ((1 << 10) - 1));
sp<IGraphicBufferProducer> producer;
+ int maxDequeueCount = mOutputSurface.lock()->maxDequeueBuffers;
if (newSurface) {
newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
newSurface->setDequeueTimeout(kDequeueTimeoutNs);
- newSurface->setMaxDequeuedBufferCount(mOutputSurface.lock()->maxDequeueBuffers);
+ newSurface->setMaxDequeuedBufferCount(maxDequeueCount);
producer = newSurface->getIGraphicBufferProducer();
producer->setGenerationNumber(generation);
} else {
@@ -1962,7 +1969,8 @@
if (mComponent->setOutputSurface(
outputPoolId,
producer,
- generation) != C2_OK) {
+ generation,
+ maxDequeueCount) != C2_OK) {
ALOGI("[%s] setSurface: component setOutputSurface failed", mName);
return INVALID_OPERATION;
}
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index ad28545..27e87e6 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -362,7 +362,10 @@
.limitTo(D::OUTPUT & D::READ));
add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
- .limitTo(D::ENCODER & D::OUTPUT));
+ .limitTo(D::ENCODER & D::CODED));
+ // Some audio decoders require bitrate information to be set
+ add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
+ .limitTo(D::AUDIO & D::DECODER & D::CODED));
// we also need to put the bitrate in the max bitrate field
add(ConfigMapper(KEY_MAX_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
.limitTo(D::ENCODER & D::READ & D::OUTPUT));
@@ -730,6 +733,17 @@
return C2Value();
}));
+ add(ConfigMapper(KEY_AAC_PROFILE, C2_PARAMKEY_PROFILE_LEVEL, "profile")
+ .limitTo(D::AUDIO & D::ENCODER & (D::CONFIG | D::PARAM))
+ .withMapper([mapper](C2Value v) -> C2Value {
+ C2Config::profile_t c2 = PROFILE_UNUSED;
+ int32_t sdk;
+ if (mapper && v.get(&sdk) && mapper->mapProfile(sdk, &c2)) {
+ return c2;
+ }
+ return PROFILE_UNUSED;
+ }));
+
// convert to dBFS and add default
add(ConfigMapper(KEY_AAC_DRC_TARGET_REFERENCE_LEVEL, C2_PARAMKEY_DRC_TARGET_REFERENCE_LEVEL, "value")
.limitTo(D::AUDIO & D::DECODER & (D::CONFIG | D::PARAM | D::READ))
@@ -1174,11 +1188,14 @@
bool changed = false;
if (domain & mInputDomain) {
- sp<AMessage> oldFormat = mInputFormat->dup();
+ sp<AMessage> oldFormat = mInputFormat;
+ mInputFormat = mInputFormat->dup(); // trigger format changed
mInputFormat->extend(getFormatForDomain(reflected, mInputDomain));
if (mInputFormat->countEntries() != oldFormat->countEntries()
|| mInputFormat->changesFrom(oldFormat)->countEntries() > 0) {
changed = true;
+ } else {
+ mInputFormat = oldFormat; // no change
}
}
if (domain & mOutputDomain) {
@@ -1319,6 +1336,14 @@
}
}
+ // Remove KEY_AAC_SBR_MODE from the SDK message if it is outside the supported range,
+ // as the SDK doesn't have a way to signal the default SBR mode based on profile and
+ // requires that the key be absent from the format to signal that.
+ int sbrMode;
+ if (msg->findInt32(KEY_AAC_SBR_MODE, &sbrMode) && (sbrMode < 0 || sbrMode > 2)) {
+ msg->removeEntryAt(msg->findEntryByName(KEY_AAC_SBR_MODE));
+ }
+
{ // convert color info
// move default color to color aspect if not read from the component
int32_t tmp;
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
index 9cec23f..af054c7 100644
--- a/media/codec2/sfplugin/FrameReassembler.cpp
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -143,6 +143,7 @@
if (buffer->size() > 0) {
mCurrentOrdinal.timestamp = timeUs;
+ mCurrentOrdinal.customOrdinal = timeUs;
}
size_t frameSizeBytes = mFrameSize.value() * mChannelCount * bytesPerSample();
@@ -219,6 +220,7 @@
++mCurrentOrdinal.frameIndex;
mCurrentOrdinal.timestamp += mFrameSize.value() * 1000000 / mSampleRate;
+ mCurrentOrdinal.customOrdinal = mCurrentOrdinal.timestamp;
mCurrentBlock.reset();
mWriteView.reset();
}
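(Worked example, for illustration only: with mFrameSize = 1024 samples and mSampleRate = 48000, each completed frame advances mCurrentOrdinal.timestamp by 1024 * 1000000 / 48000 = 21333 us, and customOrdinal now mirrors that value so that ordering by customOrdinal matches the reassembled timestamps.)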
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
index 50d600c..44ba78a 100644
--- a/media/codec2/sfplugin/InputSurfaceWrapper.h
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -79,6 +79,7 @@
float mFixedAdjustedFps = 0.0; // fixed fps via PTS manipulation
float mMinAdjustedFps = 0.0; // minimum fps via PTS manipulation
uint64_t mUsage = 0; // consumer usage
+ int mPriority = INT_MAX; // priority of queue thread (if any); INT_MAX for no-op
};
/**
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
index 0ee9056..bc9197c 100644
--- a/media/codec2/sfplugin/PipelineWatcher.cpp
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -95,6 +95,7 @@
}
void PipelineWatcher::flush() {
+ ALOGV("flush");
mFramesInPipeline.clear();
}
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 74e7ef1..2f4d6b1 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -33,11 +33,13 @@
"libcodec2_vndk",
"libcutils",
"liblog",
+ "libnativewindow",
"libstagefright_foundation",
"libutils",
],
static_libs: [
+ "libarect",
"libyuv_static",
],
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index a54af83..0966988 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -23,6 +23,7 @@
#include <list>
#include <mutex>
+#include <android/hardware_buffer.h>
#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/AUtils.h>
@@ -136,31 +137,56 @@
int width = view.crop().width;
int height = view.crop().height;
- if ((IsNV12(view) && IsI420(img)) || (IsI420(view) && IsNV12(img))) {
- // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
- if (IsNV12(view) && IsI420(img)) {
+ if (IsNV12(view)) {
+ if (IsNV12(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+ return OK;
+ } else if (IsNV21(img)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(img)) {
if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
return OK;
}
- } else {
+ }
+ } else if (IsNV21(view)) {
+ if (IsNV12(img)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+ return OK;
+ }
+ } else if (IsNV21(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+ return OK;
+ } else if (IsI420(img)) {
+ if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ }
+ } else if (IsI420(view)) {
+ if (IsNV12(img)) {
if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
return OK;
}
+ } else if (IsNV21(img)) {
+ if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+ return OK;
}
}
- if (IsNV12(view) && IsNV12(img)) {
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
- return OK;
- }
- if (IsI420(view) && IsI420(img)) {
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
- libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
- return OK;
- }
return _ImageCopy<true>(view, img, imgBase);
}
@@ -182,33 +208,56 @@
int32_t dst_stride_v = view.layout().planes[2].rowInc;
int width = view.crop().width;
int height = view.crop().height;
- if ((IsNV12(img) && IsI420(view)) || (IsI420(img) && IsNV12(view))) {
- // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
- if (IsNV12(img) && IsI420(view)) {
+ if (IsNV12(img)) {
+ if (IsNV12(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+ return OK;
+ } else if (IsNV21(view)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(view)) {
if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
return OK;
}
- } else {
+ }
+ } else if (IsNV21(img)) {
+ if (IsNV12(view)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+ return OK;
+ }
+ } else if (IsNV21(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+ return OK;
+ } else if (IsI420(view)) {
+ if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ }
+ } else if (IsI420(img)) {
+ if (IsNV12(view)) {
if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
return OK;
}
+ } else if (IsNV21(view)) {
+ if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+ return OK;
}
}
- if (IsNV12(img) && IsNV12(view)) {
- // For NV12, copy Y and UV plane
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
- return OK;
- }
- if (IsI420(img) && IsI420(view)) {
- // For I420, copy Y, U and V plane.
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
- libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
- return OK;
- }
return _ImageCopy<false>(view, img, imgBase);
}
@@ -250,6 +299,20 @@
&& layout.planes[layout.PLANE_V].offset == 1);
}
+bool IsNV21(const C2GraphicView &view) {
+ if (!IsYUV420(view)) {
+ return false;
+ }
+ const C2PlanarLayout &layout = view.layout();
+ return (layout.rootPlanes == 2
+ && layout.planes[layout.PLANE_U].colInc == 2
+ && layout.planes[layout.PLANE_U].rootIx == layout.PLANE_V
+ && layout.planes[layout.PLANE_U].offset == 1
+ && layout.planes[layout.PLANE_V].colInc == 2
+ && layout.planes[layout.PLANE_V].rootIx == layout.PLANE_V
+ && layout.planes[layout.PLANE_V].offset == 0);
+}
+
bool IsI420(const C2GraphicView &view) {
if (!IsYUV420(view)) {
return false;
@@ -283,7 +346,16 @@
}
return (img->mPlane[1].mColInc == 2
&& img->mPlane[2].mColInc == 2
- && (img->mPlane[2].mOffset - img->mPlane[1].mOffset == 1));
+ && (img->mPlane[2].mOffset == img->mPlane[1].mOffset + 1));
+}
+
+bool IsNV21(const MediaImage2 *img) {
+ if (!IsYUV420(img)) {
+ return false;
+ }
+ return (img->mPlane[1].mColInc == 2
+ && img->mPlane[2].mColInc == 2
+ && (img->mPlane[1].mOffset == img->mPlane[2].mOffset + 1));
}
bool IsI420(const MediaImage2 *img) {
@@ -295,6 +367,76 @@
&& img->mPlane[2].mOffset > img->mPlane[1].mOffset);
}
+FlexLayout GetYuv420FlexibleLayout() {
+ static FlexLayout sLayout = []{
+ AHardwareBuffer_Desc desc = {
+ 16, // width
+ 16, // height
+ 1, // layers
+ AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420,
+ AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ 0, // stride
+ 0, // rfu0
+ 0, // rfu1
+ };
+ AHardwareBuffer *buffer = nullptr;
+ int ret = AHardwareBuffer_allocate(&desc, &buffer);
+ if (ret != 0) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ class AutoCloser {
+ public:
+ AutoCloser(AHardwareBuffer *buffer) : mBuffer(buffer), mLocked(false) {}
+ ~AutoCloser() {
+ if (mLocked) {
+ AHardwareBuffer_unlock(mBuffer, nullptr);
+ }
+ AHardwareBuffer_release(mBuffer);
+ }
+
+ void setLocked() { mLocked = true; }
+
+ private:
+ AHardwareBuffer *mBuffer;
+ bool mLocked;
+ } autoCloser(buffer);
+ AHardwareBuffer_Planes planes;
+ ret = AHardwareBuffer_lockPlanes(
+ buffer,
+ AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ -1, // fence
+ nullptr, // rect
+ &planes);
+ if (ret != 0) {
+ AHardwareBuffer_release(buffer);
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ autoCloser.setLocked();
+ if (planes.planeCount != 3) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ if (planes.planes[0].pixelStride != 1) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ if (planes.planes[1].pixelStride == 1 && planes.planes[2].pixelStride == 1) {
+ return FLEX_LAYOUT_PLANAR;
+ }
+ if (planes.planes[1].pixelStride == 2 && planes.planes[2].pixelStride == 2) {
+ ssize_t uvDist =
+ static_cast<uint8_t *>(planes.planes[2].data) -
+ static_cast<uint8_t *>(planes.planes[1].data);
+ if (uvDist == 1) {
+ return FLEX_LAYOUT_SEMIPLANAR_UV;
+ } else if (uvDist == -1) {
+ return FLEX_LAYOUT_SEMIPLANAR_VU;
+ }
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ return FLEX_LAYOUT_UNKNOWN;
+ }();
+ return sLayout;
+}
+
MediaImage2 CreateYUV420PlanarMediaImage2(
uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
return MediaImage2 {
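A minimal usage sketch of the new helper, not part of the patch: a caller might branch on the detected flexible-YUV layout when picking a copy path. Only GetYuv420FlexibleLayout() and the FlexLayout constants come from this change; the function name below is a placeholder.

bool PrefersSemiplanarYuv() {
    switch (GetYuv420FlexibleLayout()) {
        case FLEX_LAYOUT_SEMIPLANAR_UV:   // NV12-style: interleaved Cb/Cr, Cb first
        case FLEX_LAYOUT_SEMIPLANAR_VU:   // NV21-style: interleaved Cr/Cb, Cr first
            return true;
        case FLEX_LAYOUT_PLANAR:          // I420-style: separate U and V planes
        case FLEX_LAYOUT_UNKNOWN:
        default:
            return false;
    }
}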
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index afadf00..af29e81 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -96,6 +96,11 @@
bool IsNV12(const C2GraphicView &view);
/**
+ * Returns true iff a view has a NV21 layout.
+ */
+bool IsNV21(const C2GraphicView &view);
+
+/**
* Returns true iff a view has a I420 layout.
*/
bool IsI420(const C2GraphicView &view);
@@ -111,10 +116,26 @@
bool IsNV12(const MediaImage2 *img);
/**
+ * Returns true iff a MediaImage2 has a NV21 layout.
+ */
+bool IsNV21(const MediaImage2 *img);
+
+/**
* Returns true iff a MediaImage2 has a I420 layout.
*/
bool IsI420(const MediaImage2 *img);
+enum FlexLayout {
+ FLEX_LAYOUT_UNKNOWN,
+ FLEX_LAYOUT_PLANAR,
+ FLEX_LAYOUT_SEMIPLANAR_UV,
+ FLEX_LAYOUT_SEMIPLANAR_VU,
+};
+/**
+ * Returns layout of YCBCR_420_888 pixel format.
+ */
+FlexLayout GetYuv420FlexibleLayout();
+
/**
* A raw memory block to use for internal buffers.
*
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 1390642..00bf84f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -311,6 +311,8 @@
{ C2Config::PCM_8, kAudioEncodingPcm8bit },
{ C2Config::PCM_16, kAudioEncodingPcm16bit },
{ C2Config::PCM_FLOAT, kAudioEncodingPcmFloat },
+ { C2Config::PCM_24, kAudioEncodingPcm24bitPacked },
+ { C2Config::PCM_32, kAudioEncodingPcm32bit },
};
ALookup<C2Config::level_t, int32_t> sVp9Levels = {
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index bee6b7f..4ffa3f1 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -42,7 +42,9 @@
* Usage mask that is passed through from gralloc to Codec 2.0 usage.
*/
PASSTHROUGH_USAGE_MASK =
- ~(GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_PROTECTED)
+ ~static_cast<uint64_t>(GRALLOC_USAGE_SW_READ_MASK |
+ GRALLOC_USAGE_SW_WRITE_MASK |
+ GRALLOC_USAGE_PROTECTED)
};
// verify that passthrough mask is within the platform mask
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 85623b8..a8528df 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -30,10 +30,15 @@
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>
+#include <android-base/properties.h>
+
namespace android {
namespace {
constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+ // max padding after ion/dmabuf allocations in bytes
+ constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}
/* size_t <=> int(lo), int(hi) conversions */
@@ -376,14 +381,34 @@
unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
int bufferFd = -1;
ion_user_handle_t buffer = -1;
- size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
+ // NOTE: read this value directly from the system property, as this code has to run on
+ // Android Q, but the sysprop was only introduced in Android S.
+ static size_t sPadding =
+ base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+ if (sPadding > SIZE_MAX - size) {
+ ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
+ // use ImplV2 as there is no allocation anyways
+ return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+ }
+
+ size_t allocSize = size + sPadding;
+ if (align) {
+ if (align - 1 > SIZE_MAX - allocSize) {
+ ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
+ size, sPadding, align);
+ // use ImplV2 as there is no allocation anyways
+ return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+ }
+ allocSize += align - 1;
+ allocSize &= ~(align - 1);
+ }
int ret;
if (ion_is_legacy(ionFd)) {
- ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
+ ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
"returned (%d) ; buffer = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, buffer);
+ ionFd, allocSize, align, heapMask, flags, ret, buffer);
if (ret == 0) {
// get buffer fd for native handle constructor
ret = ion_share(ionFd, buffer, &bufferFd);
@@ -392,15 +417,15 @@
buffer = -1;
}
}
- return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
+ return new Impl(ionFd, allocSize, bufferFd, buffer, id, ret);
} else {
- ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
+ ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
"returned (%d) ; bufferFd = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);
+ ionFd, allocSize, align, heapMask, flags, ret, bufferFd);
- return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
+ return new ImplV2(ionFd, allocSize, bufferFd, id, ret);
}
}
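The arithmetic above guards both the padding addition and the alignment round-up against size_t overflow before calling into ion. A standalone sketch of that check, assuming align is a power of two as ion expects (the function name is illustrative only):

#include <cstddef>
#include <cstdint>

// Writes the padded, aligned size to *out, or returns false if either step
// would wrap around SIZE_MAX.
static bool paddedAlignedSize(size_t size, size_t padding, size_t align, size_t *out) {
    if (padding > SIZE_MAX - size) {
        return false;                              // size + padding overflows
    }
    size_t allocSize = size + padding;
    if (align) {                                   // align == 0 means no alignment
        if (align - 1 > SIZE_MAX - allocSize) {
            return false;                          // rounding up would overflow
        }
        allocSize = (allocSize + align - 1) & ~(align - 1);
    }
    *out = allocSize;
    return true;
}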
diff --git a/media/codec2/vndk/C2DmaBufAllocator.cpp b/media/codec2/vndk/C2DmaBufAllocator.cpp
index 750aa31..6d8552a 100644
--- a/media/codec2/vndk/C2DmaBufAllocator.cpp
+++ b/media/codec2/vndk/C2DmaBufAllocator.cpp
@@ -16,11 +16,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "C2DmaBufAllocator"
+
#include <BufferAllocator/BufferAllocator.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2DmaBufAllocator.h>
#include <C2ErrnoUtils.h>
+
#include <linux/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup
@@ -28,14 +30,15 @@
#include <list>
-#ifdef __ANDROID_APEX__
#include <android-base/properties.h>
-#endif
namespace android {
namespace {
-constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+ constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+ // max padding after ion/dmabuf allocations in bytes
+ constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}
/* =========================== BUFFER HANDLE =========================== */
@@ -250,8 +253,11 @@
int ret = 0;
bufferFd = alloc.Alloc(heap_name, size, flags);
- if (bufferFd < 0) ret = bufferFd;
+ if (bufferFd < 0) {
+ ret = bufferFd;
+ }
+ // this may be a non-working handle if bufferFd is negative
mHandle = C2HandleBuf(bufferFd, size);
mId = id;
mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
@@ -360,8 +366,22 @@
return ret;
}
+ // TODO: should we pad before mapping usage?
+
+ // NOTE: read this value directly from the system property, as this code has to run on
+ // Android Q, but the sysprop was only introduced in Android S.
+ static size_t sPadding =
+ base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+ if (sPadding > SIZE_MAX - capacity) {
+ // size would overflow
+ ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
+ return C2_NO_MEMORY;
+ }
+
+ size_t allocSize = (size_t)capacity + sPadding;
+ // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
- mBufferAllocator, capacity, heap_name, flags, getId());
+ mBufferAllocator, allocSize, heap_name, flags, getId());
ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
diff --git a/media/codec2/vndk/include/C2SurfaceSyncObj.h b/media/codec2/vndk/include/C2SurfaceSyncObj.h
index 16e9a9d..ac87fe4 100644
--- a/media/codec2/vndk/include/C2SurfaceSyncObj.h
+++ b/media/codec2/vndk/include/C2SurfaceSyncObj.h
@@ -53,7 +53,7 @@
* \param maxDequeueCount Initial value of # of max dequeued buffer count
* \param curDequeueCount Initial value of # of current dequeued buffer count
*/
- void setInitialDequeueCount(int32_t maxDequeueCount, int32_t curDequeueCount);
+ void setInitialDequeueCountLocked(int32_t maxDequeueCount, int32_t curDequeueCount);
/**
* Get a waitId which will be used to implement fence.
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 2944925..169de0c 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -316,12 +316,15 @@
}
return C2_BLOCKING;
}
+ syncVar->notifyDequeuedLocked();
+ syncVar->unlock();
c2Status = dequeueBuffer(width, height, format, androidUsage,
&slot, &bufferNeedsReallocation, &fence);
- if (c2Status == C2_OK) {
- syncVar->notifyDequeuedLocked();
+ if (c2Status != C2_OK) {
+ syncVar->lock();
+ syncVar->notifyQueuedLocked();
+ syncVar->unlock();
}
- syncVar->unlock();
} else {
c2Status = dequeueBuffer(width, height, format, usage,
&slot, &bufferNeedsReallocation, &fence);
@@ -789,7 +792,7 @@
sp<GraphicBuffer> newBuffer = new GraphicBuffer(
graphicBuffer->handle, GraphicBuffer::CLONE_HANDLE,
graphicBuffer->width, graphicBuffer->height, graphicBuffer->format,
- graphicBuffer->layerCount, toUsage, graphicBuffer->stride);
+ graphicBuffer->layerCount, toUsage | graphicBuffer->getUsage(), graphicBuffer->stride);
if (newBuffer->initCheck() == android::NO_ERROR) {
graphicBuffer = std::move(newBuffer);
} else {
diff --git a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
index 587992e..e55bdc0 100644
--- a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
+++ b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
@@ -157,12 +157,10 @@
return 0;
}
-void C2SyncVariables::setInitialDequeueCount(
+void C2SyncVariables::setInitialDequeueCountLocked(
int32_t maxDequeueCount, int32_t curDequeueCount) {
- lock();
mMaxDequeueCount = maxDequeueCount;
mCurDequeueCount = curDequeueCount;
- unlock();
}
uint32_t C2SyncVariables::getWaitIdLocked() {
diff --git a/media/codecs/m4v_h263/dec/test/Android.bp b/media/codecs/m4v_h263/dec/test/Android.bp
index 9459ed1..6eed66f 100644
--- a/media/codecs/m4v_h263/dec/test/Android.bp
+++ b/media/codecs/m4v_h263/dec/test/Android.bp
@@ -28,7 +28,24 @@
cc_test {
name: "Mpeg4H263DecoderTest",
gtest: true,
- test_suites: ["device-tests"],
+
+ test_suites: [
+ "device-tests",
+ "mts",
+ ],
+
+ // Support multilib variants (using different suffix per sub-architecture), which is needed on
+ // build targets with secondary architectures, as the MTS test suite packaging logic flattens
+ // all test artifacts into a single `testcases` directory.
+ compile_multilib: "both",
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
srcs: [
"Mpeg4H263DecoderTest.cpp",
diff --git a/media/codecs/m4v_h263/dec/test/AndroidTest.xml b/media/codecs/m4v_h263/dec/test/AndroidTest.xml
index f572b0c..8bb4d1c 100755
--- a/media/codecs/m4v_h263/dec/test/AndroidTest.xml
+++ b/media/codecs/m4v_h263/dec/test/AndroidTest.xml
@@ -15,9 +15,10 @@
-->
<configuration description="Test module config for Mpeg4H263 Decoder unit tests">
<option name="test-suite-tag" value="Mpeg4H263DecoderTest" />
- <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
<option name="cleanup" value="true" />
<option name="push" value="Mpeg4H263DecoderTest->/data/local/tmp/Mpeg4H263DecoderTest" />
+ <option name="append-bitness" value="true" />
<option name="push-file"
key="https://storage.googleapis.com/android_media/frameworks/av/media/libstagefright/codecs/m4v_h263/dec/test/Mpeg4H263Decoder-1.1.zip?unzip=true"
value="/data/local/tmp/Mpeg4H263DecoderTestRes/" />
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 314a822..416884e 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -62,6 +62,16 @@
#define ALAC_SPECIFIC_INFO_SIZE (36)
+// TODO: Remove the defines once mainline media is built against NDK >= 31.
+// The mp4 extractor is part of mainline and builds against NDK 29 as of
+// writing. These keys are available only from NDK 31:
+#define AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION \
+ "mpegh-profile-level-indication"
+#define AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT \
+ "mpegh-reference-channel-layout"
+#define AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS \
+ "mpegh-compatible-sets"
+
namespace android {
enum {
@@ -139,6 +149,7 @@
bool mIsHEVC;
bool mIsDolbyVision;
bool mIsAC4;
+ bool mIsMpegH = false;
bool mIsPcm;
size_t mNALLengthSize;
@@ -378,6 +389,10 @@
case FOURCC(".mp3"):
case 0x6D730055: // "ms U" mp3 audio
return MEDIA_MIMETYPE_AUDIO_MPEG;
+ case FOURCC("mha1"):
+ return MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
+ case FOURCC("mhm1"):
+ return MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -1778,6 +1793,8 @@
case FOURCC("fLaC"):
case FOURCC(".mp3"):
case 0x6D730055: // "ms U" mp3 audio
+ case FOURCC("mha1"):
+ case FOURCC("mhm1"):
{
if (mIsQT && depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
@@ -1977,7 +1994,94 @@
}
break;
}
+ case FOURCC("mhaC"):
+ {
+ // See ISO_IEC_23008-3;2019 MHADecoderConfigurationRecord
+ constexpr uint32_t mhac_header_size = 4 /* size */ + 4 /* boxtype 'mhaC' */
+ + 1 /* configurationVersion */ + 1 /* mpegh3daProfileLevelIndication */
+ + 1 /* referenceChannelLayout */ + 2 /* mpegh3daConfigLength */;
+ uint8_t mhac_header[mhac_header_size];
+ off64_t data_offset = *offset;
+ if (chunk_size < sizeof(mhac_header)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(data_offset, mhac_header, sizeof(mhac_header))
+ < (ssize_t)sizeof(mhac_header)) {
+ return ERROR_IO;
+ }
+
+ // get mpegh3daProfileLevelIndication
+ const uint32_t mpegh3daProfileLevelIndication = mhac_header[9];
+ AMediaFormat_setInt32(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION,
+ mpegh3daProfileLevelIndication);
+
+ // get referenceChannelLayout
+ const uint32_t referenceChannelLayout = mhac_header[10];
+ AMediaFormat_setInt32(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT,
+ referenceChannelLayout);
+
+ // get mpegh3daConfigLength
+ const uint32_t mhac_config_size = U16_AT(&mhac_header[11]);
+ if (chunk_size != sizeof(mhac_header) + mhac_config_size) {
+ return ERROR_MALFORMED;
+ }
+
+ data_offset += sizeof(mhac_header);
+ uint8_t mhac_config[mhac_config_size];
+ if (mDataSource->readAt(data_offset, mhac_config, sizeof(mhac_config))
+ < (ssize_t)sizeof(mhac_config)) {
+ return ERROR_IO;
+ }
+
+ AMediaFormat_setBuffer(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_CSD_0, mhac_config, sizeof(mhac_config));
+ data_offset += sizeof(mhac_config);
+ *offset = data_offset;
+ break;
+ }
+ case FOURCC("mhaP"):
+ {
+ // FDAmd_2 of ISO_IEC_23008-3;2019 MHAProfileAndLevelCompatibilitySetBox
+ constexpr uint32_t mhap_header_size = 4 /* size */ + 4 /* boxtype 'mhaP' */
+ + 1 /* numCompatibleSets */;
+
+ uint8_t mhap_header[mhap_header_size];
+ off64_t data_offset = *offset;
+
+ if (chunk_size < (ssize_t)mhap_header_size) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(data_offset, mhap_header, sizeof(mhap_header))
+ < (ssize_t)sizeof(mhap_header)) {
+ return ERROR_IO;
+ }
+
+ // mhap_compatible_sets_size = numCompatibleSets * sizeof(uint8_t)
+ const uint32_t mhap_compatible_sets_size = mhap_header[8];
+ if (chunk_size != sizeof(mhap_header) + mhap_compatible_sets_size) {
+ return ERROR_MALFORMED;
+ }
+
+ data_offset += sizeof(mhap_header);
+ uint8_t mhap_compatible_sets[mhap_compatible_sets_size];
+ if (mDataSource->readAt(
+ data_offset, mhap_compatible_sets, sizeof(mhap_compatible_sets))
+ < (ssize_t)sizeof(mhap_compatible_sets)) {
+ return ERROR_IO;
+ }
+
+ AMediaFormat_setBuffer(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS,
+ mhap_compatible_sets, sizeof(mhap_compatible_sets));
+ data_offset += sizeof(mhap_compatible_sets);
+ *offset = data_offset;
+ break;
+ }
case FOURCC("mp4v"):
case FOURCC("encv"):
case FOURCC("s263"):
@@ -2345,7 +2449,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_ESDS, &buffer[4], chunk_data_size - 4);
if (mPath.size() >= 2
@@ -2427,7 +2531,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_AVC, buffer.get(), chunk_data_size);
break;
@@ -2449,7 +2553,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_HEVC, buffer.get(), chunk_data_size);
*offset += chunk_size;
@@ -4021,13 +4125,13 @@
// custom genre string
buffer[size] = '\0';
- AMediaFormat_setString(mFileMetaData,
+ AMediaFormat_setString(mFileMetaData,
metadataKey, (const char *)buffer + 8);
}
} else {
buffer[size] = '\0';
- AMediaFormat_setString(mFileMetaData,
+ AMediaFormat_setString(mFileMetaData,
metadataKey, (const char *)buffer + 8);
}
}
@@ -4568,6 +4672,9 @@
if (objectTypeIndication == 0x6B || objectTypeIndication == 0x69) {
// mp3 audio
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
AMediaFormat_setString(mLastTrack->meta,AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
return OK;
}
@@ -4658,6 +4765,10 @@
if (offset >= csd_size || csd[offset] != 0x01) {
return ERROR_MALFORMED;
}
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
// formerly kKeyVorbisInfo
AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_0, &csd[offset], len1);
@@ -4994,6 +5105,8 @@
bool success = AMediaFormat_getString(mFormat, AMEDIAFORMAT_KEY_MIME, &mime);
CHECK(success);
+ mIsMpegH = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1);
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
@@ -6065,10 +6178,11 @@
}
uint32_t syncSampleIndex = sampleIndex;
- // assume every non-USAC audio sample is a sync sample. This works around
+ // assume every non-USAC/non-MPEGH audio sample is a sync sample.
+ // This works around
// seek issues with files that were incorrectly written with an
// empty or single-sample stss block for the audio track
- if (err == OK && (!mIsAudio || mIsUsac)) {
+ if (err == OK && (!mIsAudio || mIsUsac || mIsMpegH)) {
err = mSampleTable->findSyncSampleNear(
sampleIndex, &syncSampleIndex, findFlags);
}
@@ -6187,9 +6301,13 @@
if (newBuffer) {
if (mIsPcm) {
// The twos' PCM block reader assumes that all samples have the same size.
-
- uint32_t samplesToRead = mSampleTable->getLastSampleIndexInChunk()
- - mCurrentSampleIndex + 1;
+ uint32_t lastSampleIndexInChunk = mSampleTable->getLastSampleIndexInChunk();
+ if (lastSampleIndexInChunk < mCurrentSampleIndex) {
+ mBuffer->release();
+ mBuffer = nullptr;
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ uint32_t samplesToRead = lastSampleIndexInChunk - mCurrentSampleIndex + 1;
if (samplesToRead > kMaxPcmFrameSize) {
samplesToRead = kMaxPcmFrameSize;
}
@@ -6198,13 +6316,17 @@
samplesToRead, size, mCurrentSampleIndex,
mSampleTable->getLastSampleIndexInChunk());
- size_t totalSize = samplesToRead * size;
+ size_t totalSize = samplesToRead * size;
+ if (mBuffer->size() < totalSize) {
+ mBuffer->release();
+ mBuffer = nullptr;
+ return AMEDIA_ERROR_UNKNOWN;
+ }
uint8_t* buf = (uint8_t *)mBuffer->data();
ssize_t bytesRead = mDataSource->readAt(offset, buf, totalSize);
if (bytesRead < (ssize_t)totalSize) {
mBuffer->release();
mBuffer = NULL;
-
return AMEDIA_ERROR_IO;
}
@@ -6258,7 +6380,19 @@
if (isSyncSample) {
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
-
+
+ AMediaFormat_setInt64(
+ meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/,
+ offset);
+
+ if (mSampleTable != nullptr &&
+ mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+ AMediaFormat_setInt64(
+ meta,
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ mSampleTable->getLastSampleIndexInChunk());
+ }
+
++mCurrentSampleIndex;
}
}
@@ -6408,6 +6542,17 @@
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
+ AMediaFormat_setInt64(
+ meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/, offset);
+
+ if (mSampleTable != nullptr &&
+ mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+ AMediaFormat_setInt64(
+ meta,
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ mSampleTable->getLastSampleIndexInChunk());
+ }
+
++mCurrentSampleIndex;
*out = mBuffer;
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index 901b29d..9e94587 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -440,19 +440,22 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
- int64_t pos = 0;
-
+ int64_t pos;
+ int64_t sampleNumber;
+ bool overflowed = __builtin_mul_overflow(seekTimeUs, mSampleRate, &sampleNumber);
+ sampleNumber /= 1000000;
if (mWaveFormat == WAVE_FORMAT_MSGSM) {
// 65 bytes decode to 320 8kHz samples
- int64_t samplenumber = (seekTimeUs * mSampleRate) / 1000000;
- int64_t framenumber = samplenumber / 320;
- pos = framenumber * 65;
+ pos = sampleNumber / 320 * 65;
} else {
- pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
+ int64_t bytesPerFrame;
+ overflowed |= __builtin_mul_overflow(mNumChannels, mBitsPerSample >> 3, &bytesPerFrame);
+ overflowed |= __builtin_mul_overflow(bytesPerFrame, sampleNumber, &pos);
}
- if (pos > (off64_t)mSize) {
- pos = mSize;
+ if (overflowed) {
+ return AMEDIA_ERROR_MALFORMED;
}
+ pos = std::clamp(pos, (int64_t) 0, (int64_t) mSize);
mCurrentPos = pos + mOffset;
}
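(Worked example, for illustration only: seeking to 1 s in an 8 kHz MSGSM stream gives sampleNumber = 8000 and pos = 8000 / 320 * 65 = 1625 bytes; for 16-bit stereo PCM at the same rate, bytesPerFrame = 4 and pos = 32000 bytes. If any multiplication overflows, the seek now fails with AMEDIA_ERROR_MALFORMED instead of computing a bogus offset; otherwise pos is clamped to [0, mSize].)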
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 22cf254..3333925 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -74,8 +74,9 @@
* The nominal range of the data is [-1.0f, 1.0f).
* Values outside that range may be clipped.
*
- * See also 'floatData' at
- * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+ * See also the float data in
+ * <a href="/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)">
+ * write(float[], int, int, int)</a>.
*/
AAUDIO_FORMAT_PCM_FLOAT,
@@ -196,21 +197,69 @@
};
typedef int32_t aaudio_result_t;
+/**
+ * AAudio stream states. For details, refer to
+ * <a href="/ndk/guides/audio/aaudio/aaudio#using-streams">Using an Audio Stream</a>
+ */
enum
{
+
+ /**
+ * The stream is created but not initialized yet.
+ */
AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+ /**
+ * The stream is in an unrecognized state.
+ */
AAUDIO_STREAM_STATE_UNKNOWN,
+
+ /**
+ * The stream is open and ready to use.
+ */
AAUDIO_STREAM_STATE_OPEN,
+ /**
+ * The stream is just starting up.
+ */
AAUDIO_STREAM_STATE_STARTING,
+ /**
+ * The stream has started.
+ */
AAUDIO_STREAM_STATE_STARTED,
+ /**
+ * The stream is pausing.
+ */
AAUDIO_STREAM_STATE_PAUSING,
+ /**
+ * The stream has been paused; it can be restarted or flushed.
+ */
AAUDIO_STREAM_STATE_PAUSED,
+ /**
+ * The stream is being flushed.
+ */
AAUDIO_STREAM_STATE_FLUSHING,
+ /**
+ * The stream is flushed, ready to be restarted.
+ */
AAUDIO_STREAM_STATE_FLUSHED,
+ /**
+ * The stream is stopping.
+ */
AAUDIO_STREAM_STATE_STOPPING,
+ /**
+ * The stream has been stopped.
+ */
AAUDIO_STREAM_STATE_STOPPED,
+ /**
+ * The stream is closing.
+ */
AAUDIO_STREAM_STATE_CLOSING,
+ /**
+ * The stream has been closed.
+ */
AAUDIO_STREAM_STATE_CLOSED,
+ /**
+ * The stream is disconnected from the audio device.
+ */
AAUDIO_STREAM_STATE_DISCONNECTED
};
typedef int32_t aaudio_stream_state_t;
@@ -260,7 +309,8 @@
* This information is used by certain platforms or routing policies
* to make more refined volume or routing decisions.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 28.
@@ -361,7 +411,8 @@
* an audio book application) this information might be used by the audio framework to
* enforce audio focus.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 28.
@@ -441,7 +492,8 @@
/**
* Specifying if audio may or may not be captured by other apps or the system.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 29.
@@ -453,10 +505,11 @@
* For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
* AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
*
- * On {@link android.os.Build.VERSION_CODES#Q}, this means only {@link #AAUDIO_USAGE_MEDIA}
- * and {@link #AAUDIO_USAGE_GAME} may be captured.
+ * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES</a>,
+ * this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
*
- * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_ALL}.
+ * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
+ * ALLOW_CAPTURE_BY_ALL</a>.
*/
AAUDIO_ALLOW_CAPTURE_BY_ALL = 1,
/**
@@ -464,8 +517,9 @@
*
* System apps can capture for many purposes like accessibility, user guidance...
* but have strong restriction. See
- * {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_SYSTEM} for what the system apps
- * can do with the capture audio.
+ * <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_SYSTEM">
+ * ALLOW_CAPTURE_BY_SYSTEM</a>
+ * for what the system apps can do with the captured audio.
*/
AAUDIO_ALLOW_CAPTURE_BY_SYSTEM = 2,
/**
@@ -473,7 +527,8 @@
*
* It is encouraged to use {@link #AAUDIO_ALLOW_CAPTURE_BY_SYSTEM} instead of this value as system apps
* provide significant and useful features for the user (eg. accessibility).
- * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_NONE}.
+ * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_NONE">
+ * ALLOW_CAPTURE_BY_NONE</a>.
*/
AAUDIO_ALLOW_CAPTURE_BY_NONE = 3,
};
@@ -803,7 +858,9 @@
* The default is {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}.
*
* Note that an application can also set its global policy, in which case the most restrictive
- * policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}
+ * policy is always applied. See
+ * <a href="/reference/android/media/AudioManager#setAllowedCapturePolicy(int)">
+ * setAllowedCapturePolicy(int)</a>
*
* Available since API level 29.
*
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 5d311fc..f4a40a8 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -20,6 +20,7 @@
#include <algorithm>
#include <audio_utils/primitives.h>
#include <aaudio/AAudio.h>
+#include <media/MediaMetricsItem.h>
#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"
@@ -268,7 +269,7 @@
if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
- result = systemStopFromCallback();
+ result = systemStopInternal();
break;
}
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index b81e5e4..71bde90 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -19,6 +19,7 @@
#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>
#include "client/AudioStreamInternalPlay.h"
@@ -301,7 +302,7 @@
}
} else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
- result = systemStopFromCallback();
+ result = systemStopInternal();
break;
}
}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 53523c5..ef83c8e 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -59,6 +59,10 @@
if (!mMetricsId.empty()) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM)
+ .set(AMEDIAMETRICS_PROP_ENCODINGREQUESTED,
+ android::toString(mDeviceFormat).c_str())
+ .set(AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL,
+ AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
.record();
}
@@ -124,7 +128,12 @@
.set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
.set(AMEDIAMETRICS_PROP_SHARINGMODE,
- AudioGlobal_convertSharingModeToText(getSharingMode()));
+ AudioGlobal_convertSharingModeToText(getSharingMode()))
+ .set(AMEDIAMETRICS_PROP_BUFFERCAPACITYFRAMES, getBufferCapacity())
+ .set(AMEDIAMETRICS_PROP_BURSTFRAMES, getFramesPerBurst())
+ .set(AMEDIAMETRICS_PROP_DIRECTION,
+ AudioGlobal_convertDirectionToText(getDirection()));
+
if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
item.set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerBase->getPlayerIId());
}
@@ -143,13 +152,13 @@
}
aaudio_result_t AudioStream::systemStart() {
- std::lock_guard<std::mutex> lock(mStreamLock);
-
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
+
switch (getState()) {
// Is this a good time to start?
case AAUDIO_STREAM_STATE_OPEN:
@@ -187,7 +196,6 @@
}
aaudio_result_t AudioStream::systemPause() {
- std::lock_guard<std::mutex> lock(mStreamLock);
if (!isPauseSupported()) {
return AAUDIO_ERROR_UNIMPLEMENTED;
@@ -198,6 +206,7 @@
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
switch (getState()) {
// Proceed with pausing.
case AAUDIO_STREAM_STATE_STARTING:
@@ -242,12 +251,12 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("stream cannot be flushed from a callback!");
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
aaudio_result_t result = AAudio_isFlushAllowed(getState());
if (result != AAUDIO_OK) {
return result;
@@ -256,7 +265,7 @@
return requestFlush_l();
}
-aaudio_result_t AudioStream::systemStopFromCallback() {
+aaudio_result_t AudioStream::systemStopInternal() {
std::lock_guard<std::mutex> lock(mStreamLock);
aaudio_result_t result = safeStop_l();
if (result == AAUDIO_OK) {
@@ -267,17 +276,12 @@
}
aaudio_result_t AudioStream::systemStopFromApp() {
- std::lock_guard<std::mutex> lock(mStreamLock);
+ // This check can and should be done outside the lock.
if (collidesWithCallback()) {
ALOGE("stream cannot be stopped by calling from a callback!");
return AAUDIO_ERROR_INVALID_STATE;
}
- aaudio_result_t result = safeStop_l();
- if (result == AAUDIO_OK) {
- // We only call this for logging in "dumpsys audio". So ignore return code.
- (void) mPlayerBase->stopWithStatus();
- }
- return result;
+ return systemStopInternal();
}
aaudio_result_t AudioStream::safeStop_l() {
@@ -316,12 +320,12 @@
}
aaudio_result_t AudioStream::safeRelease() {
- // This may get temporarily unlocked in the MMAP release() when joining callback threads.
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
+ // This may get temporarily unlocked in the MMAP release() when joining callback threads.
+ std::lock_guard<std::mutex> lock(mStreamLock);
if (getState() == AAUDIO_STREAM_STATE_CLOSING) { // already released?
return AAUDIO_OK;
}
@@ -329,23 +333,36 @@
}
aaudio_result_t AudioStream::safeReleaseClose() {
- // This get temporarily unlocked in the MMAP release() when joining callback threads.
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
- releaseCloseFinal_l();
- return AAUDIO_OK;
+ return safeReleaseCloseInternal();
}
-aaudio_result_t AudioStream::safeReleaseCloseFromCallback() {
+aaudio_result_t AudioStream::safeReleaseCloseInternal() {
// This get temporarily unlocked in the MMAP release() when joining callback threads.
std::lock_guard<std::mutex> lock(mStreamLock);
releaseCloseFinal_l();
return AAUDIO_OK;
}
+void AudioStream::close_l() {
+ // Releasing the stream will set the state to CLOSING.
+ assert(getState() == AAUDIO_STREAM_STATE_CLOSING);
+ // setState() prevents a transition from CLOSING to any state other than CLOSED.
+ // State is checked by destructor.
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+
+ if (!mMetricsId.empty()) {
+ android::mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_FRAMESTRANSFERRED,
+ getDirection() == AAUDIO_DIRECTION_INPUT ? getFramesWritten()
+ : getFramesRead())
+ .record();
+ }
+}
+
void AudioStream::setState(aaudio_stream_state_t state) {
ALOGD("%s(s#%d) from %d to %d", __func__, getId(), mState, state);
if (state == mState) {
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 333e665..3930964 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -146,13 +146,7 @@
* Free any resources not already freed by release_l().
* Assume release_l() already called.
*/
- virtual void close_l() REQUIRES(mStreamLock) {
- // Releasing the stream will set the state to CLOSING.
- assert(getState() == AAUDIO_STREAM_STATE_CLOSING);
- // setState() prevents a transition from CLOSING to any state other than CLOSED.
- // State is checked by destructor.
- setState(AAUDIO_STREAM_STATE_CLOSED);
- }
+ virtual void close_l() REQUIRES(mStreamLock);
public:
// This is only used to identify a stream in the logs without
@@ -408,7 +402,7 @@
/**
* This is called internally when an app callback returns AAUDIO_CALLBACK_RESULT_STOP.
*/
- aaudio_result_t systemStopFromCallback();
+ aaudio_result_t systemStopInternal();
/**
* Safely RELEASE a stream after taking mStreamLock and checking
@@ -424,7 +418,7 @@
*/
aaudio_result_t safeReleaseClose();
- aaudio_result_t safeReleaseCloseFromCallback();
+ aaudio_result_t safeReleaseCloseInternal();
protected:
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index fdaa2ab..60eb73a 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -124,7 +124,7 @@
__func__, callbackResult);
}
audioBuffer->size = 0;
- systemStopFromCallback();
+ systemStopInternal();
// Disable the callback just in case the system keeps trying to call us.
mCallbackEnabled.store(false);
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 7733a04..e3ac6ff 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -308,11 +308,19 @@
}
void AudioStreamRecord::close_l() {
+ // The callbacks are normally joined in the AudioRecord destructor.
+ // But if another object has a reference to the AudioRecord then
+ // it will not get deleted here.
+ // So we should join callbacks explicitly before returning.
+ // Unlock around the join to avoid deadlocks if the callback tries to lock.
+ // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
+ mStreamLock.unlock();
+ mAudioRecord->stopAndJoinCallbacks();
+ mStreamLock.lock();
+
mAudioRecord.clear();
- // Do not close mFixedBlockWriter because a data callback
- // thread might still be running if someone else has a reference
- // to mAudioRecord.
- // It has a unique_ptr to its buffer so it will clean up by itself.
+ // Do not close mFixedBlockReader. It has a unique_ptr to its buffer
+ // so it will clean up by itself.
AudioStream::close_l();
}
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 142a85c..df97658 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -259,12 +259,18 @@
}
void AudioStreamTrack::close_l() {
- // Stop callbacks before deleting mFixedBlockReader memory.
+ // The callbacks are normally joined in the AudioTrack destructor.
+ // But if another object has a reference to the AudioTrack then
+ // it will not get deleted here.
+ // So we should join callbacks explicitly before returning.
+ // Unlock around the join to avoid deadlocks if the callback tries to lock.
+ // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
+ mStreamLock.unlock();
+ mAudioTrack->stopAndJoinCallbacks();
+ mStreamLock.lock();
mAudioTrack.clear();
- // Do not close mFixedBlockReader because a data callback
- // thread might still be running if someone else has a reference
- // to mAudioRecord.
- // It has a unique_ptr to its buffer so it will clean up by itself.
+ // Do not close mFixedBlockReader. It has a unique_ptr to its buffer
+ // so it will clean up by itself.
AudioStream::close_l();
}
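A minimal sketch of the unlock-around-join pattern both close_l() comments describe, using plain std::mutex/std::thread rather than AAudio types; every name here is a placeholder, not an AAudio API, and it assumes a single closer as the real code does:

#include <mutex>
#include <thread>

class StreamLike {
public:
    void close() {
        std::unique_lock<std::mutex> lock(mLock);
        if (!mCallbackThread.joinable()) {
            return;                        // nothing to join
        }
        // Drop the lock while joining: if the callback re-enters and tries to
        // take mLock (for example after returning a "stop" result), holding it
        // here would deadlock against the join.
        lock.unlock();
        mCallbackThread.join();
        lock.lock();
        // Safe to tear down callback-owned state now; no callback is running.
    }

private:
    std::mutex mLock;
    std::thread mCallbackThread;
};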
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 62c9b46..98e9727 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -209,9 +209,9 @@
}
cc_test {
- name: "test_stop_hang",
+ name: "test_callback_race",
defaults: ["libaaudio_tests_defaults"],
- srcs: ["test_stop_hang.cpp"],
+ srcs: ["test_callback_race.cpp"],
shared_libs: [
"libaaudio",
"libbinder",
@@ -250,3 +250,16 @@
"libutils",
],
}
+
+
+cc_test {
+ name: "test_disconnect_race",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_disconnect_race.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_callback_race.cpp b/media/libaaudio/tests/test_callback_race.cpp
new file mode 100644
index 0000000..843d5d7
--- /dev/null
+++ b/media/libaaudio/tests/test_callback_race.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test whether the callback is joined before the close finishes.
+ *
+ * Start a stream with a callback.
+ * The callback just sleeps for a long time.
+ * While the callback is sleeping, close() the stream from the main thread.
+ * Then check to make sure the callback was joined before the close() returns.
+ *
+ * This can hang if there are deadlocks. So make sure you get a PASSED result.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+
+#include <aaudio/AAudio.h>
+
+// Sleep long enough that the foreground has a chance to call close.
+static constexpr int kCallbackSleepMicros = 600 * 1000;
+
+class AudioEngine {
+public:
+
+ // Check for a crash or late callback if we close without stopping.
+ void checkCloseJoins(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode,
+ aaudio_data_callback_result_t callbackResult) {
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ mCallbackResult = callbackResult;
+ startStreamForStall(direction, perfMode);
+ // When the callback starts it will go to sleep.
+ waitForCallbackToStart();
+
+ printf("call AAudioStream_close()\n");
+ ASSERT_FALSE(mCallbackFinished); // Still sleeping?
+ aaudio_result_t result = AAudioStream_close(mStream); // May hang here!
+ ASSERT_TRUE(mCallbackFinished);
+ ASSERT_EQ(AAUDIO_OK, result);
+ printf("AAudioStream_close() returned %d\n", result);
+
+ ASSERT_EQ(AAUDIO_OK, mError.load());
+ // Did calling stop() from callback fail? It should have.
+ ASSERT_NE(AAUDIO_OK, mStopResult.load());
+ }
+
+private:
+ void startStreamForStall(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode) {
+ AAudioStreamBuilder* builder = nullptr;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&builder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDirection(builder, direction);
+ AAudioStreamBuilder_setPerformanceMode(builder, perfMode);
+ AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+ AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(builder, &mStream);
+ AAudioStreamBuilder_delete(builder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Check to see what kind of stream we actually got.
+ int32_t deviceId = AAudioStream_getDeviceId(mStream);
+ aaudio_performance_mode_t
+ actualPerfMode = AAudioStream_getPerformanceMode(mStream);
+ printf("-------- opened: deviceId = %3d, perfMode = %d\n",
+ deviceId,
+ actualPerfMode);
+
+ // Start stream.
+ result = AAudioStream_requestStart(mStream);
+ ASSERT_EQ(AAUDIO_OK, result);
+ }
+
+ void waitForCallbackToStart() {
+ // Wait for callback to say it has been called.
+ int countDownMillis = 2000;
+ constexpr int countDownPeriodMillis = 50;
+ while (!mCallbackStarted && countDownMillis > 0) {
+ printf("Waiting for callback to start, %d\n", countDownMillis);
+ usleep(countDownPeriodMillis * 1000);
+ countDownMillis -= countDownPeriodMillis;
+ }
+ ASSERT_LT(0, countDownMillis);
+ ASSERT_TRUE(mCallbackStarted);
+ }
+
+// Callback function that fills the audio output buffer.
+ static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void * /*audioData */,
+ int32_t /* numFrames */
+ ) {
+ AudioEngine* engine = (AudioEngine*) userData;
+ engine->mCallbackStarted = true;
+ usleep(kCallbackSleepMicros);
+ // It is illegal to call stop() from the callback. It should
+ // return an error and not hang.
+ engine->mStopResult = AAudioStream_requestStop(stream);
+ engine->mCallbackFinished = true;
+ return engine->mCallbackResult;
+ }
+
+ static void s_myErrorCallbackProc(
+ AAudioStream * /* stream */,
+ void *userData,
+ aaudio_result_t error) {
+ AudioEngine *engine = (AudioEngine *)userData;
+ engine->mError = error;
+ }
+
+ AAudioStream* mStream = nullptr;
+
+ std::atomic<aaudio_result_t> mError{AAUDIO_OK}; // written by error callback
+ std::atomic<bool> mCallbackStarted{false}; // written by data callback
+ std::atomic<bool> mCallbackFinished{false}; // written by data callback
+ std::atomic<aaudio_data_callback_result_t> mCallbackResult{AAUDIO_CALLBACK_RESULT_CONTINUE};
+ std::atomic<aaudio_result_t> mStopResult{AAUDIO_OK};
+};
+
+/*********************************************************************/
+// Tell the callback to return AAUDIO_CALLBACK_RESULT_CONTINUE.
+
+TEST(test_close_timing, aaudio_close_joins_input_none) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_none) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_input_lowlat) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_lowlat) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+/*********************************************************************/
+// Tell the callback to return AAUDIO_CALLBACK_RESULT_STOP.
+
+TEST(test_close_timing, aaudio_close_joins_input_lowlat_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_lowlat_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_none_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_input_none_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
diff --git a/media/libaaudio/tests/test_disconnect_race.cpp b/media/libaaudio/tests/test_disconnect_race.cpp
new file mode 100644
index 0000000..6dbe165
--- /dev/null
+++ b/media/libaaudio/tests/test_disconnect_race.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test whether an error callback is joined before the close finishes.
+ *
+ * Start a stream with a callback.
+ * The callback just sleeps for a long time.
+ * While the callback is sleeping, close() the stream from the main thread.
+ * Then check to make sure the callback was joined before the close() returns.
+ *
+ * This can hang if there are deadlocks. So make sure you get a PASSED result.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+// Sleep long enough that the foreground has a chance to call close.
+static constexpr int kCallbackSleepMillis = 1000;
+static constexpr int kPollSleepMillis = 100;
+
+static int sErrorCount = 0;
+
+#define MY_ASSERT_TRUE(statement) \
+ if (!(statement)) { \
+ printf("ERROR line:%d - " #statement "\n", __LINE__); \
+ sErrorCount++; \
+ return false; \
+ }
+
+#define MY_ASSERT_EQ(aa,bb) MY_ASSERT_TRUE(((aa) == (bb)))
+#define MY_ASSERT_NE(aa,bb) MY_ASSERT_TRUE(((aa) != (bb)))
+
+class AudioEngine {
+public:
+
+ // Check for a crash or late callback if we close without stopping.
+ bool checkCloseJoins(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode,
+ bool callStopFromCallback) {
+ mCallStopFromCallback = callStopFromCallback;
+
+ if (!startStreamForStall(direction, perfMode)) return false;
+
+ printf("--------------------------------------------------------\n");
+ printf("%s() - direction = %d, perfMode = %d, callStop = %d\n",
+ __func__, direction, perfMode, callStopFromCallback);
+
+ // When the callback starts it will go to sleep.
+ if (!waitForCallbackToStart()) return false;
+
+ printf("call AAudioStream_close()\n");
+ MY_ASSERT_TRUE(!mCallbackFinished); // Still sleeping?
+ aaudio_result_t result = AAudioStream_close(mStream); // May hang here!
+ if (mCallbackStarted) {
+ MY_ASSERT_TRUE(mCallbackFinished);
+ }
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+ printf("AAudioStream_close() returned %d\n", result);
+
+ MY_ASSERT_EQ(AAUDIO_ERROR_DISCONNECTED, mError.load());
+ if (mCallStopFromCallback) {
+ // Did calling stop() from callback fail? It should have.
+ MY_ASSERT_NE(AAUDIO_OK, mStopResult.load());
+ }
+
+ return true;
+ }
+
+private:
+ bool startStreamForStall(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode) {
+ AAudioStreamBuilder* builder = nullptr;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&builder);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDirection(builder, direction);
+ AAudioStreamBuilder_setPerformanceMode(builder, perfMode);
+ AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+ AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(builder, &mStream);
+ AAudioStreamBuilder_delete(builder);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ // Check to see what kind of stream we actually got.
+ int32_t deviceId = AAudioStream_getDeviceId(mStream);
+ aaudio_performance_mode_t
+ actualPerfMode = AAudioStream_getPerformanceMode(mStream);
+ printf("-------- opened: deviceId = %3d, perfMode = %d\n",
+ deviceId,
+ actualPerfMode);
+
+ // Start stream.
+ result = AAudioStream_requestStart(mStream);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ return true;
+ }
+
+ bool waitForCallbackToStart() {
+ // Wait for callback to say it has been called.
+ int countDown = 10 * 1000 / kPollSleepMillis;
+ while (!mCallbackStarted && countDown > 0) {
+ if ((countDown % 5) == 0) {
+ printf("===== Please PLUG or UNPLUG headphones! ======= %d\n", countDown);
+ }
+ usleep(kPollSleepMillis * 1000);
+ countDown--;
+ }
+ MY_ASSERT_TRUE(countDown > 0);
+ MY_ASSERT_TRUE(mCallbackStarted);
+ return true;
+ }
+
+// Callback function that fills the audio output buffer.
+ static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream * /* stream */,
+ void * /* userData */,
+ void * /* audioData */,
+ int32_t /* numFrames */
+ ) {
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+ static void s_myErrorCallbackProc(
+ AAudioStream * stream,
+ void *userData,
+ aaudio_result_t error) {
+ AudioEngine *engine = (AudioEngine *)userData;
+ engine->mError = error;
+ engine->mCallbackStarted = true;
+ usleep(kCallbackSleepMillis * 1000);
+ // It is illegal to call stop() from the callback. It should
+ // return an error and not hang.
+ if (engine->mCallStopFromCallback) {
+ engine->mStopResult = AAudioStream_requestStop(stream);
+ }
+ engine->mCallbackFinished = true;
+ }
+
+ AAudioStream* mStream = nullptr;
+
+ std::atomic<aaudio_result_t> mError{AAUDIO_OK}; // written by error callback
+ std::atomic<bool> mCallStopFromCallback{false};
+ std::atomic<bool> mCallbackStarted{false}; // written by error callback
+ std::atomic<bool> mCallbackFinished{false}; // written by error callback
+ std::atomic<aaudio_result_t> mStopResult{AAUDIO_OK};
+};
+
+int main(int, char **) {
+ // Parameters to test.
+ static aaudio_direction_t directions[] = {AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_DIRECTION_INPUT};
+ static aaudio_performance_mode_t perfModes[] =
+ {AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_PERFORMANCE_MODE_NONE};
+ static bool callStops[] = { false, true };
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Disconnect Race V1.0\n");
+ printf("\n");
+
+ for (auto callStop : callStops) {
+ for (auto direction : directions) {
+ for (auto perfMode : perfModes) {
+ AudioEngine engine;
+ engine.checkCloseJoins(direction, perfMode, callStop);
+ }
+ }
+ }
+
+ printf("Error Count = %d, %s\n", sErrorCount,
+ ((sErrorCount == 0) ? "PASS" : "FAIL"));
+}
diff --git a/media/libaaudio/tests/test_stop_hang.cpp b/media/libaaudio/tests/test_stop_hang.cpp
deleted file mode 100644
index 982ff4a..0000000
--- a/media/libaaudio/tests/test_stop_hang.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Return stop from the callback
- * and then close the stream immediately.
- */
-
-#include <atomic>
-#include <mutex>
-#include <stdio.h>
-#include <thread>
-#include <unistd.h>
-
-#include <aaudio/AAudio.h>
-
-#define DURATION_SECONDS 5
-
-struct AudioEngine {
- AAudioStreamBuilder *builder = nullptr;
- AAudioStream *stream = nullptr;
- std::thread *thread = nullptr;
-
- std::atomic<bool> started{false};
- std::mutex doneLock; // Use a mutex so we can sleep on it while join()ing.
- std::atomic<bool> done{false};
-
- aaudio_result_t join() {
- aaudio_result_t result = AAUDIO_ERROR_INVALID_STATE;
- if (stream != nullptr) {
- while (true) {
- {
- // Will block if the thread is running.
- // This mutex is used to close() immediately after the callback returns
- // and before the requestStop_l() is called.
- std::lock_guard<std::mutex> lock(doneLock);
- if (done) break;
- }
- printf("join() got mutex but stream not done!");
- usleep(10 * 1000); // sleep then check again
- }
- result = AAudioStream_close(stream);
- stream = nullptr;
- }
- return result;
- }
-};
-
-// Callback function that fills the audio output buffer.
-static aaudio_data_callback_result_t s_myDataCallbackProc(
- AAudioStream *stream,
- void *userData,
- void *audioData,
- int32_t numFrames
-) {
- (void) stream;
- (void) audioData;
- (void) numFrames;
- AudioEngine *engine = (struct AudioEngine *)userData;
- std::lock_guard<std::mutex> lock(engine->doneLock);
- engine->started = true;
- usleep(DURATION_SECONDS * 1000 * 1000); // Mimic SynthMark procedure.
- engine->done = true;
- return AAUDIO_CALLBACK_RESULT_STOP;
-}
-
-static void s_myErrorCallbackProc(
- AAudioStream *stream __unused,
- void *userData __unused,
- aaudio_result_t error) {
- printf("%s() - error = %d\n", __func__, error);
-}
-
-static aaudio_result_t s_OpenAudioStream(struct AudioEngine *engine) {
- // Use an AAudioStreamBuilder to contain requested parameters.
- aaudio_result_t result = AAudio_createStreamBuilder(&engine->builder);
- if (result != AAUDIO_OK) {
- printf("AAudio_createStreamBuilder returned %s",
- AAudio_convertResultToText(result));
- return result;
- }
-
- // Request stream properties.
- AAudioStreamBuilder_setPerformanceMode(engine->builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
- AAudioStreamBuilder_setDataCallback(engine->builder, s_myDataCallbackProc, engine);
- AAudioStreamBuilder_setErrorCallback(engine->builder, s_myErrorCallbackProc, engine);
-
- // Create an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(engine->builder, &engine->stream);
- if (result != AAUDIO_OK) {
- printf("AAudioStreamBuilder_openStream returned %s",
- AAudio_convertResultToText(result));
- return result;
- }
-
- return result;
-}
-
-int main(int argc, char **argv) {
- (void) argc;
- (void) argv;
- struct AudioEngine engine;
- aaudio_result_t result = AAUDIO_OK;
- int errorCount = 0;
-
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
-
- printf("Test Return Stop Hang V1.0\n");
-
- result = s_OpenAudioStream(&engine);
- if (result != AAUDIO_OK) {
- printf("s_OpenAudioStream returned %s\n",
- AAudio_convertResultToText(result));
- errorCount++;
- }
-
- // Check to see what kind of stream we actually got.
- int32_t deviceId = AAudioStream_getDeviceId(engine.stream);
- aaudio_performance_mode_t actualPerfMode = AAudioStream_getPerformanceMode(engine.stream);
- printf("-------- opened: deviceId = %3d, perfMode = %d\n", deviceId, actualPerfMode);
-
- // Start stream.
- result = AAudioStream_requestStart(engine.stream);
- printf("AAudioStream_requestStart() returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
- if (result != AAUDIO_OK) {
- errorCount++;
- } else {
- int counter = 0;
- while (!engine.started) {
- printf("Waiting for stream to start, %d\n", counter++);
- usleep(5 * 1000);
- }
- printf("You should see more messages %d seconds after this. If not then the test failed!\n",
- DURATION_SECONDS);
- result = engine.join(); // This might hang!
- AAudioStreamBuilder_delete(engine.builder);
- engine.builder = nullptr;
- }
-
- printf("aaudio result = %d = %s\n", result, AAudio_convertResultToText(result));
- printf("test %s\n", errorCount ? "FAILED" : "PASSED");
-
- return errorCount ? EXIT_FAILURE : EXIT_SUCCESS;
-}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 64a335a..19d68a0 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -348,6 +348,7 @@
"aidl/android/media/AudioUniqueIdUse.aidl",
"aidl/android/media/AudioUsage.aidl",
"aidl/android/media/AudioUuid.aidl",
+ "aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index e15ef3d..1a4bde9 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -181,21 +181,9 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
.record();
+ stopAndJoinCallbacks(); // checks mStatus
+
if (mStatus == NO_ERROR) {
- // Make sure that callback function exits in the case where
- // it is looping on buffer empty condition in obtainBuffer().
- // Otherwise the callback thread will never exit.
- stop();
- if (mAudioRecordThread != 0) {
- mProxy->interrupt();
- mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
- mAudioRecordThread->requestExitAndWait();
- mAudioRecordThread.clear();
- }
- // No lock here: worst case we remove a NULL callback which will be a nop
- if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
- }
IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
mAudioRecord.clear();
mCblkMemory.clear();
@@ -208,6 +196,27 @@
}
}
+void AudioRecord::stopAndJoinCallbacks() {
+ // Prevent nullptr crash if it did not open properly.
+ if (mStatus != NO_ERROR) return;
+
+ // Make sure that callback function exits in the case where
+ // it is looping on buffer empty condition in obtainBuffer().
+ // Otherwise the callback thread will never exit.
+ stop();
+ if (mAudioRecordThread != 0) {
+ mProxy->interrupt();
+ mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
+ mAudioRecordThread->requestExitAndWait();
+ mAudioRecordThread.clear();
+ }
+ // No lock here: worst case we remove a NULL callback which will be a nop
+ if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+ // This may not stop all of these device callbacks!
+ // TODO: Add some sort of protection.
+ AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
+ }
+}
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
@@ -226,7 +235,8 @@
const audio_attributes_t* pAttributes,
audio_port_handle_t selectedDeviceId,
audio_microphone_direction_t selectedMicDirection,
- float microphoneFieldDimension)
+ float microphoneFieldDimension,
+ int32_t maxSharedAudioHistoryMs)
{
status_t status = NO_ERROR;
uint32_t channelCount;
@@ -259,6 +269,7 @@
mSelectedDeviceId = selectedDeviceId;
mSelectedMicDirection = selectedMicDirection;
mSelectedMicFieldDimension = microphoneFieldDimension;
+ mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -807,6 +818,7 @@
input.selectedDeviceId = mSelectedDeviceId;
input.sessionId = mSessionId;
originalSessionId = mSessionId;
+ input.maxSharedAudioHistoryMs = mMaxSharedAudioHistoryMs;
do {
media::CreateRecordResponse response;
@@ -828,7 +840,7 @@
usleep((20 + rand() % 30) * 10000);
} while (1);
- ALOG_ASSERT(record != 0);
+ ALOG_ASSERT(output.audioRecord != 0);
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
@@ -916,6 +928,10 @@
AudioSystem::addAudioDeviceCallback(this, output.inputId, output.portId);
}
+ if (!mSharedAudioPackageName.empty()) {
+ mAudioRecord->shareAudioHistory(mSharedAudioPackageName, mSharedAudioStartMs);
+ }
+
mPortId = output.portId;
// We retain a copy of the I/O handle, but don't own the reference
mInput = output.inputId;
@@ -1569,7 +1585,7 @@
void AudioRecord::setLogSessionId(const char *logSessionId)
{
- AutoMutex lock(mLock);
+ AutoMutex lock(mLock);
if (logSessionId == nullptr) logSessionId = ""; // an empty string is an unset session id.
if (mLogSessionId == logSessionId) return;
@@ -1580,6 +1596,22 @@
.record();
}
+status_t AudioRecord::shareAudioHistory(const std::string& sharedPackageName,
+ int64_t sharedStartMs)
+{
+ AutoMutex lock(mLock);
+ if (mAudioRecord == 0) {
+ return NO_INIT;
+ }
+ status_t status = statusTFromBinderStatus(
+ mAudioRecord->shareAudioHistory(sharedPackageName, sharedStartMs));
+ if (status == NO_ERROR) {
+ mSharedAudioPackageName = sharedPackageName;
+ mSharedAudioStartMs = sharedStartMs;
+ }
+ return status;
+}
+
// =========================================================================
void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
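A minimal caller-side sketch of the new shareAudioHistory() path, assuming a privileged client that already holds an opened AudioRecord; the package name and start time are placeholders:

    sp<AudioRecord> record = /* opened with maxSharedAudioHistoryMs > 0 */;
    // Ask to share already-captured history with another package ("com.example.assistant"
    // is a hypothetical name). On success the request is cached in
    // mSharedAudioPackageName / mSharedAudioStartMs and replayed by createRecord_l()
    // whenever the server-side record is (re)created.
    status_t status = record->shareAudioHistory("com.example.assistant", 0 /* sharedStartMs */);
    if (status != NO_ERROR) {
        ALOGW("shareAudioHistory failed: %d", status);
    }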
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f476b7d..0bc592d 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -2258,6 +2258,15 @@
return NO_ERROR;
}
+status_t AudioSystem::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->setVibratorInfos(vibratorInfos);
+}
+
// ---------------------------------------------------------------------------
int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6c9e85c..1bc3baa 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -327,21 +327,9 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
.record();
+ stopAndJoinCallbacks(); // checks mStatus
+
if (mStatus == NO_ERROR) {
- // Make sure that callback function exits in the case where
- // it is looping on buffer full condition in obtainBuffer().
- // Otherwise the callback thread will never exit.
- stop();
- if (mAudioTrackThread != 0) {
- mProxy->interrupt();
- mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
- mAudioTrackThread->requestExitAndWait();
- mAudioTrackThread.clear();
- }
- // No lock here: worst case we remove a NULL callback which will be a nop
- if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
- }
IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
mAudioTrack.clear();
mCblkMemory.clear();
@@ -355,6 +343,29 @@
}
}
+void AudioTrack::stopAndJoinCallbacks() {
+ // Prevent nullptr crash if it did not open properly.
+ if (mStatus != NO_ERROR) return;
+
+ // Make sure that callback function exits in the case where
+ // it is looping on buffer full condition in obtainBuffer().
+ // Otherwise the callback thread will never exit.
+ stop();
+ if (mAudioTrackThread != 0) { // not thread safe
+ mProxy->interrupt();
+ mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
+ mAudioTrackThread->requestExitAndWait();
+ mAudioTrackThread.clear();
+ }
+ // No lock here: worst case we remove a NULL callback which will be a nop
+ if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+ // This may not stop all of these device callbacks!
+ // TODO: Add some sort of protection.
+ AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ mDeviceCallback.clear();
+ }
+}
+
status_t AudioTrack::set(
audio_stream_type_t streamType,
uint32_t sampleRate,
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 4103630..7656307 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -138,6 +138,8 @@
aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
+ aidl.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
+ convertIntegral<int32_t>(maxSharedAudioHistoryMs));
aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
@@ -151,10 +153,13 @@
IAudioFlinger::CreateRecordInput::fromAidl(
const media::CreateRecordRequest& aidl) {
IAudioFlinger::CreateRecordInput legacy;
- legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
+ legacy.attr = VALUE_OR_RETURN(
+ aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
+ legacy.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
+ convertIntegral<int32_t>(aidl.maxSharedAudioHistoryMs));
legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_input_flags_t_mask(aidl.flags));
legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
legacy.notificationFrameCount = VALUE_OR_RETURN(
@@ -733,6 +738,11 @@
return statusTFromBinderStatus(mDelegate->setAudioHalPids(pidsAidl));
}
+status_t AudioFlingerClientAdapter::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ return statusTFromBinderStatus(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
@@ -1174,4 +1184,9 @@
return Status::ok();
}
+Status AudioFlingerServerAdapter::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ return Status::fromStatusT(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
} // namespace android
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
new file mode 100644
index 0000000..f88fc3c
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * {@hide}
+ * A class for vibrator information. The information will be used in the HapticGenerator effect.
+ */
+parcelable AudioVibratorInfo {
+ int id;
+ float resonantFrequency;
+ float qFactor;
+}
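A hedged sketch of how the parcelable might be filled in and handed to the new AudioSystem entry point; the values are placeholders, and the generated C++ type is assumed to expose the AIDL fields as plain members (the usual AIDL C++ backend behavior):

    #include <android/media/AudioVibratorInfo.h>
    #include <media/AudioSystem.h>

    std::vector<android::media::AudioVibratorInfo> infos(1);
    infos[0].id = 0;                        // vibrator id
    infos[0].resonantFrequency = 150.0f;    // Hz
    infos[0].qFactor = 8.0f;                // dimensionless Q
    // Forwards to IAudioFlinger::setVibratorInfos(), which seeds HapticGenerator defaults.
    status_t status = android::AudioSystem::setVibratorInfos(infos);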
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 62007da..5b26d22 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -34,6 +34,7 @@
AudioClient clientInfo;
/** Interpreted as audio_unique_id_t. */
int riid;
+ int maxSharedAudioHistoryMs;
/** Bitmask, indexed by AudioInputFlags. */
int flags;
long frameCount;
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index e63f391..abbced5 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -23,6 +23,7 @@
import android.media.AudioStreamType;
import android.media.AudioUniqueIdUse;
import android.media.AudioUuid;
+import android.media.AudioVibratorInfo;
import android.media.CreateEffectRequest;
import android.media.CreateEffectResponse;
import android.media.CreateRecordRequest;
@@ -202,4 +203,8 @@
MicrophoneInfoData[] getMicrophones();
void setAudioHalPids(in int[] /* pid_t[] */ pids);
+
+ // Set vibrators' information.
+ // The values will be used to initialize HapticGenerator.
+ void setVibratorInfos(in AudioVibratorInfo[] vibratorInfos);
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index 1772653..44ef80b 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -48,4 +48,6 @@
/* Set the microphone zoom (for processing).
*/
void setPreferredMicrophoneFieldDimension(float zoom);
+
+ void shareAudioHistory(@utf8InCpp String sharedAudioPackageName, long sharedAudioStartMs);
}
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 82a29d4..9965e25 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -241,7 +241,8 @@
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
audio_microphone_direction_t
selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
- float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
+ int32_t maxSharedAudioHistoryMs = 0);
/* Result of constructing the AudioRecord. This must be checked for successful initialization
* before using any AudioRecord API (except for set()), because using
@@ -303,6 +304,19 @@
void stop();
bool stopped() const;
+ /* Call stop() and then wait for all of the callbacks to return.
+ * It is safe to call this if stop() or pause() has already been called.
+ *
+ * This function is called from the destructor. But since AudioRecord
+ * is ref counted, the destructor may be called later than desired.
+ * This can be called explicitly as part of closing an AudioRecord
+ * if you want to be certain that callbacks have completely finished.
+ *
+ * This is not thread safe and should only be called from one thread,
+ * ideally as the AudioRecord is being closed.
+ */
+ void stopAndJoinCallbacks();
+
/* Return the sink sample rate for this record track in Hz.
* If specified as zero in constructor or set(), this will be the source sample rate.
* Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
@@ -583,6 +597,10 @@
*/
void setLogSessionId(const char *logSessionId);
+
+ status_t shareAudioHistory(const std::string& sharedPackageName,
+ int64_t sharedStartMs);
+
/*
* Dumps the state of an audio record.
*/
@@ -766,6 +784,10 @@
audio_microphone_direction_t mSelectedMicDirection;
float mSelectedMicFieldDimension;
+ int32_t mMaxSharedAudioHistoryMs = 0;
+ std::string mSharedAudioPackageName = {};
+ int64_t mSharedAudioStartMs = 0;
+
private:
class MediaMetrics {
public:
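A small usage sketch of stopAndJoinCallbacks(), assuming the record was opened with a data callback; it mirrors what AudioStreamRecord::close_l() now does:

    // Other components may still hold sp<AudioRecord> references, so clear()
    // is not guaranteed to run the destructor (and therefore the implicit join).
    record->stopAndJoinCallbacks();   // returns only after the callback thread has exited
    record.clear();                   // safe: no data callback can still be running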
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index c63d29f..4c99dbd 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,9 +19,10 @@
#include <sys/types.h>
-#include <android/media/permission/Identity.h>
+#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerClient.h>
#include <android/media/BnAudioPolicyServiceClient.h>
+#include <android/media/permission/Identity.h>
#include <media/AidlConversionUtil.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
@@ -553,6 +554,8 @@
static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+ static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index d167c40..c293343 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -479,6 +479,19 @@
void stop();
bool stopped() const;
+ /* Call stop() and then wait for all of the callbacks to return.
+ * It is safe to call this if stop() or pause() has already been called.
+ *
+ * This function is called from the destructor. But since AudioTrack
+ * is ref counted, the destructor may be called later than desired.
+ * This can be called explicitly as part of closing an AudioTrack
+ * if you want to be certain that callbacks have completely finished.
+ *
+ * This is not thread safe and should only be called from one thread,
+ * ideally as the AudioTrack is being closed.
+ */
+ void stopAndJoinCallbacks();
+
/* Flush a stopped or paused track. All previously buffered data is discarded immediately.
* This has the effect of draining the buffers without mixing or output.
* Flush is intended for streaming mode, for example before switching to non-contiguous content.
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index efd7fed..3a5d164 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -35,6 +35,7 @@
#include <string>
#include <vector>
+#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerService.h>
#include <android/media/BpAudioFlingerService.h>
#include <android/media/permission/Identity.h>
@@ -129,6 +130,7 @@
AudioClient clientInfo;
media::permission::Identity identity;
audio_unique_id_t riid;
+ int32_t maxSharedAudioHistoryMs;
/* input/output */
audio_input_flags_t flags;
@@ -331,6 +333,11 @@
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids) = 0;
+
+ // Set vibrators' information.
+ // The values will be used to initialize HapticGenerator.
+ virtual status_t setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) = 0;
};
/**
@@ -422,6 +429,7 @@
size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
+ status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -504,6 +512,7 @@
GET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_getMasterBalance,
SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
+ SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
};
/**
@@ -605,6 +614,7 @@
Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
+ Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 03a0d86..ca4f663 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -354,7 +354,8 @@
return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
}
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+template <typename HalPort>
+status_t DeviceHalHidl::getAudioPortImpl(HalPort *port) {
if (mDevice == 0) return NO_INIT;
AudioPort hidlPort;
HidlUtils::audioPortFromHal(*port, &hidlPort);
@@ -370,31 +371,28 @@
return processReturn("getAudioPort", ret, retval);
}
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+ return getAudioPortImpl(port);
+}
+
status_t DeviceHalHidl::getAudioPort(struct audio_port_v7 *port) {
- if (mDevice == 0) return NO_INIT;
- status_t status = NO_ERROR;
#if MAJOR_VERSION >= 7
- AudioPort hidlPort;
- HidlUtils::audioPortFromHal(*port, &hidlPort);
- Result retval;
- Return<void> ret = mDevice->getAudioPort(
- hidlPort,
- [&](Result r, const AudioPort& p) {
- retval = r;
- if (retval == Result::OK) {
- HidlUtils::audioPortToHal(p, port);
- }
- });
- status = processReturn("getAudioPort", ret, retval);
+ return getAudioPortImpl(port);
#else
struct audio_port audioPort = {};
- audio_populate_audio_port(port, &audioPort);
- status = getAudioPort(&audioPort);
+ status_t result = NO_ERROR;
+ if (!audio_populate_audio_port(port, &audioPort)) {
+ ALOGE("Failed to populate legacy audio port from audio_port_v7");
+ result = BAD_VALUE;
+ }
+ status_t status = getAudioPort(&audioPort);
if (status == NO_ERROR) {
audio_populate_audio_port_v7(&audioPort, port);
+ } else {
+ result = status;
}
+ return result;
#endif
- return status;
}
status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index abd4ad5..2c847cf 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -131,6 +131,8 @@
// The destructor automatically closes the device.
virtual ~DeviceHalHidl();
+
+ template <typename HalPort> status_t getAudioPortImpl(HalPort *port);
};
} // namespace CPP_VERSION
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index aa9e477..af7dc1a 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -181,6 +181,12 @@
}
status_t DeviceHalLocal::getAudioPort(struct audio_port_v7 *port) {
+#if MAJOR_VERSION >= 7
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_2) {
+ // get_audio_port_v7 is mandatory if the legacy HAL supports this API version.
+ return mDev->get_audio_port_v7(mDev, port);
+ }
+#endif
struct audio_port audioPort = {};
audio_populate_audio_port(port, &audioPort);
status_t status = getAudioPort(&audioPort);
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 2a3e2b6..539a149 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -57,8 +57,7 @@
// Note: This assumes channel mask, format, and sample rate do not change after creation.
audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
if (/* mStreamPowerLog.isUserDebugOrEngBuild() && */
- StreamHalHidl::getAudioProperties(
- &config.sample_rate, &config.channel_mask, &config.format) == NO_ERROR) {
+ StreamHalHidl::getAudioProperties(&config) == NO_ERROR) {
mStreamPowerLog.init(config.sample_rate, config.channel_mask, config.format);
}
}
@@ -69,14 +68,6 @@
hardware::IPCThreadState::self()->flushCommands();
}
-// Note: this method will be removed
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *rate = config.sample_rate;
- return status;
-}
-
status_t StreamHalHidl::getBufferSize(size_t *size) {
if (!mStream) return NO_INIT;
status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
@@ -86,48 +77,28 @@
return status;
}
-// Note: this method will be removed
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *mask = config.channel_mask;
- return status;
-}
-
-// Note: this method will be removed
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *format = config.format;
- return status;
-}
-
-status_t StreamHalHidl::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+status_t StreamHalHidl::getAudioProperties(audio_config_base_t *configBase) {
+ *configBase = AUDIO_CONFIG_BASE_INITIALIZER;
if (!mStream) return NO_INIT;
#if MAJOR_VERSION <= 6
Return<void> ret = mStream->getAudioProperties(
[&](uint32_t sr, auto m, auto f) {
- *sampleRate = sr;
- *mask = static_cast<audio_channel_mask_t>(m);
- *format = static_cast<audio_format_t>(f);
+ configBase->sample_rate = sr;
+ configBase->channel_mask = static_cast<audio_channel_mask_t>(m);
+ configBase->format = static_cast<audio_format_t>(f);
});
return processReturn("getAudioProperties", ret);
#else
Result retval;
status_t conversionStatus = BAD_VALUE;
- audio_config_base_t halConfig = AUDIO_CONFIG_BASE_INITIALIZER;
Return<void> ret = mStream->getAudioProperties(
[&](Result r, const AudioConfigBase& config) {
retval = r;
if (retval == Result::OK) {
- conversionStatus = HidlUtils::audioConfigBaseToHal(config, &halConfig);
+ conversionStatus = HidlUtils::audioConfigBaseToHal(config, configBase);
}
});
if (status_t status = processReturn("getAudioProperties", ret, retval); status == NO_ERROR) {
- *sampleRate = halConfig.sample_rate;
- *mask = halConfig.channel_mask;
- *format = halConfig.format;
return conversionStatus;
} else {
return status;
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index c6db6d6..970903b 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -49,21 +49,14 @@
class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
{
public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size);
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase);
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index e89b288..d0c375e 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -45,31 +45,15 @@
mDevice.clear();
}
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
- *rate = mStream->get_sample_rate(mStream);
- return OK;
-}
-
status_t StreamHalLocal::getBufferSize(size_t *size) {
*size = mStream->get_buffer_size(mStream);
return OK;
}
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
- *mask = mStream->get_channels(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- *sampleRate = mStream->get_sample_rate(mStream);
- *mask = mStream->get_channels(mStream);
- *format = mStream->get_format(mStream);
+status_t StreamHalLocal::getAudioProperties(audio_config_base_t *configBase) {
+ configBase->sample_rate = mStream->get_sample_rate(mStream);
+ configBase->channel_mask = mStream->get_channels(mStream);
+ configBase->format = mStream->get_format(mStream);
return OK;
}
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index e228104..b260495 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -28,21 +28,14 @@
class StreamHalLocal : public virtual StreamHalInterface
{
public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size);
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase);
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index b47f536..2be12fb 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -31,25 +31,27 @@
class StreamHalInterface : public virtual RefBase
{
public:
- // TODO(mnaganov): Remove
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate) = 0;
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size) = 0;
- // TODO(mnaganov): Remove
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase) = 0;
- // TODO(mnaganov): Remove
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format) = 0;
-
- // TODO(mnaganov): Change to use audio_config_base_t
// Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
+ inline status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ const status_t result = getAudioProperties(&config);
+ if (result == NO_ERROR) {
+ if (sampleRate != nullptr) *sampleRate = config.sample_rate;
+ if (mask != nullptr) *mask = config.channel_mask;
+ if (format != nullptr) *format = config.format;
+ }
+ return result;
+ }
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs) = 0;
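A brief caller-side sketch of the reshaped API, assuming `stream` is an sp<StreamHalInterface>; the old three-pointer form remains available through the inline convenience wrapper above:

    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
    if (stream->getAudioProperties(&config) == NO_ERROR) {
        ALOGV("rate=%u Hz, mask=%#x, format=%#x",
              config.sample_rate, config.channel_mask, config.format);
    }

    // Legacy callers that only need one field can pass nullptr for the rest.
    uint32_t sampleRate = 0;
    stream->getAudioProperties(&sampleRate, nullptr /* mask */, nullptr /* format */);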
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index f2245b1..65a20a7 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -26,11 +26,16 @@
#include <errno.h>
#include <inttypes.h>
+#include <math.h>
#include <audio_effects/effect_hapticgenerator.h>
#include <audio_utils/format.h>
#include <system/audio.h>
+static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
+static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
+static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+
// This is the only symbol that needs to be exported
__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
@@ -101,11 +106,11 @@
context->param.audioChannelCount = 0;
context->param.maxHapticIntensity = os::HapticScale::MUTE;
- context->param.resonantFrequency = 150.0f;
+ context->param.resonantFrequency = DEFAULT_RESONANT_FREQUENCY;
context->param.bpfQ = 1.0f;
context->param.slowEnvNormalizationPower = -0.8f;
- context->param.bsfZeroQ = 8.0f;
- context->param.bsfPoleQ = 4.0f;
+ context->param.bsfZeroQ = DEFAULT_BSF_ZERO_Q;
+ context->param.bsfPoleQ = DEFAULT_BSF_POLE_Q;
context->param.distortionCornerFrequency = 300.0f;
context->param.distortionInputGain = 0.3f;
context->param.distortionCubeThreshold = 0.1f;
@@ -173,6 +178,7 @@
addBiquadFilter(processingChain, processorsRecord, lpf);
auto bpf = createBPF(param->resonantFrequency, param->bpfQ, sampleRate, channelCount);
+ processorsRecord.bpf = bpf;
addBiquadFilter(processingChain, processorsRecord, bpf);
float normalizationPower = param->slowEnvNormalizationPower;
@@ -191,6 +197,7 @@
auto bsf = createBSF(
param->resonantFrequency, param->bsfZeroQ, param->bsfPoleQ, sampleRate, channelCount);
+ processorsRecord.bsf = bsf;
addBiquadFilter(processingChain, processorsRecord, bsf);
// The process chain captures the shared pointer of the Distortion in lambda. It will
@@ -279,7 +286,32 @@
}
break;
}
+ case HG_PARAM_VIBRATOR_INFO: {
+ if (value == nullptr || size != 2 * sizeof(float)) {
+ return -EINVAL;
+ }
+ const float resonantFrequency = *(float*) value;
+ const float qFactor = *((float *) value + 1);
+ context->param.resonantFrequency =
+ isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
+ context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
+ context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+ if (context->processorsRecord.bpf != nullptr) {
+ context->processorsRecord.bpf->setCoefficients(
+ bpfCoefs(context->param.resonantFrequency,
+ context->param.bpfQ,
+ context->config.inputCfg.samplingRate));
+ }
+ if (context->processorsRecord.bsf != nullptr) {
+ context->processorsRecord.bsf->setCoefficients(
+ bsfCoefs(context->param.resonantFrequency,
+ context->param.bsfZeroQ,
+ context->param.bsfPoleQ,
+ context->config.inputCfg.samplingRate));
+ }
+ HapticGenerator_Reset(context);
+ } break;
default:
ALOGW("Unknown param: %d", param);
return -EINVAL;
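For illustration, the value blob the new HG_PARAM_VIBRATOR_INFO case expects is just two floats; a hypothetical caller could pack it like this (the surrounding effect_param_t plumbing is omitted):

    float vibratorInfo[2];
    vibratorInfo[0] = resonantFrequencyHz;  // NaN falls back to DEFAULT_RESONANT_FREQUENCY
    vibratorInfo[1] = qFactor;              // NaN falls back to a compile-time default Q;
                                            // the pole Q is then derived as half of the zero Q
    // The setter must be invoked with size == 2 * sizeof(float), otherwise -EINVAL is returned.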
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index d2d7afe..96b744a 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -69,6 +69,11 @@
std::vector<std::shared_ptr<Ramp>> ramps;
std::vector<std::shared_ptr<SlowEnvelope>> slowEnvs;
std::vector<std::shared_ptr<Distortion>> distortions;
+
+ // Cache band-pass filter and band-stop filter for updating parameters
+ // according to vibrator info
+ std::shared_ptr<HapticBiquadFilter> bpf;
+ std::shared_ptr<HapticBiquadFilter> bsf;
};
// A structure to keep all the context for HapticGenerator.
diff --git a/media/libeffects/hapticgenerator/Processors.cpp b/media/libeffects/hapticgenerator/Processors.cpp
index 79a4e2c..4fe3a75 100644
--- a/media/libeffects/hapticgenerator/Processors.cpp
+++ b/media/libeffects/hapticgenerator/Processors.cpp
@@ -211,9 +211,9 @@
}
BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
- const float sampleRate,
const float zq,
- const float pq) {
+ const float pq,
+ const float sampleRate) {
BiquadFilterCoefficients coefficient;
const auto [zeroReal, zeroImg] = getComplexPoleZ(ringingFrequency, zq, sampleRate);
float zeroCoeff1 = -2 * zeroReal;
@@ -275,7 +275,7 @@
const float pq,
const float sampleRate,
const size_t channelCount) {
- BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, sampleRate, zq, pq);
+ BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, zq, pq, sampleRate);
return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
}
diff --git a/media/libeffects/hapticgenerator/Processors.h b/media/libeffects/hapticgenerator/Processors.h
index 452a985..74ca77d 100644
--- a/media/libeffects/hapticgenerator/Processors.h
+++ b/media/libeffects/hapticgenerator/Processors.h
@@ -102,9 +102,9 @@
const float sampleRate);
BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
- const float sampleRate,
const float zq,
- const float pq);
+ const float pq,
+ const float sampleRate);
std::shared_ptr<HapticBiquadFilter> createLPF(const float cornerFrequency,
const float sampleRate,
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 5d75055..7998879 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -63,7 +63,6 @@
"Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp",
"Common/src/Copy_16.cpp",
"Common/src/MonoTo2I_32.cpp",
- "Common/src/LoadConst_32.cpp",
"Common/src/dB_to_Lin32.cpp",
"Common/src/Shift_Sat_v16xv16.cpp",
"Common/src/Shift_Sat_v32xv32.cpp",
@@ -148,7 +147,6 @@
"Reverb/src/LVREV_Process.cpp",
"Reverb/src/LVREV_SetControlParameters.cpp",
"Reverb/src/LVREV_Tables.cpp",
- "Common/src/LoadConst_32.cpp",
"Common/src/From2iToMono_32.cpp",
"Common/src/Mult3s_32x16.cpp",
"Common/src/Copy_16.cpp",
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
index 9f5f448..12b86f3 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
@@ -137,9 +137,9 @@
pInstance->pBufferManagement->pScratch = (LVM_FLOAT*)pInstance->pScratch;
- LoadConst_Float(0, /* Clear the input delay buffer */
- (LVM_FLOAT*)&pInstance->pBufferManagement->InDelayBuffer,
- (LVM_INT16)(LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE));
+ memset(pInstance->pBufferManagement->InDelayBuffer, 0,
+ LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE *
+ sizeof(pInstance->pBufferManagement->InDelayBuffer[0]));
pInstance->pBufferManagement->InDelaySamples =
MIN_INTERNAL_BLOCKSIZE; /* Set the number of delay samples */
pInstance->pBufferManagement->OutDelaySamples = 0; /* No samples in the output buffer */
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
index 20058a1..4eea04f 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
@@ -23,6 +23,7 @@
#include <system/audio.h>
#include "LVM_Private.h"
+#include "ScalarArithmetic.h"
#include "VectorArithmetic.h"
#include "LVM_Coeffs.h"
@@ -178,6 +179,9 @@
* Apply the filter
*/
pInstance->pTEBiquad->process(pProcessed, pProcessed, NrFrames);
+ for (auto i = 0; i < NrChannels * NrFrames; i++) {
+ pProcessed[i] = LVM_Clamp(pProcessed[i]);
+ }
}
/*
* Volume balance
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 18de85b..10f351e 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -24,8 +24,6 @@
VARIOUS FUNCTIONS
***********************************************************************************/
-void LoadConst_Float(const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n);
-
void Copy_Float(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 n);
void Copy_Float_Mc_Stereo(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
index be19fa0..5a67bda 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
@@ -19,6 +19,7 @@
INCLUDE FILES
***********************************************************************************/
+#include <string.h>
#include "LVC_Mixer_Private.h"
#include "VectorArithmetic.h"
#include "ScalarArithmetic.h"
@@ -68,7 +69,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0.0, dst, n);
+ memset(dst, 0, n * sizeof(*dst));
else {
if ((pInstance->Target) != 1.0f)
Mult3s_Float(src, (pInstance->Target), dst, n);
@@ -150,7 +151,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0.0, dst, NrFrames * NrChannels);
+ memset(dst, 0, NrFrames * NrChannels * sizeof(*dst));
else {
if ((pInstance->Target) != 1.0f)
Mult3s_Float(src, (pInstance->Target), dst, NrFrames * NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp b/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp
deleted file mode 100644
index df7a558..0000000
--- a/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2004-2010 NXP Software
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**********************************************************************************
- INCLUDE FILES
-***********************************************************************************/
-
-#include "VectorArithmetic.h"
-
-/**********************************************************************************
- FUNCTION LoadConst_32
-***********************************************************************************/
-void LoadConst_Float(const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n) {
- LVM_INT16 ii;
-
- for (ii = n; ii != 0; ii--) {
- *dst = val;
- dst++;
- }
-
- return;
-}
-
-/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp b/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
index 8408962..58a9102 100644
--- a/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
+++ b/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
@@ -19,6 +19,7 @@
INCLUDE FILES
***********************************************************************************/
+#include <string.h>
#include "Mixer_private.h"
#include "VectorArithmetic.h"
@@ -61,7 +62,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0, dst, n);
+ memset(dst, 0, n * sizeof(*dst));
else if ((pInstance->Target) == 1.0f) {
if (src != dst) Copy_Float((LVM_FLOAT*)src, (LVM_FLOAT*)dst, (LVM_INT16)(n));
} else
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp b/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
index d4b321f..be3505f 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
@@ -60,7 +60,8 @@
pLVREV_Private->pRevLPFBiquad->clear();
for (size_t i = 0; i < pLVREV_Private->InstanceParams.NumDelays; i++) {
pLVREV_Private->revLPFBiquad[i]->clear();
- LoadConst_Float(0, pLVREV_Private->pDelay_T[i], LVREV_MAX_T_DELAY[i]);
+ memset(pLVREV_Private->pDelay_T[i], 0, LVREV_MAX_T_DELAY[i] *
+ sizeof(pLVREV_Private->pDelay_T[i][0]));
}
return LVREV_SUCCESS;
}
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
index c5b6598..de23d07 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
@@ -81,10 +81,7 @@
pConfig->DelaySize =
(pParams->NrChannels == FCC_1) ? (LVM_INT16)Delay : (LVM_INT16)(FCC_2 * Delay);
pConfig->DelayOffset = 0;
- LoadConst_Float(0, /* Value */
- (LVM_FLOAT*)&pConfig->StereoSamples[0], /* Destination */
- /* Number of words */
- (LVM_UINT16)(sizeof(pConfig->StereoSamples) / sizeof(LVM_FLOAT)));
+ memset(pConfig->StereoSamples, 0, sizeof(pConfig->StereoSamples));
/*
* Setup the filters
*/
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index df7ca5a..7571a24 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -53,16 +53,16 @@
flags_arr=(
"-csE"
"-eqE"
- "-tE"
- "-csE -tE -eqE"
+ "-tE -trebleLvl:15"
+ "-csE -tE -trebleLvl:15 -eqE"
"-bE -M"
- "-csE -tE"
- "-csE -eqE" "-tE -eqE"
- "-csE -tE -bE -M -eqE"
- "-tE -eqE -vcBal:96 -M"
- "-tE -eqE -vcBal:-96 -M"
- "-tE -eqE -vcBal:0 -M"
- "-tE -eqE -bE -vcBal:30 -M"
+ "-csE -tE -trebleLvl:15"
+ "-csE -eqE" "-tE -trebleLvl:15 -eqE"
+ "-csE -tE -trebleLvl:15 -bE -M -eqE"
+ "-tE -trebleLvl:15 -eqE -vcBal:96 -M"
+ "-tE -trebleLvl:15 -eqE -vcBal:-96 -M"
+ "-tE -trebleLvl:15 -eqE -vcBal:0 -M"
+ "-tE -trebleLvl:15 -eqE -bE -vcBal:30 -M"
)
fs_arr=(
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index e484a1a..e65228c 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -79,6 +79,7 @@
int bassEffectLevel = 0;
int eqPresetLevel = 0;
int frameLength = 256;
+ int trebleEffectLevel = 0;
LVM_BE_Mode_en bassEnable = LVM_BE_OFF;
LVM_TE_Mode_en trebleEnable = LVM_TE_OFF;
LVM_EQNB_Mode_en eqEnable = LVM_EQNB_OFF;
@@ -303,10 +304,6 @@
params->PSA_Enable = LVM_PSA_OFF;
params->PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
- /* TE Control parameters */
- params->TE_OperatingMode = LVM_TE_OFF;
- params->TE_EffectLevel = 0;
-
/* Activate the initial settings */
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
@@ -445,6 +442,7 @@
/* Treble Enhancement parameters */
params->TE_OperatingMode = plvmConfigParams->trebleEnable;
+ params->TE_EffectLevel = plvmConfigParams->trebleEffectLevel;
/* PSA Control parameters */
params->PSA_Enable = LVM_PSA_ON;
@@ -604,6 +602,15 @@
return -1;
}
lvmConfigParams.eqPresetLevel = eqPresetLevel;
+ } else if (!strncmp(argv[i], "-trebleLvl:", 11)) {
+ const int trebleEffectLevel = atoi(argv[i] + 11);
+ if (trebleEffectLevel > LVM_TE_MAX_EFFECTLEVEL ||
+ trebleEffectLevel < LVM_TE_MIN_EFFECTLEVEL) {
+ printf("Error: Unsupported Treble Effect Level : %d\n", trebleEffectLevel);
+ printUsage();
+ return -1;
+ }
+ lvmConfigParams.trebleEffectLevel = trebleEffectLevel;
} else if (!strcmp(argv[i], "-bE")) {
lvmConfigParams.bassEnable = LVM_BE_ON;
} else if (!strcmp(argv[i], "-eqE")) {
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index b2056ad..e471c7b 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -45,6 +45,12 @@
enabled: false,
},
},
+ header_libs: [
+ "libbinder_headers",
+ ],
+ export_header_lib_headers: [
+ "libbinder_headers",
+ ],
apex_available: [
"//apex_available:platform",
"com.android.media",
diff --git a/media/libmedia/tests/fuzzer/Android.bp b/media/libmedia/tests/fuzzer/Android.bp
index e58c396..c03b5b1 100644
--- a/media/libmedia/tests/fuzzer/Android.bp
+++ b/media/libmedia/tests/fuzzer/Android.bp
@@ -1,3 +1,12 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_media_libmedia_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_media_libmedia_license"],
+}
+
cc_fuzz {
name: "libmedia_metadata_fuzzer",
srcs: [
@@ -7,4 +16,4 @@
"libmedia",
"libbinder",
],
-}
\ No newline at end of file
+}
diff --git a/media/libmediaformatshaper/Android.bp b/media/libmediaformatshaper/Android.bp
index 3107e12..bdd1465 100644
--- a/media/libmediaformatshaper/Android.bp
+++ b/media/libmediaformatshaper/Android.bp
@@ -56,6 +56,10 @@
"include",
],
+ header_libs: [
+ "libstagefright_headers",
+ ],
+
shared_libs: [
"liblog",
"libutils",
diff --git a/media/libmediaformatshaper/CodecProperties.cpp b/media/libmediaformatshaper/CodecProperties.cpp
index d733c57..315b3ec 100644
--- a/media/libmediaformatshaper/CodecProperties.cpp
+++ b/media/libmediaformatshaper/CodecProperties.cpp
@@ -19,8 +19,15 @@
#include <utils/Log.h>
#include <string>
+#include <stdlib.h>
-#include <media/formatshaper/CodecProperties.h>
+#include "CodecProperties.h"
+
+#include <media/stagefright/MediaCodecConstants.h>
+
+
+// we aren't going to mess with shaping point dimensions beyond this
+static const int32_t DIMENSION_LIMIT = 16384;
namespace android {
namespace mediaformatshaper {
@@ -63,17 +70,12 @@
ALOGD("setFeatureValue(%s,%d)", key.c_str(), value);
mFeatures.insert({key, value});
- if (!strcmp(key.c_str(), "vq-minimum-quality")) {
- setSupportedMinimumQuality(value);
- } else if (!strcmp(key.c_str(), "vq-supports-qp")) { // key from prototyping
+ if (!strcmp(key.c_str(), FEATURE_QpBounds)) {
setSupportsQp(1);
- } else if (!strcmp(key.c_str(), "qp-bounds")) { // official key
- setSupportsQp(1);
- } else if (!strcmp(key.c_str(), "vq-target-qpmax")) {
- setTargetQpMax(value);
- } else if (!strcmp(key.c_str(), "vq-target-bppx100")) {
- double bpp = value / 100.0;
- setBpp(bpp);
+ } else if (!strcmp(key.c_str(), "video-minimum-quality")) {
+ setSupportedMinimumQuality(1);
+ } else if (!strcmp(key.c_str(), "vq-minimum-quality")) { // from prototyping
+ setSupportedMinimumQuality(1);
}
}
@@ -90,6 +92,182 @@
return false;
}
+// Tuning values (which differ from Features)
+// this is where we set up things like target bitrates and QP ranges
+// NB the tuning values arrive as strings, allowing us to convert them into an appropriate
+// format (int, float, ranges, other combinations)
+//
+void CodecProperties::setTuningValue(std::string key, std::string value) {
+ ALOGD("setTuningValue(%s,%s)", key.c_str(), value.c_str());
+ mTunings.insert({key, value});
+
+ bool legal = false;
+ // NB: old school strtol() because std::stoi() throws exceptions
+ if (!strcmp(key.c_str(), "vq-target-qpmax")) {
+ const char *p = value.c_str();
+ char *q;
+ int32_t iValue = strtol(p, &q, 0);
+ if (q != p) {
+ setTargetQpMax(iValue);
+ legal = true;
+ }
+ } else if (!strcmp(key.c_str(), "vq-target-bpp")) {
+ const char *p = value.c_str();
+ char *q;
+ double bpp = strtod(p, &q);
+ if (q != p) {
+ setBpp(bpp);
+ legal = true;
+ }
+ } else if (!strncmp(key.c_str(), "vq-target-bpp-", strlen("vq-target-bpp-"))) {
+ std::string resolution = key.substr(strlen("vq-target-bpp-"));
+ if (bppPoint(resolution, value)) {
+ legal = true;
+ }
+ } else if (!strcmp(key.c_str(), "vq-target-bppx100")) {
+ // legacy, prototyping
+ const char *p = value.c_str();
+ char *q;
+ int32_t iValue = strtol(p, &q, 0);
+ if (q != p) {
+ double bpp = iValue / 100.0;
+ setBpp(bpp);
+ legal = true;
+ }
+ } else {
+ legal = true;
+ }
+
+ if (!legal) {
+ ALOGW("setTuningValue() unable to apply tuning '%s' with value '%s'",
+ key.c_str(), value.c_str());
+ }
+ return;
+}
+
+bool CodecProperties::getTuningValue(std::string key, std::string &value) {
+ ALOGV("getTuningValue(%s)", key.c_str());
+ auto mapped = mTunings.find(key);
+ if (mapped != mTunings.end()) {
+ value = mapped->second;
+ return true;
+ }
+ return false;
+}
+
+bool CodecProperties::bppPoint(std::string resolution, std::string value) {
+
+ int32_t width = 0;
+ int32_t height = 0;
+ double bpp = -1;
+
+ // resolution is "WxH", "W*H" or a standard name like "720p"
+ if (resolution == "1080p") {
+ width = 1080; height = 1920;
+ } else if (resolution == "720p") {
+ width = 720; height = 1280;
+ } else if (resolution == "540p") {
+ width = 540; height = 960;
+ } else if (resolution == "480p") {
+ width = 480; height = 854;
+ } else {
+ size_t sep = resolution.find('x');
+ if (sep == std::string::npos) {
+ sep = resolution.find('*');
+ }
+ if (sep == std::string::npos) {
+ ALOGW("unable to parse resolution: '%s'", resolution.c_str());
+ return false;
+ }
+ std::string w = resolution.substr(0, sep);
+ std::string h = resolution.substr(sep+1);
+
+ char *q;
+ const char *p = w.c_str();
+ width = strtol(p, &q, 0);
+ if (q == p) {
+ width = -1;
+ }
+ p = h.c_str();
+ height = strtol(p, &q, 0);
+ if (q == p) {
+ height = -1;
+ }
+ if (width <= 0 || height <= 0 || width > DIMENSION_LIMIT || height > DIMENSION_LIMIT) {
+ ALOGW("unparseable: width, height '%s'", resolution.c_str());
+ return false;
+ }
+ }
+
+ const char *p = value.c_str();
+ char *q;
+ bpp = strtod(p, &q);
+ if (q == p) {
+ ALOGW("unparseable bpp '%s'", value.c_str());
+ return false;
+ }
+
+ struct bpp_point *point = (struct bpp_point*) malloc(sizeof(*point));
+ if (point == nullptr) {
+ ALOGW("unable to allocate memory for bpp point");
+ return false;
+ }
+
+ point->pixels = width * height;
+ point->width = width;
+ point->height = height;
+ point->bpp = bpp;
+
+ if (mBppPoints == nullptr) {
+ point->next = nullptr;
+ mBppPoints = point;
+ } else if (point->pixels < mBppPoints->pixels) {
+ // at the front
+ point->next = mBppPoints;
+ mBppPoints = point;
+ } else {
+ struct bpp_point *after = mBppPoints;
+ while (after->next) {
+ if (point->pixels > after->next->pixels) {
+ after = after->next;
+ continue;
+ }
+
+ // insert before after->next
+ point->next = after->next;
+ after->next = point;
+ break;
+ }
+ if (after->next == nullptr) {
+ // hasn't gone in yet
+ point->next = nullptr;
+ after->next = point;
+ }
+ }
+
+ return true;
+}
+
+double CodecProperties::getBpp(int32_t width, int32_t height) {
+ // look in the per-resolution list
+
+ int32_t pixels = width * height;
+
+ if (mBppPoints) {
+ struct bpp_point *point = mBppPoints;
+ while (point && point->pixels < pixels) {
+ point = point->next;
+ }
+ if (point) {
+ ALOGV("getBpp(w=%d,h=%d) returns %f from bpppoint w=%d h=%d",
+ width, height, point->bpp, point->width, point->height);
+ return point->bpp;
+ }
+ }
+
+ ALOGV("defaulting to %f bpp", mBpp);
+ return mBpp;
+}
std::string CodecProperties::getMapping(std::string key, std::string kind) {
ALOGV("getMapping(key %s, kind %s )", key.c_str(), kind.c_str());
diff --git a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h b/media/libmediaformatshaper/CodecProperties.h
similarity index 81%
rename from media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
rename to media/libmediaformatshaper/CodecProperties.h
index e5cc9cf..ff7051f 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
+++ b/media/libmediaformatshaper/CodecProperties.h
@@ -21,6 +21,8 @@
#include <mutex>
#include <string>
+#include <inttypes.h>
+
#include <utils/RefBase.h>
namespace android {
@@ -56,6 +58,10 @@
void setFeatureValue(std::string key, int32_t value);
bool getFeatureValue(std::string key, int32_t *valuep);
+ // keep a map of all tunings and their parameters
+ void setTuningValue(std::string key, std::string value);
+ bool getTuningValue(std::string key, std::string &value);
+
// does the codec support the Android S minimum quality rules
void setSupportedMinimumQuality(int vmaf);
int supportedMinimumQuality();
@@ -69,7 +75,7 @@
// This is used to calculate a minimum bitrate for any particular resolution.
// A 1080p (1920*1080 = 2073600 pixels) to be encoded at 5Mbps has a bpp == 2.41
void setBpp(double bpp) { mBpp = bpp;}
- double getBpp() {return mBpp;}
+ double getBpp(int32_t width, int32_t height);
// Does this codec support QP bounding
// The getMapping() methods provide any needed mapping to non-standard keys.
@@ -88,15 +94,31 @@
std::string mMediaType;
int mApi = 0;
int mMinimumQuality = 0;
- int mTargetQpMax = 0;
+ int mTargetQpMax = INT32_MAX;
bool mSupportsQp = false;
double mBpp = 0.0;
+ // allow different target bits-per-pixel based on resolution
+ // similar to codec 'performance points'
+ // uses 'next largest' (by pixel count) point as minimum bpp
+ struct bpp_point {
+ struct bpp_point *next;
+ int32_t pixels;
+ int32_t width, height;
+ double bpp;
+ };
+ struct bpp_point *mBppPoints = nullptr;
+ bool bppPoint(std::string resolution, std::string value);
+
std::mutex mMappingLock;
// XXX figure out why I'm having problems getting compiler to like GUARDED_BY
std::map<std::string, std::string> mMappings /*GUARDED_BY(mMappingLock)*/ ;
std::map<std::string, int32_t> mFeatures /*GUARDED_BY(mMappingLock)*/ ;
+ std::map<std::string, std::string> mTunings /*GUARDED_BY(mMappingLock)*/ ;
+
+ // Seed() and Finish() use this as the underlying implementation
+ void addMediaDefaults(bool overrideable);
bool mIsRegistered = false;
diff --git a/media/libmediaformatshaper/CodecSeeding.cpp b/media/libmediaformatshaper/CodecSeeding.cpp
index 629b405..7fe1075 100644
--- a/media/libmediaformatshaper/CodecSeeding.cpp
+++ b/media/libmediaformatshaper/CodecSeeding.cpp
@@ -20,62 +20,75 @@
#include <string>
-#include <media/formatshaper/CodecProperties.h>
+#include "CodecProperties.h"
namespace android {
namespace mediaformatshaper {
/*
- * a block of pre-loads; things the library seeds into the codecproperties based
+ * a block of pre-loaded tunings for codecs.
+ *
+ * things the library seeds into the codecproperties based
* on the mediaType.
* XXX: parsing from a file is likely better than embedding in code.
*/
typedef struct {
+ bool overrideable;
const char *key;
- int32_t value;
-} preloadFeature_t;
+ const char *value;
+} preloadTuning_t;
typedef struct {
const char *mediaType;
- preloadFeature_t *features;
-} preloadProperties_t;
+ preloadTuning_t *features;
+} preloadTunings_t;
/*
* 240 = 2.4 bits per pixel-per-second == 5mbps@1080, 2.3mbps@720p, which is about where
* we want our initial floor for now.
*/
-static preloadFeature_t featuresAvc[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresAvc[] = {
+ // {true, "vq-target-bpp", "2.45"},
+ {true, "vq-target-bpp-1080p", "2.40"},
+ {true, "vq-target-bpp-540p", "2.60"},
+ {true, "vq-target-bpp-480p", "3.00"},
+ {true, "vq-target-qpmax", "40"},
+ {true, nullptr, 0}
};
-static preloadFeature_t featuresHevc[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresHevc[] = {
+ // {true, "vq-target-bpp", "1.80"},
+ {true, "vq-target-bpp-1080p", "1.50"},
+ {true, "vq-target-bpp-720p", "1.80"},
+ {true, "vq-target-bpp-540p", "2.10"},
+ // no qp for hevc, at least for now
+ {true, nullptr, 0}
};
-static preloadFeature_t featuresGenericVideo[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresGenericVideo[] = {
+ {true, "vq-target-bpp", "2.40"},
+ {true, nullptr, 0}
};
-static preloadProperties_t preloadProperties[] = {
+static preloadTunings_t preloadTunings[] = {
{ "video/avc", featuresAvc},
{ "video/hevc", &featuresHevc[0]},
// wildcard for any video format not already captured
{ "video/*", &featuresGenericVideo[0]},
+
{ nullptr, nullptr}
};
-void CodecProperties::Seed() {
- ALOGV("Seed: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
+void CodecProperties::addMediaDefaults(bool overrideable) {
+ ALOGD("Seed: codec %s, mediatype %s, overrideable %d",
+ mName.c_str(), mMediaType.c_str(), overrideable);
// load me up with initial configuration data
int count = 0;
- for (int i=0;; i++) {
- preloadProperties_t *p = &preloadProperties[i];
+ for (int i = 0; ; i++) {
+ preloadTunings_t *p = &preloadTunings[i];
if (p->mediaType == nullptr) {
break;
}
@@ -100,11 +113,14 @@
// walk through, filling things
if (p->features != nullptr) {
for (int j=0;; j++) {
- preloadFeature_t *q = &p->features[j];
+ preloadTuning_t *q = &p->features[j];
if (q->key == nullptr) {
break;
}
- setFeatureValue(q->key, q->value);
+ if (q->overrideable != overrideable) {
+ continue;
+ }
+ setTuningValue(q->key, q->value);
count++;
}
break;
@@ -113,13 +129,18 @@
ALOGV("loaded %d preset values", count);
}
-// a chance, as we register the codec and accept no further updates, to
-// override any poor configuration that arrived from the device's XML files.
+// a chance, as we create the codec to inject any default behaviors we want.
+// XXX: consider whether we need pre/post or just post. It affects what can be
+// overridden by way of the codec XML
//
+void CodecProperties::Seed() {
+ ALOGV("Seed: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
+ addMediaDefaults(true);
+}
+
void CodecProperties::Finish() {
ALOGV("Finish: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
-
- // currently a no-op
+ addMediaDefaults(false);
}
} // namespace mediaformatshaper
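A small self-contained sketch (assumed names; table contents copied from featuresAvc above) of the two-pass filtering that Seed() and Finish() now share: Seed() applies the overrideable entries before any device XML tunings arrive, Finish() applies the rest afterwards, and with the current tables, where every entry is marked overrideable, the second pass is effectively a no-op.

// Illustration only; the printf stands in for setTuningValue().
#include <cstdio>

struct PreloadTuning { bool overrideable; const char* key; const char* value; };

static const PreloadTuning kAvc[] = {
    {true, "vq-target-bpp-1080p", "2.40"},
    {true, "vq-target-qpmax", "40"},
    {true, nullptr, nullptr},
};

static void addMediaDefaults(bool overrideable) {
    for (int j = 0; kAvc[j].key != nullptr; j++) {
        if (kAvc[j].overrideable != overrideable) continue;
        printf("apply %s = %s\n", kAvc[j].key, kAvc[j].value);
    }
}

int main() {
    addMediaDefaults(true);   // Seed(): overrideable presets, before device XML tunings
    // ... device XML tunings would be applied here via setTuning()/setTuningValue() ...
    addMediaDefaults(false);  // Finish(): locked-in presets; currently none in the tables
    return 0;
}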
diff --git a/media/libmediaformatshaper/FormatShaper.cpp b/media/libmediaformatshaper/FormatShaper.cpp
index a52edc2..451f772 100644
--- a/media/libmediaformatshaper/FormatShaper.cpp
+++ b/media/libmediaformatshaper/FormatShaper.cpp
@@ -23,10 +23,11 @@
#include <media/NdkMediaFormat.h>
-#include <media/formatshaper/VQops.h>
-#include <media/formatshaper/CodecProperties.h>
+#include "CodecProperties.h"
+#include "VideoShaper.h"
+#include "VQops.h"
+
#include <media/formatshaper/FormatShaper.h>
-#include <media/formatshaper/VideoShaper.h>
namespace android {
namespace mediaformatshaper {
@@ -99,6 +100,23 @@
return 0;
}
+int setTuning(shaperHandle_t shaper, const char *tuning, const char *value) {
+ ALOGV("setTuning: tuning %s value %s", tuning, value);
+ CodecProperties *codec = (CodecProperties*) shaper;
+ if (codec == nullptr) {
+ return -1;
+ }
+ // must not yet be registered
+ if (codec->isRegistered()) {
+ return -1;
+ }
+
+ // save a map of all features
+ codec->setTuningValue(tuning, value);
+
+ return 0;
+}
+
/*
* The routines that manage finding, creating, and registering the shapers.
*/
@@ -176,6 +194,8 @@
.shapeFormat = shapeFormat,
.getMappings = getMappings,
.getReverseMappings = getReverseMappings,
+
+ .setTuning = setTuning,
};
} // namespace mediaformatshaper
diff --git a/media/libmediaformatshaper/ManageShapingCodecs.cpp b/media/libmediaformatshaper/ManageShapingCodecs.cpp
index bdc395f..3061d0b 100644
--- a/media/libmediaformatshaper/ManageShapingCodecs.cpp
+++ b/media/libmediaformatshaper/ManageShapingCodecs.cpp
@@ -23,7 +23,8 @@
#include <inttypes.h>
#include <media/NdkMediaFormat.h>
-#include <media/formatshaper/CodecProperties.h>
+
+#include "CodecProperties.h"
namespace android {
namespace mediaformatshaper {
diff --git a/media/libmediaformatshaper/VQApply.cpp b/media/libmediaformatshaper/VQApply.cpp
index 39a5e19..4f6a6c3 100644
--- a/media/libmediaformatshaper/VQApply.cpp
+++ b/media/libmediaformatshaper/VQApply.cpp
@@ -23,9 +23,9 @@
#include <media/NdkMediaFormat.h>
-#include <media/formatshaper/VQops.h>
-#include <media/formatshaper/CodecProperties.h>
-#include <media/formatshaper/VideoShaper.h>
+#include "VQops.h"
+#include "CodecProperties.h"
+#include "VideoShaper.h"
namespace android {
namespace mediaformatshaper {
@@ -48,11 +48,21 @@
//
static const int BITRATE_MODE_VBR = 1;
+
+// constants we use within the calculations
+//
+constexpr double BITRATE_LEAVE_UNTOUCHED = 1.75;
+
+// 20% bump if QP is configured but it is unavailable
+constexpr double BITRATE_QP_UNAVAILABLE_BOOST = 0.20;
+
+
//
// Caller retains ownership of and responsibility for inFormat
//
int VQApply(CodecProperties *codec, vqOps_t *info, AMediaFormat* inFormat, int flags) {
ALOGV("codecName %s inFormat %p flags x%x", codec->getName().c_str(), inFormat, flags);
+ (void) info; // unused for now
int32_t bitRateMode = -1;
if (AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BITRATE_MODE, &bitRateMode)
@@ -69,74 +79,105 @@
}
//
- // apply any and all tools that we have.
+ // consider any and all tools available
// -- qp
// -- minimum bits-per-pixel
//
- if (!codec->supportsQp()) {
- ALOGD("minquality: no qp bounding in codec %s", codec->getName().c_str());
+ int64_t bitrateChosen = 0;
+ int32_t qpChosen = INT32_MAX;
+
+ int64_t bitrateConfigured = 0;
+ int32_t bitrateConfiguredTmp = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfiguredTmp);
+ bitrateConfigured = bitrateConfiguredTmp;
+ bitrateChosen = bitrateConfigured;
+
+ int32_t width = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
+ int32_t height = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
+ int64_t pixels = ((int64_t)width) * height;
+ double minimumBpp = codec->getBpp(width, height);
+
+ int64_t bitrateFloor = pixels * minimumBpp;
+ int64_t bitrateCeiling = bitrateFloor * BITRATE_LEAVE_UNTOUCHED;
+ if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
+ if (bitrateCeiling > INT32_MAX) bitrateCeiling = INT32_MAX;
+
+ // if we are far enough above the target bpp, leave it alone
+ //
+ ALOGV("bitrate: configured %" PRId64 " floor %" PRId64, bitrateConfigured, bitrateFloor);
+ if (bitrateConfigured >= bitrateCeiling) {
+ ALOGV("high enough bitrate: configured %" PRId64 " >= ceiling %" PRId64,
+ bitrateConfigured, bitrateCeiling);
+ return 0;
+ }
+
+ // raise anything below the bitrate floor
+ if (bitrateConfigured < bitrateFloor) {
+ ALOGD("raise bitrate: configured %" PRId64 " to floor %" PRId64,
+ bitrateConfigured, bitrateFloor);
+ bitrateChosen = bitrateFloor;
+ }
+
+ bool qpPresent = hasQpMax(inFormat);
+
+ // calculate a target QP value
+ int32_t qpmax = codec->targetQpMax();
+ if (!qpPresent) {
+ // user didn't, so shaper wins
+ if (qpmax != INT32_MAX) {
+ ALOGV("choosing qp=%d", qpmax);
+ qpChosen = qpmax;
+ }
+ } else if (qpmax == INT32_MAX) {
+ // shaper didn't, so user wins
+ qpChosen = INT32_MAX;
+ AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, &qpChosen);
} else {
- // use a (configurable) QP value to force better quality
- //
- int32_t qpmax = codec->targetQpMax();
- int32_t qpmaxUser = INT32_MAX;
- if (hasQp(inFormat)) {
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, &qpmaxUser);
- ALOGD("minquality by QP: format already sets QP");
- }
+ // both sides want it, choose most restrictive
+ int32_t value = INT32_MAX;
+ AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, &value);
+ qpChosen = std::min(qpmax, value);
+ }
- // if the system didn't do one, use what the user provided
- if (qpmax == 0 && qpmaxUser != INT32_MAX) {
- qpmax = qpmaxUser;
- }
- // XXX: if both said something, how do we want to reconcile that
-
- if (qpmax > 0) {
- ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
- AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
-
- // force spreading the QP across frame types, since we imposing a value
- qpSpreadMaxPerFrameType(inFormat, info->qpDelta, info->qpMax, /* override */ true);
+ // if QP is desired but not supported, compensate with additional bits
+ if (!codec->supportsQp()) {
+ if (qpChosen != INT32_MAX) {
+ int64_t boost = 0;
+ boost = bitrateChosen * BITRATE_QP_UNAVAILABLE_BOOST;
+ ALOGD("minquality: requested QP unsupported, boost bitrate %" PRId64 " by %" PRId64,
+ bitrateChosen, boost);
+ bitrateChosen = bitrateChosen + boost;
+ qpChosen = INT32_MAX;
}
}
- double bpp = codec->getBpp();
- if (bpp > 0.0) {
- // if we've decided to use bits-per-pixel (per second) to drive the quality
- //
- // (properly phrased as 'bits per second per pixel' so that it's resolution
- // and framerate agnostic
- //
- // all of these is structured so that a missing value cleanly gets us to a
- // non-faulting value of '0' for the minimum bits-per-pixel.
- //
- int32_t width = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
- int32_t height = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
- int32_t bitrateConfigured = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfigured);
+ // limits
+ // apply our chosen values
+ //
+ if (qpChosen != INT32_MAX) {
+ ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
+ AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
- int64_t pixels = ((int64_t)width) * height;
- int64_t bitrateFloor = pixels * bpp;
+ // caller (VideoShaper) handles spreading this across the subframes
+ }
- if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
-
- ALOGD("minquality/bitrate: target %d floor %" PRId64 "(%.3f bpp * (%d w * %d h)",
- bitrateConfigured, bitrateFloor, codec->getBpp(), height, width);
-
- if (bitrateConfigured < bitrateFloor) {
- ALOGD("minquality/target bitrate raised from %d to %" PRId64 " bps",
- bitrateConfigured, bitrateFloor);
- AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateFloor);
+ if (bitrateChosen != bitrateConfigured) {
+ if (bitrateChosen > bitrateCeiling) {
+ ALOGD("minquality: bitrate clamped at ceiling %" PRId64, bitrateCeiling);
+ bitrateChosen = bitrateCeiling;
}
+ ALOGD("minquality/target bitrate raised from %" PRId64 " to %" PRId64 " bps",
+ bitrateConfigured, bitrateChosen);
+ AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateChosen);
}
return 0;
}
-bool hasQpPerFrameType(AMediaFormat *format) {
+bool hasQpMaxPerFrameType(AMediaFormat *format) {
int32_t value;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MAX, &value)
@@ -154,19 +195,29 @@
return false;
}
-bool hasQp(AMediaFormat *format) {
+bool hasQpMaxGlobal(AMediaFormat *format) {
int32_t value;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_MAX, &value)
|| AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_MIN, &value)) {
return true;
}
- return hasQpPerFrameType(format);
+ return false;
+}
+
+bool hasQpMax(AMediaFormat *format) {
+ if (hasQpMaxGlobal(format)) {
+ return true;
+ }
+ return hasQpMaxPerFrameType(format);
}
void qpSpreadPerFrameType(AMediaFormat *format, int delta,
int qplow, int qphigh, bool override) {
- qpSpreadMaxPerFrameType(format, delta, qphigh, override);
+
qpSpreadMinPerFrameType(format, qplow, override);
+ qpSpreadMaxPerFrameType(format, delta, qphigh, override);
+ // make sure that min<max for all the QP fields.
+ qpVerifyMinMaxOrdering(format);
}
void qpSpreadMaxPerFrameType(AMediaFormat *format, int delta, int qphigh, bool override) {
@@ -174,20 +225,26 @@
int32_t qpOffered = 0;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_MAX, &qpOffered)) {
- // propagate to otherwise unspecified frame-specific keys
- int32_t maxI;
- if (override || !AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MAX, &maxI)) {
- int32_t value = std::min(qphigh, qpOffered);
+ // propagate to frame-specific keys, choosing most restrictive
+ // ensure that we don't violate min<=max rules
+ {
+ int32_t maxI = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MAX, &maxI);
+ int32_t value = std::min({qpOffered, qphigh, maxI});
AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MAX, value);
}
- int32_t maxP;
- if (override || !AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MAX, &maxP)) {
- int32_t value = std::min(qphigh, (std::min(qpOffered, INT32_MAX-delta) + delta));
+ {
+ int32_t maxP = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MAX, &maxP);
+ int32_t value = std::min({(std::min(qpOffered, INT32_MAX-1*delta) + 1*delta),
+ qphigh, maxP});
AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MAX, value);
}
- int32_t maxB;
- if (override || !AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MAX, &maxB)) {
- int32_t value = std::min(qphigh, (std::min(qpOffered, INT32_MAX-2*delta) + 2*delta));
+ {
+ int32_t maxB = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MAX, &maxB);
+ int32_t value = std::min({(std::min(qpOffered, INT32_MAX-2*delta) + 2*delta),
+ qphigh, maxB});
AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MAX, value);
}
}
@@ -199,19 +256,47 @@
int32_t qpOffered = 0;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_MIN, &qpOffered)) {
int value = std::max(qplow, qpOffered);
- // propagate to otherwise unspecified frame-specific keys
- int32_t minI;
- if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, &minI)) {
- AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, value);
- }
- int32_t minP;
- if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, &minP)) {
- AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, value);
- }
- int32_t minB;
- if (!AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, &minB)) {
- AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, value);
- }
+ // propagate to frame-specific keys, use lowest of this and existing per-frame value
+ int32_t minI = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, &minI);
+ int32_t setI = std::min(value, minI);
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, setI);
+
+ int32_t minP = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, &minP);
+ int32_t setP = std::min(value, minP);
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, setP);
+
+ int32_t minB = INT32_MAX;
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, &minB);
+ int32_t setB = std::min(value, minB);
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, setB);
+ }
+}
+
+// XXX whether we allow min==max, or if we'll insist that min<max
+void qpVerifyMinMaxOrdering(AMediaFormat *format) {
+ // ensure that we don't violate min<=max rules
+ int32_t maxI = INT32_MAX;
+ int32_t minI = INT32_MIN;
+ if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MAX, &maxI)
+ && AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, &minI)
+ && minI > maxI) {
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_I_MIN, maxI);
+ }
+ int32_t maxP = INT32_MAX;
+ int32_t minP = INT32_MIN;
+ if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MAX, &maxP)
+ && AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, &minP)
+ && minP > maxP) {
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_P_MIN, maxP);
+ }
+ int32_t maxB = INT32_MAX;
+ int32_t minB = INT32_MIN;
+ if (AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MAX, &maxB)
+ && AMediaFormat_getInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, &minB)
+ && minB > maxB) {
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_VIDEO_QP_B_MIN, maxB);
}
}
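To make the rewritten VQApply() flow concrete, here is a self-contained sketch (illustration only; constants copied from above, the codec/AMediaFormat plumbing replaced by plain variables and a hypothetical configured bitrate): compute a bpp-based floor, leave bitrates at or above 1.75x the floor untouched, raise anything below the floor, and if a QP cap was wanted but the codec has no QP support, trade it for a 20% bitrate boost clamped at the ceiling.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Stand-ins for what VQApply() reads from CodecProperties and the input format.
    const int32_t width = 1920, height = 1080;
    const double minimumBpp = 2.40;       // codec->getBpp(width, height)
    const bool codecSupportsQp = false;   // codec->supportsQp()
    int32_t qpChosen = 40;                // reconciled user/shaper QP max
    int64_t bitrateChosen = 3000000;      // configured bitrate, hypothetical value

    const int64_t pixels = (int64_t)width * height;
    const int64_t bitrateFloor = pixels * minimumBpp;    // ~4.98 Mbps
    const int64_t bitrateCeiling = bitrateFloor * 1.75;  // BITRATE_LEAVE_UNTOUCHED

    if (bitrateChosen >= bitrateCeiling) return 0;       // far enough above target, leave alone
    if (bitrateChosen < bitrateFloor) bitrateChosen = bitrateFloor;

    if (!codecSupportsQp && qpChosen != INT32_MAX) {
        bitrateChosen += (int64_t)(bitrateChosen * 0.20); // BITRATE_QP_UNAVAILABLE_BOOST
        qpChosen = INT32_MAX;                             // drop the QP request
    }
    if (bitrateChosen > bitrateCeiling) bitrateChosen = bitrateCeiling;

    printf("bitrate %lld bps, qpmax %s\n", (long long)bitrateChosen,
           qpChosen == INT32_MAX ? "unset" : "set");
    return 0;
}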
diff --git a/media/libmediaformatshaper/include/media/formatshaper/VQops.h b/media/libmediaformatshaper/VQops.h
similarity index 88%
rename from media/libmediaformatshaper/include/media/formatshaper/VQops.h
rename to media/libmediaformatshaper/VQops.h
index 807e8af..74cce18 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/VQops.h
+++ b/media/libmediaformatshaper/VQops.h
@@ -17,7 +17,7 @@
#ifndef LIBMEDIAFORMATSHAPER_VQOPS_H_
#define LIBMEDIAFORMATSHAPER_VQOPS_H_
-#include <media/formatshaper/CodecProperties.h>
+#include "CodecProperties.h"
#include <media/NdkMediaFormat.h>
namespace android {
@@ -39,10 +39,12 @@
void qpSpreadPerFrameType(AMediaFormat *format, int delta, int qplow, int qphigh, bool override);
void qpSpreadMaxPerFrameType(AMediaFormat *format, int delta, int qphigh, bool override);
void qpSpreadMinPerFrameType(AMediaFormat *format, int qplow, bool override);
+void qpVerifyMinMaxOrdering(AMediaFormat *format);
// does the format have QP bounding entries
-bool hasQp(AMediaFormat *format);
-bool hasQpPerFrameType(AMediaFormat *format);
+bool hasQpMax(AMediaFormat *format);
+bool hasQpMaxGlobal(AMediaFormat *format);
+bool hasQpMaxPerFrameType(AMediaFormat *format);
} // namespace mediaformatshaper
} // namespace android
diff --git a/media/libmediaformatshaper/VideoShaper.cpp b/media/libmediaformatshaper/VideoShaper.cpp
index f772a66..cf8b50f 100644
--- a/media/libmediaformatshaper/VideoShaper.cpp
+++ b/media/libmediaformatshaper/VideoShaper.cpp
@@ -23,9 +23,9 @@
#include <media/NdkMediaFormat.h>
-#include <media/formatshaper/VQops.h>
-#include <media/formatshaper/CodecProperties.h>
-#include <media/formatshaper/VideoShaper.h>
+#include "CodecProperties.h"
+#include "VideoShaper.h"
+#include "VQops.h"
namespace android {
namespace mediaformatshaper {
@@ -83,10 +83,10 @@
// apply any quality transforms in here..
(void) VQApply(codec, info, inFormat, flags);
- // We must always spread any QP parameters.
+ // We always spread any QP parameters.
// Sometimes it's something we inserted here, sometimes it's a value that the user injected.
//
- qpSpreadPerFrameType(inFormat, info->qpDelta, info->qpMin, info->qpMax, /* override */ false);
+ qpSpreadPerFrameType(inFormat, info->qpDelta, info->qpMin, info->qpMax, /* override */ true);
//
return 0;
diff --git a/media/libmediaformatshaper/include/media/formatshaper/VideoShaper.h b/media/libmediaformatshaper/VideoShaper.h
similarity index 100%
rename from media/libmediaformatshaper/include/media/formatshaper/VideoShaper.h
rename to media/libmediaformatshaper/VideoShaper.h
diff --git a/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h b/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
index 8ad81cd..a1747cc 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
+++ b/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
@@ -84,6 +84,12 @@
typedef int (*setFeature_t)(shaperHandle_t shaper, const char *feature, int value);
/*
+ * establishes that codec "codecName" encoding for "mediaType" supports the indicated
+ * tuning at the indicated value
+ */
+typedef int (*setTuning_t)(shaperHandle_t shaper, const char *feature, const char * value);
+
+/*
* The expectation is that the client will implement a flow similar to the following when
* setting up an encoding.
*
@@ -118,6 +124,10 @@
shapeFormat_t shapeFormat;
getMappings_t getMappings;
getMappings_t getReverseMappings;
+
+ setTuning_t setTuning;
+
+ // additions happen at the end of the structure
} FormatShaperOps_t;
// versioninf information
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index de4f8d4..383bae8 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -160,6 +160,12 @@
#define AMEDIAMETRICS_PROP_VOLUME_LEFT "volume.left" // double (AudioTrack)
#define AMEDIAMETRICS_PROP_VOLUME_RIGHT "volume.right" // double (AudioTrack)
#define AMEDIAMETRICS_PROP_WHERE "where" // string value
+// EncodingRequested is the encoding format requested by the app
+#define AMEDIAMETRICS_PROP_ENCODINGREQUESTED "encodingRequested" // string
+// PerformanceModeActual is the actual selected performance mode, could be "none", "lowLatency" or
+// "powerSaving"
+#define AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL "performanceModeActual" // string
+#define AMEDIAMETRICS_PROP_FRAMESTRANSFERRED "framesTransferred" // int64_t, transferred frames
// Timing values: millisecond values are suffixed with MS and the type is double
// nanosecond values are suffixed with NS and the type is int64.
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index d250976..287317d 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -38,6 +38,7 @@
"media_permission-aidl-cpp",
"libaudioclient_aidl_conversion",
"libbase",
+ "libactivitymanager_aidl",
"libandroid_net",
"libaudioclient",
"libbinder",
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
index 7bee002..af9cf45 100644
--- a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
@@ -62,7 +62,7 @@
binder::Status status = mPowerManager->acquireWakeLock(
binder, POWERMANAGER_PARTIAL_WAKE_LOCK,
String16("AWakeLock"), String16("media"),
- {} /* workSource */, {} /* historyTag */);
+ {} /* workSource */, {} /* historyTag */, -1 /* displayId */);
IPCThreadState::self()->restoreCallingIdentity(token);
if (status.isOk()) {
mWakeLockToken = binder;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 6a8c708..4a65f71 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1962,7 +1962,7 @@
ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
mime.c_str(), audioFormat);
- int avgBitRate = -1;
+ int avgBitRate = 0;
format->findInt32("bitrate", &avgBitRate);
int32_t aacProfile = -1;
diff --git a/media/libmediatranscoding/TEST_MAPPING b/media/libmediatranscoding/TEST_MAPPING
index f8a9db9..40f7b21 100644
--- a/media/libmediatranscoding/TEST_MAPPING
+++ b/media/libmediatranscoding/TEST_MAPPING
@@ -26,6 +26,9 @@
},
{
"name": "VideoTrackTranscoderTests"
+ },
+ {
+ "name": "CtsMediaTranscodingTestCases"
}
]
}
diff --git a/media/libmediatranscoding/TranscoderWrapper.cpp b/media/libmediatranscoding/TranscoderWrapper.cpp
index b19e711..5e4c671 100644
--- a/media/libmediatranscoding/TranscoderWrapper.cpp
+++ b/media/libmediatranscoding/TranscoderWrapper.cpp
@@ -23,6 +23,7 @@
#include <media/NdkCommon.h>
#include <media/TranscoderWrapper.h>
#include <media/TranscodingRequest.h>
+#include <utils/AndroidThreads.h>
#include <utils/Log.h>
#include <thread>
@@ -599,6 +600,7 @@
}
void TranscoderWrapper::threadLoop() {
+ androidSetThreadPriority(0 /*tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
std::unique_lock<std::mutex> lock{mLock};
// TranscoderWrapper currently lives in the transcoding service, as long as
// MediaTranscodingService itself.
diff --git a/media/libmediatranscoding/TranscodingClientManager.cpp b/media/libmediatranscoding/TranscodingClientManager.cpp
index 06c5421..6dbcaf9 100644
--- a/media/libmediatranscoding/TranscodingClientManager.cpp
+++ b/media/libmediatranscoding/TranscodingClientManager.cpp
@@ -94,6 +94,12 @@
Status getSessionWithId(int32_t /*in_sessionId*/, TranscodingSessionParcel* /*out_session*/,
bool* /*_aidl_return*/) override;
+ Status addClientUid(int32_t /*in_sessionId*/, int32_t /*in_clientUid*/,
+ bool* /*_aidl_return*/) override;
+
+ Status getClientUids(int32_t /*in_sessionId*/,
+ std::optional<std::vector<int32_t>>* /*_aidl_return*/) override;
+
Status unregister() override;
};
@@ -217,6 +223,63 @@
return Status::ok();
}
+Status TranscodingClientManager::ClientImpl::addClientUid(int32_t in_sessionId,
+ int32_t in_clientUid,
+ bool* _aidl_return) {
+ *_aidl_return = false;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ int32_t callingPid = AIBinder_getCallingPid();
+ int32_t callingUid = AIBinder_getCallingUid();
+
+ // Check if we can trust clientUid. Only a privileged caller can add uids to existing sessions.
+ if (in_clientUid == IMediaTranscodingService::USE_CALLING_UID) {
+ in_clientUid = callingUid;
+ } else if (in_clientUid < 0) {
+ return Status::ok();
+ } else if (in_clientUid != callingUid && !owner->isTrustedCaller(callingPid, callingUid)) {
+ ALOGE("addClientUid rejected (clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "addClientUid rejected (clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientUid, callingUid);
+ }
+
+ *_aidl_return = owner->mSessionController->addClientUid(mClientId, in_sessionId, in_clientUid);
+ return Status::ok();
+}
+
+Status TranscodingClientManager::ClientImpl::getClientUids(
+ int32_t in_sessionId, std::optional<std::vector<int32_t>>* _aidl_return) {
+ *_aidl_return = std::nullopt;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ std::vector<int32_t> result;
+
+ if (owner->mSessionController->getClientUids(mClientId, in_sessionId, &result)) {
+ *_aidl_return = result;
+ }
+ return Status::ok();
+}
+
Status TranscodingClientManager::ClientImpl::unregister() {
bool abandoned = mAbandoned.exchange(true);
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
index aeabe0f..ea3e518 100644
--- a/media/libmediatranscoding/TranscodingSessionController.cpp
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -19,9 +19,11 @@
#define VALIDATE_STATE 1
+#include <android/permission_manager.h>
#include <inttypes.h>
#include <media/TranscodingSessionController.h>
#include <media/TranscodingUidPolicy.h>
+#include <utils/AndroidThreads.h>
#include <utils/Log.h>
#include <thread>
@@ -161,6 +163,7 @@
// Unfortunately std::unique_lock is incompatible with -Wthread-safety.
void TranscodingSessionController::Watchdog::threadLoop() NO_THREAD_SAFETY_ANALYSIS {
+ androidSetThreadPriority(0 /*tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
std::unique_lock<std::mutex> lock{mLock};
while (!mAbort) {
@@ -193,8 +196,9 @@
~Pacer() = default;
+ bool onSessionStarted(uid_t uid, uid_t callingUid);
void onSessionCompleted(uid_t uid, std::chrono::microseconds runningTime);
- bool onSessionStarted(uid_t uid);
+ void onSessionCancelled(uid_t uid);
private:
// Threshold of time between finish/start below which a back-to-back start is counted.
@@ -205,26 +209,60 @@
int32_t mBurstTimeQuotaSec;
struct UidHistoryEntry {
- std::chrono::steady_clock::time_point lastCompletedTime;
+ bool sessionActive = false;
int32_t burstCount = 0;
std::chrono::steady_clock::duration burstDuration{0};
+ std::chrono::steady_clock::time_point lastCompletedTime;
};
std::map<uid_t, UidHistoryEntry> mUidHistoryMap;
+ std::unordered_set<uid_t> mMtpUids;
+ std::unordered_set<uid_t> mNonMtpUids;
+
+ bool isSubjectToQuota(uid_t uid, uid_t callingUid);
};
-void TranscodingSessionController::Pacer::onSessionCompleted(
- uid_t uid, std::chrono::microseconds runningTime) {
- if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
- mUidHistoryMap.emplace(uid, UidHistoryEntry{});
+bool TranscodingSessionController::Pacer::isSubjectToQuota(uid_t uid, uid_t callingUid) {
+ // Submitting with self uid is not limited (which can only happen if it's used as an
+ // app-facing API). MediaProvider usage always submits on behalf of other uids.
+ if (uid == callingUid) {
+ return false;
}
- mUidHistoryMap[uid].lastCompletedTime = std::chrono::steady_clock::now();
- mUidHistoryMap[uid].burstCount++;
- mUidHistoryMap[uid].burstDuration += runningTime;
+
+ if (mMtpUids.find(uid) != mMtpUids.end()) {
+ return false;
+ }
+
+ if (mNonMtpUids.find(uid) != mNonMtpUids.end()) {
+ return true;
+ }
+
+ // We don't have MTP permission info about this uid yet, check permission and save the result.
+ int32_t result;
+ if (__builtin_available(android __TRANSCODING_MIN_API__, *)) {
+ if (APermissionManager_checkPermission("android.permission.ACCESS_MTP", -1 /*pid*/, uid,
+ &result) == PERMISSION_MANAGER_STATUS_OK &&
+ result == PERMISSION_MANAGER_PERMISSION_GRANTED) {
+ mMtpUids.insert(uid);
+ return false;
+ }
+ }
+
+ mNonMtpUids.insert(uid);
+ return true;
}
-bool TranscodingSessionController::Pacer::onSessionStarted(uid_t uid) {
- // If uid doesn't exist, this uid has no completed sessions. Skip.
+bool TranscodingSessionController::Pacer::onSessionStarted(uid_t uid, uid_t callingUid) {
+ if (!isSubjectToQuota(uid, callingUid)) {
+ ALOGI("Pacer::onSessionStarted: uid %d (caling uid: %d): not subject to quota", uid,
+ callingUid);
+ return true;
+ }
+
+ // If uid doesn't exist, only insert the entry and mark session active. Skip quota checking.
if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
+ mUidHistoryMap.emplace(uid, UidHistoryEntry{});
+ mUidHistoryMap[uid].sessionActive = true;
+ ALOGV("Pacer::onSessionStarted: uid %d: new", uid);
return true;
}
@@ -236,25 +274,55 @@
std::chrono::steady_clock::now() - mUidHistoryMap[uid].lastCompletedTime;
if (mUidHistoryMap[uid].burstCount >= mBurstCountQuota &&
mUidHistoryMap[uid].burstDuration >= std::chrono::seconds(mBurstTimeQuotaSec)) {
- ALOGW("Pacer: uid %d: over quota, burst count %d, time %lldms", uid,
- mUidHistoryMap[uid].burstCount, (long long)mUidHistoryMap[uid].burstDuration.count());
+ ALOGW("Pacer::onSessionStarted: uid %d: over quota, burst count %d, time %lldms", uid,
+ mUidHistoryMap[uid].burstCount,
+ (long long)mUidHistoryMap[uid].burstDuration.count() / 1000000);
return false;
}
// If not over quota, allow the session, and reset as long as this is not too close
// to previous completion.
if (timeSinceLastComplete > std::chrono::milliseconds(mBurstThresholdMs)) {
- ALOGV("Pacer: uid %d: reset quota", uid);
+ ALOGV("Pacer::onSessionStarted: uid %d: reset quota", uid);
mUidHistoryMap[uid].burstCount = 0;
mUidHistoryMap[uid].burstDuration = std::chrono::milliseconds(0);
} else {
- ALOGV("Pacer: uid %d: burst count %d, time %lldms", uid, mUidHistoryMap[uid].burstCount,
- (long long)mUidHistoryMap[uid].burstDuration.count());
+ ALOGV("Pacer::onSessionStarted: uid %d: burst count %d, time %lldms", uid,
+ mUidHistoryMap[uid].burstCount,
+ (long long)mUidHistoryMap[uid].burstDuration.count() / 1000000);
}
+ mUidHistoryMap[uid].sessionActive = true;
return true;
}
+void TranscodingSessionController::Pacer::onSessionCompleted(
+ uid_t uid, std::chrono::microseconds runningTime) {
+ // Skip quota update if this uid missed the start. (Could happen if the uid is added via
+ // addClientUid() after the session start.)
+ if (mUidHistoryMap.find(uid) == mUidHistoryMap.end() || !mUidHistoryMap[uid].sessionActive) {
+ ALOGV("Pacer::onSessionCompleted: uid %d: not started", uid);
+ return;
+ }
+ ALOGV("Pacer::onSessionCompleted: uid %d: runningTime %lld", uid, runningTime.count() / 1000);
+ mUidHistoryMap[uid].sessionActive = false;
+ mUidHistoryMap[uid].burstCount++;
+ mUidHistoryMap[uid].burstDuration += runningTime;
+ mUidHistoryMap[uid].lastCompletedTime = std::chrono::steady_clock::now();
+}
+
+void TranscodingSessionController::Pacer::onSessionCancelled(uid_t uid) {
+ if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
+ ALOGV("Pacer::onSessionCancelled: uid %d: not present", uid);
+ return;
+ }
+ // This is only called if a uid is removed from a session (because it was killed,
+ // or the original submitting client went away but the session was kept for offline use).
+ // Since the uid is going to miss the onSessionCompleted(), we can't track this
+ // session, and have to check back at next onSessionStarted().
+ mUidHistoryMap[uid].sessionActive = false;
+}
+
///////////////////////////////////////////////////////////////////////////////
TranscodingSessionController::TranscodingSessionController(
@@ -372,6 +440,14 @@
}
uid_t topUid = *mUidSortedList.begin();
+ // If the current session is running, and it's in the topUid's queue, let it continue
+ // to run even if it's not the earliest in that uid's queue.
+ // For example, uid(B) is added to a session while it's pending in uid(A)'s queue, then
+ // B is brought to front which caused the session to run, then user switches back to A.
+ if (mCurrentSession != nullptr && mCurrentSession->getState() == Session::RUNNING &&
+ mCurrentSession->allClientUids.count(topUid) > 0) {
+ return mCurrentSession;
+ }
SessionKeyType topSessionKey = *mSessionQueues[topUid].begin();
return &mSessionMap[topSessionKey];
}
@@ -427,7 +503,7 @@
void TranscodingSessionController::updateCurrentSession_l() {
Session* curSession = mCurrentSession;
- Session* topSession = getTopSession_l();
+ Session* topSession = nullptr;
// Delayed init of transcoder and watchdog.
if (mTranscoder == nullptr) {
@@ -458,9 +534,18 @@
// Otherwise, ensure topSession is running.
if (topSession->getState() == Session::NOT_STARTED) {
- if (!mPacer->onSessionStarted(topSession->clientUid)) {
- // Unfortunately this uid is out of quota for new sessions.
- // Drop this sesion and try another one.
+ // Check if at least one client has quota to start the session.
+ bool keepForClient = false;
+ for (uid_t uid : topSession->allClientUids) {
+ if (mPacer->onSessionStarted(uid, topSession->callingUid)) {
+ keepForClient = true;
+ // DO NOT break here, because book-keeping still needs to happen
+ // for the other uids.
+ }
+ }
+ if (!keepForClient) {
+ // Unfortunately all uids requesting this session are out of quota.
+ // Drop this session and try the next one.
{
auto clientCallback = mSessionMap[topSession->key].callback.lock();
if (clientCallback != nullptr) {
@@ -484,8 +569,35 @@
mCurrentSession = topSession;
}
-void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey,
- Session::State finalState) {
+void TranscodingSessionController::addUidToSession_l(uid_t clientUid,
+ const SessionKeyType& sessionKey) {
+ // If it's an offline session, the queue was already added in constructor.
+ // If it's a real-time sessions, check if a queue is already present for the uid,
+ // and add a new queue if needed.
+ if (clientUid != OFFLINE_UID) {
+ if (mSessionQueues.count(clientUid) == 0) {
+ mUidPolicy->registerMonitorUid(clientUid);
+ if (mUidPolicy->isUidOnTop(clientUid)) {
+ mUidSortedList.push_front(clientUid);
+ } else {
+ // Shouldn't be submitting real-time requests from non-top app,
+ // put it in front of the offline queue.
+ mUidSortedList.insert(mOfflineUidIterator, clientUid);
+ }
+ } else if (clientUid != *mUidSortedList.begin()) {
+ if (mUidPolicy->isUidOnTop(clientUid)) {
+ mUidSortedList.remove(clientUid);
+ mUidSortedList.push_front(clientUid);
+ }
+ }
+ }
+ // Append this session to the uid's queue.
+ mSessionQueues[clientUid].push_back(sessionKey);
+}
+
+void TranscodingSessionController::removeSession_l(
+ const SessionKeyType& sessionKey, Session::State finalState,
+ const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid) {
ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
if (mSessionMap.count(sessionKey) == 0) {
@@ -494,26 +606,48 @@
}
// Remove session from uid's queue.
- const uid_t uid = mSessionMap[sessionKey].clientUid;
- SessionQueueType& sessionQueue = mSessionQueues[uid];
- auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
- if (it == sessionQueue.end()) {
- ALOGE("couldn't find session %s in queue for uid %d", sessionToString(sessionKey).c_str(),
- uid);
- return;
+ bool uidQueueRemoved = false;
+ std::unordered_set<uid_t> remainingUids;
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ if (keepUid != nullptr) {
+ if ((*keepUid)(uid)) {
+ remainingUids.insert(uid);
+ continue;
+ }
+ // If we have uids to keep, the session is not going to any final
+ // state, so we can't use onSessionCompleted() as the running time will
+ // not be valid. Only notify the pacer to stop tracking this session.
+ mPacer->onSessionCancelled(uid);
+ }
+ SessionQueueType& sessionQueue = mSessionQueues[uid];
+ auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
+ if (it == sessionQueue.end()) {
+ ALOGW("couldn't find session %s in queue for uid %d",
+ sessionToString(sessionKey).c_str(), uid);
+ continue;
+ }
+ sessionQueue.erase(it);
+
+ // If this is the last session in a real-time queue, remove this uid's queue.
+ if (uid != OFFLINE_UID && sessionQueue.empty()) {
+ mUidSortedList.remove(uid);
+ mSessionQueues.erase(uid);
+ mUidPolicy->unregisterMonitorUid(uid);
+
+ uidQueueRemoved = true;
+ }
}
- sessionQueue.erase(it);
- // If this is the last session in a real-time queue, remove this uid's queue.
- if (uid != OFFLINE_UID && sessionQueue.empty()) {
- mUidSortedList.remove(uid);
- mSessionQueues.erase(uid);
- mUidPolicy->unregisterMonitorUid(uid);
-
+ if (uidQueueRemoved) {
std::unordered_set<uid_t> topUids = mUidPolicy->getTopUids();
moveUidsToTop_l(topUids, false /*preserveTopUid*/);
}
+ if (keepUid != nullptr) {
+ mSessionMap[sessionKey].allClientUids = remainingUids;
+ return;
+ }
+
// Clear current session.
if (mCurrentSession == &mSessionMap[sessionKey]) {
mCurrentSession = nullptr;
@@ -521,9 +655,10 @@
setSessionState_l(&mSessionMap[sessionKey], finalState);
- if (finalState == Session::FINISHED || finalState == Session::ERROR) {
- mPacer->onSessionCompleted(mSessionMap[sessionKey].clientUid,
- mSessionMap[sessionKey].runningTime);
+ // We can use onSessionCompleted() even for CANCELLED, because runningTime is
+ // now updated by setSessionState_l().
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ mPacer->onSessionCompleted(uid, mSessionMap[sessionKey].runningTime);
}
mSessionHistory.push_back(mSessionMap[sessionKey]);
@@ -617,34 +752,13 @@
// Add session to session map.
mSessionMap[sessionKey].key = sessionKey;
- mSessionMap[sessionKey].clientUid = clientUid;
mSessionMap[sessionKey].callingUid = callingUid;
+ mSessionMap[sessionKey].allClientUids.insert(clientUid);
mSessionMap[sessionKey].request = request;
mSessionMap[sessionKey].callback = callback;
setSessionState_l(&mSessionMap[sessionKey], Session::NOT_STARTED);
- // If it's an offline session, the queue was already added in constructor.
- // If it's a real-time sessions, check if a queue is already present for the uid,
- // and add a new queue if needed.
- if (clientUid != OFFLINE_UID) {
- if (mSessionQueues.count(clientUid) == 0) {
- mUidPolicy->registerMonitorUid(clientUid);
- if (mUidPolicy->isUidOnTop(clientUid)) {
- mUidSortedList.push_front(clientUid);
- } else {
- // Shouldn't be submitting real-time requests from non-top app,
- // put it in front of the offline queue.
- mUidSortedList.insert(mOfflineUidIterator, clientUid);
- }
- } else if (clientUid != *mUidSortedList.begin()) {
- if (mUidPolicy->isUidOnTop(clientUid)) {
- mUidSortedList.remove(clientUid);
- mUidSortedList.push_front(clientUid);
- }
- }
- }
- // Append this session to the uid's queue.
- mSessionQueues[clientUid].push_back(sessionKey);
+ addUidToSession_l(clientUid, sessionKey);
updateCurrentSession_l();
@@ -657,14 +771,20 @@
ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
- std::list<SessionKeyType> sessionsToRemove;
+ std::list<SessionKeyType> sessionsToRemove, sessionsForOffline;
std::scoped_lock lock{mLock};
if (sessionId < 0) {
for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
- if (it->first.first == clientId && it->second.clientUid != OFFLINE_UID) {
- sessionsToRemove.push_back(it->first);
+ if (it->first.first == clientId) {
+ // If there is an offline request, only keep the offline client;
+ // otherwise remove the session.
+ if (it->second.allClientUids.count(OFFLINE_UID) > 0) {
+ sessionsForOffline.push_back(it->first);
+ } else {
+ sessionsToRemove.push_back(it->first);
+ }
}
}
} else {
@@ -688,6 +808,12 @@
removeSession_l(*it, Session::CANCELED);
}
+ auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+ [](uid_t uid) { return uid == OFFLINE_UID; });
+ for (auto it = sessionsForOffline.begin(); it != sessionsForOffline.end(); ++it) {
+ removeSession_l(*it, Session::CANCELED, keepUid);
+ }
+
// Start next session.
updateCurrentSession_l();
@@ -695,6 +821,51 @@
return true;
}
+bool TranscodingSessionController::addClientUid(ClientIdType clientId, SessionIdType sessionId,
+ uid_t clientUid) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ if (mSessionMap[sessionKey].allClientUids.count(clientUid) > 0) {
+ ALOGE("session %s already has uid %d", sessionToString(sessionKey).c_str(), clientUid);
+ return false;
+ }
+
+ mSessionMap[sessionKey].allClientUids.insert(clientUid);
+ addUidToSession_l(clientUid, sessionKey);
+
+ updateCurrentSession_l();
+
+ validateState_l();
+ return true;
+}
+
+bool TranscodingSessionController::getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ out_clientUids->clear();
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ if (uid != OFFLINE_UID) {
+ out_clientUids->push_back(uid);
+ }
+ }
+ return true;
+}
+
bool TranscodingSessionController::getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
@@ -886,6 +1057,58 @@
validateState_l();
}
+void TranscodingSessionController::onUidGone(uid_t goneUid) {
+ ALOGD("%s: gone uid %u", __FUNCTION__, goneUid);
+
+ std::list<SessionKeyType> sessionsToRemove, sessionsForOtherUids;
+
+ std::scoped_lock lock{mLock};
+
+ for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
+ if (it->second.allClientUids.count(goneUid) > 0) {
+ // If goneUid is the only uid, remove the session; otherwise, only
+ // remove the uid from the session.
+ if (it->second.allClientUids.size() > 1) {
+ sessionsForOtherUids.push_back(it->first);
+ } else {
+ sessionsToRemove.push_back(it->first);
+ }
+ }
+ }
+
+ for (auto it = sessionsToRemove.begin(); it != sessionsToRemove.end(); ++it) {
+ // If the session has ever been started, stop it now.
+ // Note that stop() is needed even if the session is currently paused. This instructs
+ // the transcoder to discard any states for the session, otherwise the states may
+ // never be discarded.
+ if (mSessionMap[*it].getState() != Session::NOT_STARTED) {
+ mTranscoder->stop(it->first, it->second);
+ }
+
+ {
+ auto clientCallback = mSessionMap[*it].callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFailed(it->second,
+ TranscodingErrorCode::kUidGoneCancelled);
+ }
+ }
+
+ // Remove the session.
+ removeSession_l(*it, Session::CANCELED);
+ }
+
+ auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+ [goneUid](uid_t uid) { return uid != goneUid; });
+ for (auto it = sessionsForOtherUids.begin(); it != sessionsForOtherUids.end(); ++it) {
+ removeSession_l(*it, Session::CANCELED, keepUid);
+ }
+
+ // Start next session.
+ updateCurrentSession_l();
+
+ validateState_l();
+}
+
void TranscodingSessionController::onResourceAvailable() {
std::scoped_lock lock{mLock};
@@ -938,7 +1161,8 @@
LOG_ALWAYS_FATAL_IF(*mOfflineUidIterator != OFFLINE_UID,
"mOfflineUidIterator not pointing to offline uid");
LOG_ALWAYS_FATAL_IF(mUidSortedList.size() != mSessionQueues.size(),
- "mUidList and mSessionQueues size mismatch");
+ "mUidSortedList and mSessionQueues size mismatch, %zu vs %zu",
+ mUidSortedList.size(), mSessionQueues.size());
int32_t totalSessions = 0;
for (auto uid : mUidSortedList) {
@@ -952,8 +1176,14 @@
totalSessions += mSessionQueues[uid].size();
}
- LOG_ALWAYS_FATAL_IF(mSessionMap.size() != totalSessions,
- "mSessions size doesn't match total sessions counted from uid queues");
+ int32_t totalSessionsAlternative = 0;
+ for (auto const& s : mSessionMap) {
+ totalSessionsAlternative += s.second.allClientUids.size();
+ }
+ LOG_ALWAYS_FATAL_IF(totalSessions != totalSessionsAlternative,
+ "session count (including dup) from mSessionQueues doesn't match that from "
+ "mSessionMap, %d vs %d",
+ totalSessions, totalSessionsAlternative);
#endif // VALIDATE_STATE
}
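
The keepUid argument threaded through removeSession_l() above is just a shared predicate over the session's client uids: uids matching the predicate stay attached to the session, everything else is dropped from the per-uid queues. A minimal standalone sketch of that partitioning (illustrative only, not part of the change):

// Sketch only: mirrors how removeSession_l() splits a session's client uids
// using an optional keepUid predicate. Names are illustrative.
#include <sys/types.h>
#include <functional>
#include <memory>
#include <unordered_set>

using UidSet = std::unordered_set<uid_t>;
using UidPredicate = std::shared_ptr<std::function<bool(uid_t)>>;

UidSet keptUids(const UidSet& allClientUids, const UidPredicate& keepUid) {
    UidSet remaining;
    for (uid_t uid : allClientUids) {
        if (keepUid != nullptr && (*keepUid)(uid)) {
            remaining.insert(uid);  // uid keeps requesting the session
        }
        // otherwise the uid's queue entry for this session is removed
    }
    return remaining;
}

// Example predicate from the cancel path: keep only the offline placeholder uid.
// auto keepOffline = std::make_shared<std::function<bool(uid_t)>>(
//         [](uid_t uid) { return uid == OFFLINE_UID; });
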
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
index b5eb028..0a1ffbc 100644
--- a/media/libmediatranscoding/TranscodingUidPolicy.cpp
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -141,38 +141,34 @@
}
void TranscodingUidPolicy::onUidStateChanged(uid_t uid, int32_t procState) {
- ALOGV("onUidStateChanged: %u, procState %d", uid, procState);
+ ALOGV("onUidStateChanged: uid %u, procState %d", uid, procState);
bool topUidSetChanged = false;
+ bool isUidGone = false;
std::unordered_set<uid_t> topUids;
{
Mutex::Autolock _l(mUidLock);
auto it = mUidStateMap.find(uid);
if (it != mUidStateMap.end() && it->second != procState) {
- // Top set changed if 1) the uid is in the current top uid set, or 2) the
- // new procState is at least the same priority as the current top uid state.
- bool isUidCurrentTop =
- mTopUidState != IMPORTANCE_UNKNOWN && mStateUidMap[mTopUidState].count(uid) > 0;
- bool isNewStateHigherThanTop =
- procState != IMPORTANCE_UNKNOWN &&
- (procState <= mTopUidState || mTopUidState == IMPORTANCE_UNKNOWN);
- topUidSetChanged = (isUidCurrentTop || isNewStateHigherThanTop);
+ isUidGone = (procState == AACTIVITYMANAGER_IMPORTANCE_GONE);
+
+ topUids = mStateUidMap[mTopUidState];
// Move uid to the new procState.
mStateUidMap[it->second].erase(uid);
mStateUidMap[procState].insert(uid);
it->second = procState;
- if (topUidSetChanged) {
- updateTopUid_l();
-
+ updateTopUid_l();
+ if (topUids != mStateUidMap[mTopUidState]) {
// Make a copy of the uid set for callback.
topUids = mStateUidMap[mTopUidState];
+ topUidSetChanged = true;
}
}
}
- ALOGV("topUidSetChanged: %d", topUidSetChanged);
+ ALOGV("topUidSetChanged: %d, isUidGone %d", topUidSetChanged, isUidGone);
if (topUidSetChanged) {
auto callback = mUidPolicyCallback.lock();
@@ -180,6 +176,12 @@
callback->onTopUidsChanged(topUids);
}
}
+ if (isUidGone) {
+ auto callback = mUidPolicyCallback.lock();
+ if (callback != nullptr) {
+ callback->onUidGone(uid);
+ }
+ }
}
void TranscodingUidPolicy::updateTopUid_l() {
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
index 151e3d0..9ef9052 100644
--- a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
+++ b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
@@ -55,6 +55,32 @@
boolean getSessionWithId(in int sessionId, out TranscodingSessionParcel session);
/**
+ * Add an additional client uid requesting a session.
+ *
+ * @sessionId the session id to which to add the additional client uid.
+ * @clientUid the additional client uid to be added.
+ * @return false if the session doesn't exist or the client is already requesting the
+ * session, true otherwise.
+ */
+ boolean addClientUid(in int sessionId, int clientUid);
+
+ /**
+ * Retrieves the (unsorted) list of all clients requesting a session.
+ *
+ * Note that if a session was submitted with offline priority (
+ * TranscodingSessionPriority::kUnspecified), it initially will not be considered requested
+ * by any particular client, because the client could go away any time after the submission.
+ * However, additional uids could be added via addClientUid() after the submission, which
+ * essentially makes the request a real-time request instead of an offline request.
+ *
+ * @sessionId the session id for which to retrieve the client uid list.
+ * @return the (unsorted) client uid list, or null if the session doesn't exist.
+ */
+ @nullable
+ int[] getClientUids(in int sessionId);
+
+ /**
* Unregister the client with the MediaTranscodingService.
*
* Client will not be able to perform any more transcoding after unregister.
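
For orientation, a minimal caller-side sketch of the two new methods through the NDK AIDL backend (not part of this change; the client handle, header path, and session id are placeholders following the generated-binding conventions):

// Sketch (not part of this change): client-side use of addClientUid()/getClientUids().
#include <aidl/android/media/ITranscodingClient.h>  // generated NDK binding
#include <unistd.h>

#include <memory>
#include <optional>
#include <vector>

using ::aidl::android::media::ITranscodingClient;

void attachSelfToSession(const std::shared_ptr<ITranscodingClient>& client, int32_t sessionId) {
    bool result = false;
    // Attach the calling app's uid; result is false if the session is gone
    // or the uid is already attached.
    client->addClientUid(sessionId, ::getuid(), &result);

    // Read back the (unsorted) uid list; std::nullopt means the session
    // doesn't exist. The offline placeholder uid is never reported.
    std::optional<std::vector<int32_t>> clientUids;
    if (client->getClientUids(sessionId, &clientUids).isOk() && clientUids.has_value()) {
        // clientUids now lists every uid waiting on this session.
    }
}
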
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
index 5349fe1..fdd86c7 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
@@ -38,4 +38,5 @@
kErrorIO = kPrivateErrorFirst + 5,
kInsufficientResources = kPrivateErrorFirst + 6,
kWatchdogTimeout = kPrivateErrorFirst + 7,
+ kUidGoneCancelled = kPrivateErrorFirst + 8,
}
\ No newline at end of file
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
index 0d13607..9311e2e 100644
--- a/media/libmediatranscoding/include/media/ControllerClientInterface.h
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -60,6 +60,29 @@
virtual bool getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) = 0;
+ /**
+ * Add an additional client uid requesting the session identified by <clientId, sessionId>.
+ *
+ * Returns false if the session doesn't exist, or the client is already requesting the
+ * session. Returns true otherwise.
+ */
+ virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid);
+
+ /**
+ * Retrieves the (unsorted) list of all clients requesting the session identified by
+ * <clientId, sessionId>.
+ *
+ * Note that if a session was submitted with offline priority (
+ * TranscodingSessionPriority::kUnspecified), it initially will not be considered requested
+ * by any particular client, because the client could go away any time after the submission.
+ * However, additional uids could be added via addClientUid() after the submission, which
+ * essentially makes the request a real-time request instead of an offline request.
+ *
+ * Returns false if the session doesn't exist. Returns true otherwise.
+ */
+ virtual bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids);
+
protected:
virtual ~ControllerClientInterface() = default;
};
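
By contrast, the native controller interface keeps the bool-plus-out-parameter shape; a short usage sketch (the controller pointer and ids are placeholders, not part of the change):

// Sketch (not part of this change): service-side query through ControllerClientInterface.
std::vector<int32_t> uids;
if (!controller->getClientUids(clientId, sessionId, &uids)) {
    // Session doesn't exist.
} else if (uids.empty()) {
    // Session exists but is only kept for offline processing; no real-time
    // client is currently waiting on it.
}
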
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index b2d6f0a..2691201 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -54,6 +54,9 @@
bool cancel(ClientIdType clientId, SessionIdType sessionId) override;
bool getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) override;
+ bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) override;
+ bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) override;
// ~ControllerClientInterface
// TranscoderCallbackInterface
@@ -70,6 +73,7 @@
// UidPolicyCallbackInterface
void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override;
+ void onUidGone(uid_t goneUid) override;
// ~UidPolicyCallbackInterface
// ResourcePolicyCallbackInterface
@@ -120,8 +124,8 @@
DROPPED_BY_PACER,
};
SessionKeyType key;
- uid_t clientUid;
uid_t callingUid;
+ std::unordered_set<uid_t> allClientUids;
int32_t lastProgress = 0;
int32_t pauseCount = 0;
std::chrono::time_point<std::chrono::steady_clock> stateEnterTime;
@@ -184,7 +188,9 @@
void dumpSession_l(const Session& session, String8& result, bool closedSession = false);
Session* getTopSession_l();
void updateCurrentSession_l();
- void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState);
+ void addUidToSession_l(uid_t uid, const SessionKeyType& sessionKey);
+ void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState,
+ const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid = nullptr);
void moveUidsToTop_l(const std::unordered_set<uid_t>& uids, bool preserveTopUid);
void setSessionState_l(Session* session, Session::State state);
void notifyClient(ClientIdType clientId, SessionIdType sessionId, const char* reason,
diff --git a/media/libmediatranscoding/include/media/UidPolicyInterface.h b/media/libmediatranscoding/include/media/UidPolicyInterface.h
index 05d8db0..445a2ff 100644
--- a/media/libmediatranscoding/include/media/UidPolicyInterface.h
+++ b/media/libmediatranscoding/include/media/UidPolicyInterface.h
@@ -48,6 +48,9 @@
// has changed. The receiver of this callback should adjust accordingly.
virtual void onTopUidsChanged(const std::unordered_set<uid_t>& uids) = 0;
+ // Called when a uid is gone.
+ virtual void onUidGone(uid_t goneUid) = 0;
+
protected:
virtual ~UidPolicyCallbackInterface() = default;
};
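
Because onUidGone() is pure virtual, any existing implementer of UidPolicyCallbackInterface now needs an override; a minimal sketch (class name and comments illustrative):

// Sketch (not part of this change): a policy callback handling both signals.
class MyScheduler : public UidPolicyCallbackInterface {
    void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override {
        // Re-evaluate which uid's session should be running.
    }
    void onUidGone(uid_t goneUid) override {
        // Drop, or hand over to the remaining uids, any session goneUid was requesting.
    }
};
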
diff --git a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
index 57a2e27..9233410 100644
--- a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
@@ -50,6 +50,7 @@
constexpr const char* kClientName = "TestClientName";
constexpr const char* kClientPackage = "TestClientPackage";
+constexpr uid_t OFFLINE_UID = -1;
#define SESSION(n) (n)
@@ -135,8 +136,8 @@
virtual ~TestController() { ALOGI("TestController Destroyed"); }
- bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t /*callingUid*/, uid_t /*uid*/,
- const TranscodingRequestParcel& request,
+ bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t /*callingUid*/,
+ uid_t clientUid, const TranscodingRequestParcel& request,
const std::weak_ptr<ITranscodingClientCallback>& clientCallback) override {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
if (mSessions.count(sessionKey) > 0) {
@@ -149,13 +150,47 @@
return false;
}
+ if (request.priority == TranscodingSessionPriority::kUnspecified) {
+ clientUid = OFFLINE_UID;
+ }
+
mSessions[sessionKey].request = request;
mSessions[sessionKey].callback = clientCallback;
+ mSessions[sessionKey].allClientUids.insert(clientUid);
mLastSession = sessionKey;
return true;
}
+ bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+ if (mSessions[sessionKey].allClientUids.count(clientUid) > 0) {
+ return false;
+ }
+ mSessions[sessionKey].allClientUids.insert(clientUid);
+ return true;
+ }
+
+ bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+ out_clientUids->clear();
+ for (uid_t uid : mSessions[sessionKey].allClientUids) {
+ if (uid != OFFLINE_UID) {
+ out_clientUids->push_back(uid);
+ }
+ }
+ return true;
+ }
+
bool cancel(ClientIdType clientId, SessionIdType sessionId) override {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
@@ -211,6 +246,7 @@
struct Session {
TranscodingRequest request;
std::weak_ptr<ITranscodingClientCallback> callback;
+ std::unordered_set<uid_t> allClientUids;
};
typedef std::pair<ClientIdType, SessionIdType> SessionKeyType;
@@ -537,4 +573,93 @@
EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
}
+TEST_F(TranscodingClientManagerTest, TestAddGetClientUidsInvalidArgs) {
+ addMultipleClients();
+
+ bool result;
+ std::optional<std::vector<int32_t>> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Add/Get clients with invalid session id fails.
+ EXPECT_TRUE(mClient1->addClientUid(-1, ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+ EXPECT_TRUE(mClient1->getClientUids(-1, &clientUids).isOk());
+ EXPECT_EQ(clientUids, std::nullopt);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+ EXPECT_EQ(clientUids, std::nullopt);
+
+ unregisterMultipleClients();
+}
+
+TEST_F(TranscodingClientManagerTest, TestAddGetClientUids) {
+ addMultipleClients();
+
+ bool result;
+ std::optional<std::vector<int32_t>> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Submit one real-time session.
+ request.sourceFilePath = "test_source_file_0";
+ request.destinationFilePath = "test_desintaion_file_0";
+ request.priority = TranscodingSessionPriority::kNormal;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+
+ // Should have own uid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 1);
+ EXPECT_EQ((*clientUids)[0], ownUid);
+
+ // Adding invalid client uid should fail.
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), kInvalidClientUid, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Adding own uid again should fail.
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Submit one offline session.
+ request.sourceFilePath = "test_source_file_1";
+ request.destinationFilePath = "test_desintaion_file_1";
+ request.priority = TranscodingSessionPriority::kUnspecified;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+
+ // Should not have own uid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 0);
+
+ // Add own uid (with IMediaTranscodingService::USE_CALLING_UID) again, should succeed.
+ EXPECT_TRUE(
+ mClient1->addClientUid(SESSION(1), IMediaTranscodingService::USE_CALLING_UID, &result)
+ .isOk());
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 1);
+ EXPECT_EQ((*clientUids)[0], ownUid);
+
+ // Add more uids, should succeed.
+ int32_t kFakeUid = ::getuid() ^ 0x1;
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(1), kFakeUid, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids->begin(), clientUids->end());
+ EXPECT_EQ(uidSet.size(), 2);
+ EXPECT_EQ(uidSet.count(ownUid), 1);
+ EXPECT_EQ(uidSet.count(kFakeUid), 1);
+
+ unregisterMultipleClients();
+}
+
} // namespace android
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index 560d1fe..ef9c4f8 100644
--- a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -234,11 +234,14 @@
}
struct TestClientCallback : public BnTranscodingClientCallback {
- TestClientCallback(TestTranscoder* owner, int64_t clientId)
- : mOwner(owner), mClientId(clientId) {
+ TestClientCallback(TestTranscoder* owner, ClientIdType clientId, uid_t clientUid)
+ : mOwner(owner), mClientId(clientId), mClientUid(clientUid) {
ALOGD("TestClient Created");
}
+ ClientIdType clientId() const { return mClientId; }
+ uid_t clientUid() const { return mClientUid; }
+
Status openFileDescriptor(const std::string& /*in_fileUri*/, const std::string& /*in_mode*/,
::ndk::ScopedFileDescriptor* /*_aidl_return*/) override {
return Status::ok();
@@ -277,7 +280,8 @@
private:
TestTranscoder* mOwner;
- int64_t mClientId;
+ ClientIdType mClientId;
+ uid_t mClientUid;
TestClientCallback(const TestClientCallback&) = delete;
TestClientCallback& operator=(const TestClientCallback&) = delete;
};
@@ -313,14 +317,14 @@
// Set priority only, ignore other fields for now.
mOfflineRequest.priority = TranscodingSessionPriority::kUnspecified;
mRealtimeRequest.priority = TranscodingSessionPriority::kHigh;
- mClientCallback0 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(0));
- mClientCallback1 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(1));
- mClientCallback2 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(2));
- mClientCallback3 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(3));
+ mClientCallback0 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(0), UID(0));
+ mClientCallback1 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(1), UID(1));
+ mClientCallback2 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(2), UID(2));
+ mClientCallback3 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(3), UID(3));
}
void TearDown() override { ALOGI("TranscodingSessionControllerTest tear down"); }
@@ -335,36 +339,71 @@
EXPECT_EQ(mTranscoder.use_count(), 2);
}
+ void testPacerHelper(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ false /*pauseLastSuccessSession*/, true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithPause(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ true /*pauseLastSuccessSession*/, true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithMultipleUids(int numSubmits, int sessionDurationMs, int expectedSuccess,
+ const std::shared_ptr<TestClientCallback>& client,
+ const std::vector<int>& additionalClientUids) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, client,
+ additionalClientUids, false /*pauseLastSuccessSession*/,
+ true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithSelfUid(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ false /*pauseLastSuccessSession*/, false /*useRealCallingUid*/);
+ }
+
void testPacerHelper(int numSubmits, int sessionDurationMs, int expectedSuccess,
- bool pauseLastSuccessSession = false) {
+ const std::shared_ptr<TestClientCallback>& client,
+ const std::vector<int>& additionalClientUids, bool pauseLastSuccessSession,
+ bool useRealCallingUid) {
+ uid_t callingUid = useRealCallingUid ? ::getuid() : client->clientUid();
for (int i = 0; i < numSubmits; i++) {
- mController->submit(CLIENT(0), SESSION(i), UID(0), UID(0),
- mRealtimeRequest, mClientCallback0);
+ mController->submit(client->clientId(), SESSION(i), callingUid, client->clientUid(),
+ mRealtimeRequest, client);
+ for (int additionalUid : additionalClientUids) {
+ mController->addClientUid(client->clientId(), SESSION(i), additionalUid);
+ }
}
for (int i = 0; i < expectedSuccess; i++) {
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Start(client->clientId(), SESSION(i)));
if ((i == expectedSuccess - 1) && pauseLastSuccessSession) {
// Insert a pause of 3 sec to the last success running session
mController->onThrottlingStarted();
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Pause(client->clientId(), SESSION(i)));
sleep(3);
mController->onThrottlingStopped();
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Resume(client->clientId(), SESSION(i)));
}
usleep(sessionDurationMs * 1000);
// Test half of Finish and half of Error, both should be counted as burst runs.
if (i & 1) {
- mController->onFinish(CLIENT(0), SESSION(i));
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(i)));
+ mController->onFinish(client->clientId(), SESSION(i));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Finished(client->clientId(), SESSION(i)));
} else {
- mController->onError(CLIENT(0), SESSION(i), TranscodingErrorCode::kUnknown);
+ mController->onError(client->clientId(), SESSION(i),
+ TranscodingErrorCode::kUnknown);
EXPECT_EQ(mTranscoder->popEvent(100000),
- TestTranscoder::Failed(CLIENT(0), SESSION(i)));
+ TestTranscoder::Failed(client->clientId(), SESSION(i)));
EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUnknown);
}
}
for (int i = expectedSuccess; i < numSubmits; i++) {
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Failed(client->clientId(), SESSION(i)));
EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kDroppedByService);
}
}
@@ -470,6 +509,83 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(3)));
}
+TEST_F(TranscodingSessionControllerTest, TestCancelSessionWithMultipleUids) {
+ ALOGD("TestCancelSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+
+ // Add UID(1) to the offline SESSION(2), SESSION(2) should start and SESSION(0) should pause.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+
+ // Add UID(1) to SESSION(1) as well.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Cancel SESSION(2), should be cancelled and SESSION(1) should start.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Cancel SESSION(1), should be cancelled and SESSION(0) should resume.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(1)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestCancelAllSessionsForClient) {
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ std::vector<int32_t> clientUids;
+ // Make some more uids blocked on the sessions.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 2);
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 2);
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+
+ // Cancel all sessions for CLIENT(0) with -1.
+ // Expect SESSION(0) and SESSION(1) to be gone.
+ // Expect SESSION(2) to remain with an empty client uid list (kept only for offline) and to start.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), -1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+}
+
TEST_F(TranscodingSessionControllerTest, TestFinishSession) {
ALOGD("TestFinishSession");
@@ -527,6 +643,45 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
}
+TEST_F(TranscodingSessionControllerTest, TestFinishSessionWithMultipleUids) {
+ ALOGD("TestFinishSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Start with unspecified top uid.
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(2)));
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // SESSION(0) should pause, SESSION(1) should start.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Finish SESSION(1), SESSION(2) (next in line for UID(1)) should start.
+ mController->onFinish(CLIENT(0), SESSION(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+
+ // Finish SESSION(2), SESSION(0) should resume.
+ mController->onFinish(CLIENT(0), SESSION(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+}
+
TEST_F(TranscodingSessionControllerTest, TestFailSession) {
ALOGD("TestFailSession");
@@ -588,6 +743,49 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
}
+TEST_F(TranscodingSessionControllerTest, TestFailSessionWithMultipleUids) {
+ ALOGD("TestFailSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Start with unspecified top uid.
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // SESSION(0) should pause, SESSION(1) should start.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Add UID(1) and UID(2) to SESSION(2).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(2)));
+
+ // Fail SESSION(1), SESSION(2) (next in line for UID(1)) should start.
+ mController->onError(CLIENT(0), SESSION(1), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+
+ // Fail SESSION(2), SESSION(0) should resume.
+ mController->onError(CLIENT(0), SESSION(2), TranscodingErrorCode::kInvalidOperation);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kInvalidOperation);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+}
+
TEST_F(TranscodingSessionControllerTest, TestTopUidChanged) {
ALOGD("TestTopUidChanged");
@@ -630,8 +828,59 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
}
+TEST_F(TranscodingSessionControllerTest, TestTopUidChangedMultipleUids) {
+ ALOGD("TestTopUidChangedMultipleUids");
+
+ // Start with unspecified top UID.
+ // Submit real-time session to CLIENT(0), session should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), UID(0), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Add UID(1) to SESSION(0), SESSION(0) should continue to run
+ // (no pause&resume of the same session).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(0) back to top, SESSION(0) should continue to run
+ // (no pause&resume of the same session).
+ mUidPolicy->setTop(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(2) to top.
+ mUidPolicy->setTop(UID(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ // Add UID(2) to the offline session, it should be started.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(1), SESSION(0), UID(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+
+ // Add UID(3) to CLIENT(0)'s SESSION(0).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(3)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ // Bring UID(3) to top, CLIENT(0)'s SESSION(0) should resume.
+ mUidPolicy->setTop(UID(3));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(1), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Now make UID(2) also blocked on CLIENT(0), SESSION(0).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(2)));
+
+ // Bring UID(2) back to top, CLIENT(0), SESSION(0) should continue to run (even if it's
+ // added to UID(2)'s queue later than CLIENT(1)'s SESSION(0)).
+ mUidPolicy->setTop(UID(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+}
+
TEST_F(TranscodingSessionControllerTest, TestTopUidSetChanged) {
- ALOGD("TestTopUidChanged_MultipleUids");
+ ALOGD("TestTopUidSetChanged");
// Start with unspecified top UID.
// Submit real-time session to CLIENT(0), session should start immediately.
@@ -684,6 +933,100 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
}
+TEST_F(TranscodingSessionControllerTest, TestUidGone) {
+ ALOGD("TestUidGone");
+
+ mUidPolicy->setTop(UID(0));
+ // Start with UID(0) on top.
+ // Submit real-time sessions to CLIENT(0); the first one should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), UID(1), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(1), SESSION(0), UID(1)));
+
+ // Bring UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ // CLIENT(0)'s SESSION(1) should start, SESSION(0) should pause.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+ // Tell the controller that UID(0) is gone; SESSION(0) should be cancelled.
+ mController->onUidGone(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ std::vector<int32_t> clientUids;
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+ EXPECT_EQ(clientUids[0], UID(1));
+
+ // Tell the controller that UID(1) is gone too.
+ mController->onUidGone(UID(1));
+ // CLIENT(1)'s SESSION(0) should start, CLIENT(0)'s SESSION(1) should be cancelled.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+ // CLIENT(1) SESSION(0) should not have any client uids as it's only kept for offline.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(1), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+}
+
+TEST_F(TranscodingSessionControllerTest, TestAddGetClientUids) {
+ ALOGD("TestAddGetClientUids");
+
+ // Add/get client uids with non-existent session, should fail.
+ std::vector<int32_t> clientUids;
+ uid_t ownUid = ::getuid();
+ EXPECT_FALSE(mController->addClientUid(CLIENT(0), SESSION(0), ownUid));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+
+ // Submit a real-time request.
+ EXPECT_TRUE(mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest,
+ mClientCallback0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Should have own uid in client uids.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+ EXPECT_EQ(clientUids[0], UID(0));
+
+ // Add UID(0) again should fail.
+ EXPECT_FALSE(mController->addClientUid(CLIENT(0), SESSION(0), UID(0)));
+
+ // Add own uid should succeed.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), ownUid));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids.begin(), clientUids.end());
+ EXPECT_EQ(uidSet.size(), 2);
+ EXPECT_EQ(uidSet.count(UID(0)), 1);
+ EXPECT_EQ(uidSet.count(ownUid), 1);
+
+ // Submit an offline request.
+ EXPECT_TRUE(mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mOfflineRequest,
+ mClientCallback0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Should not have own uid in client uids.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+
+ // Move UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ // Add UID(1) to offline session, offline session should start and SESSION(0) should pause.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+}
+
/* Test resource lost without thermal throttling */
TEST_F(TranscodingSessionControllerTest, TestResourceLost) {
ALOGD("TestResourceLost");
@@ -969,8 +1312,36 @@
TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerWithPause) {
ALOGD("TestTranscoderPacerDuringPause");
- testPacerHelper(12 /*numSubmits*/, 400 /*sessionDurationMs*/, 10 /*expectedSuccess*/,
- true /*pauseLastSuccessSession*/);
+ testPacerHelperWithPause(12 /*numSubmits*/, 400 /*sessionDurationMs*/, 10 /*expectedSuccess*/);
+}
+
+/*
+ * Test the case where multiple client uids request the same session. Session should only
+ * be dropped when all clients are over quota.
+ */
+TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerMultipleUids) {
+ ALOGD("TestTranscoderPacerMultipleUids");
+ // First, run mClientCallback0 to the point of no quota.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 10 /*expectedSuccess*/, mClientCallback0, {});
+ // Make UID(0) also block on Client1's sessions; Client1's quota should not be affected.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 10 /*expectedSuccess*/, mClientCallback1, {UID(0)});
+ // Make UID(10) block on Client2's sessions. We expect to see 11 successes (instead of 10),
+ // because addClientUid() is called after the submit, and the first session is already
+ // started by the time UID(10) is added. UID(10) allows us to run the 11th session;
+ // after that, both UID(10) and UID(2) are out of quota.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 11 /*expectedSuccess*/, mClientCallback2, {UID(10)});
+}
+
+/*
+ * Use same uid for clientUid and callingUid, should not be limited by quota.
+ */
+TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerSelfUid) {
+ ALOGD("TestTranscoderPacerSelfUid");
+ testPacerHelperWithSelfUid(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 12 /*expectedSuccess*/);
}
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
index 10b2e80..e931cc1 100644
--- a/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
+++ b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
@@ -19,7 +19,10 @@
#include <android-base/logging.h>
#include <media/MediaSampleWriter.h>
+#include <media/NdkCommon.h>
#include <media/NdkMediaMuxer.h>
+#include <sys/prctl.h>
+#include <utils/AndroidThreads.h>
namespace android {
@@ -124,7 +127,15 @@
LOG(ERROR) << "Muxer needs to be initialized when adding tracks.";
return nullptr;
}
- ssize_t trackIndexOrError = mMuxer->addTrack(trackFormat.get());
+
+ AMediaFormat* trackFormatCopy = AMediaFormat_new();
+ AMediaFormat_copy(trackFormatCopy, trackFormat.get());
+ // Request muxer to use background priorities by default.
+ AMediaFormatUtils::SetDefaultFormatValueInt32(TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE,
+ trackFormatCopy, 1 /* true */);
+
+ ssize_t trackIndexOrError = mMuxer->addTrack(trackFormatCopy);
+ AMediaFormat_delete(trackFormatCopy);
if (trackIndexOrError < 0) {
LOG(ERROR) << "Failed to add media track to muxer: " << trackIndexOrError;
return nullptr;
@@ -173,6 +184,9 @@
mState = STARTED;
std::thread([this] {
+ androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
+ prctl(PR_SET_NAME, (unsigned long)"SampleWriterTrd", 0, 0, 0);
+
bool wasStopped = false;
media_status_t status = writeSamples(&wasStopped);
if (auto callbacks = mCallbacks.lock()) {
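
The worker threads above all follow the same demote-and-name pattern; a standalone sketch for reference (not part of the change; the helper name is illustrative):

// Sketch: background priority plus a recognizable thread name. PR_SET_NAME
// truncates to 16 bytes including the terminating NUL, hence short names
// like "SampleWriterTrd".
#include <sys/prctl.h>
#include <utils/AndroidThreads.h>

static void enterBackgroundWorker(const char* name) {
    androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
    prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
}
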
diff --git a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
index 15f7427..f01a948 100644
--- a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
@@ -20,6 +20,7 @@
#include <android-base/logging.h>
#include <media/MediaTrackTranscoder.h>
#include <media/MediaTrackTranscoderCallback.h>
+#include <utils/AndroidThreads.h>
namespace android {
@@ -72,6 +73,7 @@
mState = STARTED;
std::thread([this] {
+ androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
bool stopped = false;
media_status_t status = runTranscodeLoop(&stopped);
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index 879241e..e20f7ab 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -25,6 +25,7 @@
#include <media/NdkCommon.h>
#include <media/PassthroughTrackTranscoder.h>
#include <media/VideoTrackTranscoder.h>
+#include <sys/prctl.h>
#include <unistd.h>
namespace android {
@@ -125,6 +126,8 @@
std::thread asyncNotificationThread{[this, self = shared_from_this(),
status = mTranscoderStatus,
stopped = mTranscoderStopped] {
+ prctl(PR_SET_NAME, (unsigned long)"TranscodCallbk", 0, 0, 0);
+
// If the transcoder was stopped that means a caller is waiting in stop or pause
// in which case we don't send a callback.
if (status != AMEDIA_OK) {
diff --git a/media/libmediatranscoding/transcoder/NdkCommon.cpp b/media/libmediatranscoding/transcoder/NdkCommon.cpp
index 2d85df7..e133cd6 100644
--- a/media/libmediatranscoding/transcoder/NdkCommon.cpp
+++ b/media/libmediatranscoding/transcoder/NdkCommon.cpp
@@ -42,6 +42,7 @@
/* TODO(lnilsson): Finalize value or adopt AMediaFormat key once available. */
const char* TBD_AMEDIACODEC_PARAMETER_KEY_COLOR_TRANSFER_REQUEST = "color-transfer-request";
+const char* TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE = "android._background-mode";
namespace AMediaFormatUtils {
diff --git a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
index c55e244..3335d6c 100644
--- a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
@@ -19,6 +19,7 @@
#include <android-base/logging.h>
#include <media/PassthroughTrackTranscoder.h>
+#include <sys/prctl.h>
namespace android {
@@ -94,6 +95,8 @@
}
media_status_t PassthroughTrackTranscoder::runTranscodeLoop(bool* stopped) {
+ prctl(PR_SET_NAME, (unsigned long)"PassthruThread", 0, 0, 0);
+
MediaSampleInfo info;
std::shared_ptr<MediaSample> sample;
bool eosReached = false;
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index 4405180..b43efcb 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -21,7 +21,7 @@
#include <android-base/properties.h>
#include <media/NdkCommon.h>
#include <media/VideoTrackTranscoder.h>
-#include <utils/AndroidThreads.h>
+#include <sys/prctl.h>
using namespace AMediaFormatUtils;
@@ -220,16 +220,15 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- int32_t bitrate;
- if (!AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrate)) {
- status = mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &bitrate);
+ if (!AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, &mConfiguredBitrate)) {
+ status = mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &mConfiguredBitrate);
if (status != AMEDIA_OK) {
LOG(ERROR) << "Unable to estimate bitrate. Using default " << kDefaultBitrateMbps;
- bitrate = kDefaultBitrateMbps;
+ mConfiguredBitrate = kDefaultBitrateMbps;
}
- LOG(INFO) << "Configuring bitrate " << bitrate;
- AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
+ LOG(INFO) << "Configuring bitrate " << mConfiguredBitrate;
+ AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, mConfiguredBitrate);
}
SetDefaultFormatValueFloat(AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, encoderFormat,
@@ -254,6 +253,10 @@
// MediaSampleWriter track format, and MediaSampleWriter will call AMediaMuxer_setOrientationHint.
AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_ROTATION, 0);
+ // Request encoder to use background priorities by default.
+ SetDefaultFormatValueInt32(TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE, encoderFormat,
+ 1 /* true */);
+
mDestinationFormat = std::shared_ptr<AMediaFormat>(encoderFormat, &AMediaFormat_delete);
// Create and configure the encoder.
@@ -335,6 +338,7 @@
static const std::vector<EntryCopier> kEncoderEntriesToCopy{
ENTRY_COPIER2(AMEDIAFORMAT_KEY_OPERATING_RATE, Float, Int32),
ENTRY_COPIER(AMEDIAFORMAT_KEY_PRIORITY, Int32),
+ ENTRY_COPIER(TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE, Int32),
};
CopyFormatEntries(mDestinationFormat.get(), decoderFormat.get(), kEncoderEntriesToCopy);
@@ -589,7 +593,7 @@
}
media_status_t VideoTrackTranscoder::runTranscodeLoop(bool* stopped) {
- androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_VIDEO);
+ prctl(PR_SET_NAME, (unsigned long)"VideTranscodTrd", 0, 0, 0);
// Push start decoder and encoder as two messages, so that these are subject to the
// stop request as well. If the session is cancelled (or paused) immediately after start,
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
index ac3b2c0..8f8ad4e 100644
--- a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
@@ -113,6 +113,16 @@
// Asset directory
static const std::string kAssetDirectory = "/data/local/tmp/TranscodingBenchmark/";
+ // Transcoding configuration params to be logged
+ int64_t trackDurationUs = 0;
+ int32_t width = 0;
+ int32_t height = 0;
+ std::string sourceMime = "NA";
+ std::string targetMime = "NA";
+ bool includeAudio = false;
+ bool transcodeVideo = false;
+ int32_t targetBitrate = 0;
+
int srcFd = 0;
int dstFd = 0;
@@ -163,10 +173,30 @@
state.counters[PARAM_VIDEO_FRAME_RATE] = benchmark::Counter(
frameCount, benchmark::Counter::kIsIterationInvariantRate);
}
+ if (!AMediaFormat_getInt32(srcFormat, AMEDIAFORMAT_KEY_WIDTH, &width)) {
+ state.SkipWithError("Video source track format does not have width");
+ goto exit;
+ }
+ if (!AMediaFormat_getInt32(srcFormat, AMEDIAFORMAT_KEY_HEIGHT, &height)) {
+ state.SkipWithError("Video source track format does not have height");
+ goto exit;
+ }
+ AMediaFormat_getInt64(srcFormat, AMEDIAFORMAT_KEY_DURATION, &trackDurationUs);
+ sourceMime = mime;
}
if (trackSelectionCallback(mime, &dstFormat)) {
status = transcoder->configureTrackFormat(i, dstFormat);
+ if (strncmp(mime, "video/", 6) == 0 && dstFormat != nullptr) {
+ const char* mime = nullptr;
+ if (AMediaFormat_getString(dstFormat, AMEDIAFORMAT_KEY_MIME, &mime)) {
+ targetMime = mime;
+ }
+ AMediaFormat_getInt32(dstFormat, AMEDIAFORMAT_KEY_BIT_RATE, &targetBitrate);
+ transcodeVideo = true;
+ } else if (strncmp(mime, "audio/", 6) == 0) {
+ includeAudio = true;
+ }
}
if (dstFormat != nullptr) {
@@ -195,6 +225,17 @@
}
}
+ // Set transcoding configuration params in benchmark label
+ state.SetLabel(srcFileName + "," +
+ std::to_string(width) + "x" + std::to_string(height) + "," +
+ sourceMime + "," +
+ std::to_string(trackDurationUs/1000) + "," +
+ (includeAudio ? "Yes" : "No") + "," +
+ (transcodeVideo ? "Yes" : "No") + "," +
+ targetMime + "," +
+ std::to_string(targetBitrate)
+ );
+
exit:
if (srcFd > 0) close(srcFd);
if (dstFd > 0) close(dstFd);
@@ -543,7 +584,11 @@
void PrintRunData(const Run& report);
bool mPrintedHeader;
- std::vector<std::string> mHeaders = {"name", "real_time", "cpu_time", PARAM_VIDEO_FRAME_RATE};
+ std::vector<std::string> mHeaders = {
+ "File", "Resolution", "SourceMime", "VideoTrackDuration(ms)",
+ "IncludeAudio", "TranscodeVideo", "TargetMime", "TargetBirate(bps)",
+ "real_time(ms)", "cpu_time(ms)", PARAM_VIDEO_FRAME_RATE
+ };
};
bool CustomCsvReporter::ReportContext(const Context& context __unused) {
@@ -574,7 +619,8 @@
return;
}
std::ostream& Out = GetOutputStream();
- Out << run.benchmark_name() << ",";
+ // Log the transcoding params reported through label
+ Out << run.report_label << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
auto frameRate = run.counters.find(PARAM_VIDEO_FRAME_RATE);
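
With the label and header changes above, every CSV row now begins with the transcoding configuration and ends with the timing columns. A purely hypothetical row (file name and numbers invented; the final column header is whatever PARAM_VIDEO_FRAME_RATE expands to):

    video_1280x720_30fps_hevc.mp4,1280x720,video/hevc,15000,Yes,Yes,video/avc,10000000,4321.5,3987.2,28.4
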
diff --git a/media/libmediatranscoding/transcoder/include/media/NdkCommon.h b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
index c5547c6..2441922 100644
--- a/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
+++ b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
@@ -59,6 +59,7 @@
extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_VIDEO_BITRATE;
extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_MAX_B_FRAMES;
extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_COLOR_TRANSFER_REQUEST;
+extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE;
static constexpr int TBD_AMEDIACODEC_BUFFER_FLAG_KEY_FRAME = 0x1;
static constexpr int kBitrateModeConstant = 2;
diff --git a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
index 8a506a0..3e72882 100644
--- a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
@@ -45,6 +45,7 @@
private:
friend struct AsyncCodecCallbackDispatch;
+ friend class VideoTrackTranscoderTests;
// Minimal blocking queue used as a message queue by VideoTrackTranscoder.
template <typename T>
@@ -101,6 +102,7 @@
uid_t mUid;
uint64_t mInputFrameCount = 0;
uint64_t mOutputFrameCount = 0;
+ int32_t mConfiguredBitrate = 0;
};
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
index e40a507..c3a0ced 100644
--- a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
+++ b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
@@ -24,7 +24,7 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="{MODULE}" />
- <option name="native-test-timeout" value="10m" />
+ <option name="native-test-timeout" value="30m" />
</test>
</configuration>
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
index 8b3905c..0a8a06e 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
@@ -117,8 +117,9 @@
}
bool operator==(const TestMuxer::Event& lhs, const TestMuxer::Event& rhs) {
- return lhs.type == rhs.type && lhs.format == rhs.format && lhs.trackIndex == rhs.trackIndex &&
- lhs.data == rhs.data && lhs.info == rhs.info;
+ // Don't test format pointer equality since the writer can make a copy.
+ return lhs.type == rhs.type /*&& lhs.format == rhs.format*/ &&
+ lhs.trackIndex == rhs.trackIndex && lhs.data == rhs.data && lhs.info == rhs.info;
}
/** Represents a media source file. */
diff --git a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
index 1f9ec77..88c3fd3 100644
--- a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
@@ -86,6 +86,10 @@
~VideoTrackTranscoderTests() { LOG(DEBUG) << "VideoTrackTranscoderTests destroyed"; }
+ static int32_t getConfiguredBitrate(const std::shared_ptr<VideoTrackTranscoder>& transcoder) {
+ return transcoder->mConfiguredBitrate;
+ }
+
std::shared_ptr<MediaSampleReader> mMediaSampleReader;
int mTrackIndex;
std::shared_ptr<AMediaFormat> mSourceFormat;
@@ -140,7 +144,7 @@
TEST_F(VideoTrackTranscoderTests, PreserveBitrate) {
LOG(DEBUG) << "Testing PreserveBitrate";
auto callback = std::make_shared<TestTrackTranscoderCallback>();
- std::shared_ptr<MediaTrackTranscoder> transcoder = VideoTrackTranscoder::create(callback);
+ auto transcoder = VideoTrackTranscoder::create(callback);
auto destFormat = TrackTranscoderTestUtils::getDefaultVideoDestinationFormat(
mSourceFormat.get(), false /* includeBitrate*/);
@@ -155,15 +159,11 @@
ASSERT_TRUE(transcoder->start());
callback->waitUntilTrackFormatAvailable();
-
- auto outputFormat = transcoder->getOutputFormat();
- ASSERT_NE(outputFormat, nullptr);
-
transcoder->stop();
EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
- int32_t outBitrate;
- EXPECT_TRUE(AMediaFormat_getInt32(outputFormat.get(), AMEDIAFORMAT_KEY_BIT_RATE, &outBitrate));
+ int32_t outBitrate = getConfiguredBitrate(transcoder);
+ ASSERT_GT(outBitrate, 0);
EXPECT_EQ(srcBitrate, outBitrate);
}
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 1054b68..ca98b28 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -46,13 +46,11 @@
status_t result;
result = mStream->getBufferSize(&mStreamBufferSizeBytes);
if (result != OK) return result;
- audio_format_t streamFormat;
- uint32_t sampleRate;
- audio_channel_mask_t channelMask;
- result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ result = mStream->getAudioProperties(&config);
if (result != OK) return result;
- mFormat = Format_from_SR_C(sampleRate,
- audio_channel_count_from_in_mask(channelMask), streamFormat);
+ mFormat = Format_from_SR_C(config.sample_rate,
+ audio_channel_count_from_in_mask(config.channel_mask), config.format);
mFrameSize = Format_frameSize(mFormat);
}
return NBAIO_Source::negotiate(offers, numOffers, counterOffers, numCounterOffers);
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index 8564899..581867f 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -44,13 +44,11 @@
status_t result;
result = mStream->getBufferSize(&mStreamBufferSizeBytes);
if (result != OK) return result;
- audio_format_t streamFormat;
- uint32_t sampleRate;
- audio_channel_mask_t channelMask;
- result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ result = mStream->getAudioProperties(&config);
if (result != OK) return result;
- mFormat = Format_from_SR_C(sampleRate,
- audio_channel_count_from_out_mask(channelMask), streamFormat);
+ mFormat = Format_from_SR_C(config.sample_rate,
+ audio_channel_count_from_out_mask(config.channel_mask), config.format);
mFrameSize = Format_frameSize(mFormat);
}
return NBAIO_Sink::negotiate(offers, numOffers, counterOffers, numCounterOffers);
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 52434b3..d6e36b9 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -274,6 +274,7 @@
"MPEG2TSWriter.cpp",
"MPEG4Writer.cpp",
"MediaAdapter.cpp",
+ "MediaAppender.cpp",
"MediaClock.cpp",
"MediaCodec.cpp",
"MediaCodecList.cpp",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 01190b5..0fd4ef2 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -113,7 +113,7 @@
return NULL;
}
sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
- if (frameMem == NULL) {
+ if (frameMem == NULL || frameMem->unsecurePointer() == NULL) {
ALOGE("not enough memory for VideoFrame size=%zu", size);
return NULL;
}
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 76a5cab..7c7fcac 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -519,12 +519,13 @@
mSendNotify = false;
mWriteSeekErr = false;
mFallocateErr = false;
-
// Reset following variables for all the sessions and they will be
// initialized in start(MetaData *param).
mIsRealTimeRecording = true;
+ mIsBackgroundMode = false;
mUse4ByteNalLength = true;
mOffset = 0;
+ mMaxOffsetAppend = 0;
mPreAllocateFileEndOffset = 0;
mMdatOffset = 0;
mMdatEndOffset = 0;
@@ -662,6 +663,14 @@
sp<MetaData> meta = source->getFormat();
meta->findCString(kKeyMIMEType, &mime);
+
+ // Background mode for media transcoding. If either audio or video track signal this is in
+ // background mode, we will set all the threads to run in background priority.
+ int32_t isBackgroundMode;
+ if (meta && meta->findInt32(kKeyBackgroundMode, &isBackgroundMode)) {
+ mIsBackgroundMode |= isBackgroundMode;
+ }
+
if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
return ERROR_UNSUPPORTED;
@@ -992,6 +1001,19 @@
seekOrPostError(mFd, mFreeBoxOffset, SEEK_SET);
writeInt32(mInMemoryCacheSize);
write("free", 4);
+ if (mInMemoryCacheSize >= 8) {
+ off64_t bufSize = mInMemoryCacheSize - 8;
+ char* zeroBuffer = new (std::nothrow) char[bufSize];
+ if (zeroBuffer) {
+ std::fill_n(zeroBuffer, bufSize, '0');
+ writeOrPostError(mFd, zeroBuffer, bufSize);
+ delete [] zeroBuffer;
+ } else {
+ ALOGW("freebox in file isn't initialized to 0");
+ }
+ } else {
+ ALOGW("freebox size is less than 8:%" PRId64, mInMemoryCacheSize);
+ }
mMdatOffset = mFreeBoxOffset + mInMemoryCacheSize;
} else {
mMdatOffset = mOffset;
@@ -1541,6 +1563,26 @@
MediaBuffer *buffer, bool usePrefix,
uint32_t tiffHdrOffset, size_t *bytesWritten) {
off64_t old_offset = mOffset;
+ int64_t offset;
+ ALOGV("buffer->range_length:%lld", (long long)buffer->range_length());
+ if (buffer->meta_data().findInt64(kKeySampleFileOffset, &offset)) {
+ ALOGV("offset:%lld, old_offset:%lld", (long long)offset, (long long)old_offset);
+ if (old_offset == offset) {
+ mOffset += buffer->range_length();
+ } else {
+ ALOGV("offset and old_offset are not equal! diff:%lld", (long long)offset - old_offset);
+ mOffset = offset + buffer->range_length();
+ // mOffset += buffer->range_length() + offset - old_offset;
+ }
+ *bytesWritten = buffer->range_length();
+ ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld, bytesWritten:%lld", (long long)mOffset,
+ (long long)mMaxOffsetAppend, (long long)*bytesWritten);
+ mMaxOffsetAppend = std::max(mOffset, mMaxOffsetAppend);
+ seekOrPostError(mFd, mMaxOffsetAppend, SEEK_SET);
+ return offset;
+ }
+
+ ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset, (long long)mMaxOffsetAppend);
if (usePrefix) {
addMultipleLengthPrefixedSamples_l(buffer);
@@ -1557,6 +1599,10 @@
mOffset += buffer->range_length();
}
*bytesWritten = mOffset - old_offset;
+
+ ALOGV("mOffset:%lld, old_offset:%lld, bytesWritten:%lld", (long long)mOffset,
+ (long long)old_offset, (long long)*bytesWritten);
+
return old_offset;
}
@@ -1569,6 +1615,7 @@
(const uint8_t *)buffer->data() + buffer->range_offset();
if (!memcmp(ptr, "\x00\x00\x00\x01", 4)) {
+ ALOGV("stripping start code");
buffer->set_range(
buffer->range_offset() + 4, buffer->range_length() - 4);
}
@@ -1599,8 +1646,10 @@
}
void MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
+ ALOGV("alp:buffer->range_length:%lld", (long long)buffer->range_length());
size_t length = buffer->range_length();
if (mUse4ByteNalLength) {
+ ALOGV("mUse4ByteNalLength");
uint8_t x[4];
x[0] = length >> 24;
x[1] = (length >> 16) & 0xff;
@@ -1610,6 +1659,7 @@
writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(), length);
mOffset += length + 4;
} else {
+ ALOGV("mUse2ByteNalLength");
CHECK_LT(length, 65536u);
uint8_t x[2];
@@ -2268,7 +2318,11 @@
if (mLooper == nullptr) {
mLooper = new ALooper;
mLooper->setName("MP4WtrCtrlHlpLooper");
- err = mLooper->start();
+ if (mIsBackgroundMode) {
+ err = mLooper->start(false, false, ANDROID_PRIORITY_BACKGROUND);
+ } else {
+ err = mLooper->start();
+ }
mReflector = new AHandlerReflector<MPEG4Writer>(this);
mLooper->registerHandler(mReflector);
}
@@ -2737,6 +2791,11 @@
prctl(PR_SET_NAME, (unsigned long)"MPEG4Writer", 0, 0, 0);
+ if (mIsBackgroundMode) {
+ // Background priority for media transcoding.
+ androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
+ }
+
Mutex::Autolock autoLock(mLock);
while (!mDone) {
Chunk chunk;
@@ -2762,6 +2821,9 @@
}
writeAllChunks();
+ ALOGV("threadFunc mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset,
+ (long long)mMaxOffsetAppend);
+ mOffset = std::max(mOffset, mMaxOffsetAppend);
}
status_t MPEG4Writer::startWriterThread() {
@@ -3323,6 +3385,7 @@
uint32_t lastSamplesPerChunk = 0;
int64_t lastSampleDurationUs = -1; // Duration calculated from EOS buffer and its timestamp
int64_t lastSampleDurationTicks = -1; // Timescale based ticks
+ int64_t sampleFileOffset = -1;
if (mIsAudio) {
prctl(PR_SET_NAME, (unsigned long)"MP4WtrAudTrkThread", 0, 0, 0);
@@ -3334,6 +3397,9 @@
if (mOwner->isRealTimeRecording()) {
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+ } else if (mOwner->isBackgroundMode()) {
+ // Background priority for media transcoding.
+ androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_BACKGROUND);
}
sp<MetaData> meta_data;
@@ -3342,6 +3408,7 @@
MediaBufferBase *buffer;
const char *trackName = getTrackType();
while (!mDone && (err = mSource->read(&buffer)) == OK) {
+ ALOGV("read:buffer->range_length:%lld", (long long)buffer->range_length());
int32_t isEOS = false;
if (buffer->range_length() == 0) {
if (buffer->meta_data().findInt32(kKeyIsEndOfStream, &isEOS) && isEOS) {
@@ -3448,6 +3515,14 @@
continue;
}
}
+ if (!buffer->meta_data().findInt64(kKeySampleFileOffset, &sampleFileOffset)) {
+ sampleFileOffset = -1;
+ }
+ int64_t lastSample = -1;
+ if (!buffer->meta_data().findInt64(kKeyLastSampleIndexInChunk, &lastSample)) {
+ lastSample = -1;
+ }
+ ALOGV("sampleFileOffset:%lld", (long long)sampleFileOffset);
/*
* Reserve space in the file for the current sample + to be written MOOV box. If reservation
@@ -3455,7 +3530,7 @@
* write MOOV box successfully as space for the same was reserved in the prior call.
* Release the current buffer/sample here.
*/
- if (!mOwner->preAllocate(buffer->range_length())) {
+ if (sampleFileOffset == -1 && !mOwner->preAllocate(buffer->range_length())) {
buffer->release();
buffer = nullptr;
break;
@@ -3466,9 +3541,14 @@
// Make a deep copy of the MediaBuffer and Metadata and release
// the original as soon as we can
MediaBuffer *copy = new MediaBuffer(buffer->range_length());
- memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
+ if (sampleFileOffset != -1) {
+ copy->meta_data().setInt64(kKeySampleFileOffset, sampleFileOffset);
+ } else {
+ memcpy(copy->data(), (uint8_t*)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+ }
copy->set_range(0, buffer->range_length());
+
meta_data = new MetaData(buffer->meta_data());
buffer->release();
buffer = NULL;
@@ -3476,14 +3556,16 @@
copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
}
bool usePrefix = this->usePrefix() && !isExif;
-
- if (usePrefix) StripStartcode(copy);
-
+ if (sampleFileOffset == -1 && usePrefix) {
+ StripStartcode(copy);
+ }
size_t sampleSize = copy->range_length();
- if (usePrefix) {
+ if (sampleFileOffset == -1 && usePrefix) {
if (mOwner->useNalLengthFour()) {
+ ALOGV("nallength4");
sampleSize += 4;
} else {
+ ALOGV("nallength2");
sampleSize += 2;
}
}
@@ -3778,7 +3860,8 @@
chunkTimestampUs = timestampUs;
} else {
int64_t chunkDurationUs = timestampUs - chunkTimestampUs;
- if (chunkDurationUs > interleaveDurationUs) {
+ if (chunkDurationUs > interleaveDurationUs || lastSample > 1) {
+ ALOGV("lastSample:%lld", (long long)lastSample);
if (chunkDurationUs > mMaxChunkDurationUs) {
mMaxChunkDurationUs = chunkDurationUs;
}
@@ -4014,6 +4097,10 @@
return mIsRealTimeRecording;
}
+bool MPEG4Writer::isBackgroundMode() const {
+ return mIsBackgroundMode;
+}
+
bool MPEG4Writer::useNalLengthFour() {
return mUse4ByteNalLength;
}
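
The append path added above pivots on kKeySampleFileOffset: a sample carrying that key already has its payload in the output file at the stated offset, so the writer skips preallocation, start-code stripping and the memcpy, and only advances mOffset and mMaxOffsetAppend. A rough sketch of the producer side, assuming the kKey* constants are introduced in MetaDataBase.h elsewhere in this change:

    // Sketch only: hypothetical helper tagging a sample that MPEG4Writer
    // should treat as already written to the file.
    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MetaData.h>

    void tagPrewrittenSample(android::MediaBuffer* buffer, off64_t fileOffset, bool lastInChunk) {
        buffer->meta_data().setInt64(android::kKeySampleFileOffset, fileOffset);
        if (lastInChunk) {
            // Any value > 1 satisfies the "lastSample > 1" chunk-flush check above.
            buffer->meta_data().setInt64(android::kKeyLastSampleIndexInChunk, 2 /* illustrative */);
        }
    }
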
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
new file mode 100644
index 0000000..5d80b30
--- /dev/null
+++ b/media/libstagefright/MediaAppender.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaAppender"
+
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Log.h>
+// TODO : check if this works for NDK apps without JVM
+// #include <media/ndk/NdkJavaVMHelperPriv.h>
+
+namespace android {
+
+struct MediaAppender::sampleDataInfo {
+ size_t size;
+ int64_t time;
+ size_t exTrackIndex;
+ sp<MetaData> meta;
+};
+
+sp<MediaAppender> MediaAppender::create(int fd, AppendMode mode) {
+ if (fd < 0) {
+ ALOGE("invalid file descriptor");
+ return nullptr;
+ }
+ if (!(mode >= APPEND_MODE_FIRST && mode <= APPEND_MODE_LAST)) {
+ ALOGE("invalid mode %d", mode);
+ return nullptr;
+ }
+ sp<MediaAppender> ma = new (std::nothrow) MediaAppender(fd, mode);
+ if (ma == nullptr || ma->init() != OK) {
+ return nullptr;
+ }
+ return ma;
+}
+
+// TODO: inject mediamuxer and mediaextractor objects.
+// TODO: @format is not required as an input if we can sniff the file and find the format of
+// the existing content.
+// TODO: Code it to the interface(MediaAppender), and have a separate MediaAppender NDK
+MediaAppender::MediaAppender(int fd, AppendMode mode)
+ : mFd(fd),
+ mMode(mode),
+ // TODO : check if this works for NDK apps without JVM
+ // mExtractor(new NuMediaExtractor(NdkJavaVMHelper::getJNIEnv() != nullptr
+ // ? NuMediaExtractor::EntryPoint::NDK_WITH_JVM
+ // : NuMediaExtractor::EntryPoint::NDK_NO_JVM)),
+ mExtractor(new (std::nothrow) NuMediaExtractor(NuMediaExtractor::EntryPoint::NDK_WITH_JVM)),
+ mTrackCount(0),
+ mState(UNINITIALIZED) {
+ ALOGV("MediaAppender::MediaAppender mode:%d", mode);
+ }
+
+status_t MediaAppender::init() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::init");
+ status_t status = mExtractor->setDataSource(mFd, 0, lseek(mFd, 0, SEEK_END));
+ if (status != OK) {
+ ALOGE("extractor_setDataSource failed, status :%d", status);
+ return status;
+ }
+
+ if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+ mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
+ } else {
+ ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+ return ERROR_UNSUPPORTED;
+ }
+
+ mTrackCount = mExtractor->countTracks();
+ ALOGV("mTrackCount:%zu", mTrackCount);
+ if (mTrackCount == 0) {
+ ALOGE("no tracks are present");
+ return ERROR_MALFORMED;
+ }
+ size_t exTrackIndex = 0;
+ ssize_t audioTrackIndex = -1, videoTrackIndex = -1;
+ bool audioSyncSampleTimeSet = false;
+
+ while (exTrackIndex < mTrackCount) {
+ sp<AMessage> fmt;
+ status = mExtractor->getTrackFormat(exTrackIndex, &fmt, 0);
+ if (status != OK) {
+ ALOGE("getTrackFormat failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+ return status;
+ }
+ AString mime;
+ if (fmt->findString("mime", &mime)) {
+ if (!strncasecmp(mime.c_str(), "video/", 6)) {
+ ALOGV("VideoTrack");
+ if (videoTrackIndex != -1) {
+ ALOGE("Not more than one video track is supported");
+ return ERROR_UNSUPPORTED;
+ }
+ videoTrackIndex = exTrackIndex;
+ } else if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+ ALOGV("AudioTrack");
+ if (audioTrackIndex != -1) {
+ ALOGE("Not more than one audio track is supported");
+ }
+ audioTrackIndex = exTrackIndex;
+ } else {
+ ALOGV("Neither Video nor Audio track");
+ }
+ }
+ mFmtIndexMap.emplace(exTrackIndex, fmt);
+ mSampleCountVect.emplace_back(0);
+ mMaxTimestampVect.emplace_back(0);
+ mLastSyncSampleTimeVect.emplace_back(0);
+ status = mExtractor->selectTrack(exTrackIndex);
+ if (status != OK) {
+ ALOGE("selectTrack failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+ return status;
+ }
+ ++exTrackIndex;
+ }
+
+ ALOGV("AudioTrackIndex:%zu, VideoTrackIndex:%zu", audioTrackIndex, videoTrackIndex);
+
+ do {
+ sampleDataInfo tmpSDI;
+ // TODO: read info into members of the struct sampleDataInfo directly
+ size_t sampleSize;
+ status = mExtractor->getSampleSize(&sampleSize);
+ if (status != OK) {
+ ALOGE("getSampleSize failed, status:%d", status);
+ return status;
+ }
+ mSampleSizeVect.emplace_back(sampleSize);
+ tmpSDI.size = sampleSize;
+ int64_t sampleTime = 0;
+ status = mExtractor->getSampleTime(&sampleTime);
+ if (status != OK) {
+ ALOGE("getSampleTime failed, status:%d", status);
+ return status;
+ }
+ mSampleTimeVect.emplace_back(sampleTime);
+ tmpSDI.time = sampleTime;
+ status = mExtractor->getSampleTrackIndex(&exTrackIndex);
+ if (status != OK) {
+ ALOGE("getSampleTrackIndex failed, status:%d", status);
+ return status;
+ }
+ mSampleIndexVect.emplace_back(exTrackIndex);
+ tmpSDI.exTrackIndex = exTrackIndex;
+ ++mSampleCountVect[exTrackIndex];
+ mMaxTimestampVect[exTrackIndex] = std::max(mMaxTimestampVect[exTrackIndex], sampleTime);
+ sp<MetaData> sampleMeta;
+ status = mExtractor->getSampleMeta(&sampleMeta);
+ if (status != OK) {
+ ALOGE("getSampleMeta failed, status:%d", status);
+ return status;
+ }
+ mSampleMetaVect.emplace_back(sampleMeta);
+ int32_t val = 0;
+ if (sampleMeta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ mLastSyncSampleTimeVect[exTrackIndex] = sampleTime;
+ }
+ tmpSDI.meta = sampleMeta;
+ mSDI.emplace_back(tmpSDI);
+ } while (mExtractor->advance() == OK);
+
+ mExtractor.clear();
+
+ std::sort(mSDI.begin(), mSDI.end(), [](sampleDataInfo& a, sampleDataInfo& b) {
+ int64_t aOffset, bOffset;
+ a.meta->findInt64(kKeySampleFileOffset, &aOffset);
+ b.meta->findInt64(kKeySampleFileOffset, &bOffset);
+ return aOffset < bOffset;
+ });
+ for (int64_t syncSampleTime : mLastSyncSampleTimeVect) {
+ ALOGV("before ignoring frames, mLastSyncSampleTimeVect:%lld", (long long)syncSampleTime);
+ }
+ ALOGV("mMode:%u", mMode);
+ if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex != -1 ) {
+ ALOGV("Video track is present");
+ bool lastVideoIframe = false;
+ size_t lastVideoIframeOffset = 0;
+ int64_t lastVideoSampleTime = -1;
+ for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend(); ++rItr) {
+ if (rItr->exTrackIndex != videoTrackIndex) {
+ continue;
+ }
+ if (lastVideoSampleTime == -1) {
+ lastVideoSampleTime = rItr->time;
+ }
+ int64_t offset = 0;
+ if (!rItr->meta->findInt64(kKeySampleFileOffset, &offset) || offset == 0) {
+ ALOGE("Missing offset");
+ return ERROR_MALFORMED;
+ }
+ ALOGV("offset:%lld", (long long)offset);
+ int32_t val = 0;
+ if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ ALOGV("sampleTime:%lld", (long long)rItr->time);
+ ALOGV("lastVideoSampleTime:%lld", (long long)lastVideoSampleTime);
+ if (lastVideoIframe == false && (lastVideoSampleTime - rItr->time) >
+ 1000000/* Track interleaving duration in MPEG4Writer*/) {
+ ALOGV("lastVideoIframe got chosen");
+ lastVideoIframe = true;
+ mLastSyncSampleTimeVect[videoTrackIndex] = rItr->time;
+ lastVideoIframeOffset = offset;
+ ALOGV("lastVideoIframeOffset:%lld", (long long)offset);
+ break;
+ }
+ }
+ }
+ if (lastVideoIframe == false) {
+ ALOGV("Need to rewrite all samples");
+ mLastSyncSampleTimeVect[videoTrackIndex] = 0;
+ lastVideoIframeOffset = 0;
+ }
+ unsigned int framesIgnoredCount = 0;
+ for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+ int64_t offset = 0;
+ ALOGV("trackIndex:%zu, %" PRId64 "", itr->exTrackIndex, itr->time);
+ if (itr->meta->findInt64(kKeySampleFileOffset, &offset) &&
+ offset >= lastVideoIframeOffset) {
+ ALOGV("offset:%lld", (long long)offset);
+ if (!audioSyncSampleTimeSet && audioTrackIndex != -1 &&
+ audioTrackIndex == itr->exTrackIndex) {
+ mLastSyncSampleTimeVect[audioTrackIndex] = itr->time;
+ audioSyncSampleTimeSet = true;
+ }
+ itr = mSDI.erase(itr);
+ ++framesIgnoredCount;
+ } else {
+ ++itr;
+ }
+ }
+ ALOGV("framesIgnoredCount:%u", framesIgnoredCount);
+ }
+
+ if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex == -1 &&
+ audioTrackIndex != -1) {
+ ALOGV("Only AudioTrack is present");
+ for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend(); ++rItr) {
+ int32_t val = 0;
+ if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ mLastSyncSampleTimeVect[audioTrackIndex] = rItr->time;
+ break;
+ }
+ }
+ unsigned int framesIgnoredCount = 0;
+ for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+ if (itr->time >= mLastSyncSampleTimeVect[audioTrackIndex]) {
+ itr = mSDI.erase(itr);
+ ++framesIgnoredCount;
+ } else {
+ ++itr;
+ }
+ }
+ ALOGV("framesIgnoredCount :%u", framesIgnoredCount);
+ }
+
+ for (size_t i = 0; i < mLastSyncSampleTimeVect.size(); ++i) {
+ ALOGV("mLastSyncSampleTimeVect[%zu]:%lld", i, (long long)mLastSyncSampleTimeVect[i]);
+ mFmtIndexMap[i]->setInt64(
+ "sample-time-before-append" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+ mLastSyncSampleTimeVect[i]);
+ }
+ for (size_t i = 0; i < mMaxTimestampVect.size(); ++i) {
+ ALOGV("mMaxTimestamp[%zu]:%lld", i, (long long)mMaxTimestampVect[i]);
+ }
+ for (size_t i = 0; i < mSampleCountVect.size(); ++i) {
+ ALOGV("SampleCountVect[%zu]:%zu", i, mSampleCountVect[i]);
+ }
+ mState = INITIALIZED;
+ return OK;
+}
+
+MediaAppender::~MediaAppender() {
+ ALOGV("MediaAppender::~MediaAppender");
+ mMuxer.clear();
+ mExtractor.clear();
+}
+
+status_t MediaAppender::start() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::start");
+ if (mState != INITIALIZED) {
+ ALOGE("MediaAppender::start() is called in invalid state %d", mState);
+ return INVALID_OPERATION;
+ }
+ mMuxer = new (std::nothrow) MediaMuxer(mFd, mFormat);
+ for (const auto& n : mFmtIndexMap) {
+ ssize_t muxIndex = mMuxer->addTrack(n.second);
+ if (muxIndex < 0) {
+ ALOGE("addTrack failed");
+ return UNKNOWN_ERROR;
+ }
+ mTrackIndexMap.emplace(n.first, muxIndex);
+ }
+ ALOGV("trackIndexmap size:%zu", mTrackIndexMap.size());
+
+ status_t status = mMuxer->start();
+ if (status != OK) {
+ ALOGE("muxer start failed:%d", status);
+ return status;
+ }
+
+ ALOGV("Sorting samples based on their offsets");
+ for (int i = 0; i < mSDI.size(); ++i) {
+ ALOGV("i:%d", i + 1);
+ /* TODO : Allocate a single allocation of the max size, and reuse it across ABuffers if
+ * using new ABuffer(void *, size_t).
+ */
+ sp<ABuffer> data = new (std::nothrow) ABuffer(mSDI[i].size);
+ if (data == nullptr) {
+ ALOGE("memory allocation failed");
+ return NO_MEMORY;
+ }
+ data->setRange(0, mSDI[i].size);
+ int32_t val = 0;
+ int sampleFlags = 0;
+ if (mSDI[i].meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+ }
+
+ int64_t val64;
+ if (mSDI[i].meta->findInt64(kKeySampleFileOffset, &val64)) {
+ ALOGV("SampleFileOffset Found :%zu:%lld:%lld", mSDI[i].exTrackIndex,
+ (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+ sp<AMessage> bufMeta = data->meta();
+ bufMeta->setInt64("sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+ val64);
+ }
+ if (mSDI[i].meta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+ ALOGV("kKeyLastSampleIndexInChunk Found %lld:%lld",
+ (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+ sp<AMessage> bufMeta = data->meta();
+ bufMeta->setInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ val64);
+ }
+ status = mMuxer->writeSampleData(data, mTrackIndexMap[mSDI[i].exTrackIndex], mSDI[i].time,
+ sampleFlags);
+ if (status != OK) {
+ ALOGE("muxer writeSampleData failed:%d", status);
+ return status;
+ }
+ }
+ mState = STARTED;
+ return OK;
+}
+
+status_t MediaAppender::stop() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::stop");
+ if (mState == STARTED) {
+ status_t status = mMuxer->stop();
+ if (status != OK) {
+ mState = ERROR;
+ } else {
+ mState = STOPPED;
+ }
+ return status;
+ } else {
+ ALOGE("stop() is called in invalid state %d", mState);
+ return INVALID_OPERATION;
+ }
+}
+
+ssize_t MediaAppender::getTrackCount() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::getTrackCount");
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackCount() is called in invalid state %d", mState);
+ return -1;
+ }
+ return mTrackCount;
+}
+
+sp<AMessage> MediaAppender::getTrackFormat(size_t idx) {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::getTrackFormat");
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackFormat() is called in invalid state %d", mState);
+ return nullptr;
+ }
+ if (idx < 0 || idx >= mTrackCount) {
+ ALOGE("getTrackFormat() idx is out of range");
+ return nullptr;
+ }
+ return mFmtIndexMap[idx];
+}
+
+status_t MediaAppender::writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex,
+ int64_t timeUs, uint32_t flags) {
+ std::scoped_lock lock(mMutex);
+ ALOGV("writeSampleData:trackIndex:%zu, time:%" PRId64 "", trackIndex, timeUs);
+ return mMuxer->writeSampleData(buffer, trackIndex, timeUs, flags);
+}
+
+status_t MediaAppender::setOrientationHint([[maybe_unused]] int degrees) {
+ ALOGE("setOrientationHint not supported. Has to be called prior to start on initial muxer");
+ return ERROR_UNSUPPORTED;
+}
+
+status_t MediaAppender::setLocation([[maybe_unused]] int latit, [[maybe_unused]] int longit) {
+ ALOGE("setLocation not supported. Has to be called prior to start on initial muxer");
+ return ERROR_UNSUPPORTED;
+}
+
+ssize_t MediaAppender::addTrack([[maybe_unused]] const sp<AMessage> &format) {
+ ALOGE("addTrack not supported");
+ return ERROR_UNSUPPORTED;
+}
+
+} // namespace android
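
Taken together, the new file exposes a small append workflow: create() scans the existing MP4 and decides which trailing samples to drop (for APPEND_MODE_IGNORE_LAST_VIDEO_GOP, everything from the last usable video I-frame onward), start() re-muxes the retained samples at their original file offsets, and the caller then feeds new samples before stop(). A minimal usage sketch, assuming the AppendMode constants are reachable as MediaAppender::APPEND_MODE_* (MediaAppender.h is not part of this diff):

    // Sketch only, not part of the patch.
    #include <fcntl.h>
    #include <unistd.h>
    #include <media/stagefright/MediaAppender.h>
    #include <utils/Errors.h>

    using namespace android;

    status_t appendToClip(const char* path /* hypothetical */) {
        int fd = open(path, O_RDWR);
        if (fd < 0) return UNKNOWN_ERROR;

        sp<MediaAppender> appender =
                MediaAppender::create(fd, MediaAppender::APPEND_MODE_IGNORE_LAST_VIDEO_GOP);
        if (appender == nullptr || appender->start() != OK) {
            close(fd);
            return UNKNOWN_ERROR;
        }
        // New samples then go through writeSampleData(buffer, trackIndex, timeUs, flags),
        // with tracks discovered via getTrackCount() / getTrackFormat(idx).
        status_t status = appender->stop();
        close(fd);
        return status;
    }
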
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 26cdec8..6b2e7be 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -102,6 +102,17 @@
static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
+static const char *kCodecColorFormat = "android.media.mediacodec.color-format";
+static const char *kCodecFrameRate = "android.media.mediacodec.frame-rate";
+static const char *kCodecCaptureRate = "android.media.mediacodec.capture-rate";
+static const char *kCodecOperatingRate = "android.media.mediacodec.operating-rate";
+static const char *kCodecPriority = "android.media.mediacodec.priority";
+static const char *kCodecRequestedVideoQPIMin = "android.media.mediacodec.video-qp-i-min";
+static const char *kCodecRequestedVideoQPIMax = "android.media.mediacodec.video-qp-i-max";
+static const char *kCodecRequestedVideoQPPMin = "android.media.mediacodec.video-qp-p-min";
+static const char *kCodecRequestedVideoQPPMax = "android.media.mediacodec.video-qp-p-max";
+static const char *kCodecRequestedVideoQPBMin = "android.media.mediacodec.video-qp-b-min";
+static const char *kCodecRequestedVideoQPBMax = "android.media.mediacodec.video-qp-b-max";
// NB: These are not yet exposed as public Java API constants.
static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
@@ -109,6 +120,7 @@
static const char *kCodecLevel = "android.media.mediacodec.level"; /* 0..n */
static const char *kCodecBitrateMode = "android.media.mediacodec.bitrate_mode"; /* CQ/VBR/CBR */
static const char *kCodecBitrate = "android.media.mediacodec.bitrate"; /* 0..n */
+static const char *kCodecOriginalBitrate = "android.media.mediacodec.original.bitrate"; /* 0..n */
static const char *kCodecMaxWidth = "android.media.mediacodec.maxwidth"; /* 0..n */
static const char *kCodecMaxHeight = "android.media.mediacodec.maxheight"; /* 0..n */
static const char *kCodecError = "android.media.mediacodec.errcode";
@@ -130,6 +142,8 @@
static const char *kCodecSampleRate = "android.media.mediacodec.sampleRate";
static const char *kCodecVideoEncodedBytes = "android.media.mediacodec.vencode.bytes";
static const char *kCodecVideoEncodedFrames = "android.media.mediacodec.vencode.frames";
+static const char *kCodecVideoInputBytes = "android.media.mediacodec.video.input.bytes";
+static const char *kCodecVideoInputFrames = "android.media.mediacodec.video.input.frames";
static const char *kCodecVideoEncodedDurationUs = "android.media.mediacodec.vencode.durationUs";
// the kCodecRecent* fields appear only in getMetrics() results
@@ -139,6 +153,8 @@
static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist"; /* in us */
+static const char *kCodecShapingEnhanced = "android.media.mediacodec.shaped"; /* 0/1 */
+
// XXX suppress until we get our representation right
static bool kEmitHistogram = false;
@@ -838,6 +854,8 @@
}
mediametrics_setInt64(mMetricsHandle, kCodecVideoEncodedDurationUs, duration);
mediametrics_setInt64(mMetricsHandle, kCodecVideoEncodedFrames, mFramesEncoded);
+ mediametrics_setInt64(mMetricsHandle, kCodecVideoInputFrames, mFramesInput);
+ mediametrics_setInt64(mMetricsHandle, kCodecVideoInputBytes, mBytesInput);
}
{
@@ -1050,7 +1068,7 @@
}
// when we send a buffer to the codec;
-void MediaCodec::statsBufferSent(int64_t presentationUs) {
+void MediaCodec::statsBufferSent(int64_t presentationUs, const sp<MediaCodecBuffer> &buffer) {
// only enqueue if we have a legitimate time
if (presentationUs <= 0) {
@@ -1064,6 +1082,11 @@
});
}
+ if (mIsVideo && (mFlags & kFlagIsEncoder)) {
+ mBytesInput += buffer->size();
+ mFramesInput++;
+ }
+
const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
BufferFlightTiming_t startdata = { presentationUs, nowNs };
@@ -1394,6 +1417,7 @@
* MediaFormat Shaping forward declarations
* including the property name we use for control.
*/
+static int enableMediaFormatShapingDefault = 1;
static const char enableMediaFormatShapingProperty[] = "debug.stagefright.enableshaping";
static void mapFormat(AString componentName, const sp<AMessage> &format, const char *kind,
bool reverse);
@@ -1446,6 +1470,50 @@
if (format->findInt32("max-height", &maxHeight)) {
mediametrics_setInt32(mMetricsHandle, kCodecMaxHeight, maxHeight);
}
+ int32_t colorFormat = -1;
+ if (format->findInt32("color-format", &colorFormat)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecColorFormat, colorFormat);
+ }
+ float frameRate = -1.0;
+ if (format->findFloat("frame-rate", &frameRate)) {
+ mediametrics_setDouble(mMetricsHandle, kCodecFrameRate, frameRate);
+ }
+ float captureRate = -1.0;
+ if (format->findFloat("capture-rate", &captureRate)) {
+ mediametrics_setDouble(mMetricsHandle, kCodecCaptureRate, captureRate);
+ }
+ float operatingRate = -1.0;
+ if (format->findFloat("operating-rate", &operatingRate)) {
+ mediametrics_setDouble(mMetricsHandle, kCodecOperatingRate, operatingRate);
+ }
+ int32_t priority = -1;
+ if (format->findInt32("priority", &priority)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecPriority, priority);
+ }
+ int32_t qpIMin = -1;
+ if (format->findInt32("video-qp-i-min", &qpIMin)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPIMin, qpIMin);
+ }
+ int32_t qpIMax = -1;
+ if (format->findInt32("video-qp-i-max", &qpIMax)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPIMax, qpIMax);
+ }
+ int32_t qpPMin = -1;
+ if (format->findInt32("video-qp-p-min", &qpPMin)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPPMin, qpPMin);
+ }
+ int32_t qpPMax = -1;
+ if (format->findInt32("video-qp-p-max", &qpPMax)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPPMax, qpPMax);
+ }
+ int32_t qpBMin = -1;
+ if (format->findInt32("video-qp-b-min", &qpBMin)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPBMin, qpBMin);
+ }
+ int32_t qpBMax = -1;
+ if (format->findInt32("video-qp-b-max", &qpBMax)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecRequestedVideoQPBMax, qpBMax);
+ }
}
// Prevent possible integer overflow in downstream code.
@@ -1469,7 +1537,8 @@
}
if (flags & CONFIGURE_FLAG_ENCODE) {
- int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty, 0);
+ int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty,
+ enableMediaFormatShapingDefault);
if (!enableShaping) {
ALOGI("format shaping disabled, property '%s'", enableMediaFormatShapingProperty);
} else {
@@ -1556,18 +1625,7 @@
static bool connectFormatShaper() {
static std::once_flag sCheckOnce;
-#if 0
- // an early return if the property says disabled means we skip loading.
- // that saves memory.
-
- // apply framework level modifications to the mediaformat for encoding
- // XXX: default off for a while during dogfooding
- int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty, 0);
-
- if (!enableShaping) {
- return true;
- }
-#endif
+ ALOGV("connectFormatShaper...");
std::call_once(sCheckOnce, [&](){
@@ -1672,6 +1730,8 @@
//
static const char *featurePrefix = "feature-";
static const int featurePrefixLen = strlen(featurePrefix);
+ static const char *tuningPrefix = "tuning-";
+ static const int tuningPrefixLen = strlen(tuningPrefix);
static const char *mappingPrefix = "mapping-";
static const int mappingPrefixLen = strlen(mappingPrefix);
@@ -1685,6 +1745,14 @@
intValue);
}
continue;
+ } else if (!strncmp(mapSrc, tuningPrefix, tuningPrefixLen)) {
+ AString value;
+ if (details->findString(mapSrc, &value)) {
+ ALOGV("-- tuning '%s' -> '%s'", mapSrc, value.c_str());
+ (void)(sShaperOps->setTuning)(shaperHandle, &mapSrc[tuningPrefixLen],
+ value.c_str());
+ }
+ continue;
} else if (!strncmp(mapSrc, mappingPrefix, mappingPrefixLen)) {
AString target;
if (details->findString(mapSrc, &target)) {
@@ -1801,10 +1869,20 @@
AMediaFormat_getFormat(updatedNdkFormat, &updatedFormat);
sp<AMessage> deltas = updatedFormat->changesFrom(format, false /* deep */);
- ALOGD("shapeMediaFormat: deltas: %s", deltas->debugString(2).c_str());
-
- // note that this means that for anything in both, the copy in deltas wins
- format->extend(deltas);
+ size_t changeCount = deltas->countEntries();
+ ALOGD("shapeMediaFormat: deltas(%zu): %s", changeCount, deltas->debugString(2).c_str());
+ if (changeCount > 0) {
+ if (mMetricsHandle != 0) {
+ mediametrics_setInt32(mMetricsHandle, kCodecShapingEnhanced, changeCount);
+ // save some old properties before we fold in the new ones
+ int32_t bitrate;
+ if (format->findInt32(KEY_BIT_RATE, &bitrate)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecOriginalBitrate, bitrate);
+ }
+ }
+ // NB: for any field in both format and deltas, the deltas copy wins
+ format->extend(deltas);
+ }
}
AMediaFormat_delete(updatedNdkFormat);
@@ -3504,6 +3582,11 @@
mTunneled = false;
}
+ int32_t background = 0;
+ if (format->findInt32("android._background-mode", &background) && background) {
+ androidSetThreadPriority(gettid(), ANDROID_PRIORITY_BACKGROUND);
+ }
+
mCodec->initiateConfigureComponent(format);
break;
}
@@ -4645,7 +4728,7 @@
info->mOwnedByClient = false;
info->mData.clear();
- statsBufferSent(timeUs);
+ statsBufferSent(timeUs, buffer);
}
return err;
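
The "android._background-mode" handling above lets a client drop the codec's handler thread to background priority at configure time. A minimal sketch of opting in (whether the transcoder's TBD_AMEDIACODEC_PARAMETER_KEY_BACKGROUND_MODE string maps to this key is decided in NdkCommon.cpp, which is not part of this diff):

    // Sketch only: requesting background priority when configuring an encoder.
    sp<ALooper> looper = new ALooper;
    looper->start();
    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true /* encoder */);

    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1280);   // illustrative values only; a real encoder
    format->setInt32("height", 720);   // config also needs bitrate, color format, etc.
    format->setInt32("android._background-mode", 1);
    codec->configure(format, nullptr /* surface */, nullptr /* crypto */,
                     MediaCodec::CONFIGURE_FLAG_ENCODE);
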
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 876d06c..0107c32 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -169,9 +169,7 @@
}
status_t MediaCodecSource::Puller::setStopTimeUs(int64_t stopTimeUs) {
- sp<AMessage> msg = new AMessage(kWhatSetStopTimeUs, this);
- msg->setInt64("stop-time-us", stopTimeUs);
- return postSynchronouslyAndReturnError(msg);
+ return mSource->setStopTimeUs(stopTimeUs);
}
status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta, const sp<AMessage> ¬ify) {
@@ -189,19 +187,11 @@
}
void MediaCodecSource::Puller::stop() {
- bool interrupt = false;
- {
- // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
- // stop.
- Mutexed<Queue>::Locked queue(mQueue);
- queue->mPulling = false;
- interrupt = queue->mReadPendingSince && (queue->mReadPendingSince < ALooper::GetNowUs() - 1000000);
- queue->flush(); // flush any unprocessed pulled buffers
- }
-
- if (interrupt) {
- interruptSource();
- }
+ // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
+ // stop.
+ Mutexed<Queue>::Locked queue(mQueue);
+ queue->mPulling = false;
+ queue->flush(); // flush any unprocessed pulled buffers
}
void MediaCodecSource::Puller::interruptSource() {
@@ -685,9 +675,9 @@
if (mStopping && reachedEOS) {
ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
if (mPuller != NULL) {
- mPuller->stopSource();
+ mPuller->interruptSource();
}
- ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
+ ALOGI("source (%s) stopped", mIsVideo ? "video" : "audio");
// posting reply to everyone that's waiting
List<sp<AReplyToken>>::iterator it;
for (it = mStopReplyIDQueue.begin();
@@ -715,6 +705,9 @@
status_t MediaCodecSource::feedEncoderInputBuffers() {
MediaBufferBase* mbuf = NULL;
while (!mAvailEncoderInputIndices.empty() && mPuller->readBuffer(&mbuf)) {
+ if (!mEncoder) {
+ return BAD_VALUE;
+ }
size_t bufferIndex = *mAvailEncoderInputIndices.begin();
mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
@@ -893,7 +886,7 @@
{
int32_t eos = 0;
if (msg->findInt32("eos", &eos) && eos) {
- ALOGV("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
+ ALOGI("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
signalEOS();
break;
}
@@ -1111,12 +1104,7 @@
if (generation != mGeneration) {
break;
}
-
- if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
- ALOGV("source (%s) stopping", mIsVideo ? "video" : "audio");
- mPuller->interruptSource();
- ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
- }
+ ALOGD("source (%s) stopping stalled", mIsVideo ? "video" : "audio");
signalEOS();
break;
}
@@ -1148,7 +1136,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
params->setInt64(PARAMETER_KEY_OFFSET_TIME, mInputBufferTimeOffsetUs);
- err = mEncoder->setParameters(params);
+ err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
}
sp<AMessage> response = new AMessage;
@@ -1168,7 +1156,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
params->setInt64("stop-time-us", stopTimeUs);
- err = mEncoder->setParameters(params);
+ err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
} else {
err = mPuller->setStopTimeUs(stopTimeUs);
}
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c91386d..a946f71 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -76,6 +76,7 @@
mFileMeta.clear();
mWriter.clear();
mTrackList.clear();
+ mFormatList.clear();
}
ssize_t MediaMuxer::addTrack(const sp<AMessage> &format) {
@@ -109,6 +110,8 @@
ALOGW("addTrack() setCaptureRate failed :%d", result);
}
}
+
+ mFormatList.add(format);
return mTrackList.add(newTrack);
}
@@ -224,9 +227,42 @@
ALOGV("BUFFER_FLAG_EOS");
}
+ sp<AMessage> bufMeta = buffer->meta();
+ int64_t val64;
+ if (bufMeta->findInt64("sample-file-offset", &val64)) {
+ sampleMetaData.setInt64(kKeySampleFileOffset, val64);
+ }
+ if (bufMeta->findInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ &val64)) {
+ sampleMetaData.setInt64(kKeyLastSampleIndexInChunk, val64);
+ }
+
sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
// This pushBuffer will wait until the mediaBuffer is consumed.
return currentTrack->pushBuffer(mediaBuffer);
}
+ssize_t MediaMuxer::getTrackCount() {
+ Mutex::Autolock autoLock(mMuxerLock);
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackCount() must be called either in INITIALIZED or STARTED state");
+ return -1;
+ }
+ return mTrackList.size();
+}
+
+sp<AMessage> MediaMuxer::getTrackFormat(size_t idx) {
+ Mutex::Autolock autoLock(mMuxerLock);
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackFormat() must be called either in INITIALIZED or STARTED state");
+ return nullptr;
+ }
+ if (idx < 0 || idx >= mFormatList.size()) {
+ ALOGE("getTrackFormat() idx is out of range");
+ return nullptr;
+ }
+ return mFormatList[idx];
+}
+
} // namespace android
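
The two buffer-meta keys copied above are the bridge between MediaAppender and MPEG4Writer: the appender stamps them on each ABuffer (see MediaAppender::start() earlier in this change), MediaMuxer moves them into the sample MetaData here, and MediaTrack/MPEG4Writer then read them back as kKeySampleFileOffset / kKeyLastSampleIndexInChunk. A sketch of the producer side, with sampleSize, fileOffset, lastIndexInChunk, trackIndex, timeUs and flags assumed to be known:

    // Sketch only: attaching the offset metadata before MediaMuxer::writeSampleData(),
    // mirroring what MediaAppender::start() does above.
    sp<ABuffer> sample = new ABuffer(sampleSize);
    sample->meta()->setInt64("sample-file-offset", fileOffset);
    sample->meta()->setInt64("last-sample-index-in-chunk", lastIndexInChunk);
    muxer->writeSampleData(sample, trackIndex, timeUs, flags);
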
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 24ba38a..2447f5e 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -133,6 +133,14 @@
if (format->mFormat->findInt64("target-time", &val64)) {
meta.setInt64(kKeyTargetTime, val64);
}
+ if (format->mFormat->findInt64("sample-file-offset", &val64)) {
+ meta.setInt64(kKeySampleFileOffset, val64);
+ }
+ if (format->mFormat->findInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ &val64)) {
+ meta.setInt64(kKeyLastSampleIndexInChunk, val64);
+ }
int32_t val32;
if (format->mFormat->findInt32("is-sync-frame", &val32)) {
meta.setInt32(kKeyIsSyncFrame, val32);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index f2c7dd6..f0383b5 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -189,6 +189,11 @@
return err;
}
+const char* NuMediaExtractor::getName() const {
+ Mutex::Autolock autoLock(mLock);
+ return mImpl == nullptr ? nullptr : mImpl->name().string();
+}
+
static String8 arrayToString(const std::vector<uint8_t> &array) {
String8 result;
for (size_t i = 0; i < array.size(); i++) {
diff --git a/media/libstagefright/OWNERS b/media/libstagefright/OWNERS
index 819389d..0cc2294 100644
--- a/media/libstagefright/OWNERS
+++ b/media/libstagefright/OWNERS
@@ -4,4 +4,8 @@
lajos@google.com
marcone@google.com
taklee@google.com
-wonsik@google.com
\ No newline at end of file
+wonsik@google.com
+
+# LON
+olly@google.com
+andrewlewis@google.com
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index dff7b22..7ce2968 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -34,6 +34,9 @@
"presubmit": [
{
"name": "mediacodecTest"
+ },
+ {
+ "name": "CtsMediaTranscodingTestCases"
}
],
"postsubmit": [
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 51d9730..4c18f87 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -47,6 +47,16 @@
#include <media/AudioParameter.h>
#include <system/audio.h>
+// TODO : Remove the defines once mainline media is built against NDK >= 31.
+// The mp4 extractor is part of mainline and builds against NDK 29 as of
+// writing. These keys are available only from NDK 31:
+#define AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION \
+ "mpegh-profile-level-indication"
+#define AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT \
+ "mpegh-reference-channel-layout"
+#define AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS \
+ "mpegh-compatible-sets"
+
namespace android {
static status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
@@ -725,16 +735,19 @@
}
};
-static std::vector<std::pair<const char *, uint32_t>> int64Mappings {
+static std::vector<std::pair<const char*, uint32_t>> int64Mappings {
{
- { "exif-offset", kKeyExifOffset },
- { "exif-size", kKeyExifSize },
- { "xmp-offset", kKeyXmpOffset },
- { "xmp-size", kKeyXmpSize },
- { "target-time", kKeyTargetTime },
- { "thumbnail-time", kKeyThumbnailTime },
- { "timeUs", kKeyTime },
- { "durationUs", kKeyDuration },
+ { "exif-offset", kKeyExifOffset},
+ { "exif-size", kKeyExifSize},
+ { "xmp-offset", kKeyXmpOffset},
+ { "xmp-size", kKeyXmpSize},
+ { "target-time", kKeyTargetTime},
+ { "thumbnail-time", kKeyThumbnailTime},
+ { "timeUs", kKeyTime},
+ { "durationUs", kKeyDuration},
+ { "sample-file-offset", kKeySampleFileOffset},
+ { "last-sample-index-in-chunk", kKeyLastSampleIndexInChunk},
+ { "sample-time-before-append", kKeySampleTimeBeforeAppend},
}
};
@@ -1082,6 +1095,25 @@
msg->setInt32("is-adts", isADTS);
}
+ int32_t mpeghProfileLevelIndication;
+ if (meta->findInt32(kKeyMpeghProfileLevelIndication, &mpeghProfileLevelIndication)) {
+ msg->setInt32(AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION,
+ mpeghProfileLevelIndication);
+ }
+ int32_t mpeghReferenceChannelLayout;
+ if (meta->findInt32(kKeyMpeghReferenceChannelLayout, &mpeghReferenceChannelLayout)) {
+ msg->setInt32(AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT,
+ mpeghReferenceChannelLayout);
+ }
+ if (meta->findData(kKeyMpeghCompatibleSets, &type, &data, &size)) {
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+ msg->setBuffer(AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS, buffer);
+ memcpy(buffer->data(), data, size);
+ }
+
int32_t aacProfile = -1;
if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
msg->setInt32("aac-profile", aacProfile);
@@ -1675,7 +1707,7 @@
if (msg->findString("mime", &mime)) {
meta->setCString(kKeyMIMEType, mime.c_str());
} else {
- ALOGI("did not find mime type");
+ ALOGV("did not find mime type");
return BAD_VALUE;
}
@@ -1704,6 +1736,12 @@
meta->setInt32(kKeyIsSyncFrame, 1);
}
+ // Mode for media transcoding.
+ int32_t isBackgroundMode;
+ if (msg->findInt32("android._background-mode", &isBackgroundMode) && isBackgroundMode != 0) {
+ meta->setInt32(kKeyBackgroundMode, 1);
+ }
+
int32_t avgBitrate = 0;
int32_t maxBitrate;
if (msg->findInt32("bitrate", &avgBitrate) && avgBitrate > 0) {
@@ -1725,7 +1763,7 @@
meta->setInt32(kKeyWidth, width);
meta->setInt32(kKeyHeight, height);
} else {
- ALOGI("did not find width and/or height");
+ ALOGV("did not find width and/or height");
return BAD_VALUE;
}
@@ -1814,7 +1852,7 @@
int32_t numChannels, sampleRate;
if (!msg->findInt32("channel-count", &numChannels) ||
!msg->findInt32("sample-rate", &sampleRate)) {
- ALOGI("did not find channel-count and/or sample-rate");
+ ALOGV("did not find channel-count and/or sample-rate");
return BAD_VALUE;
}
meta->setInt32(kKeyChannelCount, numChannels);
@@ -1841,6 +1879,23 @@
meta->setInt32(kKeyIsADTS, isADTS);
}
+ int32_t mpeghProfileLevelIndication = -1;
+ if (msg->findInt32(AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION,
+ &mpeghProfileLevelIndication)) {
+ meta->setInt32(kKeyMpeghProfileLevelIndication, mpeghProfileLevelIndication);
+ }
+ int32_t mpeghReferenceChannelLayout = -1;
+ if (msg->findInt32(AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT,
+ &mpeghReferenceChannelLayout)) {
+ meta->setInt32(kKeyMpeghReferenceChannelLayout, mpeghReferenceChannelLayout);
+ }
+ sp<ABuffer> mpeghCompatibleSets;
+ if (msg->findBuffer(AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS,
+ &mpeghCompatibleSets)) {
+ meta->setData(kKeyMpeghCompatibleSets, kTypeHCOS,
+ mpeghCompatibleSets->data(), mpeghCompatibleSets->size());
+ }
+
int32_t aacProfile = -1;
if (msg->findInt32("aac-profile", &aacProfile)) {
meta->setInt32(kKeyAACAOT, aacProfile);
@@ -2169,7 +2224,7 @@
}
info->duration_us = duration;
- int32_t brate = -1;
+ int32_t brate = 0;
if (!meta->findInt32(kKeyBitRate, &brate)) {
ALOGV("track of type '%s' does not publish bitrate", mime);
}
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index c216bc5..ada5d81 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -58,6 +58,8 @@
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC = "audio/eac3-joc";
const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 784e802..30d0ae6 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -146,6 +146,10 @@
int WriteOpusHeader(const OpusHeader &header, int input_sample_rate,
uint8_t* output, size_t output_size) {
// See https://wiki.xiph.org/OggOpus#ID_Header.
+ if (header.channels < 1 || header.channels > kMaxChannels) {
+ ALOGE("Invalid channel count: %d", header.channels);
+ return -1;
+ }
const size_t total_size = kOpusHeaderStreamMapOffset + header.channels;
if (output_size < total_size) {
ALOGE("Output buffer too small for header.");
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index e96243e..f5cecef 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -60,6 +60,8 @@
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3_JOC;
extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
@@ -90,6 +92,8 @@
kAudioEncodingPcm16bit = 2,
kAudioEncodingPcm8bit = 3,
kAudioEncodingPcmFloat = 4,
+ kAudioEncodingPcm24bitPacked = 21,
+ kAudioEncodingPcm32bit = 22,
};
} // namespace android
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index e97f6eb..b7c9062 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -234,9 +234,11 @@
}
// first handle global unsynchronization
+ bool hasGlobalUnsync = false;
if (header.flags & 0x80) {
ALOGV("removing unsynchronization");
+ hasGlobalUnsync = true;
removeUnsynchronization();
}
@@ -341,12 +343,12 @@
memcpy(copy, mData, size);
- bool success = removeUnsynchronizationV2_4(false /* iTunesHack */);
+ bool success = removeUnsynchronizationV2_4(false /* iTunesHack */, hasGlobalUnsync);
if (!success) {
memcpy(mData, copy, size);
mSize = size;
- success = removeUnsynchronizationV2_4(true /* iTunesHack */);
+ success = removeUnsynchronizationV2_4(true /* iTunesHack */, hasGlobalUnsync);
if (success) {
ALOGV("Had to apply the iTunes hack to parse this ID3 tag");
@@ -407,7 +409,7 @@
}
}
-bool ID3::removeUnsynchronizationV2_4(bool iTunesHack) {
+bool ID3::removeUnsynchronizationV2_4(bool iTunesHack, bool hasGlobalUnsync) {
size_t oldSize = mSize;
size_t offset = mFirstFrameOffset;
@@ -443,7 +445,7 @@
flags &= ~1;
}
- if ((flags & 2) && (dataSize >= 2)) {
+ if (!hasGlobalUnsync && (flags & 2) && (dataSize >= 2)) {
// This frame has "unsynchronization", so we have to replace occurrences
// of 0xff 0x00 with just 0xff in order to get the real data.
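
Background for the unsynchronization fix above: ID3 stuffs a 0x00 byte after 0xFF bytes so the tag can never mimic an MPEG sync word, and undoing it means collapsing every 0xFF 0x00 pair back to 0xFF. When the tag-level (global) flag was already handled in removeUnsynchronization(), re-applying the same collapse per frame corrupts the data, which is what the hasGlobalUnsync guard prevents. An illustrative helper showing the collapse itself:

    // Sketch only: collapse "FF 00" pairs in place and return the new length.
    #include <stddef.h>
    #include <stdint.h>

    static size_t collapseUnsync(uint8_t* data, size_t len) {
        size_t out = 0;
        for (size_t in = 0; in < len; ++in, ++out) {
            data[out] = data[in];
            if (data[in] == 0xff && in + 1 < len && data[in + 1] == 0x00) {
                ++in;  // drop the stuffed 0x00 byte
            }
        }
        return out;
    }
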
diff --git a/media/libstagefright/id3/test/AndroidTest.xml b/media/libstagefright/id3/test/AndroidTest.xml
index d6ea470..50f9253 100644
--- a/media/libstagefright/id3/test/AndroidTest.xml
+++ b/media/libstagefright/id3/test/AndroidTest.xml
@@ -19,7 +19,7 @@
<option name="cleanup" value="true" />
<option name="push" value="ID3Test->/data/local/tmp/ID3Test" />
<option name="push-file"
- key="https://storage.googleapis.com/android_media/frameworks/av/media/libstagefright/id3/test/ID3Test-1.1.zip?unzip=true"
+ key="https://storage.googleapis.com/android_media/frameworks/av/media/libstagefright/id3/test/ID3Test-1.2.zip?unzip=true"
value="/data/local/tmp/ID3TestRes/" />
</target_preparer>
diff --git a/media/libstagefright/id3/test/ID3Test.cpp b/media/libstagefright/id3/test/ID3Test.cpp
index 8db83cb..1ceeb6a 100644
--- a/media/libstagefright/id3/test/ID3Test.cpp
+++ b/media/libstagefright/id3/test/ID3Test.cpp
@@ -135,6 +135,7 @@
} else {
ASSERT_EQ(data, nullptr) << "Found album art when expected none!";
}
+
#if (LOG_NDEBUG == 0)
hexdump(data, dataSize > 128 ? 128 : dataSize);
#endif
@@ -186,7 +187,8 @@
"bbb_1sec_v23_3tags.mp3",
"bbb_1sec_v1_5tags.mp3",
"bbb_2sec_v24_unsynchronizedOneFrame.mp3",
- "bbb_2sec_v24_unsynchronizedAllFrames.mp3"));
+ "bbb_2sec_v24_unsynchronizedAllFrames.mp3",
+ "idv24_unsynchronized.mp3"));
INSTANTIATE_TEST_SUITE_P(
id3TestAll, ID3versionTest,
@@ -201,7 +203,8 @@
make_pair("bbb_1sec_v1_5tags.mp3", ID3::ID3_V1_1),
make_pair("bbb_1sec_v1_3tags.mp3", ID3::ID3_V1_1),
make_pair("bbb_2sec_v24_unsynchronizedOneFrame.mp3", ID3::ID3_V2_4),
- make_pair("bbb_2sec_v24_unsynchronizedAllFrames.mp3", ID3::ID3_V2_4)));
+ make_pair("bbb_2sec_v24_unsynchronizedAllFrames.mp3", ID3::ID3_V2_4),
+ make_pair("idv24_unsynchronized.mp3", ID3::ID3_V2_4)));
INSTANTIATE_TEST_SUITE_P(
id3TestAll, ID3textTagTest,
@@ -227,7 +230,9 @@
make_pair("bbb_2sec_1_image.mp3", true),
make_pair("bbb_2sec_2_image.mp3", true),
make_pair("bbb_2sec_largeSize.mp3", true),
- make_pair("bbb_1sec_v1_5tags.mp3", false)));
+ make_pair("bbb_1sec_v1_5tags.mp3", false),
+ make_pair("idv24_unsynchronized.mp3", true)
+ ));
INSTANTIATE_TEST_SUITE_P(id3TestAll, ID3multiAlbumArtTest,
::testing::Values(make_pair("bbb_1sec_v23.mp3", 0),
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 0be5896..bd0d27c 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -91,7 +91,7 @@
bool parseV1(DataSourceBase *source);
bool parseV2(DataSourceBase *source, off64_t offset);
void removeUnsynchronization();
- bool removeUnsynchronizationV2_4(bool iTunesHack);
+ bool removeUnsynchronizationV2_4(bool iTunesHack, bool hasGlobalUnsync);
static bool ParseSyncsafeInteger(const uint8_t encoded[4], size_t *x);
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 2582ed0..7c3eca6 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -97,6 +97,7 @@
sp<MetaData> mStartMeta;
status_t mInitCheck;
bool mIsRealTimeRecording;
+ bool mIsBackgroundMode;
bool mUse4ByteNalLength;
bool mIsFileSizeLimitExplicitlyRequested;
bool mPaused;
@@ -106,6 +107,7 @@
off64_t mOffset;
off64_t mPreAllocateFileEndOffset; //End of file offset during preallocation.
off64_t mMdatOffset;
+ off64_t mMaxOffsetAppend; // File offset written up to while appending.
off64_t mMdatEndOffset; // End offset of mdat atom.
uint8_t *mInMemoryCache;
off64_t mInMemoryCacheOffset;
@@ -274,6 +276,10 @@
// By default, real time recording is on.
bool isRealTimeRecording() const;
+ // Return whether the writer is used in background mode for media
+ // transcoding.
+ bool isBackgroundMode() const;
+
void lock();
void unlock();
diff --git a/media/libstagefright/include/media/stagefright/MediaAppender.h b/media/libstagefright/include/media/stagefright/MediaAppender.h
new file mode 100644
index 0000000..c2f6f10
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaAppender.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_APPENDER_H
+#define ANDROID_MEDIA_APPENDER_H
+
+#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <stack>
+
+namespace android {
+
+struct MediaAppender : public MediaMuxerBase {
+public:
+ enum AppendMode {
+ APPEND_MODE_FIRST = 0,
+ APPEND_MODE_IGNORE_LAST_VIDEO_GOP = APPEND_MODE_FIRST,
+ APPEND_MODE_ADD_TO_EXISTING_DATA = 1,
+ APPEND_MODE_LAST = APPEND_MODE_ADD_TO_EXISTING_DATA,
+ };
+
+ static sp<MediaAppender> create(int fd, AppendMode mode);
+
+ virtual ~MediaAppender();
+
+ status_t init();
+
+ status_t start();
+
+ status_t stop();
+
+ status_t writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex, int64_t timeUs,
+ uint32_t flags);
+
+ status_t setOrientationHint(int degrees);
+
+ status_t setLocation(int latitude, int longitude);
+
+ ssize_t addTrack(const sp<AMessage> &format);
+
+ ssize_t getTrackCount();
+
+ sp<AMessage> getTrackFormat(size_t idx);
+
+private:
+ MediaAppender(int fd, AppendMode mode);
+
+ int mFd;
+ MediaMuxer::OutputFormat mFormat;
+ AppendMode mMode;
+ sp<NuMediaExtractor> mExtractor;
+ sp<MediaMuxer> mMuxer;
+ size_t mTrackCount;
+ // Maps track indices given by the extractor to the ones received from the muxer.
+ std::map<size_t, ssize_t> mTrackIndexMap;
+ // Count of the samples in each track, indexed by extractor track ids.
+ std::vector<size_t> mSampleCountVect;
+ // Extractor track index of samples.
+ std::vector<size_t> mSampleIndexVect;
+ // Track format indexed by extractor track ids.
+ std::map<size_t, sp<AMessage>> mFmtIndexMap;
+ // Size of samples.
+ std::vector<size_t> mSampleSizeVect;
+ // Presentation time stamp of samples.
+ std::vector<int64_t> mSampleTimeVect;
+ // Timestamp of the last sample of each track.
+ std::vector<int64_t> mMaxTimestampVect;
+ // Metadata of samples.
+ std::vector<sp<MetaData>> mSampleMetaVect;
+ std::mutex mMutex;
+ // Timestamp of the last sync sample of each track.
+ std::vector<int64_t> mLastSyncSampleTimeVect;
+
+ struct sampleDataInfo;
+ std::vector<sampleDataInfo> mSDI;
+
+ enum : int {
+ UNINITIALIZED,
+ INITIALIZED,
+ STARTED,
+ STOPPED,
+ ERROR,
+ } mState GUARDED_BY(mMutex);
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_APPENDER_H
\ No newline at end of file
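The new MediaAppender header above only declares the interface. A hedged sketch of how it might be driven, using just the methods declared there; the sequencing beyond create()/init()/start()/writeSampleData()/stop() is an assumption, since the implementation is not part of this hunk:

```cpp
// Sketch: append one sample to an existing MP4 via the declared MediaAppender API.
#include <media/stagefright/MediaAppender.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <utils/Errors.h>

using namespace android;

status_t appendOneBuffer(int fd, const sp<ABuffer> &buffer, int64_t timeUs) {
    sp<MediaAppender> appender =
            MediaAppender::create(fd, MediaAppender::APPEND_MODE_IGNORE_LAST_VIDEO_GOP);
    if (appender == nullptr) {
        return NO_INIT;
    }
    status_t err = appender->init();          // scans the existing file for tracks
    if (err != OK) {
        return err;
    }
    if (appender->getTrackCount() <= 0) {     // nothing to append to
        return UNKNOWN_ERROR;
    }
    err = appender->start();
    if (err != OK) {
        return err;
    }
    // Append the new sample to the first track; flags as in MediaCodec.
    err = appender->writeSampleData(buffer, 0 /* trackIndex */, timeUs, 0 /* flags */);
    status_t stopErr = appender->stop();
    return err != OK ? err : stopErr;
}
```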
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 3f93e6d..0584054 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -572,8 +572,9 @@
int64_t mBytesEncoded = 0;
int64_t mEarliestEncodedPtsUs = INT64_MAX;
int64_t mLatestEncodedPtsUs = INT64_MIN;
- int32_t mFramesEncoded = 0;
-
+ int64_t mFramesEncoded = 0;
+ int64_t mBytesInput = 0;
+ int64_t mFramesInput = 0;
int64_t mNumLowLatencyEnables; // how many times low latency mode is enabled
int64_t mNumLowLatencyDisables; // how many times low latency mode is disabled
@@ -590,7 +591,7 @@
sp<BatteryChecker> mBatteryChecker;
- void statsBufferSent(int64_t presentationUs);
+ void statsBufferSent(int64_t presentationUs, const sp<MediaCodecBuffer> &buffer);
void statsBufferReceived(int64_t presentationUs, const sp<MediaCodecBuffer> &buffer);
enum {
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 1a5609a..6371769 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -684,6 +684,7 @@
constexpr char FEATURE_AdaptivePlayback[] = "adaptive-playback";
constexpr char FEATURE_IntraRefresh[] = "intra-refresh";
constexpr char FEATURE_PartialFrame[] = "partial-frame";
+constexpr char FEATURE_QpBounds[] = "qp-bounds";
constexpr char FEATURE_SecurePlayback[] = "secure-playback";
constexpr char FEATURE_TunneledPlayback[] = "tunneled-playback";
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index a1b9465..e97a65e 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -22,7 +22,12 @@
#include <utils/Vector.h>
#include <utils/threads.h>
+#include <map>
+#include <mutex>
+#include <vector>
+
#include "media/stagefright/foundation/ABase.h"
+#include "MediaMuxerBase.h"
namespace android {
@@ -33,6 +38,7 @@
struct MediaSource;
class MetaData;
struct MediaWriter;
+struct NuMediaExtractor;
// MediaMuxer is used to mux multiple tracks into a video. Currently, we only
// support a mp4 file as the output.
@@ -40,19 +46,8 @@
// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
// If muxing operation need to be cancelled, the app is responsible for
// deleting the output file after stop.
-struct MediaMuxer : public RefBase {
+struct MediaMuxer : public MediaMuxerBase {
public:
- // Please update media/java/android/media/MediaMuxer.java if the
- // OutputFormat is updated.
- enum OutputFormat {
- OUTPUT_FORMAT_MPEG_4 = 0,
- OUTPUT_FORMAT_WEBM = 1,
- OUTPUT_FORMAT_THREE_GPP = 2,
- OUTPUT_FORMAT_HEIF = 3,
- OUTPUT_FORMAT_OGG = 4,
- OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
- };
-
// Construct the muxer with the file descriptor. Note that the MediaMuxer
// will close this file at stop().
MediaMuxer(int fd, OutputFormat format);
@@ -117,10 +112,25 @@
status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
int64_t timeUs, uint32_t flags) ;
+ /**
+ * Gets the number of tracks added successfully. Should be called in the
+ * INITIALIZED (after constructor) or STARTED (after start()) state.
+ * @return the number of tracks, or -1 if called in the wrong state.
+ */
+ ssize_t getTrackCount();
+
+ /**
+ * Gets the format of the track by its index.
+ * @param idx index of the track whose format is wanted.
+ * @return smart pointer to AMessage containing the format details.
+ */
+ sp<AMessage> getTrackFormat(size_t idx);
+
private:
const OutputFormat mFormat;
sp<MediaWriter> mWriter;
Vector< sp<MediaAdapter> > mTrackList; // Each track has its MediaAdapter.
+ Vector< sp<AMessage> > mFormatList; // Format of each track.
sp<MetaData> mFileMeta; // Metadata for the whole file.
Mutex mMuxerLock;
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxerBase.h b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
new file mode 100644
index 0000000..f02d510
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_MUXER_BASE_H_
+#define MEDIA_MUXER_BASE_H_
+
+#include <utils/RefBase.h>
+#include "media/stagefright/foundation/ABase.h"
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+
+// MediaMuxer is used to mux multiple tracks into a video. Currently, we only
+// support an MP4 file as the output.
+// The expected calling order of the functions is:
+// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
+// If the muxing operation needs to be cancelled, the app is responsible for
+// deleting the output file after stop.
+struct MediaMuxerBase : public RefBase {
+public:
+ // Please update media/java/android/media/MediaMuxer.java if the
+ // OutputFormat is updated.
+ enum OutputFormat {
+ OUTPUT_FORMAT_MPEG_4 = 0,
+ OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_THREE_GPP = 2,
+ OUTPUT_FORMAT_HEIF = 3,
+ OUTPUT_FORMAT_OGG = 4,
+ OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
+ };
+
+ // Default constructor. Concrete muxers are constructed with a file
+ // descriptor and will close it at stop().
+ MediaMuxerBase() {};
+
+ virtual ~MediaMuxerBase() {};
+
+ /**
+ * Add a track with its format information. This should be
+ * called before start().
+ * @param format the track's format.
+ * @return the track's index, or a negative number on error.
+ */
+ virtual ssize_t addTrack(const sp<AMessage> &format) = 0;
+
+ /**
+ * Start muxing. Make sure all the tracks have been added before
+ * calling this.
+ */
+ virtual status_t start() = 0;
+
+ /**
+ * Set the orientation hint.
+ * @param degrees The rotation degrees. It has to be either 0,
+ * 90, 180 or 270.
+ * @return OK if no error.
+ */
+ virtual status_t setOrientationHint(int degrees) = 0;
+
+ /**
+ * Set the location.
+ * @param latitude The latitude in degree x 1000. Its value must be in the range
+ * [-900000, 900000].
+ * @param longitude The longitude in degree x 1000. Its value must be in the range
+ * [-1800000, 1800000].
+ * @return OK if no error.
+ */
+ virtual status_t setLocation(int latitude, int longitude) = 0;
+
+ /**
+ * Stop muxing.
+ * This method is a blocking call. Depending on how much data is
+ * buffered internally, stopping the muxer may take a long time.
+ * Calling this from the UI thread is not recommended.
+ * @return OK if no error.
+ */
+ virtual status_t stop() = 0;
+
+ /**
+ * Send a sample buffer for muxing.
+ * The buffer can be reused once this method returns. Typically,
+ * this function won't block for very long, and thus there
+ * is no need to use a separate thread to call this method to
+ * push a buffer.
+ * @param buffer the incoming sample buffer.
+ * @param trackIndex the buffer's track index number.
+ * @param timeUs the buffer's time stamp.
+ * @param flags the only supported flag for now is
+ * MediaCodec::BUFFER_FLAG_SYNCFRAME.
+ * @return OK if no error.
+ */
+ virtual status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
+ int64_t timeUs, uint32_t flags) = 0 ;
+
+ /**
+ * Gets the number of tracks added successfully. Should be called in the
+ * INITIALIZED (after constructor) or STARTED (after start()) state.
+ * @return the number of tracks, or -1 if called in the wrong state.
+ */
+ virtual ssize_t getTrackCount() = 0;
+
+ /**
+ * Gets the format of the track by its index.
+ * @param idx index of the track whose format is wanted.
+ * @return smart pointer to AMessage containing the format details.
+ */
+ virtual sp<AMessage> getTrackFormat(size_t idx) = 0;
+
+private:
+
+ DISALLOW_EVIL_CONSTRUCTORS(MediaMuxerBase);
+};
+
+} // namespace android
+
+#endif // MEDIA_MUXER_BASE_H_
+
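The calling order documented in MediaMuxerBase (Constructor -> addTrack+ -> start -> writeSampleData+ -> stop) maps onto the concrete MediaMuxer as sketched below; the track format and the sample buffer are assumed to come from elsewhere and are not part of this patch:

```cpp
// Sketch: one track, one sample, following the documented calling order.
#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>

using namespace android;

status_t muxOneTrack(int fd, const sp<AMessage> &trackFormat,
                     const sp<ABuffer> &sample, int64_t timeUs) {
    sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
    ssize_t trackIndex = muxer->addTrack(trackFormat);   // addTrack+ before start
    if (trackIndex < 0) {
        return (status_t)trackIndex;
    }
    status_t err = muxer->start();
    if (err != OK) {
        return err;
    }
    err = muxer->writeSampleData(sample, trackIndex, timeUs, 0 /* flags */);
    status_t stopErr = muxer->stop();                     // blocking; may take a while
    return err != OK ? err : stopErr;
}
```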
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 940bd86..c80012e 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -127,6 +127,8 @@
kKeyTrackTimeStatus = 'tktm', // int64_t
kKeyRealTimeRecording = 'rtrc', // bool (int32_t)
+ kKeyBackgroundMode = 'bkmd', // bool (int32_t)
+
kKeyNumBuffers = 'nbbf', // int32_t
// Ogg files can be tagged to be automatically looping...
@@ -153,6 +155,10 @@
kKeyIsADTS = 'adts', // bool (int32_t)
kKeyAACAOT = 'aaot', // int32_t
+ kKeyMpeghProfileLevelIndication = 'hpli', // int32_t
+ kKeyMpeghReferenceChannelLayout = 'hrcl', // int32_t
+ kKeyMpeghCompatibleSets = 'hcos', // raw data
+
// If a MediaBuffer's data represents (at least partially) encrypted
// data, the following fields aid in decryption.
// The data can be thought of as pairs of plain and encrypted data
@@ -264,6 +270,11 @@
// Slow-motion markers
kKeySlowMotionMarkers = 'slmo', // raw data, byte array following spec for
// MediaFormat#KEY_SLOW_MOTION_MARKERS
+
+ kKeySampleFileOffset = 'sfof', // int64_t, sample's offset in a media file.
+ kKeyLastSampleIndexInChunk = 'lsic', // int64_t, index of last sample in a chunk.
+ kKeySampleTimeBeforeAppend = 'lsba', // int64_t, timestamp of last sample of a track.
+
};
enum {
@@ -273,6 +284,7 @@
kTypeAV1C = 'av1c',
kTypeDVCC = 'dvcc',
kTypeD263 = 'd263',
+ kTypeHCOS = 'hcos',
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index d8f2b00..6aa7c0f 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -100,6 +100,10 @@
status_t getAudioPresentations(size_t trackIdx, AudioPresentationCollection *presentations);
+ status_t setPlaybackId(const String8& playbackId);
+
+ const char* getName() const;
+
protected:
virtual ~NuMediaExtractor();
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 5a9760d..67c6102 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -146,7 +146,10 @@
};
static std::vector<std::string> names = {
prefixes[0] + variants[0] + ".xml",
- prefixes[1] + variants[1] + ".xml"
+ prefixes[1] + variants[1] + ".xml",
+
+ // shaping information is not currently variant specific.
+ "media_codecs_shaping.xml"
};
return names;
}
@@ -347,6 +350,7 @@
status_t addFeature(const char **attrs);
status_t addLimit(const char **attrs);
status_t addMapping(const char **attrs);
+ status_t addTuning(const char **attrs);
status_t addQuirk(const char **attrs, const char *prefix = nullptr);
status_t addSetting(const char **attrs, const char *prefix = nullptr);
status_t enterMediaCodec(const char **attrs, bool encoder);
@@ -429,7 +433,7 @@
if (findFileInDirs(searchDirs, fileName, &path)) {
err = parseXmlPath(path);
} else {
- ALOGD("Cannot find %s in search path", fileName.c_str());
+ ALOGI("Did not find %s in search path", fileName.c_str());
}
res = combineStatus(res, err);
}
@@ -439,7 +443,7 @@
status_t MediaCodecsXmlParser::Impl::parseXmlPath(const std::string &path) {
std::lock_guard<std::mutex> guard(mLock);
if (!fileExists(path)) {
- ALOGD("Cannot find %s", path.c_str());
+ ALOGV("Cannot find %s", path.c_str());
mParsingStatus = combineStatus(mParsingStatus, NAME_NOT_FOUND);
return NAME_NOT_FOUND;
}
@@ -743,7 +747,8 @@
// ignore limits and features specified outside of type
if (!mState->inType()
&& (strEq(name, "Limit") || strEq(name, "Feature")
- || strEq(name, "Variant") || strEq(name, "Mapping"))) {
+ || strEq(name, "Variant") || strEq(name, "Mapping")
+ || strEq(name, "Tuning"))) {
PLOGD("ignoring %s specified outside of a Type", name);
return;
} else if (strEq(name, "Limit")) {
@@ -752,6 +757,8 @@
err = addFeature(attrs);
} else if (strEq(name, "Mapping")) {
err = addMapping(attrs);
+ } else if (strEq(name, "Tuning")) {
+ err = addTuning(attrs);
} else if (strEq(name, "Variant") && section != SECTION_VARIANT) {
err = limitVariants(attrs);
mState->enterSection(err == OK ? SECTION_VARIANT : SECTION_UNKNOWN);
@@ -1445,6 +1452,45 @@
return OK;
}
+status_t MediaCodecsXmlParser::Impl::Parser::addTuning(const char **attrs) {
+ CHECK(mState->inType());
+ size_t i = 0;
+ const char *a_name = nullptr;
+ const char *a_value = nullptr;
+
+ while (attrs[i] != nullptr) {
+ CHECK((i & 1) == 0);
+ if (attrs[i + 1] == nullptr) {
+ PLOGD("Mapping: attribute '%s' is null", attrs[i]);
+ return BAD_VALUE;
+ }
+
+ if (strEq(attrs[i], "name")) {
+ a_name = attrs[++i];
+ } else if (strEq(attrs[i], "value")) {
+ a_value = attrs[++i];
+ } else {
+ PLOGD("Tuning: ignoring unrecognized attribute '%s'", attrs[i]);
+ ++i;
+ }
+ ++i;
+ }
+
+ // Every tuning must have both fields
+ if (a_name == nullptr) {
+ PLOGD("Tuning with no 'name' attribute");
+ return BAD_VALUE;
+ }
+
+ if (a_value == nullptr) {
+ PLOGD("Tuning with no 'value' attribute");
+ return BAD_VALUE;
+ }
+
+ mState->addDetail(std::string("tuning-") + a_name, a_value);
+ return OK;
+}
+
status_t MediaCodecsXmlParser::Impl::Parser::addAlias(const char **attrs) {
CHECK(mState->inCodec());
size_t i = 0;
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index 6f55dc0..ecfd85e 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -87,6 +87,7 @@
method public String getName();
method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public String getRank();
+ method public java.util.List<media.codecs.Tuning> getTuning_optional();
method public String getType();
method public java.util.List<media.codecs.Type> getType_optional();
method public String getUpdate();
@@ -136,6 +137,14 @@
method public java.util.List<media.codecs.Setting> getVariant_optional();
}
+ public class Tuning {
+ ctor public Tuning();
+ method public String getName();
+ method public String getValue();
+ method public void setName(String);
+ method public void setValue(String);
+ }
+
public class Type {
ctor public Type();
method public java.util.List<media.codecs.Alias> getAlias();
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 30974f6..c9a7efc 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -64,6 +64,7 @@
<xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Mapping" type="Mapping" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Tuning" type="Tuning" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Variant" type="Variant" minOccurs="0" maxOccurs="unbounded"/>
</xs:choice>
<xs:attribute name="name" type="xs:string"/>
@@ -128,6 +129,10 @@
<xs:attribute name="kind" type="xs:string"/>
<xs:attribute name="value" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Tuning">
+ <xs:attribute name="name" type="xs:string"/>
+ <xs:attribute name="value" type="xs:string"/>
+ </xs:complexType>
<xs:complexType name="Include">
<xs:attribute name="href" type="xs:string"/>
</xs:complexType>
diff --git a/media/libstagefright/xmlparser/test/XMLParserTest.cpp b/media/libstagefright/xmlparser/test/XMLParserTest.cpp
index c411c8d..7629d97 100644
--- a/media/libstagefright/xmlparser/test/XMLParserTest.cpp
+++ b/media/libstagefright/xmlparser/test/XMLParserTest.cpp
@@ -138,6 +138,12 @@
pair<string, string>("mapping-fire-from", "to"),
},
{}, "");
+ setCodecProperties("test11.encoder", true, 11, {}, {}, {}, "video/av01",
+ {
+ pair<string, string>("tuning-hungry", "yes"),
+ pair<string, string>("tuning-pi", "3.1415"),
+ },
+ {}, "");
setRoleProperties("audio_decoder.mp3", false, 1, "audio/mpeg", "test1.decoder",
{pair<string, string>("attribute::disabled", "present"),
@@ -180,6 +186,11 @@
setRoleProperties("video_encoder.hevc", true, 10, "video/hevc", "test10.encoder",
{ pair<string, string>("mapping-fire-from", "to")});
+ setRoleProperties("video_encoder.av01", true, 11, "video/av01", "test11.encoder",
+ {pair<string, string>("tuning-hungry", "yes"),
+ pair<string, string>("tuning-pi", "3.1415")
+ });
+
setServiceAttribute(
{pair<string, string>("domain-telephony", "0"), pair<string, string>("domain-tv", "0"),
pair<string, string>("setting2", "0"), pair<string, string>("variant-variant1", "0")});
diff --git a/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml b/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
index c8913e5..8cae423 100644
--- a/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
+++ b/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
@@ -83,5 +83,10 @@
<MediaCodec name="test10.encoder" type="video/hevc" >
<Mapping kind="fire" name="from" value="to"/>
</MediaCodec>
+ <!-- entry for testing Tuning -->
+ <MediaCodec name="test11.encoder" type="video/av01" >
+ <Tuning name="hungry" value="yes"/>
+ <Tuning name="pi" value="3.1415"/>
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index c82e532..05373c9 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -7,9 +7,3 @@
group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
ioprio rt 4
task_profiles ProcessCapacityHigh HighPerformance
-
-# media.transcoding service is defined on com.android.media apex which goes back
-# to API29, but we only want it started on API31+ devices. So we declare it as
-# "disabled" and start it explicitly on boot.
-on boot
- start media.transcoding
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 3007574..8d527e9 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -85,7 +85,9 @@
cc_library_shared {
name: "libmediandk",
- llndk_stubs: "libmediandk.llndk",
+ llndk: {
+ symbol_file: "libmediandk.map.txt",
+ },
srcs: [
"NdkJavaVMHelper.cpp",
@@ -168,14 +170,6 @@
},
}
-llndk_library {
- name: "libmediandk.llndk",
- symbol_file: "libmediandk.map.txt",
- export_include_dirs: [
- "include",
- ],
-}
-
cc_library {
name: "libmediandk_utils",
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index b019448..05115b9 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -40,6 +40,14 @@
*/
AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
+
+ /**
+ * Device-specific 10-bit depth RAW image format.
+ *
+ * <p>Unprocessed implementation-dependent raw depth measurements, opaque with 10 bit samples
+ * and device specific bit layout.</p>
+ */
+ AIMAGE_FORMAT_RAW_DEPTH10 = 0x1003,
};
// TODO: this only supports ImageReader
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index b75901a..1067e24 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -72,6 +72,7 @@
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
case AIMAGE_FORMAT_DEPTH_JPEG:
+ case AIMAGE_FORMAT_RAW_DEPTH10:
return true;
case AIMAGE_FORMAT_PRIVATE:
// For private format, cpu usage is prohibited.
@@ -102,6 +103,7 @@
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
case AIMAGE_FORMAT_DEPTH_JPEG:
+ case AIMAGE_FORMAT_RAW_DEPTH10:
return 1;
case AIMAGE_FORMAT_PRIVATE:
return 0;
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0c65e9e..07fc5de 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -419,6 +419,7 @@
EXPORT
media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt) {
+ ALOGV("AMediaExtractor_getSampleFormat");
if (fmt == NULL) {
return AMEDIA_ERROR_INVALID_PARAMETER;
}
@@ -428,6 +429,9 @@
if (err != OK) {
return translate_error(err);
}
+#if LOG_NDEBUG == 0
+ sampleMeta->dumpToLog();
+#endif
sp<AMessage> meta;
AMediaFormat_getFormat(fmt, &meta);
@@ -483,6 +487,19 @@
meta->setBuffer(AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO, audioPresentationsData);
}
+ int64_t val64;
+ if (sampleMeta->findInt64(kKeySampleFileOffset, &val64)) {
+ meta->setInt64("sample-file-offset", val64);
+ ALOGV("SampleFileOffset Found");
+ }
+ if (sampleMeta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+ meta->setInt64("last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ val64);
+ ALOGV("kKeyLastSampleIndexInChunk Found");
+ }
+
+ ALOGV("AMediaFormat_toString:%s", AMediaFormat_toString(fmt));
+
return AMEDIA_OK;
}
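With the new per-sample keys plumbed through, a consumer of AMediaExtractor_getSampleFormat() can read them back with the NDK format getters; a hedged sketch, with extractor setup and track selection assumed and not shown in this patch:

```cpp
// Sketch: read the new append-related keys from the current sample's format.
#include <cstdio>
#include <media/NdkMediaExtractor.h>
#include <media/NdkMediaFormat.h>

void logAppendKeys(AMediaExtractor *ex) {
    AMediaFormat *fmt = AMediaFormat_new();
    if (AMediaExtractor_getSampleFormat(ex, fmt) == AMEDIA_OK) {
        int64_t fileOffset = 0, lastIndex = 0;
        if (AMediaFormat_getInt64(fmt, AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET, &fileOffset)) {
            printf("sample file offset: %lld\n", (long long)fileOffset);
        }
        if (AMediaFormat_getInt64(fmt, AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK, &lastIndex)) {
            printf("last sample index in chunk: %lld\n", (long long)lastIndex);
        }
    }
    AMediaFormat_delete(fmt);
}
```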
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 1773023..c1793ce 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -334,6 +334,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME = "is-sync-frame";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK = "last-sample-index-in-chunk";
EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
@@ -359,7 +360,9 @@
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET = "sample-file-offset";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND = "sample-time-before-append";
EXPORT const char* AMEDIAFORMAT_KEY_SAR_HEIGHT = "sar-height";
EXPORT const char* AMEDIAFORMAT_KEY_SAR_WIDTH = "sar-width";
EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index d1992bf..1965e62 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -17,28 +17,24 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "NdkMediaMuxer"
-
-#include <media/NdkMediaMuxer.h>
+#include <android_util_Binder.h>
+#include <jni.h>
+#include <media/IMediaHTTPService.h>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaErrorPriv.h>
#include <media/NdkMediaFormatPriv.h>
-
-
-#include <utils/Log.h>
-#include <utils/StrongPointer.h>
+#include <media/NdkMediaMuxer.h>
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaMuxer.h>
-#include <media/IMediaHTTPService.h>
-#include <android_util_Binder.h>
-
-#include <jni.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
using namespace android;
struct AMediaMuxer {
- sp<MediaMuxer> mImpl;
-
+ sp<MediaMuxerBase> mImpl;
};
extern "C" {
@@ -46,8 +42,15 @@
EXPORT
AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) {
ALOGV("ctor");
- AMediaMuxer *mData = new AMediaMuxer();
- mData->mImpl = new MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+ AMediaMuxer *mData = new (std::nothrow) AMediaMuxer();
+ if (mData == nullptr) {
+ return nullptr;
+ }
+ mData->mImpl = new (std::nothrow) MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+ if (mData->mImpl == nullptr) {
+ delete mData;
+ return nullptr;
+ }
return mData;
}
@@ -94,6 +97,34 @@
muxer->mImpl->writeSampleData(buf, trackIdx, info->presentationTimeUs, info->flags));
}
+EXPORT
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) {
+ ALOGV("append");
+ AMediaMuxer* mData = new (std::nothrow) AMediaMuxer();
+ if (mData == nullptr) {
+ return nullptr;
+ }
+ mData->mImpl = MediaAppender::create(fd, (android::MediaAppender::AppendMode)mode);
+ if (mData->mImpl == nullptr) {
+ delete mData;
+ return nullptr;
+ }
+ return mData;
+}
+
+EXPORT
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer* muxer) {
+ return muxer->mImpl->getTrackCount();
+}
+
+EXPORT
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) {
+ sp<AMessage> format = muxer->mImpl->getTrackFormat(idx);
+ if (format != nullptr) {
+ return AMediaFormat_fromMsg(&format);
+ }
+ return nullptr;
+}
} // extern "C"
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index e19dd3a..71bc6d9 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -50,7 +50,10 @@
*/
typedef struct AImage AImage;
-// Formats not listed here will not be supported by AImageReader
+/**
+ * AImage supported formats: AImageReader only guarantees support for the formats
+ * listed here.
+ */
enum AIMAGE_FORMATS {
/**
* 32 bits RGBA format, 8 bits for each of the four channels.
@@ -813,7 +816,7 @@
* Available since API level 26.
*
* @param image the {@link AImage} of interest.
- * @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
+ * @param buffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
* handle.
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index d86f3c7..4bd7f2a 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -328,10 +328,10 @@
* still acquire images from this {@link AImageReader} and access {@link AHardwareBuffer} via
* {@link AImage_getHardwareBuffer()}. The {@link AHardwareBuffer} gained this way can then
* be passed back to hardware (such as GPU or hardware encoder if supported) for future processing.
- * For example, you can obtain an {@link EGLClientBuffer} from the {@link AHardwareBuffer} by using
- * {@link eglGetNativeClientBufferANDROID} extension and pass that {@link EGLClientBuffer} to {@link
- * eglCreateImageKHR} to create an {@link EGLImage} resource type, which may then be bound to a
- * texture via {@link glEGLImageTargetTexture2DOES} on supported devices. This can be useful for
+ * For example, you can obtain an EGLClientBuffer from the {@link AHardwareBuffer} by using
+ * eglGetNativeClientBufferANDROID extension and pass that EGLClientBuffer to
+ * eglCreateImageKHR to create an EGLImage resource type, which may then be bound to a
+ * texture via glEGLImageTargetTexture2DOES on supported devices. This can be useful for
* transporting textures that may be shared cross-process.</p>
* <p>In general, when software access to image data is not necessary, an {@link AImageReader}
* created with {@link AIMAGE_FORMAT_PRIVATE} format is more efficient, compared with {@link
@@ -339,7 +339,7 @@
*
* <p>Note that not all format and usage flag combination is supported by the {@link AImageReader},
* especially if \c format is {@link AIMAGE_FORMAT_PRIVATE}, \c usage must not include either
- * {@link AHARDWAREBUFFER_USAGE_READ_RARELY} or {@link AHARDWAREBUFFER_USAGE_READ_OFTEN}</p>
+ * {@link AHARDWAREBUFFER_USAGE_CPU_READ_RARELY} or {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}</p>
*
* @param width The default width in pixels of the Images that this reader will produce.
* @param height The default height in pixels of the Images that this reader will produce.
@@ -358,7 +358,7 @@
* <th>Compatible usage flags</th>
* </tr>
* <tr>
- * <td>non-{@link AIMAGE_FORMAT_PRIVATE PRIVATE} formats defined in {@link AImage.h}
+ * <td>non-{@link AIMAGE_FORMAT_PRIVATE} formats defined in {@link NdkImage.h}
* </td>
* <td>{@link AHARDWAREBUFFER_USAGE_CPU_READ_RARELY} or
* {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}</td>
@@ -441,6 +441,10 @@
AImageReader* reader,
AHardwareBuffer* buffer);
+/**
+ * A listener for AHardwareBuffer removal events. Use
+ * {@link AImageReader_setBufferRemovedListener} to register the listener object with the AImageReader.
+ */
typedef struct AImageReader_BufferRemovedListener {
/// Optional application context passed as the first parameter of the callback.
void* context;
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index 2be1d6e..02fdc79 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -40,7 +40,11 @@
__BEGIN_DECLS
+/**
+ * Media error message types returned from NDK media functions.
+ */
typedef enum {
+ /** The requested media operation completed successfully. */
AMEDIA_OK = 0,
/**
@@ -55,14 +59,34 @@
AMEDIACODEC_ERROR_RECLAIMED = 1101,
AMEDIA_ERROR_BASE = -10000,
+
+ /** The called media function failed with an unknown error. */
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
+
+ /** The input media data is corrupt or incomplete. */
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
+
+ /** The required operation or media formats are not supported. */
AMEDIA_ERROR_UNSUPPORTED = AMEDIA_ERROR_BASE - 2,
+
+ /** An invalid (or already closed) object is used in the function call. */
AMEDIA_ERROR_INVALID_OBJECT = AMEDIA_ERROR_BASE - 3,
+
+ /** At least one invalid parameter was passed. */
AMEDIA_ERROR_INVALID_PARAMETER = AMEDIA_ERROR_BASE - 4,
+
+ /** The media object is not in the right state for the required operation. */
AMEDIA_ERROR_INVALID_OPERATION = AMEDIA_ERROR_BASE - 5,
+
+ /** The media stream ended while processing the requested operation. */
AMEDIA_ERROR_END_OF_STREAM = AMEDIA_ERROR_BASE - 6,
+
+ /** An error occurred while the media object was carrying out an I/O operation. */
AMEDIA_ERROR_IO = AMEDIA_ERROR_BASE - 7,
+
+ /** The requested operation would have to block (on I/O or otherwise),
+ * but blocking is not enabled.
+ */
AMEDIA_ERROR_WOULD_BLOCK = AMEDIA_ERROR_BASE - 8,
AMEDIA_DRM_ERROR_BASE = -20000,
@@ -77,10 +101,20 @@
AMEDIA_DRM_LICENSE_EXPIRED = AMEDIA_DRM_ERROR_BASE - 9,
AMEDIA_IMGREADER_ERROR_BASE = -30000,
+
+ /** There are no more image buffers to read/write image data. */
AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE = AMEDIA_IMGREADER_ERROR_BASE - 1,
+
+ /** The AImage object has used up the allowed maximum image buffers. */
AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED = AMEDIA_IMGREADER_ERROR_BASE - 2,
+
+ /** The required image buffer could not be locked to read. */
AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 3,
+
+ /** The media data or buffer could not be unlocked. */
AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 4,
+
+ /** The media/buffer needs to be locked to perform the required operation. */
AMEDIA_IMGREADER_IMAGE_NOT_LOCKED = AMEDIA_IMGREADER_ERROR_BASE - 5,
} media_status_t;
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 476bbd9..fbd855d 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -307,6 +307,9 @@
extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_CSD_AV1C __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_KEY_XMP_OFFSET __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_KEY_XMP_SIZE __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MIN __INTRODUCED_IN(31);
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 519e249..d7eccb8 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -54,6 +54,17 @@
AMEDIAMUXER_OUTPUT_FORMAT_THREE_GPP = 2,
} OutputFormat;
+typedef enum {
+ /* The last group of pictures (GOP) of the video track can be incomplete, so it is safe to
+ * discard it and rewrite. If both audio and video tracks are present in a file, the
+ * audio samples that follow the last video GOP are discarded too.
+ * If only an audio track is present, no samples are discarded.
+ */
+ AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP = 0,
+ // Keep all existing samples as they are and only append new samples after them.
+ AMEDIAMUXER_APPEND_TO_EXISTING_DATA = 1,
+} AppendMode;
+
/**
* Create new media muxer.
*
@@ -138,6 +149,46 @@
size_t trackIdx, const uint8_t *data,
const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
+/**
+ * Creates a new media muxer for appending data to an existing MPEG4 file.
+ * This is a synchronous API call and could take a while to return if the existing file is large.
+ * Only works for MPEG4 files matching one of the following characteristics:
+ * <ul>
+ * <li>a single audio track.</li>
+ * <li>a single video track.</li>
+ * <li>a single audio and a single video track.</li>
+ * </ul>
+ * @param fd Must be opened with read and write permission. Does not take ownership of
+ * this fd; i.e., the caller is responsible for closing it.
+ * @param mode Specifies how data will be appended; the AppendMode enum describes
+ * the possible methods for appending.
+ * @return Pointer to AMediaMuxer if the file (fd) already has tracks; otherwise, nullptr.
+ * {@link AMediaMuxer_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) __INTRODUCED_IN(31);
+
+/**
+ * Returns the number of tracks added in the file passed to {@link AMediaMuxer_new} or
+ * the number of existing tracks in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns -1.
+ *
+ * Available since API level 31.
+ */
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer*) __INTRODUCED_IN(31);
+
+/**
+ * Returns AMediaFormat of the added track with index idx in the file passed to
+ * {@link AMediaMuxer_new} or the AMediaFormat of the existing track with index idx
+ * in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns nullptr.
+ * {@link AMediaFormat_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) __INTRODUCED_IN(31);
+
__END_DECLS
#endif // _NDK_MEDIA_MUXER_H
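A hedged usage sketch of the append entry points declared above, combined with the pre-existing NDK muxer/format calls; the file path and error handling are illustrative only:

```cpp
// Sketch: open an existing MP4, enumerate its tracks, then clean up.
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>
#include <media/NdkMediaFormat.h>
#include <media/NdkMediaMuxer.h>

bool dumpExistingTracks(const char *path) {
    int fd = open(path, O_RDWR);   // caller keeps ownership of the fd
    if (fd < 0) {
        return false;
    }
    AMediaMuxer *muxer = AMediaMuxer_append(fd, AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP);
    if (muxer == nullptr) {        // nullptr if the file has no tracks
        close(fd);
        return false;
    }
    ssize_t count = AMediaMuxer_getTrackCount(muxer);
    for (ssize_t i = 0; i < count; ++i) {
        AMediaFormat *format = AMediaMuxer_getTrackFormat(muxer, i);
        if (format != nullptr) {
            printf("track %zd: %s\n", i, AMediaFormat_toString(format));
            AMediaFormat_delete(format);
        }
    }
    // New samples would then be appended via AMediaMuxer_start() /
    // AMediaMuxer_writeSampleData() / AMediaMuxer_stop().
    AMediaMuxer_delete(muxer);
    close(fd);
    return count > 0;
}
```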
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index eead681..7e9e57e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -109,6 +109,7 @@
AMEDIAFORMAT_KEY_IS_SYNC_FRAME; # var introduced=29
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
+ AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK; # var introduced=31
AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
@@ -134,6 +135,8 @@
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
+ AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET; # var introduced=31
+ AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND; # var introduced=31
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
AMEDIAFORMAT_KEY_SAR_HEIGHT; # var introduced=29
AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
@@ -286,7 +289,10 @@
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
+ AMediaMuxer_append; # introduced=31
AMediaMuxer_delete;
+ AMediaMuxer_getTrackCount; # introduced=31
+ AMediaMuxer_getTrackFormat; # introduced=31
AMediaMuxer_new;
AMediaMuxer_setLocation;
AMediaMuxer_setOrientationHint;
diff --git a/media/tests/SampleVideoEncoder/README.md b/media/tests/SampleVideoEncoder/README.md
index 074c939..2e275c5 100644
--- a/media/tests/SampleVideoEncoder/README.md
+++ b/media/tests/SampleVideoEncoder/README.md
@@ -2,7 +2,7 @@
This is a sample android application for encoding AVC/HEVC streams with B-Frames enabled. It uses MediaRecorder APIs to record B-frames enabled video from camera2 input and MediaCodec APIs to encode reference test vector using input surface.
-This page describes how to get started with the Encoder App.
+This page describes how to get started with the Encoder App and how to run the tests for it.
# Getting Started
@@ -33,6 +33,17 @@
After installing the app, a TextureView showing camera preview is displayed on one third of the screen. It also features checkboxes to select either avc/hevc and hw/sw codecs. It also has an option to select either MediaRecorder APIs or MediaCodec, along with the 'Start' button to start/stop recording.
+# Running Tests
+
+The app also contains a test that exercises the MediaCodec APIs for encoding avc/hevc streams with B-frames enabled. This does not require the application UI.
+
+## Running the tests using atest
+Note that the atest command will install the SampleVideoEncoder app on the device.
+
+Command to run the tests:
+```
+atest SampleVideoEncoder
+```
# Output
@@ -40,3 +51,6 @@
```
/storage/emulated/0/Android/data/com.android.media.samplevideoencoder/files/
```
+
+After encoding with the MediaCodec APIs completes, the total numbers of I-frames, P-frames and B-frames are displayed on the screen.
+The results of the tests can be obtained from the test's logcat.
diff --git a/media/tests/SampleVideoEncoder/app/Android.bp b/media/tests/SampleVideoEncoder/app/Android.bp
index 3a66955..58b219b 100644
--- a/media/tests/SampleVideoEncoder/app/Android.bp
+++ b/media/tests/SampleVideoEncoder/app/Android.bp
@@ -23,7 +23,7 @@
default_applicable_licenses: ["frameworks_av_license"],
}
-android_app {
+android_test {
name: "SampleVideoEncoder",
manifest: "src/main/AndroidManifest.xml",
@@ -41,6 +41,10 @@
"androidx.annotation_annotation",
"androidx.appcompat_appcompat",
"androidx-constraintlayout_constraintlayout",
+ "junit",
+ "androidx.test.core",
+ "androidx.test.runner",
+ "hamcrest-library",
],
javacflags: [
diff --git a/media/tests/SampleVideoEncoder/app/AndroidTest.xml b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
new file mode 100644
index 0000000..91f4304
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs SampleVideoEncoder Tests">
+ <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+ <option name="cleanup-apks" value="false" />
+ <option name="test-file-name" value="SampleVideoEncoder.apk" />
+ </target_preparer>
+
+ <option name="test-tag" value="SampleVideoEncoder" />
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.media.samplevideoencoder" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="hidden-api-checks" value="false"/>
+ </test>
+</configuration>
diff --git a/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
new file mode 100644
index 0000000..1ef332e
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media.samplevideoencoder.tests;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import android.content.Context;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import com.android.media.samplevideoencoder.MediaCodecSurfaceEncoder;
+import com.android.media.samplevideoencoder.R;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThat;
+
+@RunWith(Parameterized.class)
+public class SampleVideoEncoderTest {
+ private static final String TAG = SampleVideoEncoderTest.class.getSimpleName();
+ private final Context mContext;
+ private int mMaxBFrames;
+ private int mInputResId;
+ private String mMime;
+ private boolean mIsSoftwareEncoder;
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> inputFiles() {
+ return Arrays.asList(new Object[][]{
+ // Parameters: MimeType, isSoftwareEncoder, maxBFrames
+ {MediaFormat.MIMETYPE_VIDEO_AVC, false, 1},
+ {MediaFormat.MIMETYPE_VIDEO_AVC, true, 1},
+ {MediaFormat.MIMETYPE_VIDEO_HEVC, false, 1},
+ {MediaFormat.MIMETYPE_VIDEO_HEVC, true, 1}});
+ }
+
+ public SampleVideoEncoderTest(String mimeType, boolean isSoftwareEncoder, int maxBFrames) {
+ this.mContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
+ this.mInputResId = R.raw.crowd_1920x1080_25fps_4000kbps_h265;
+ this.mMime = mimeType;
+ this.mIsSoftwareEncoder = isSoftwareEncoder;
+ this.mMaxBFrames = maxBFrames;
+ }
+
+ private String getOutputPath() {
+ File dir = mContext.getExternalFilesDir(null);
+ if (dir == null) {
+ Log.e(TAG, "Cannot get external directory path to save output video");
+ return null;
+ }
+ String videoPath = dir.getAbsolutePath() + "/Video-" + System.currentTimeMillis() + ".mp4";
+ Log.i(TAG, "Output video is saved at: " + videoPath);
+ return videoPath;
+ }
+
+ @Test
+ public void testMediaSurfaceEncoder() throws IOException, InterruptedException {
+ String outputFilePath = getOutputPath();
+ MediaCodecSurfaceEncoder surfaceEncoder =
+ new MediaCodecSurfaceEncoder(mContext, mInputResId, mMime, mIsSoftwareEncoder,
+ outputFilePath, mMaxBFrames);
+ int encodingStatus = surfaceEncoder.startEncodingSurface();
+ assertThat(encodingStatus, is(equalTo(0)));
+ int[] frameNumArray = surfaceEncoder.getFrameTypes();
+ Log.i(TAG, "Results: I-Frames: " + frameNumArray[0] + "; P-Frames: " + frameNumArray[1] +
+ "\n " + "; B-Frames:" + frameNumArray[2]);
+ assertNotEquals("Encoder mime: " + mMime + " isSoftware: " + mIsSoftwareEncoder +
+ " failed to generate B Frames", frameNumArray[2], 0);
+ }
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
index ed668bb..b17541d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
@@ -38,4 +38,8 @@
</activity>
</application>
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.media.samplevideoencoder"
+ android:label="SampleVideoEncoder Test"/>
+
</manifest>
\ No newline at end of file
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
index 33e81bb..a7a353c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
@@ -56,6 +56,7 @@
import android.util.Log;
import android.util.Size;
import android.widget.RadioGroup;
+import android.widget.TextView;
import android.widget.Toast;
import java.lang.ref.WeakReference;
@@ -80,6 +81,14 @@
private static final int VIDEO_BITRATE = 8000000 /* 8 Mbps */;
private static final int VIDEO_FRAMERATE = 30;
+ /**
+ * Constant values assigned to frame types here are internal to this app.
+ * These values do not correspond to the actual values defined in the avc/hevc specifications.
+ */
+ public static final int FRAME_TYPE_I = 0;
+ public static final int FRAME_TYPE_P = 1;
+ public static final int FRAME_TYPE_B = 2;
+
private String mMime = MediaFormat.MIMETYPE_VIDEO_AVC;
private String mOutputVideoPath = null;
@@ -89,6 +98,7 @@
private boolean mIsRecording;
private AutoFitTextureView mTextureView;
+ private TextView mTextView;
private CameraDevice mCameraDevice;
private CameraCaptureSession mPreviewSession;
private CaptureRequest.Builder mPreviewBuilder;
@@ -101,6 +111,8 @@
private Button mStartButton;
+ private int[] mFrameTypeOccurrences;
+
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
@@ -129,6 +141,8 @@
final CheckBox checkBox_mr = findViewById(R.id.checkBox_media_recorder);
final CheckBox checkBox_mc = findViewById(R.id.checkBox_media_codec);
mTextureView = findViewById(R.id.texture);
+ mTextView = findViewById(R.id.textViewResults);
+
checkBox_mr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
@@ -162,6 +176,7 @@
@Override
public void onClick(View v) {
if (v.getId() == R.id.start_button) {
+ mTextView.setText(null);
if (mIsMediaRecorder) {
if (mIsRecording) {
stopRecordingVideo();
@@ -198,6 +213,7 @@
mainActivity.mOutputVideoPath);
try {
encodingStatus = codecSurfaceEncoder.startEncodingSurface();
+ mainActivity.mFrameTypeOccurrences = codecSurfaceEncoder.getFrameTypes();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
@@ -211,6 +227,13 @@
if (encodingStatus == 0) {
Toast.makeText(mainActivity.getApplicationContext(), "Encoding Completed",
Toast.LENGTH_SHORT).show();
+ mainActivity.mTextView.append("\n Encoded stream contains: ");
+ mainActivity.mTextView.append("\n Number of I-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_I]);
+ mainActivity.mTextView.append("\n Number of P-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_P]);
+ mainActivity.mTextView.append("\n Number of B-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_B]);
} else {
Toast.makeText(mainActivity.getApplicationContext(),
"Error occurred while " + "encoding", Toast.LENGTH_SHORT).show();
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
index 146a475..011c38c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
@@ -31,10 +31,14 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Arrays;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
public class MediaCodecSurfaceEncoder {
private static final String TAG = MediaCodecSurfaceEncoder.class.getSimpleName();
-
private static final boolean DEBUG = false;
private static final int VIDEO_BITRATE = 8000000 /*8 Mbps*/;
private static final int VIDEO_FRAMERATE = 30;
@@ -44,6 +48,8 @@
private final String mMime;
private final String mOutputPath;
private int mTrackID = -1;
+ private int mFrameNum = 0;
+ private int[] mFrameTypeOccurrences = {0, 0, 0};
private Surface mSurface;
private MediaExtractor mExtractor;
@@ -128,8 +134,10 @@
mEncoder.reset();
mSurface.release();
mSurface = null;
+ Log.i(TAG, "Number of I-frames = " + mFrameTypeOccurrences[FRAME_TYPE_I]);
+ Log.i(TAG, "Number of P-frames = " + mFrameTypeOccurrences[FRAME_TYPE_P]);
+ Log.i(TAG, "Number of B-frames = " + mFrameTypeOccurrences[FRAME_TYPE_B]);
}
-
mEncoder.release();
mDecoder.release();
mExtractor.release();
@@ -193,6 +201,8 @@
mSawEncOutputEOS = false;
mDecOutputCount = 0;
mEncOutputCount = 0;
+ mFrameNum = 0;
+ Arrays.fill(mFrameTypeOccurrences, 0);
}
private void configureCodec(MediaFormat decFormat, MediaFormat encFormat) {
@@ -336,6 +346,21 @@
}
if (info.size > 0) {
ByteBuffer buf = mEncoder.getOutputBuffer(bufferIndex);
+ // Parse the buffer to get the frame type
+ if (DEBUG) Log.d(TAG, "[ Frame : " + (mFrameNum++) + " ]");
+ int frameTypeResult = -1;
+ if (MediaFormat.MIMETYPE_VIDEO_AVC.equals(mMime)) {
+ frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromAVC(buf);
+ } else if (MediaFormat.MIMETYPE_VIDEO_HEVC.equals(mMime)) {
+ frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromHEVC(buf);
+ } else {
+ Log.e(TAG, "Mime type " + mMime + " is not supported.");
+ return;
+ }
+ if (frameTypeResult != -1) {
+ mFrameTypeOccurrences[frameTypeResult]++;
+ }
+
if (mMuxer != null) {
if (mTrackID == -1) {
mTrackID = mMuxer.addTrack(mEncoder.getOutputFormat());
@@ -353,4 +378,8 @@
private boolean hasSeenError() {
return mAsyncHandleDecoder.hasSeenError() || mAsyncHandleEncoder.hasSeenError();
}
+
+ public int[] getFrameTypes() {
+ return mFrameTypeOccurrences;
+ }
}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
new file mode 100644
index 0000000..efff4fd
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+import android.util.Log;
+
+import java.nio.ByteBuffer;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
+
+public class NalUnitUtil {
+ private static final String TAG = NalUnitUtil.class.getSimpleName();
+ private static final boolean DEBUG = false;
+
+ public static int findNalUnit(byte[] dataArray, int pos, int limit) {
+ int startOffset = 0;
+ if (limit - pos < 4) {
+ return startOffset;
+ }
+ if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 1) {
+ startOffset = 3;
+ } else {
+ if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 0 &&
+ dataArray[pos + 3] == 1) {
+ startOffset = 4;
+ }
+ }
+ return startOffset;
+ }
+
+ private static int getAVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+ return dataArray[nalUnitOffset] & 0x1F;
+ }
+
+ private static int parseAVCNALUnitData(byte[] dataArray, int offset, int limit) {
+ ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+ bitArray.reset(dataArray, offset, limit);
+
+ bitArray.skipBit(); // forbidden_zero_bit
+ bitArray.readBits(2); // nal_ref_idc
+ bitArray.skipBits(5); // nal_unit_type
+
+ bitArray.readUEV(); // first_mb_in_slice
+ if (!bitArray.canReadUEV()) {
+ return -1;
+ }
+ int sliceType = bitArray.readUEV();
+ if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+ if (sliceType == 0) {
+ return FRAME_TYPE_P;
+ } else if (sliceType == 1) {
+ return FRAME_TYPE_B;
+ } else if (sliceType == 2) {
+ return FRAME_TYPE_I;
+ } else {
+ return -1;
+ }
+ }
+
+ private static int getHEVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+ return (dataArray[nalUnitOffset] & 0x7E) >> 1;
+ }
+
+ private static int parseHEVCNALUnitData(byte[] dataArray, int offset, int limit,
+ int nalUnitType) {
+ // nal_unit_type values from H.265/HEVC Table 7-1.
+ final int BLA_W_LP = 16;
+ final int RSV_IRAP_VCL23 = 23;
+
+ ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+ bitArray.reset(dataArray, offset, limit);
+
+ bitArray.skipBit(); // forbidden zero bit
+ bitArray.readBits(6); // nal_unit_type
+ bitArray.readBits(6); // nuh_layer_id
+ bitArray.readBits(3); // nuh_temporal_id_plus1
+
+ // Parsing slice_segment_header values from H.265/HEVC Section 7.3.6.1
+ boolean first_slice_segment = bitArray.readBit(); // first_slice_segment_in_pic_flag
+ if (!first_slice_segment) return -1;
+ if (nalUnitType >= BLA_W_LP && nalUnitType <= RSV_IRAP_VCL23) {
+ bitArray.readBit(); // no_output_of_prior_pics_flag
+ }
+ bitArray.readUEV(); // slice_pic_parameter_set_id
+ // Assume the num_extra_slice_header_bits element of the PPS data is 0
+ int sliceType = bitArray.readUEV();
+ if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+ if (sliceType == 0) {
+ return FRAME_TYPE_B;
+ } else if (sliceType == 1) {
+ return FRAME_TYPE_P;
+ } else if (sliceType == 2) {
+ return FRAME_TYPE_I;
+ } else {
+ return -1;
+ }
+ }
+
+ public static int getStandardizedFrameTypesFromAVC(ByteBuffer buf) {
+ int limit = buf.limit();
+ byte[] dataArray = new byte[buf.remaining()];
+ buf.get(dataArray);
+ int frameType = -1;
+ for (int pos = 0; pos + 3 < limit; ) {
+ int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+ if (startOffset != 0) {
+ int nalUnitType = getAVCNalUnitType(dataArray, (pos + startOffset));
+ if (DEBUG) {
+ Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+ Log.d(TAG, "NalUnitType = " + nalUnitType);
+ }
+ // SLICE_NAL = 1; IDR_SLICE_NAL = 5
+ if (nalUnitType == 1 || nalUnitType == 5) {
+ frameType = parseAVCNALUnitData(dataArray, (pos + startOffset),
+ (limit - pos - startOffset));
+ break;
+ }
+ pos += 3;
+ } else {
+ pos++;
+ }
+ }
+ return frameType;
+ }
+
+ public static int getStandardizedFrameTypesFromHEVC(ByteBuffer buf) {
+ int limit = buf.limit();
+ byte[] dataArray = new byte[buf.remaining()];
+ buf.get(dataArray);
+ int frameType = -1;
+ for (int pos = 0; pos + 3 < limit; ) {
+ int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+ if (startOffset != 0) {
+ int nalUnitType = NalUnitUtil.getHEVCNalUnitType(dataArray, (pos + startOffset));
+ if (DEBUG) {
+ Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+ Log.d(TAG, "NalUnitType = " + nalUnitType);
+ }
+ // Only parse NAL units that contain a slice header (nal_unit_type 0 to 21)
+ if (nalUnitType >= 0 && nalUnitType <= 21) {
+ frameType = parseHEVCNALUnitData(dataArray, (pos + startOffset),
+ (limit - pos - startOffset), nalUnitType);
+ break;
+ }
+ pos += 3;
+ } else {
+ pos++;
+ }
+ }
+ return frameType;
+ }
+}
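Note: the parser above is driven once per encoder output buffer by MediaCodecSurfaceEncoder. A minimal sketch of the same pattern as a standalone helper (hypothetical class, not part of this change; the counts array is indexed by the FRAME_TYPE_I/P/B constants defined in MainActivity):

package com.android.media.samplevideoencoder;

import java.nio.ByteBuffer;

import android.media.MediaCodec;
import android.media.MediaFormat;

class FrameTypeTally {
    // Indexed by MainActivity.FRAME_TYPE_I / FRAME_TYPE_P / FRAME_TYPE_B (0, 1, 2).
    final int[] counts = new int[3];

    // Call from MediaCodec.Callback#onOutputBufferAvailable for each encoded buffer.
    void onEncodedBuffer(MediaCodec encoder, int bufferIndex, MediaCodec.BufferInfo info,
            String mime) {
        if (info.size <= 0) {
            return; // nothing to parse (e.g. an EOS-only buffer)
        }
        ByteBuffer buf = encoder.getOutputBuffer(bufferIndex);
        int frameType = -1;
        if (MediaFormat.MIMETYPE_VIDEO_AVC.equals(mime)) {
            frameType = NalUnitUtil.getStandardizedFrameTypesFromAVC(buf);
        } else if (MediaFormat.MIMETYPE_VIDEO_HEVC.equals(mime)) {
            frameType = NalUnitUtil.getStandardizedFrameTypesFromHEVC(buf);
        }
        if (frameType != -1) {
            counts[frameType]++; // -1 means no slice NAL unit was found in this buffer
        }
        encoder.releaseOutputBuffer(bufferIndex, false);
    }
}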
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
new file mode 100644
index 0000000..e4bfaa3
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+public class ParsableBitArray {
+ public byte[] data;
+ private int byteOffset;
+ private int bitOffset;
+ private int byteLimit;
+
+ public ParsableBitArray(byte[] dataArray) {
+ this(dataArray, dataArray.length);
+ }
+
+ public ParsableBitArray(byte[] dataArray, int limit) {
+ this.data = dataArray;
+ byteLimit = limit;
+ }
+
+ public void reset(byte[] data, int offset, int limit) {
+ this.data = data;
+ byteOffset = offset;
+ bitOffset = 0;
+ byteLimit = limit;
+ }
+
+ public void skipBit() {
+ if (++bitOffset == 8) {
+ bitOffset = 0;
+ byteOffset++;
+ }
+ }
+
+ public void skipBits(int numBits) {
+ int numBytes = numBits / 8;
+ byteOffset += numBytes;
+ bitOffset += numBits - (numBytes * 8);
+ if (bitOffset > 7) {
+ byteOffset++;
+ bitOffset -= 8;
+ }
+ }
+
+ public boolean readBit() {
+ boolean returnValue = (data[byteOffset] & (0x80 >> bitOffset)) != 0;
+ skipBit();
+ return returnValue;
+ }
+
+ public int readBits(int numBits) {
+ if (numBits == 0) {
+ return 0;
+ }
+ int returnValue = 0;
+ bitOffset += numBits;
+ while (bitOffset > 8) {
+ bitOffset -= 8;
+ returnValue |= (data[byteOffset++] & 0xFF) << bitOffset;
+ }
+ returnValue |= (data[byteOffset] & 0xFF) >> (8 - bitOffset);
+ returnValue &= 0xFFFFFFFF >>> (32 - numBits);
+ if (bitOffset == 8) {
+ bitOffset = 0;
+ byteOffset++;
+ }
+ return returnValue;
+ }
+
+ public boolean canReadUEV() {
+ int initialByteOffset = byteOffset;
+ int initialBitOffset = bitOffset;
+ int leadingZeros = 0;
+ while (byteOffset < byteLimit && !readBit()) {
+ leadingZeros++;
+ }
+ boolean hitLimit = byteOffset == byteLimit;
+ byteOffset = initialByteOffset;
+ bitOffset = initialBitOffset;
+ return !hitLimit && canReadBits(leadingZeros * 2 + 1);
+ }
+
+ public int readUEV() {
+ int leadingZeros = 0;
+ while (!readBit()) {
+ leadingZeros++;
+ }
+ return (1 << leadingZeros) - 1 + (leadingZeros > 0 ? readBits(leadingZeros) : 0);
+ }
+
+ public boolean canReadBits(int numBits) {
+ int oldByteOffset = byteOffset;
+ int numBytes = numBits / 8;
+ int newByteOffset = byteOffset + numBytes;
+ int newBitOffset = bitOffset + numBits - (numBytes * 8);
+ if (newBitOffset > 7) {
+ newByteOffset++;
+ newBitOffset -= 8;
+ }
+ for (int i = oldByteOffset + 1; i <= newByteOffset && newByteOffset < byteLimit; i++) {
+ if (shouldSkipByte(i)) {
+ // Skip the byte and check three bytes ahead.
+ newByteOffset++;
+ i += 2;
+ }
+ }
+ return newByteOffset < byteLimit || (newByteOffset == byteLimit && newBitOffset == 0);
+ }
+
+ private boolean shouldSkipByte(int offset) {
+ return (2 <= offset && offset < byteLimit && data[offset] == (byte) 0x03 &&
+ data[offset - 2] == (byte) 0x00 && data[offset - 1] == (byte) 0x00);
+ }
+
+}
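Note: readUEV() above implements the Exp-Golomb ue(v) coding used for first_mb_in_slice and slice_type. A minimal sketch (hypothetical demo class in the same package): the single byte 0xA6 = 1|010|011|0 carries the code words "1", "010" and "011", i.e. the values 0, 1 and 2, with one trailing padding bit.

package com.android.media.samplevideoencoder;

public class ExpGolombDemo {
    public static void main(String[] args) {
        // 0xA6 = 1 010 011 0 -> ue(v) values 0, 1, 2 (the last bit is unused padding).
        ParsableBitArray bits = new ParsableBitArray(new byte[]{(byte) 0xA6});
        System.out.println(bits.readUEV()); // 0
        System.out.println(bits.readUEV()); // 1
        System.out.println(bits.readUEV()); // 2
    }
}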
diff --git a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
index 164e02a..017012d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
@@ -124,4 +124,15 @@
</FrameLayout>
+ <TextView
+ android:id="@+id/textViewResults"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="10dp"
+ android:fontFamily="sans-serif-medium"
+ android:textSize="18sp"
+ android:textStyle="normal"
+ app:layout_constraintStart_toStartOf="parent"
+ app:layout_constraintTop_toBottomOf="@+id/frameLayout2" />
+
</androidx.constraintlayout.widget.ConstraintLayout>
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 26cdc3a..9e48c1f 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -37,6 +37,8 @@
],
static_libs: [
"libc_malloc_debug_backtrace",
+ "libbatterystats_aidl",
+ "libprocessinfoservice_aidl",
],
shared_libs: [
"libaudioclient_aidl_conversion",
@@ -44,12 +46,16 @@
"libbinder",
"libcutils",
"liblog",
+ "libpermission",
"libutils",
"libhidlbase",
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
"media_permission-aidl-cpp",
],
+ export_static_lib_headers: [
+ "libbatterystats_aidl",
+ ],
logtags: ["EventLogTags.logtags"],
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 19225d3..e212794 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -21,9 +21,9 @@
#include <media/stagefright/ProcessInfo.h>
#include <binder/IPCThreadState.h>
-#include <binder/IProcessInfoService.h>
#include <binder/IServiceManager.h>
#include <private/android_filesystem_config.h>
+#include <processinfo/IProcessInfoService.h>
namespace android {
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index e2e1043..c19fe38 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -114,15 +114,14 @@
if (int32_t mode = appOps.startOpNoThrow(op, identity.uid,
resolvedOpPackageName, /*startIfModeDefault*/ false,
VALUE_OR_FATAL(aidl2legacy_optional_string_view_optional_String16(
- identity.attributionTag)), msg) != AppOpsManager::MODE_ALLOWED) {
+ identity.attributionTag)), msg) == AppOpsManager::MODE_ERRORED) {
ALOGE("Request start for \"%s\" (uid %d) denied by app op: %d, mode: %d",
String8(resolvedOpPackageName).c_str(), identity.uid, op, mode);
return false;
}
} else {
- // Always use OP_RECORD_AUDIO for checks at creation time.
if (int32_t mode = appOps.checkOp(op, uid,
- resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+ resolvedOpPackageName) == AppOpsManager::MODE_ERRORED) {
ALOGE("Request check for \"%s\" (uid %d) denied by app op: %d, mode: %d",
String8(resolvedOpPackageName).c_str(), identity.uid, op, mode);
return false;
@@ -212,7 +211,6 @@
if (ok) {
static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
- // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
ok = PermissionCache::checkPermission(sCaptureHotwordAllowed, pid, uid);
}
if (!ok) ALOGV("android.permission.CAPTURE_AUDIO_HOTWORD");
@@ -299,6 +297,10 @@
return identity;
}
+void purgePermissionCache() {
+ PermissionCache::purgeCache();
+}
+
status_t checkIMemory(const sp<IMemory>& iMemory)
{
if (iMemory == 0) {
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index 187ef7c..b245834 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -10,6 +10,7 @@
cc_defaults {
name: "libmediautils_fuzzer_defaults",
shared_libs: [
+ "libbatterystats_aidl",
"libbinder",
"libcutils",
"liblog",
diff --git a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
index 4521853..130feee 100644
--- a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
+++ b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#define LOG_TAG "BatteryNotifierFuzzer"
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>
#include <android/log.h>
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index a4e42ad..3812d7a 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -17,7 +17,7 @@
#ifndef MEDIA_BATTERY_NOTIFIER_H
#define MEDIA_BATTERY_NOTIFIER_H
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
#include <utils/Singleton.h>
#include <utils/String8.h>
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 9a3c6fb..6e75746 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -97,6 +97,7 @@
bool dumpAllowed();
bool modifyPhoneStateAllowed(const media::permission::Identity& identity);
bool bypassInterruptionPolicyAllowed(const media::permission::Identity& identity);
+void purgePermissionCache();
media::permission::Identity getCallingIdentity();
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 2294c49..a7d47fb 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -74,6 +74,7 @@
"libmediautils",
"libnbaio",
"libnblog",
+ "libpermission",
"libpowermanager",
"libmediautils",
"libmemunreachable",
@@ -95,6 +96,7 @@
],
export_shared_lib_headers: [
+ "libpermission",
"media_permission-aidl-cpp",
],
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7a89805..20812bf 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -163,6 +163,33 @@
}
};
+// TODO b/182392769: use identity util
+/* static */
+media::permission::Identity AudioFlinger::checkIdentityPackage(
+ const media::permission::Identity& identity) {
+ Vector<String16> packages;
+ PermissionController{}.getPackagesForUid(identity.uid, packages);
+
+ Identity checkedIdentity = identity;
+ if (!identity.packageName.has_value() || identity.packageName.value().size() == 0) {
+ if (!packages.isEmpty()) {
+ checkedIdentity.packageName =
+ std::move(legacy2aidl_String16_string(packages[0]).value());
+ }
+ } else {
+ String16 opPackageLegacy = VALUE_OR_FATAL(
+ aidl2legacy_string_view_String16(identity.packageName.value_or("")));
+ if (std::find_if(packages.begin(), packages.end(),
+ [&opPackageLegacy](const auto& package) {
+ return opPackageLegacy == package; }) == packages.end()) {
+ ALOGW("The package name(%s) provided does not correspond to the uid %d",
+ identity.packageName.value_or("").c_str(), identity.uid);
+ checkedIdentity.packageName = std::optional<std::string>();
+ }
+ }
+ return checkedIdentity;
+}
+
// ----------------------------------------------------------------------------
std::string formatToString(audio_format_t format) {
@@ -278,6 +305,21 @@
return NO_ERROR;
}
+status_t AudioFlinger::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ Mutex::Autolock _l(mLock);
+ mAudioVibratorInfos = vibratorInfos;
+ return NO_ERROR;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
+ if (mAudioVibratorInfos.empty()) {
+ return nullptr;
+ }
+ return &mAudioVibratorInfos.front();
+}
+
AudioFlinger::~AudioFlinger()
{
while (!mRecordThreads.isEmpty()) {
@@ -2142,7 +2184,7 @@
&output.notificationFrameCount,
callingPid, adjIdentity, &output.flags,
input.clientInfo.clientTid,
- &lStatus, portId);
+ &lStatus, portId, input.maxSharedAudioHistoryMs);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
// lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
@@ -4122,7 +4164,8 @@
case TransactionCode::SET_MIC_MUTE:
case TransactionCode::SET_LOW_RAM_DEVICE:
case TransactionCode::SYSTEM_READY:
- case TransactionCode::SET_AUDIO_HAL_PIDS: {
+ case TransactionCode::SET_AUDIO_HAL_PIDS:
+ case TransactionCode::SET_VIBRATOR_INFOS: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1cfdffc..c66ecb0 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -129,6 +129,9 @@
public:
static void instantiate() ANDROID_API;
+ static media::permission::Identity checkIdentityPackage(
+ const media::permission::Identity& identity);
+
status_t dump(int fd, const Vector<String16>& args) override;
// IAudioFlinger interface, in binder opcode order
@@ -267,6 +270,8 @@
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids);
+ virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
// end of IAudioFlinger interface
@@ -296,6 +301,8 @@
void updateDownStreamPatches_l(const struct audio_patch *patch,
const std::set<audio_io_handle_t> streams);
+ const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
static const size_t kLogMemorySize = 400 * 1024;
@@ -669,6 +676,8 @@
virtual binder::Status setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction);
virtual binder::Status setPreferredMicrophoneFieldDimension(float zoom);
+ virtual binder::Status shareAudioHistory(const std::string& sharedAudioPackageName,
+ int64_t sharedAudioStartMs);
private:
const sp<RecordThread::RecordTrack> mRecordTrack;
@@ -971,7 +980,12 @@
SimpleLog mAppSetParameterLog;
SimpleLog mSystemSetParameterLog;
+ std::vector<media::AudioVibratorInfo> mAudioVibratorInfos;
+
static inline constexpr const char *mMetricsId = AMEDIAMETRICS_KEY_AUDIO_FLINGER;
+
+ // Keep in sync with java definition in media/java/android/media/AudioRecord.java
+ static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 7e06096..d8565bd 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -173,22 +173,15 @@
return status;
}
-audio_format_t AudioStreamOut::getFormat() const
+audio_config_base_t AudioStreamOut::getAudioProperties() const
{
- audio_format_t result;
- return stream->getFormat(&result) == OK ? result : AUDIO_FORMAT_INVALID;
-}
-
-uint32_t AudioStreamOut::getSampleRate() const
-{
- uint32_t result;
- return stream->getSampleRate(&result) == OK ? result : 0;
-}
-
-audio_channel_mask_t AudioStreamOut::getChannelMask() const
-{
- audio_channel_mask_t result;
- return stream->getChannelMask(&result) == OK ? result : AUDIO_CHANNEL_INVALID;
+ audio_config_base_t result = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (stream->getAudioProperties(&result) != OK) {
+ result.sample_rate = 0;
+ result.channel_mask = AUDIO_CHANNEL_INVALID;
+ result.format = AUDIO_FORMAT_INVALID;
+ }
+ return result;
}
int AudioStreamOut::flush()
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 16fbcf2..565f43a 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -81,22 +81,14 @@
virtual size_t getFrameSize() const { return mHalFrameSize; }
/**
- * @return format from the perspective of the application and the AudioFlinger.
+ * @return audio stream configuration: channel mask, format, sample rate:
+ * - channel mask from the perspective of the application and the AudioFlinger,
+ * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI;
+ * - format from the perspective of the application and the AudioFlinger;
+ * - sample rate from the perspective of the application and the AudioFlinger,
+ * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
*/
- virtual audio_format_t getFormat() const;
-
- /**
- * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
- * @return sample rate from the perspective of the application and the AudioFlinger.
- */
- virtual uint32_t getSampleRate() const;
-
- /**
- * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
- * @return channel mask from the perspective of the application and the AudioFlinger.
- */
- virtual audio_channel_mask_t getChannelMask() const;
-
+ virtual audio_config_base_t getAudioProperties() const;
virtual status_t flush();
virtual status_t standby();
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 031e0cf..d75b13b 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1585,6 +1585,34 @@
return status;
}
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+{
+ if (mStatus != NO_ERROR) {
+ return mStatus;
+ }
+ if (!isHapticGenerator()) {
+ ALOGW("Should not set vibrator info for effects that are not HapticGenerator");
+ return INVALID_OPERATION;
+ }
+
+ std::vector<uint8_t> request(
+ sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+ effect_param_t *param = (effect_param_t*) request.data();
+ param->psize = sizeof(int32_t);
+ param->vsize = 2 * sizeof(float);
+ *(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
+ float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
+ vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
+ vibratorInfoPtr[1] = vibratorInfo->qFactor;
+ std::vector<uint8_t> response;
+ status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(response.size() != sizeof(status_t));
+ status = *reinterpret_cast<const status_t*>(response.data());
+ }
+ return status;
+}
+
static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
std::stringstream ss;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 8e82d53..9da95bc 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -258,6 +258,7 @@
bool isHapticGenerator() const;
status_t setHapticIntensity(int id, int intensity);
+ status_t setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
void dump(int fd, const Vector<String16>& args);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 2e59baa..2436248 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -219,6 +219,10 @@
void flushAck();
bool isResumePending();
void resumeAck();
+ // For direct or offloaded tracks ensure that the pause state is acknowledged
+ // by the playback thread in case of an immediate flush.
+ bool isPausePending() const { return mPauseHwPending; }
+ void pauseAck();
void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
@@ -314,6 +318,7 @@
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
+ bool mPauseHwPending = false; // direct/offload track request for thread pause
audio_output_flags_t mFlags;
// If the last track change was notified to the client with readAndClearHasChanged
std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 5f248e1..b953c0b 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -69,7 +69,8 @@
const media::permission::Identity& identity,
audio_input_flags_t flags,
track_type type,
- audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
+ int64_t startTimeMs = -1);
virtual ~RecordTrack();
virtual status_t initCheck() const;
@@ -107,6 +108,9 @@
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
+ status_t shareAudioHistory(const std::string& sharedAudioPackageName,
+ int64_t sharedAudioStartMs);
+ int64_t startTimeMs() { return mStartTimeMs; }
static bool checkServerLatencySupported(
audio_format_t format, audio_input_flags_t flags) {
@@ -146,8 +150,9 @@
bool mSilenced;
// used to enforce OP_RECORD_AUDIO
- uid_t mUid;
sp<OpRecordAudioMonitor> mOpRecordAudioMonitor;
+ std::string mSharedAudioPackageName = {};
+ int64_t mStartTimeMs = -1;
};
// playback track, used by PatchPanel
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7f91a54..6da4543 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -700,6 +700,13 @@
return sendConfigEvent_l(configEvent);
}
+void AudioFlinger::ThreadBase::sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs)
+{
+ ALOG_ASSERT(type() == RECORD, "sendResizeBufferConfigEvent_l() called on non record thread");
+ sp<ConfigEvent> configEvent =
+ (ConfigEvent *)new ResizeBufferConfigEvent(maxSharedAudioHistoryMs);
+ sendConfigEvent_l(configEvent);
+}
// post condition: mConfigEvents.isEmpty()
void AudioFlinger::ThreadBase::processConfigEvents_l()
@@ -758,6 +765,11 @@
(UpdateOutDevicesConfigEventData *)event->mData.get();
updateOutDevices(data->mOutDevices);
} break;
+ case CFG_EVENT_RESIZE_BUFFER: {
+ ResizeBufferConfigEventData *data =
+ (ResizeBufferConfigEventData *)event->mData.get();
+ resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
+ } break;
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
break;
@@ -1079,6 +1091,11 @@
ALOGE("%s should only be called in RecordThread", __func__);
}
+void AudioFlinger::ThreadBase::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs __unused)
+{
+ ALOGE("%s should only be called in RecordThread", __func__);
+}
+
void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused)
{
sp<ThreadBase> thread = mThread.promote();
@@ -1439,6 +1456,16 @@
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
}
+ if (effect->isHapticGenerator()) {
+ // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
+ // for the HapticGenerator.
+ const media::AudioVibratorInfo* defaultVibratorInfo =
+ mAudioFlinger->getDefaultVibratorInfo_l();
+ if (defaultVibratorInfo != nullptr) {
+ // Only set the vibrator info when it is a valid one.
+ effect->setVibratorInfo(defaultVibratorInfo);
+ }
+ }
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
lStatus = handle->initCheck();
@@ -2757,8 +2784,9 @@
void AudioFlinger::PlaybackThread::readOutputParameters_l()
{
// unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
- mSampleRate = mOutput->getSampleRate();
- mChannelMask = mOutput->getChannelMask();
+ const audio_config_base_t audioConfig = mOutput->getAudioProperties();
+ mSampleRate = audioConfig.sample_rate;
+ mChannelMask = audioConfig.channel_mask;
if (!audio_is_output_channel(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
@@ -2771,11 +2799,11 @@
mBalance.setChannelMask(mChannelMask);
// Get actual HAL format.
- status_t result = mOutput->stream->getFormat(&mHALFormat);
+ status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
// Get format from the shim, which will be different than the HAL format
// if playing compressed audio over HDMI passthrough.
- mFormat = mOutput->getFormat();
+ mFormat = audioConfig.format;
if (!audio_is_valid_format(mFormat)) {
LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
}
@@ -5869,8 +5897,15 @@
sp<Track> l = mActiveTracks.getLatest();
bool last = l.get() == track;
- if (track->isPausing()) {
- track->setPaused();
+ if (track->isPausePending()) {
+ track->pauseAck();
+ // It is possible a track might have been flushed or stopped.
+ // Other operations such as flush pending might occur on the next prepare.
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ // Always perform pause, as an immediate flush will change
+ // the pause state to be no longer isPausing().
if (mHwSupportsPause && last && !mHwPaused) {
doHwPause = true;
mHwPaused = true;
@@ -6412,8 +6447,15 @@
continue;
}
- if (track->isPausing()) {
- track->setPaused();
+ if (track->isPausePending()) {
+ track->pauseAck();
+ // It is possible a track might have been flushed or stopped.
+ // Other operations such as flush pending might occur on the next prepare.
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ // Always perform pause if last, as an immediate flush will change
+ // the pause state to be no longer isPausing().
if (last) {
if (mHwSupportsPause && !mHwPaused) {
doHwPause = true;
@@ -7741,7 +7783,8 @@
audio_input_flags_t *flags,
pid_t tid,
status_t *status,
- audio_port_handle_t portId)
+ audio_port_handle_t portId,
+ int32_t maxSharedAudioHistoryMs)
{
size_t frameCount = *pFrameCount;
size_t notificationFrameCount = *pNotificationFrameCount;
@@ -7750,6 +7793,7 @@
audio_input_flags_t inputFlags = mInput->flags;
audio_input_flags_t requestedFlags = *flags;
uint32_t sampleRate;
+ Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
lStatus = initCheck();
if (lStatus != NO_ERROR) {
@@ -7763,6 +7807,23 @@
goto Exit;
}
+ if (maxSharedAudioHistoryMs != 0) {
+ if (!captureHotwordAllowed(checkedIdentity)) {
+ lStatus = PERMISSION_DENIED;
+ goto Exit;
+ }
+ //TODO: b/185972521 allow resampling buffer resizing on fast mixers by pausing
+ // the fast mixer thread while resizing the buffer in the normal thread
+ if (hasFastCapture()) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ if (maxSharedAudioHistoryMs < 0
+ || maxSharedAudioHistoryMs > AudioFlinger::kMaxSharedAudioHistoryMs) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ }
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
@@ -7871,11 +7932,18 @@
{ // scope for mLock
Mutex::Autolock _l(mLock);
+ long startTimeMs = -1;
+ if (!mSharedAudioPackageName.empty()
+ && mSharedAudioPackageName == checkedIdentity.packageName
+ && mSharedAudioSessionId == sessionId
+ && captureHotwordAllowed(checkedIdentity)) {
+ startTimeMs = mSharedAudioStartMs;
+ }
track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
- identity, *flags, TrackBase::TYPE_DEFAULT, portId);
+ checkedIdentity, *flags, TrackBase::TYPE_DEFAULT, portId, startTimeMs);
lStatus = track->initCheck();
if (lStatus != NO_ERROR) {
@@ -7891,6 +7959,11 @@
// so ask activity manager to do this on our behalf
sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
}
+
+ if (maxSharedAudioHistoryMs != 0) {
+ sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
+ }
+
}
lStatus = NO_ERROR;
@@ -8083,6 +8156,9 @@
{
ALOGV("RecordThread::getActiveMicrophones");
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
return status;
}
@@ -8092,6 +8168,9 @@
{
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
return mInput->stream->setPreferredMicrophoneDirection(direction);
}
@@ -8099,9 +8178,43 @@
{
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
}
+status_t AudioFlinger::RecordThread::shareAudioHistory(
+ const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
+ int64_t sharedAudioStartMs) {
+ AutoMutex _l(mLock);
+ return shareAudioHistory_l(sharedAudioPackageName, sharedSessionId, sharedAudioStartMs);
+}
+
+status_t AudioFlinger::RecordThread::shareAudioHistory_l(
+ const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
+ int64_t sharedAudioStartMs) {
+ if (hasFastCapture()) {
+ return BAD_VALUE;
+ }
+ if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
+ return BAD_VALUE;
+ }
+ if (sharedAudioStartMs < 0 || sharedAudioStartMs * mSampleRate / 1000 > mRsmpInRear) {
+ return BAD_VALUE;
+ }
+
+ mSharedAudioPackageName = sharedAudioPackageName;
+ if (mSharedAudioPackageName.empty()) {
+ mSharedAudioSessionId = AUDIO_SESSION_NONE;
+ mSharedAudioStartMs = -1;
+ } else {
+ mSharedAudioSessionId = sharedSessionId;
+ mSharedAudioStartMs = sharedAudioStartMs;
+ }
+ return NO_ERROR;
+}
+
void AudioFlinger::RecordThread::updateMetadata_l()
{
if (mInput == nullptr || mInput->stream == nullptr ||
@@ -8133,6 +8246,7 @@
{
track->terminate();
track->mState = TrackBase::STOPPED;
+
// active tracks are removed by threadLoop()
if (mActiveTracks.indexOf(track) < 0) {
removeTrack_l(track);
@@ -8240,8 +8354,23 @@
{
sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
RecordThread *recordThread = (RecordThread *) threadBase.get();
- mRsmpInFront = recordThread->mRsmpInRear;
mRsmpInUnrel = 0;
+ const int32_t rear = recordThread->mRsmpInRear;
+ ssize_t deltaFrames = 0;
+ if (mRecordTrack->startTimeMs() >= 0) {
+ int32_t startFrames = mRecordTrack->startTimeMs() * recordThread->sampleRate() / 1000;
+ // start frame has to be in the past
+ //TODO: b/185972521 fix in case rear or startFrames wrap around
+ if (startFrames > rear) {
+ startFrames = rear;
+ }
+ deltaFrames = rear - startFrames;
+ // start frame cannot be further in the past than start of resampling buffer
+ if ((size_t) deltaFrames > recordThread->mRsmpInFrames) {
+ deltaFrames = recordThread->mRsmpInFrames;
+ }
+ }
+ mRsmpInFront = audio_utils::safe_sub_overflow(rear, static_cast<int32_t>(deltaFrames));
}
void AudioFlinger::RecordThread::ResamplerBufferProvider::sync(
@@ -8424,13 +8553,11 @@
}
if (reconfig) {
if (status == BAD_VALUE) {
- uint32_t sRate;
- audio_channel_mask_t channelMask;
- audio_format_t format;
- if (mInput->stream->getAudioProperties(&sRate, &channelMask, &format) == OK &&
- audio_is_linear_pcm(format) && audio_is_linear_pcm(reqFormat) &&
- sRate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
- audio_channel_count_from_in_mask(channelMask) <= FCC_8) {
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (mInput->stream->getAudioProperties(&config) == OK &&
+ audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
+ config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
+ audio_channel_count_from_in_mask(config.channel_mask) <= FCC_8) {
status = NO_ERROR;
}
}
@@ -8508,31 +8635,10 @@
ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
"mBufferSize=%zu, mFrameCount=%zu",
this, mChannelCount, mFormat, mFrameSize, mBufferSize, mFrameCount);
- // This is the formula for calculating the temporary buffer size.
- // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
- // 1 full output buffer, regardless of the alignment of the available input.
- // The value is somewhat arbitrary, and could probably be even larger.
- // A larger value should allow more old data to be read after a track calls start(),
- // without increasing latency.
- //
- // Note this is independent of the maximum downsampling ratio permitted for capture.
- mRsmpInFrames = mFrameCount * 7;
- mRsmpInFramesP2 = roundup(mRsmpInFrames);
- free(mRsmpInBuffer);
- mRsmpInBuffer = NULL;
- // TODO optimize audio capture buffer sizes ...
- // Here we calculate the size of the sliding buffer used as a source
- // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7).
- // For current HAL frame counts, this is usually 2048 = 40 ms. It would
- // be better to have it derived from the pipe depth in the long term.
- // The current value is higher than necessary. However it should not add to latency.
-
- // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
- mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
- (void)posix_memalign(&mRsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
- // if posix_memalign fails, will segv here.
- memset(mRsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
+ // mRsmpInFrames must be 0 before calling resizeInputBuffer_l for the first time
+ mRsmpInFrames = 0;
+ resizeInputBuffer_l();
// AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
// But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
@@ -8715,6 +8821,124 @@
}
}
+int32_t AudioFlinger::RecordThread::getOldestFront_l()
+{
+ if (mTracks.size() == 0) {
+ return 0;
+ }
+ //TODO: b/185972521 fix in case of wrap around on one track:
+ // want the max(rear - front) for all tracks.
+ int32_t front = INT_MAX;
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ front = std::min(front, mTracks[i]->mResamplerBufferProvider->getFront());
+ }
+ // discard any audio past the buffer size
+ if (audio_utils::safe_add_overflow(front, (int32_t)mRsmpInFrames) < mRsmpInRear) {
+ front = audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
+ }
+ return front;
+}
+
+void AudioFlinger::RecordThread::updateFronts_l(int32_t offset)
+{
+ if (offset == 0) {
+ return;
+ }
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ int32_t front = mTracks[i]->mResamplerBufferProvider->getFront();
+ front = audio_utils::safe_sub_overflow(front, offset);
+ mTracks[i]->mResamplerBufferProvider->setFront(front);
+ }
+}
+
+void AudioFlinger::RecordThread::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs)
+{
+ // This is the formula for calculating the temporary buffer size.
+ // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
+ // 1 full output buffer, regardless of the alignment of the available input.
+ // The value is somewhat arbitrary, and could probably be even larger.
+ // A larger value should allow more old data to be read after a track calls start(),
+ // without increasing latency.
+ //
+ // Note this is independent of the maximum downsampling ratio permitted for capture.
+ size_t minRsmpInFrames = mFrameCount * 7;
+
+ // maxSharedAudioHistoryMs != 0 indicates a request to possibly make some part of the audio
+ // capture history available to another client using the same session ID:
+ // dimension the resampler input buffer accordingly.
+
+ // Get the oldest client read position: getOldestFront_l() must be called before altering
+ // mRsmpInRear or mRsmpInFrames
+ int32_t previousFront = getOldestFront_l();
+ size_t previousRsmpInFramesP2 = mRsmpInFramesP2;
+ int32_t previousRear = mRsmpInRear;
+ mRsmpInRear = 0;
+
+ if (maxSharedAudioHistoryMs != 0) {
+ // resizeInputBuffer_l should never be called with a non-zero shared history if the
+ // buffer was not already allocated
+ ALOG_ASSERT(mRsmpInBuffer != nullptr && mRsmpInFrames != 0,
+ "resizeInputBuffer_l() called with shared history and unallocated buffer");
+ size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
+ // never reduce resampler input buffer size
+ if (rsmpInFrames < mRsmpInFrames) {
+ return;
+ }
+ mRsmpInFrames = rsmpInFrames;
+ }
+ // Note: mRsmpInFrames is 0 when called with maxSharedAudioHistoryMs equal to 0, so it is
+ // always initialized
+ if (mRsmpInFrames < minRsmpInFrames) {
+ mRsmpInFrames = minRsmpInFrames;
+ }
+ mRsmpInFramesP2 = roundup(mRsmpInFrames);
+
+ // TODO optimize audio capture buffer sizes ...
+ // Here we calculate the size of the sliding buffer used as a source
+ // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7).
+ // For current HAL frame counts, this is usually 2048 = 40 ms. It would
+ // be better to have it derived from the pipe depth in the long term.
+ // The current value is higher than necessary. However it should not add to latency.
+
+ // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
+ mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
+
+ void *rsmpInBuffer;
+ (void)posix_memalign(&rsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
+ // if posix_memalign fails, will segv here.
+ memset(rsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
+
+ // Copy audio history if any from old buffer before freeing it
+ if (previousRear != 0) {
+ ALOG_ASSERT(mRsmpInBuffer != nullptr,
+ "resizeInputBuffer_l() called with null buffer but frames already read from HAL");
+
+ ssize_t unread = audio_utils::safe_sub_overflow(previousRear, previousFront);
+ previousFront &= previousRsmpInFramesP2 - 1;
+ size_t part1 = previousRsmpInFramesP2 - previousFront;
+ if (part1 > (size_t) unread) {
+ part1 = unread;
+ }
+ if (part1 != 0) {
+ memcpy(rsmpInBuffer, (const uint8_t*)mRsmpInBuffer + previousFront * mFrameSize,
+ part1 * mFrameSize);
+ mRsmpInRear = part1;
+ part1 = unread - part1;
+ if (part1 != 0) {
+ memcpy((uint8_t*)rsmpInBuffer + mRsmpInRear * mFrameSize,
+ (const uint8_t*)mRsmpInBuffer, part1 * mFrameSize);
+ mRsmpInRear += part1;
+ }
+ }
+ // Update front for all clients according to new rear
+ updateFronts_l(audio_utils::safe_sub_overflow(previousRear, mRsmpInRear));
+ } else {
+ mRsmpInRear = 0;
+ }
+ free(mRsmpInBuffer);
+ mRsmpInBuffer = rsmpInBuffer;
+}
+
void AudioFlinger::RecordThread::addPatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
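Note: the history-preserving copy in resizeInputBuffer_l() above is a standard two-part copy out of a power-of-two ring buffer. A minimal sketch of the same index arithmetic (Java, hypothetical names, counting frames rather than the byte-sized memcpy done by the real code):

class RingResizeSketch {
    // Copies the unread frames [front, rear) of a power-of-two ring buffer into a new,
    // larger buffer starting at index 0; returns the new rear (number of frames kept).
    static int copyHistory(short[] oldBuf, int oldFramesP2, int front, int rear,
            short[] newBuf) {
        int unread = rear - front;                 // frames not yet consumed by any client
        int maskedFront = front & (oldFramesP2 - 1);
        int part1 = oldFramesP2 - maskedFront;     // contiguous frames up to the wrap point
        if (part1 > unread) {
            part1 = unread;
        }
        System.arraycopy(oldBuf, maskedFront, newBuf, 0, part1);
        int part2 = unread - part1;                // frames that wrapped around to index 0
        System.arraycopy(oldBuf, 0, newBuf, part1, part2);
        // Each track's front must then be shifted by (rear - unread), as updateFronts_l() does.
        return unread;
    }
}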
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index e63642b..03ed6fd 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -53,6 +53,7 @@
CFG_EVENT_CREATE_AUDIO_PATCH,
CFG_EVENT_RELEASE_AUDIO_PATCH,
CFG_EVENT_UPDATE_OUT_DEVICE,
+ CFG_EVENT_RESIZE_BUFFER
};
class ConfigEventData: public RefBase {
@@ -242,6 +243,28 @@
virtual ~UpdateOutDevicesConfigEvent();
};
+ class ResizeBufferConfigEventData : public ConfigEventData {
+ public:
+ explicit ResizeBufferConfigEventData(int32_t maxSharedAudioHistoryMs) :
+ mMaxSharedAudioHistoryMs(maxSharedAudioHistoryMs) {}
+
+ virtual void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
+ }
+
+ int32_t mMaxSharedAudioHistoryMs;
+ };
+
+ class ResizeBufferConfigEvent : public ConfigEvent {
+ public:
+ explicit ResizeBufferConfigEvent(int32_t maxSharedAudioHistoryMs) :
+ ConfigEvent(CFG_EVENT_RESIZE_BUFFER) {
+ mData = new ResizeBufferConfigEventData(maxSharedAudioHistoryMs);
+ }
+
+ virtual ~ResizeBufferConfigEvent() {}
+ };
+
class PMDeathRecipient : public IBinder::DeathRecipient {
public:
explicit PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
@@ -306,6 +329,7 @@
status_t sendReleaseAudioPatchConfigEvent(audio_patch_handle_t handle);
status_t sendUpdateOutDeviceConfigEvent(
const DeviceDescriptorBaseVector& outDevices);
+ void sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs);
void processConfigEvents_l();
virtual void cacheParameters_l() = 0;
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
@@ -314,6 +338,9 @@
virtual void updateOutDevices(const DeviceDescriptorBaseVector& outDevices);
virtual void toAudioPortConfig(struct audio_port_config *config) = 0;
+ virtual void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs = 0);
+
+
// see note at declaration of mStandby, mOutDevice and mInDevice
bool standby() const { return mStandby; }
@@ -1613,6 +1640,9 @@
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+ int32_t getFront() const { return mRsmpInFront; }
+ void setFront(int32_t front) { mRsmpInFront = front; }
private:
RecordTrack * const mRecordTrack;
size_t mRsmpInUnrel; // unreleased frames remaining from
@@ -1662,7 +1692,8 @@
audio_input_flags_t *flags,
pid_t tid,
status_t *status /*non-NULL*/,
- audio_port_handle_t portId);
+ audio_port_handle_t portId,
+ int32_t maxSharedAudioHistoryMs);
status_t start(RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
@@ -1686,6 +1717,7 @@
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
void updateOutDevices(const DeviceDescriptorBaseVector& outDevices) override;
+ void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs = 0) override;
void addPatchTrack(const sp<PatchRecord>& record);
void deletePatchTrack(const sp<PatchRecord>& record);
@@ -1741,6 +1773,13 @@
&& inDeviceType() == mTimestampCorrectedDevice;
}
+ status_t shareAudioHistory(const std::string& sharedAudioPackageName,
+ audio_session_t sharedSessionId = AUDIO_SESSION_NONE,
+ int64_t sharedAudioStartMs = -1);
+ status_t shareAudioHistory_l(const std::string& sharedAudioPackageName,
+ audio_session_t sharedSessionId = AUDIO_SESSION_NONE,
+ int64_t sharedAudioStartMs = -1);
+
protected:
void dumpInternals_l(int fd, const Vector<String16>& args) override;
void dumpTracks_l(int fd, const Vector<String16>& args) override;
@@ -1754,6 +1793,9 @@
void checkBtNrec_l();
+ int32_t getOldestFront_l();
+ void updateFronts_l(int32_t offset);
+
AudioStreamIn *mInput;
Source *mSource;
SortedVector < sp<RecordTrack> > mTracks;
@@ -1819,6 +1861,10 @@
int64_t mFramesRead = 0; // continuous running counter.
DeviceDescriptorBaseVector mOutDevices;
+
+ std::string mSharedAudioPackageName = {};
+ long mSharedAudioStartMs = 0;
+ audio_session_t mSharedAudioSessionId = AUDIO_SESSION_NONE;
};
class MmapThread : public ThreadBase
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index db7528d..6549236 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -525,31 +525,8 @@
return nullptr;
}
- // TODO b/182392769: use identity util
- std::optional<std::string> opPackageNameStr = identity.packageName;
- if (!identity.packageName.has_value()) {
- // If no package name is provided by the client, use the first associated with the uid
- if (!packages.isEmpty()) {
- opPackageNameStr =
- VALUE_OR_FATAL(legacy2aidl_String16_string(packages[0]));
- }
- } else {
- // If the provided package name is invalid, we force app ops denial by clearing the package
- // name passed to OpPlayAudioMonitor
- String16 opPackageLegacy = VALUE_OR_FATAL(
- aidl2legacy_string_view_String16(opPackageNameStr.value_or("")));
- if (std::find_if(packages.begin(), packages.end(),
- [&opPackageLegacy](const auto& package) {
- return opPackageLegacy == package; }) == packages.end()) {
- ALOGW("The package name(%s) provided does not correspond to the uid %d, "
- "force muting the track", opPackageNameStr.value().c_str(), uid);
- // Set null package name so hasOpPlayAudio will always return false.
- opPackageNameStr = std::optional<std::string>();
- }
- }
- Identity adjIdentity = identity;
- adjIdentity.packageName = opPackageNameStr;
- return new OpPlayAudioMonitor(adjIdentity, attr.usage, id);
+ Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
+ return new OpPlayAudioMonitor(checkedIdentity, attr.usage, id);
}
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
@@ -1219,6 +1196,9 @@
mState = PAUSING;
ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
__func__, mId, (int)mThreadIoHandle);
+ if (isOffloadedOrDirect()) {
+ mPauseHwPending = true;
+ }
playbackThread->broadcast_l();
break;
@@ -1306,6 +1286,11 @@
mFlushHwPending = false;
}
+void AudioFlinger::PlaybackThread::Track::pauseAck()
+{
+ mPauseHwPending = false;
+}
+
void AudioFlinger::PlaybackThread::Track::reset()
{
// Do not reset twice to avoid discarding data written just after a flush and before
@@ -2235,24 +2220,12 @@
return nullptr;
}
- if (!identity.packageName.has_value() || identity.packageName.value().size() == 0) {
- Vector<String16> packages;
- // no package name, happens with SL ES clients
- // query package manager to find one
- PermissionController permissionController;
- permissionController.getPackagesForUid(identity.uid, packages);
- if (packages.isEmpty()) {
- return nullptr;
- } else {
- Identity adjIdentity = identity;
- adjIdentity.packageName =
- VALUE_OR_FATAL(legacy2aidl_String16_string(packages[0]));
- ALOGV("using identity:%s", adjIdentity.toString().c_str());
- return new OpRecordAudioMonitor(adjIdentity);
- }
+ Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
+ if (!checkedIdentity.packageName.has_value()
+ || checkedIdentity.packageName.value().size() == 0) {
+ return nullptr;
}
-
- return new OpRecordAudioMonitor(identity);
+ return new OpRecordAudioMonitor(checkedIdentity);
}
AudioFlinger::RecordThread::OpRecordAudioMonitor::OpRecordAudioMonitor(
@@ -2378,6 +2351,12 @@
return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
}
+binder::Status AudioFlinger::RecordHandle::shareAudioHistory(
+ const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
+ return binderStatusFromStatusT(
+ mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
+}
+
// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::RecordTrack"
@@ -2398,7 +2377,8 @@
const Identity& identity,
audio_input_flags_t flags,
track_type type,
- audio_port_handle_t portId)
+ audio_port_handle_t portId,
+ int64_t startTimeMs)
: TrackBase(thread, client, attr, sampleRate, format,
channelMask, frameCount, buffer, bufferSize, sessionId,
creatorPid,
@@ -2415,7 +2395,8 @@
mRecordBufferConverter(NULL),
mFlags(flags),
mSilenced(false),
- mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(identity, attr))
+ mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(identity, attr)),
+ mStartTimeMs(startTimeMs)
{
if (mCblk == NULL) {
return;
@@ -2525,6 +2506,9 @@
Mutex::Autolock _l(thread->mLock);
RecordThread *recordThread = (RecordThread *) thread.get();
priorState = mState;
+ if (!mSharedAudioPackageName.empty()) {
+ recordThread->shareAudioHistory_l("");
+ }
recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
}
// APM portid/client management done outside of lock.
@@ -2711,6 +2695,37 @@
}
}
+status_t AudioFlinger::RecordThread::RecordTrack::shareAudioHistory(
+ const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
+
+ const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ if (callingUid != mUid || callingPid != mCreatorPid) {
+ return PERMISSION_DENIED;
+ }
+
+ Identity identity{};
+ identity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ identity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
+ if (!captureHotwordAllowed(identity)) {
+ return PERMISSION_DENIED;
+ }
+
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ status_t status = recordThread->shareAudioHistory(
+ sharedAudioPackageName, mSessionId, sharedAudioStartMs);
+ if (status == NO_ERROR) {
+ mSharedAudioPackageName = sharedAudioPackageName;
+ }
+ return status;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+
// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::PatchRecord"
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 0537365..552919d 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -226,6 +226,8 @@
return AUDIO_DEVICE_OUT_SPEAKER_SAFE;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
return AUDIO_DEVICE_OUT_HDMI_ARC;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_EARC) != 0) {
+ return AUDIO_DEVICE_OUT_HDMI_EARC;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
return AUDIO_DEVICE_OUT_AUX_LINE;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
@@ -240,4 +242,4 @@
return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0];
}
}
-}
\ No newline at end of file
+}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c6bdb04..c2a20c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -77,6 +77,7 @@
sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
const DeviceVector &availableDeviceTypes,
+ uid_t uid,
sp<AudioPolicyMix> *policyMix) const;
/**
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 05ec69e..20b4044 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -226,7 +226,9 @@
add(devices);
return size();
}
- return SortedVector::merge(devices);
+ ssize_t ret = SortedVector::merge(devices);
+ refreshTypes();
+ return ret;
}
/**
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c024a85..b209a88 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -391,6 +391,7 @@
sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
audio_source_t inputSource,
const DeviceVector &availDevices,
+ uid_t uid,
sp<AudioPolicyMix> *policyMix) const
{
for (size_t i = 0; i < size(); i++) {
@@ -402,7 +403,11 @@
if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
mix->mCriteria[j].mValue.mSource == inputSource) ||
(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mValue.mSource != inputSource)) {
+ mix->mCriteria[j].mValue.mSource != inputSource) ||
+ (RULE_MATCH_UID == mix->mCriteria[j].mRule &&
+ mix->mCriteria[j].mValue.mUid == uid) ||
+ (RULE_EXCLUDE_UID == mix->mCriteria[j].mRule &&
+ mix->mCriteria[j].mValue.mUid != uid)) {
// assuming PolicyMix only for remote submix for input
// so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
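The two new criteria above extend the per-criterion OR so that a mix can also be selected or rejected by the requesting client's uid. A condensed stand-alone model of that matching logic, using illustrative names rather than the AudioPolicyMix types:

#include <vector>

// Each criterion either matches or excludes a capture preset or a uid; the mix
// applies as soon as one criterion fires. Illustrative only.
enum Rule { MATCH_SOURCE, EXCLUDE_SOURCE, MATCH_UID, EXCLUDE_UID };
struct Criterion { Rule rule; int value; };

bool mixMatches(const std::vector<Criterion>& criteria, int source, int uid) {
    for (const Criterion& c : criteria) {
        if ((c.rule == MATCH_SOURCE   && c.value == source) ||
            (c.rule == EXCLUDE_SOURCE && c.value != source) ||
            (c.rule == MATCH_UID      && c.value == uid)    ||
            (c.rule == EXCLUDE_UID    && c.value != uid)) {
            return true;
        }
    }
    return false;
}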
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 562c213..84ed656 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -253,6 +253,18 @@
// Children: ModulesTraits, VolumeTraits, SurroundSoundTraits (optional)
};
+// Deleter using free() for use with std::unique_ptr<>. See also UniqueCPtr<> below.
+struct FreeDelete {
+ // NOTE: Deleting a const object is valid but free() takes a non-const pointer.
+ void operator()(const void* ptr) const {
+ free(const_cast<void*>(ptr));
+ }
+};
+
+// Alias for std::unique_ptr<> that uses the C function free() to delete objects.
+template <typename T>
+using UniqueCPtr = std::unique_ptr<T, FreeDelete>;
+
template <class T>
constexpr void (*xmlDeleter)(T* t);
template <>
@@ -608,7 +620,7 @@
}
// Tokenize and Convert Sources name to port pointer
PolicyAudioPortVector sources;
- std::unique_ptr<char[]> sourcesLiteral{strndup(
+ UniqueCPtr<char> sourcesLiteral{strndup(
sourcesAttr.c_str(), strlen(sourcesAttr.c_str()))};
char *devTag = strtok(sourcesLiteral.get(), ",");
while (devTag != NULL) {
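The UniqueCPtr<> helper above exists because strndup() allocates with malloc(), so the previous std::unique_ptr<char[]> (whose default deleter calls delete[]) paired the wrong allocator and deallocator. A self-contained sketch of the same pattern; the sample input string is illustrative:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>

// free()-based deleter so malloc()-allocated buffers can be owned by unique_ptr.
struct FreeDelete {
    void operator()(const void* ptr) const { free(const_cast<void*>(ptr)); }
};

template <typename T>
using UniqueCPtr = std::unique_ptr<T, FreeDelete>;

int main() {
    const char* sources = "mic,telephony_rx";                 // illustrative input
    UniqueCPtr<char> copy{strndup(sources, strlen(sources))};
    for (char* tok = strtok(copy.get(), ","); tok != nullptr; tok = strtok(nullptr, ",")) {
        printf("source: %s\n", tok);                          // tokens parsed in place
    }
    return 0;                                                 // FreeDelete calls free()
}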
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 9bef97c..0f8b0a5 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -170,11 +170,13 @@
status_t getMediaDevicesForRole(device_role_t role, const DeviceVector& availableDevices,
DeviceVector& devices) const;
+ void dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const;
+
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
- ProductStrategyPreferredRoutingMap mProductStrategyPreferredDevices;
- CapturePresetDevicesRoleMap mCapturePresetDevicesRole;
+ ProductStrategyDevicesRoleMap mProductStrategyDeviceRoleMap;
+ CapturePresetDevicesRoleMap mCapturePresetDevicesRoleMap;
VolumeGroupMap mVolumeGroups;
LastRemovableMediaDevices mLastRemovableMediaDevices;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 54625ea..2aa2f9a 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -18,20 +18,20 @@
#include "VolumeGroup.h"
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <HandleGenerator.h>
-#include <string>
-#include <vector>
#include <map>
-#include <utils/Errors.h>
-#include <utils/String8.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <HandleGenerator.h>
#include <media/AudioAttributes.h>
#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
-
-#include <vector>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
namespace android {
@@ -170,11 +170,12 @@
product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
};
-class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t,
- AudioDeviceTypeAddrVector>
-{
-public:
- void dump(String8 *dst, int spaces = 0) const;
-};
+using ProductStrategyDevicesRoleMap =
+ std::map<std::pair<product_strategy_t, device_role_t>, AudioDeviceTypeAddrVector>;
+
+void dumpProductStrategyDevicesRoleMap(
+ const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+ String8 *dst,
+ int spaces);
} // namespace android
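The replacement of the per-strategy preferred-device map with ProductStrategyDevicesRoleMap keys the container by a (strategy, role) pair, so preferred and disabled device lists for the same strategy coexist in one map. A small stand-alone illustration with simplified stand-in types:

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Stand-ins for product_strategy_t, device_role_t and AudioDeviceTypeAddrVector.
using Strategy = int;
enum Role { PREFERRED, DISABLED };
using Devices = std::vector<std::string>;

int main() {
    std::map<std::pair<Strategy, Role>, Devices> roleMap;
    roleMap[{/*strategy=*/1, PREFERRED}] = {"speaker"};   // both roles for the
    roleMap[{/*strategy=*/1, DISABLED}]  = {"bt_a2dp"};   // same strategy

    for (const auto& [key, devices] : roleMap) {
        printf("strategy %d role %d -> %zu device(s)\n",
               key.first, key.second, devices.size());
    }
    return 0;
}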
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 37e4caa..150a9a8 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -17,6 +17,10 @@
#define LOG_TAG "APM::AudioPolicyEngine/Base"
//#define LOG_NDEBUG 0
+#include <functional>
+#include <string>
+#include <sys/stat.h>
+
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
#include <TypeConverter.h>
@@ -148,10 +152,15 @@
});
return iter != end(volumeGroups);
};
+ auto fileExists = [](const char* path) {
+ struct stat fileStat;
+ return stat(path, &fileStat) == 0 && S_ISREG(fileStat.st_mode);
+ };
- auto result = engineConfig::parse();
+ auto result = fileExists(engineConfig::DEFAULT_PATH) ?
+ engineConfig::parse(engineConfig::DEFAULT_PATH) : engineConfig::ParsingResult{};
if (result.parsedConfig == nullptr) {
- ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
+ ALOGD("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
engineConfig::Config config = gDefaultEngineConfig;
android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
result = {std::make_unique<engineConfig::Config>(config),
@@ -342,23 +351,33 @@
return NO_ERROR;
}
-status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
- const AudioDeviceTypeAddrVector &devices)
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s invalid strategy %u", __func__, strategy);
+namespace {
+template <typename T>
+status_t setDevicesRoleForT(
+ std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, const AudioDeviceTypeAddrVector &devices,
+ const std::string& logStr, std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
case DEVICE_ROLE_PREFERRED:
- mProductStrategyPreferredDevices[strategy] = devices;
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support set devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
+ case DEVICE_ROLE_DISABLED: {
+ tDevicesRoleMap[std::make_pair(t, role)] = devices;
+ // The preferred devices and disabled devices are mutually exclusive. Once a device is added
+ // to one list, it must be removed from the other one.
+ const device_role_t roleToRemove = role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED
+ : DEVICE_ROLE_PREFERRED;
+ auto it = tDevicesRoleMap.find(std::make_pair(t, roleToRemove));
+ if (it != tDevicesRoleMap.end()) {
+ it->second = excludeDeviceTypeAddrsFrom(it->second, devices);
+ if (it->second.empty()) {
+ tDevicesRoleMap.erase(it);
+ }
+ }
+ } break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it is no need to set device role as none for a strategy.
default:
@@ -368,28 +387,26 @@
return NO_ERROR;
}
-status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s invalid strategy %u", __func__, strategy);
+template <typename T>
+status_t removeAllDevicesRoleForT(
+ std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, const std::string& logStr, std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
case DEVICE_ROLE_PREFERRED:
- if (mProductStrategyPreferredDevices.erase(strategy) == 0) {
- // no preferred device was set
+ case DEVICE_ROLE_DISABLED:
+ if (tDevicesRoleMap.erase(std::make_pair(t, role)) == 0) {
+ // no preferred/disabled device was set
return NAME_NOT_FOUND;
}
break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support remove devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it makes no sense to remove devices with
- // role as DEVICE_ROLE_NONE for a strategy
+ // role as DEVICE_ROLE_NONE
default:
ALOGE("%s invalid role %d", __func__, role);
return BAD_VALUE;
@@ -397,25 +414,26 @@
return NO_ERROR;
}
-status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
- AudioDeviceTypeAddrVector &devices) const
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s unknown strategy %u", __func__, strategy);
+template <typename T>
+status_t getDevicesRoleForT(
+ const std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, AudioDeviceTypeAddrVector &devices, const std::string& logStr,
+ std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
- case DEVICE_ROLE_PREFERRED: {
- // preferred device for this strategy?
- auto devIt = mProductStrategyPreferredDevices.find(strategy);
- if (devIt == mProductStrategyPreferredDevices.end()) {
- ALOGV("%s no preferred device for strategy %u", __func__, strategy);
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ auto it = tDevicesRoleMap.find(std::make_pair(t, role));
+ if (it == tDevicesRoleMap.end()) {
+ ALOGV("%s no device as role %u for %s %u", __func__, role, logStr.c_str(), t);
return NAME_NOT_FOUND;
}
- devices = devIt->second;
+ devices = it->second;
} break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as the DEVICE_ROLE_NONE is never set
@@ -426,32 +444,45 @@
return NO_ERROR;
}
+} // namespace
+
+status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return setDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return removeAllDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+ AudioDeviceTypeAddrVector &devices) const
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return getDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
status_t EngineBase::setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
const AudioDeviceTypeAddrVector &devices)
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- mCapturePresetDevicesRole[audioSource][role] = devices;
- // When the devices are set as preferred devices, remove them from the disabled devices.
- doRemoveDevicesRoleForCapturePreset(
- audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support setting devices role as disabled for capture preset.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as it is no need to set device role as none
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return setDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
}
status_t EngineBase::addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
@@ -464,19 +495,20 @@
switch (role) {
case DEVICE_ROLE_PREFERRED:
- mCapturePresetDevicesRole[audioSource][role] = excludeDeviceTypeAddrsFrom(
- mCapturePresetDevicesRole[audioSource][role], devices);
- for (const auto& device : devices) {
- mCapturePresetDevicesRole[audioSource][role].push_back(device);
+ case DEVICE_ROLE_DISABLED: {
+ const auto audioSourceRole = std::make_pair(audioSource, role);
+ mCapturePresetDevicesRoleMap[audioSourceRole] = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRoleMap[audioSourceRole], devices);
+ for (const auto &device : devices) {
+ mCapturePresetDevicesRoleMap[audioSourceRole].push_back(device);
}
// When the devices are set as preferred devices, remove them from the disabled devices.
doRemoveDevicesRoleForCapturePreset(
- audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support setting devices role as disabled for capture preset.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
+ audioSource,
+ role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED : DEVICE_ROLE_PREFERRED,
+ devices,
+ false /*forceMatched*/);
+ } break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it is no need to set device role as none
default:
@@ -502,21 +534,22 @@
switch (role) {
case DEVICE_ROLE_PREFERRED:
case DEVICE_ROLE_DISABLED: {
- if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
- mCapturePresetDevicesRole[audioSource].count(role) == 0) {
+ const auto audioSourceRole = std::make_pair(audioSource, role);
+ if (mCapturePresetDevicesRoleMap.find(audioSourceRole) ==
+ mCapturePresetDevicesRoleMap.end()) {
return NAME_NOT_FOUND;
}
AudioDeviceTypeAddrVector remainingDevices = excludeDeviceTypeAddrsFrom(
- mCapturePresetDevicesRole[audioSource][role], devices);
+ mCapturePresetDevicesRoleMap[audioSourceRole], devices);
if (forceMatched && remainingDevices.size() !=
- mCapturePresetDevicesRole[audioSource][role].size() - devices.size()) {
+ mCapturePresetDevicesRoleMap[audioSourceRole].size() - devices.size()) {
// There are some devices from `devicesToRemove` that are not shown in the cached record
return BAD_VALUE;
}
- mCapturePresetDevicesRole[audioSource][role] = remainingDevices;
- if (mCapturePresetDevicesRole[audioSource][role].empty()) {
+ mCapturePresetDevicesRoleMap[audioSourceRole] = remainingDevices;
+ if (mCapturePresetDevicesRoleMap[audioSourceRole].empty()) {
// Remove the role when device list is empty
- mCapturePresetDevicesRole[audioSource].erase(role);
+ mCapturePresetDevicesRoleMap.erase(audioSourceRole);
}
} break;
case DEVICE_ROLE_NONE:
@@ -532,63 +565,21 @@
status_t EngineBase::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
device_role_t role)
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
- mCapturePresetDevicesRole[audioSource].erase(role) == 0) {
- // no preferred device for the given audio source
- return NAME_NOT_FOUND;
- }
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support remove devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as it makes no sense to remove devices with
- // role as DEVICE_ROLE_NONE for a strategy
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return removeAllDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, "audio source" /*logStr*/, p);
}
status_t EngineBase::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
device_role_t role, AudioDeviceTypeAddrVector &devices) const
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- return BAD_VALUE;
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- case DEVICE_ROLE_DISABLED: {
- if (mCapturePresetDevicesRole.count(audioSource) == 0) {
- return NAME_NOT_FOUND;
- }
- auto devIt = mCapturePresetDevicesRole.at(audioSource).find(role);
- if (devIt == mCapturePresetDevicesRole.at(audioSource).end()) {
- ALOGV("%s no devices role(%d) for capture preset %u", __func__, role, audioSource);
- return NAME_NOT_FOUND;
- }
-
- devices = devIt->second;
- } break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return getDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
}
status_t EngineBase::getMediaDevicesForRole(device_role_t role,
@@ -630,10 +621,22 @@
return activeDevices;
}
+void EngineBase::dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*sDevice role per capture preset dump:", spaces, "");
+ for (const auto& [capturePresetRolePair, devices] : mCapturePresetDevicesRoleMap) {
+ dst->appendFormat("\n%*sCapture preset(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+ capturePresetRolePair.first, capturePresetRolePair.second,
+ dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
+ }
+ dst->appendFormat("\n");
+}
+
void EngineBase::dump(String8 *dst) const
{
mProductStrategies.dump(dst, 2);
- mProductStrategyPreferredDevices.dump(dst, 2);
+ dumpProductStrategyDevicesRoleMap(mProductStrategyDeviceRoleMap, dst, 2);
+ dumpCapturePresetDevicesRoleMap(dst, 2);
mVolumeGroups.dump(dst, 2);
}
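The setDevicesRoleForT()/removeAllDevicesRoleForT()/getDevicesRoleForT() templates above let product strategies and capture presets share one implementation, including the rule that preferred and disabled device lists are mutually exclusive. A stand-alone sketch of that exclusion rule with simplified types (not the AOSP containers):

#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>

enum Role { PREFERRED, DISABLED };
using Key = std::pair<int, Role>;       // (strategy or capture preset, role)
using Devices = std::vector<std::string>;

// Assigning devices to one role removes them from the opposite role's list,
// erasing the opposite entry entirely once it becomes empty.
void setDevicesForRole(std::map<Key, Devices>& m, int t, Role role, const Devices& devices) {
    m[{t, role}] = devices;
    const Role other = (role == PREFERRED) ? DISABLED : PREFERRED;
    auto it = m.find({t, other});
    if (it == m.end()) return;
    Devices& remaining = it->second;
    remaining.erase(std::remove_if(remaining.begin(), remaining.end(),
                                   [&devices](const std::string& d) {
                                       return std::find(devices.begin(), devices.end(), d)
                                               != devices.end();
                                   }),
                    remaining.end());
    if (remaining.empty()) m.erase(it);
}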
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index d4cea5a..b3d144f 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -320,14 +320,15 @@
}
}
-void ProductStrategyPreferredRoutingMap::dump(android::String8* dst, int spaces) const {
- dst->appendFormat("\n%*sPreferred devices per product strategy dump:", spaces, "");
- for (const auto& iter : *this) {
- dst->appendFormat("\n%*sStrategy %u %s",
- spaces + 2, "",
- (uint32_t) iter.first,
- dumpAudioDeviceTypeAddrVector(iter.second, true /*includeSensitiveInfo*/)
- .c_str());
+void dumpProductStrategyDevicesRoleMap(
+ const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+ String8 *dst,
+ int spaces) {
+ dst->appendFormat("\n%*sDevice role per product strategy dump:", spaces, "");
+ for (const auto& [strategyRolePair, devices] : productStrategyDeviceRoleMap) {
+ dst->appendFormat("\n%*sStrategy(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+ strategyRolePair.first, strategyRolePair.second,
+ dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
}
dst->appendFormat("\n");
}
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 7cfef5b..1c86051 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -643,7 +643,11 @@
xmlDocPtr doc;
doc = xmlParseFile(path);
if (doc == NULL) {
- ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ // It is OK not to find an engine config file at the default location
+ // as the caller will default to hardcoded default config
+ if (strncmp(path, DEFAULT_PATH, strlen(DEFAULT_PATH))) {
+ ALOGW("%s: Could not parse document %s", __FUNCTION__, path);
+ }
return {nullptr, 0};
}
xmlNodePtr cur = xmlDocGetRootElement(doc);
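Together with the fileExists() guard added in EngineBase.cpp, the intent is that a missing engine configuration at the default location is expected and should fall back silently to the built-in config, while a parse failure for an explicitly requested path still warns. A compact stand-alone sketch of that guard (path handling and the fallback value are illustrative):

#include <sys/stat.h>
#include <string>

static bool fileExists(const char* path) {
    struct stat st;
    return stat(path, &st) == 0 && S_ISREG(st.st_mode);
}

std::string loadEngineConfigOrDefault(const char* path) {
    if (!fileExists(path)) {
        // Missing default file is not an error: use the hardcoded default config.
        return "<built-in default config>";
    }
    return std::string("parsed: ") + path;   // stand-in for the real XML parse
}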
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index f0a01d3..518f86e 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -16,6 +16,8 @@
#pragma once
+#include <utility>
+
#include <AudioPolicyManagerObserver.h>
#include <media/AudioProductStrategy.h>
#include <media/AudioVolumeGroup.h>
@@ -35,7 +37,7 @@
using StrategyVector = std::vector<product_strategy_t>;
using VolumeGroupVector = std::vector<volume_group_t>;
using CapturePresetDevicesRoleMap =
- std::map<audio_source_t, std::map<device_role_t, AudioDeviceTypeAddrVector>>;
+ std::map<std::pair<audio_source_t, device_role_t>, AudioDeviceTypeAddrVector>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -171,8 +173,10 @@
* @param[out] mix to be used if a mix has been installed for the given audio attributes.
* @return selected input device for the audio attributes, may be null if error.
*/
- virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const = 0;
+ virtual sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const = 0;
/**
* Get the legacy stream type for a given audio attributes.
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 6d42fcf..b0c376a 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -310,6 +310,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid,
sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -328,7 +329,10 @@
return device;
}
- device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+ availableInputDevices,
+ uid,
+ mix);
if (device != nullptr) {
return device;
}
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 3b371d8..d8e2742 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -61,8 +61,10 @@
DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
bool fromCache = false) const override;
- sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+ sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const override;
void updateDeviceSelectionCache() override;
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 5083b14..43b3dd2 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -172,12 +172,6 @@
logging.info("added stub input device mask")
# Transform input source in inclusive criterion
- shift = len(all_component_types['OutputDevicesMask'])
- if shift > 32:
- logging.critical("OutputDevicesMask incompatible with criterion representation on 32 bits")
- logging.info("EXIT ON FAILURE")
- exit(1)
-
for component_types in all_component_types:
values = ','.join('{}:{}'.format(value, key) for key, value in all_component_types[component_types].items())
logging.info("{}: <{}>".format(component_types, values))
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 1a903a6..edcdf5a 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -196,7 +196,7 @@
if (desc->isActive() && !audio_is_linear_pcm(desc->getFormat())) {
availableOutputDevices.remove(desc->devices().getDevicesFromTypes({
AUDIO_DEVICE_OUT_HDMI, AUDIO_DEVICE_OUT_SPDIF,
- AUDIO_DEVICE_OUT_HDMI_ARC}));
+ AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_EARC}));
}
}
} break;
@@ -366,7 +366,9 @@
if (strategy == STRATEGY_MEDIA) {
// ARC, SPDIF and AUX_LINE can co-exist with others.
devices3 = availableOutputDevices.getDevicesFromTypes({
- AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_SPDIF, AUDIO_DEVICE_OUT_AUX_LINE});
+ AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+ AUDIO_DEVICE_OUT_SPDIF, AUDIO_DEVICE_OUT_AUX_LINE,
+ });
}
devices2.add(devices3);
@@ -707,6 +709,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid,
sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -726,7 +729,10 @@
return device;
}
- device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+ availableInputDevices,
+ uid,
+ mix);
if (device != nullptr) {
return device;
}
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 98f59d3..595e289 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -62,8 +62,10 @@
DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
bool fromCache = false) const override;
- sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+ sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const override;
void updateDeviceSelectionCache() override;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2b9f8d7..485188a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -284,7 +284,7 @@
setOutputDevices(desc, newDevices, force, 0);
}
if (!desc->isDuplicated() && desc->mProfile->hasDynamicAudioProfile() &&
- desc->devices() != activeMediaDevices &&
+ !activeMediaDevices.empty() && desc->devices() != activeMediaDevices &&
desc->supportsDevicesForPlayback(activeMediaDevices)) {
// Reopen the output to query the dynamic profiles when there is not active
// clients or all active clients will be rerouted. Otherwise, set the flag
@@ -2228,7 +2228,9 @@
} else {
// Prevent from storing invalid requested device id in clients
requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
- device = mEngine->getInputDeviceForAttributes(attributes, &policyMix);
+ device = mEngine->getInputDeviceForAttributes(attributes, uid, &policyMix);
+ ALOGV_IF(device != nullptr, "%s found device type is 0x%X",
+ __FUNCTION__, device->type());
}
if (device == nullptr) {
ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
@@ -2346,6 +2348,21 @@
return input;
}
+ // Reuse an already opened input if a client with the same session ID already exists
+ // on that input
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ sp <AudioInputDescriptor> desc = mInputs.valueAt(i);
+ if (desc->mProfile != profile) {
+ continue;
+ }
+ RecordClientVector clients = desc->clientsList();
+ for (const auto &client : clients) {
+ if (session == client->session()) {
+ return desc->mIoHandle;
+ }
+ }
+ }
+
if (!profile->canOpenNewIo()) {
for (size_t i = 0; i < mInputs.size(); ) {
sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
@@ -2614,7 +2631,7 @@
bool close = false;
for (const auto& client : input->clientsList()) {
sp<DeviceDescriptor> device =
- mEngine->getInputDeviceForAttributes(client->attributes());
+ mEngine->getInputDeviceForAttributes(client->attributes(), client->uid());
if (!input->supportedDevices().contains(device)) {
close = true;
break;
@@ -5858,12 +5875,22 @@
// If we are not in call and no client is active on this input, this methods returns
// a null sp<>, causing the patch on the input stream to be released.
- audio_attributes_t attributes = inputDesc->getHighestPriorityAttributes();
+ audio_attributes_t attributes;
+ uid_t uid;
+ sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
+ if (topClient != nullptr) {
+ attributes = topClient->attributes();
+ uid = topClient->uid();
+ } else {
+ attributes = { .source = AUDIO_SOURCE_DEFAULT };
+ uid = 0;
+ }
+
if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
}
if (attributes.source != AUDIO_SOURCE_DEFAULT) {
- device = mEngine->getInputDeviceForAttributes(attributes);
+ device = mEngine->getInputDeviceForAttributes(attributes, uid);
}
return device;
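One of the behavioral changes above is that getInputForAttr() now returns an already opened input when another client on the same profile uses the same audio session, instead of opening a second stream. A stand-alone model of that lookup with simplified types (not the AudioPolicyManager structures):

#include <vector>

struct InputDescriptor {
    int profileId;
    int ioHandle;
    std::vector<int> sessions;   // sessions of the clients attached to this input
};

// Returns the handle of an input already serving (profileId, session),
// or -1 when a new input must be opened.
int findReusableInput(const std::vector<InputDescriptor>& inputs, int profileId, int session) {
    for (const auto& desc : inputs) {
        if (desc.profileId != profileId) continue;
        for (int s : desc.sessions) {
            if (s == session) return desc.ioHandle;
        }
    }
    return -1;
}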
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index d5ba756..14be671 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -23,6 +23,7 @@
],
shared_libs: [
+ "libactivitymanager_aidl",
"libaudioclient",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
@@ -67,6 +68,7 @@
],
export_shared_lib_headers: [
+ "libactivitymanager_aidl",
"libsensorprivacy",
"media_permission-aidl-cpp",
],
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 639fa58..551013f 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -197,6 +197,7 @@
mAudioPolicyManager->setPhoneState(state);
mPhoneState = state;
mPhoneStateOwnerUid = uid;
+ updateUidStates_l();
return Status::ok();
}
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 4ffa9cc..fb38e3d 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -537,35 +537,34 @@
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
-// The client source is virtual (remote submix, call audio TX or RX...)
-// OR The user the client is running in has microphone sensor privacy disabled
-// AND The client is the assistant
-// AND an accessibility service is on TOP or a RTT call is active
-// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR uses VOICE_RECOGNITION AND is on TOP
-// OR uses HOTWORD
-// AND there is no active privacy sensitive capture or call
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// OR The client is an accessibility service
-// AND Is on TOP
-// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR The assistant is not on TOP
-// AND there is no active privacy sensitive capture or call
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// AND is on TOP
+// The client is the assistant
+// AND an accessibility service is on TOP or a RTT call is active
// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR the client source is HOTWORD
-// AND is on TOP
-// OR all active clients are using HOTWORD source
-// AND no call is active
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// OR the client is the current InputMethodService
-// AND a RTT call is active AND the source is VOICE_RECOGNITION
-// OR Any client
-// AND The assistant is not on TOP
-// AND is on TOP or latest started
+// OR uses VOICE_RECOGNITION AND is on TOP
+// OR uses HOTWORD
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// OR The client is an accessibility service
+// AND Is on TOP
+// AND the source is VOICE_RECOGNITION or HOTWORD
+// OR The assistant is not on TOP
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// AND is on TOP
+// AND the source is VOICE_RECOGNITION or HOTWORD
+// OR the client source is virtual (remote submix, call audio TX or RX...)
+// OR the client source is HOTWORD
+// AND is on TOP
+// OR all active clients are using HOTWORD source
+// AND no call is active
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// OR the client is the current InputMethodService
+// AND a RTT call is active AND the source is VOICE_RECOGNITION
+// OR Any client
+// AND The assistant is not on TOP
+// AND is on TOP or latest started
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
sp<AudioRecordClient> topActive;
@@ -596,8 +595,7 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
uid_t currentUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(current->identity.uid));
- if (!current->active || (!isVirtualSource(current->attributes.source)
- && isUserSensorPrivacyEnabledForUid(currentUid))) {
+ if (!current->active) {
continue;
}
@@ -734,9 +732,6 @@
if (isVirtualSource(source)) {
// Allow capture for virtual (remote submix, call audio TX or RX...) sources
allowCapture = true;
- } else if (isUserSensorPrivacyEnabledForUid(currentUid)) {
- // If sensor privacy is enabled, don't allow capture
- allowCapture = false;
} else if (mUidPolicy->isAssistantUid(currentUid)) {
// For assistant allow capture if:
// An accessibility service is on TOP or a RTT call is active
@@ -1024,6 +1019,9 @@
return handleResetUidState(args, err);
} else if (args.size() >= 2 && args[0] == String16("get-uid-state")) {
return handleGetUidState(args, out, err);
+ } else if (args.size() >= 1 && args[0] == String16("purge_permission-cache")) {
+ purgePermissionCache();
+ return NO_ERROR;
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
return NO_ERROR;
@@ -1145,16 +1143,6 @@
return NO_INIT;
}
-bool AudioPolicyService::isUserSensorPrivacyEnabledForUid(uid_t uid) {
- userid_t userId = multiuser_get_user_id(uid);
- if (mMicrophoneSensorPrivacyPolicies.find(userId) == mMicrophoneSensorPrivacyPolicies.end()) {
- sp<SensorPrivacyPolicy> userPolicy = new SensorPrivacyPolicy(this);
- userPolicy->registerSelfForMicrophoneOnly(userId);
- mMicrophoneSensorPrivacyPolicies[userId] = userPolicy;
- }
- return mMicrophoneSensorPrivacyPolicies[userId]->isSensorPrivacyEnabled();
-}
-
status_t AudioPolicyService::printHelp(int out) {
return dprintf(out, "Audio policy service commands:\n"
" get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 145ba06..00d9670 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -346,8 +346,6 @@
status_t validateUsage(audio_usage_t usage);
status_t validateUsage(audio_usage_t usage, const media::permission::Identity& identity);
- bool isUserSensorPrivacyEnabledForUid(uid_t uid);
-
void updateUidStates();
void updateUidStates_l() REQUIRES(mLock);
@@ -908,8 +906,6 @@
void *mLibraryHandle = nullptr;
CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
-
- std::map<userid_t, sp<SensorPrivacyPolicy>> mMicrophoneSensorPrivacyPolicies;
};
} // namespace android
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index a0b35a8..07c889b 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -113,6 +113,8 @@
"libutilscallstack",
"libutils",
"libbinder",
+ "libactivitymanager_aidl",
+ "libpermission",
"libcutils",
"libmedia",
"libmediautils",
@@ -154,12 +156,15 @@
],
static_libs: [
+ "libprocessinfoservice_aidl",
"libbinderthreadstateutils",
"media_permission-aidl-cpp",
],
export_shared_lib_headers: [
"libbinder",
+ "libactivitymanager_aidl",
+ "libpermission",
"libcamera_client",
"libfmq",
"libsensorprivacy",
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 6cd20a1..3e6a7c7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -41,7 +41,6 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <binder/PermissionController.h>
-#include <binder/ProcessInfoService.h>
#include <binder/IResultReceiver.h>
#include <binderthreadstate/CallerUtils.h>
#include <cutils/atomic.h>
@@ -57,6 +56,7 @@
#include <media/IMediaHTTPService.h>
#include <media/mediaplayer.h>
#include <mediautils/BatteryNotifier.h>
+#include <processinfo/ProcessInfoService.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/String16.h>
@@ -91,6 +91,8 @@
using hardware::ICameraServiceListener;
using hardware::camera::common::V1_0::CameraDeviceStatus;
using hardware::camera::common::V1_0::TorchModeStatus;
+using hardware::camera2::ICameraInjectionCallback;
+using hardware::camera2::ICameraInjectionSession;
using hardware::camera2::utils::CameraIdAndSessionConfiguration;
using hardware::camera2::utils::ConcurrentCameraIdCombination;
@@ -127,6 +129,8 @@
sCameraSendSystemEventsPermission("android.permission.CAMERA_SEND_SYSTEM_EVENTS");
static const String16 sCameraOpenCloseListenerPermission(
"android.permission.CAMERA_OPEN_CLOSE_LISTENER");
+static const String16
+ sCameraInjectExternalCameraPermission("android.permission.CAMERA_INJECT_EXTERNAL_CAMERA");
static constexpr int32_t kVendorClientScore = resource_policy::PERCEPTIBLE_APP_ADJ;
static constexpr int32_t kVendorClientState = ActivityManager::PROCESS_STATE_PERSISTENT_UI;
@@ -145,6 +149,7 @@
void CameraService::onFirstRef()
{
+
ALOGI("CameraService process starting");
BnCameraService::onFirstRef();
@@ -165,6 +170,7 @@
mUidPolicy->registerSelf();
mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
mSensorPrivacyPolicy->registerSelf();
+ mInjectionStatusListener = new InjectionStatusListener(this);
mAppOps.setCameraAudioRestriction(mAudioRestriction);
sp<HidlCameraService> hcs = HidlCameraService::getInstance(this);
if (hcs->registerAsService() != android::OK) {
@@ -224,10 +230,16 @@
return OK;
}
-void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status) {
+void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status,
+ SystemCameraKind systemCameraKind) {
Mutex::Autolock lock(mStatusListenerLock);
-
for (auto& i : mListenerList) {
+ if (shouldSkipStatusUpdates(systemCameraKind, i->isVendorListener(), i->getListenerPid(),
+ i->getListenerUid())) {
+ ALOGV("Skipping torch callback for system-only camera device %s",
+ cameraId.c_str());
+ continue;
+ }
i->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
}
}
@@ -236,10 +248,7 @@
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
mUidPolicy->unregisterSelf();
mSensorPrivacyPolicy->unregisterSelf();
-
- for (auto const& [_, policy] : mCameraSensorPrivacyPolicies) {
- policy->unregisterSelf();
- }
+ mInjectionStatusListener->removeListener();
}
void CameraService::onNewProviderRegistered() {
@@ -317,7 +326,7 @@
Mutex::Autolock al(mTorchStatusMutex);
mTorchStatusMap.add(id, TorchModeStatus::AVAILABLE_OFF);
- broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF);
+ broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF, deviceKind);
}
updateCameraNumAndIds();
@@ -478,12 +487,19 @@
void CameraService::onTorchStatusChanged(const String8& cameraId,
TorchModeStatus newStatus) {
+ SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
+ status_t res = getSystemCameraKind(cameraId, &systemCameraKind);
+ if (res != OK) {
+ ALOGE("%s: Could not get system camera kind for camera id %s", __FUNCTION__,
+ cameraId.string());
+ return;
+ }
Mutex::Autolock al(mTorchStatusMutex);
- onTorchStatusChangedLocked(cameraId, newStatus);
+ onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
}
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
- TorchModeStatus newStatus) {
+ TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
__FUNCTION__, cameraId.string(), newStatus);
@@ -532,8 +548,7 @@
}
}
}
-
- broadcastTorchModeStatus(cameraId, newStatus);
+ broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
}
static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
@@ -743,6 +758,10 @@
return Status::ok();
}
+void CameraService::clearCachedVariables() {
+ BasicClient::BasicClient::sCameraService = nullptr;
+}
+
int CameraService::getDeviceVersion(const String8& cameraId, int* facing, int* orientation) {
ATRACE_CALL();
@@ -788,8 +807,8 @@
Status CameraService::makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
const std::optional<String16>& featureId, const String8& cameraId,
- int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
- int deviceVersion, apiLevel effectiveApiLevel,
+ int api1CameraId, int facing, int sensorOrientation, int clientPid, uid_t clientUid,
+ int servicePid, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
// Create CameraClient based on device version reported by the HAL.
@@ -812,13 +831,13 @@
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new Camera2Client(cameraService, tmp, packageName, featureId,
cameraId, api1CameraId,
- facing, clientPid, clientUid,
+ facing, sensorOrientation, clientPid, clientUid,
servicePid);
} else { // Camera2 API route
sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
*client = new CameraDeviceClient(cameraService, tmp, packageName, featureId,
- cameraId, facing, clientPid, clientUid, servicePid);
+ cameraId, facing, sensorOrientation, clientPid, clientUid, servicePid);
}
break;
default:
@@ -1633,7 +1652,7 @@
sp<BasicClient> tmp = nullptr;
if(!(ret = makeClient(this, cameraCb, clientPackageName, clientFeatureId,
- cameraId, api1CameraId, facing,
+ cameraId, api1CameraId, facing, orientation,
clientPid, clientUid, getpid(),
deviceVersion, effectiveApiLevel,
/*out*/&tmp)).isOk()) {
@@ -1699,8 +1718,9 @@
// Set camera muting behavior
if (client->supportsCameraMute()) {
- client->setCameraMute(mOverrideCameraMuteMode ||
- isUserSensorPrivacyEnabledForUid(clientUid));
+ bool isCameraPrivacyEnabled =
+ mSensorPrivacyPolicy->isCameraPrivacyEnabled(multiuser_get_user_id(clientUid));
+ client->setCameraMute(mOverrideCameraMuteMode || isCameraPrivacyEnabled);
}
if (shimUpdateOnly) {
@@ -1812,6 +1832,10 @@
String8 id = String8(cameraId.string());
int uid = CameraThreadState::getCallingUid();
+ if (shouldRejectSystemCameraConnection(id)) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to set torch mode"
+ " for system only device %s: ", id.string());
+ }
// verify id is valid.
auto state = getCameraState(id);
if (state == nullptr) {
@@ -2014,7 +2038,50 @@
return Status::ok();
}
- Status CameraService::getConcurrentCameraIds(
+Status CameraService::notifyDisplayConfigurationChange() {
+ ATRACE_CALL();
+ const int callingPid = CameraThreadState::getCallingPid();
+ const int selfPid = getpid();
+
+ // Permission checks
+ if (callingPid != selfPid) {
+ // Ensure we're being called by system_server, or similar process with
+ // permissions to notify the camera service about system events
+ if (!checkCallingPermission(sCameraSendSystemEventsPermission)) {
+ const int uid = CameraThreadState::getCallingUid();
+ ALOGE("Permission Denial: cannot send updates to camera service about orientation"
+ " changes from pid=%d, uid=%d", callingPid, uid);
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to send updates to camera service about orientation"
+ " changes from pid=%d, uid=%d", callingPid, uid);
+ }
+ }
+
+ Mutex::Autolock lock(mServiceLock);
+
+ // Don't do anything if rotate-and-crop override via cmd is active
+ if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return Status::ok();
+
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr) {
+ if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
+ basicClient->getPackageName(), basicClient->getCameraOrientation(),
+ basicClient->getCameraFacing())) {
+ basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
+ } else {
+ basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE);
+ }
+ }
+ }
+ }
+
+ return Status::ok();
+}
+
+Status CameraService::getConcurrentCameraIds(
std::vector<ConcurrentCameraIdCombination>* concurrentCameraIds) {
ATRACE_CALL();
if (!concurrentCameraIds) {
@@ -2097,10 +2164,15 @@
return addListenerHelper(listener, cameraStatuses);
}
+binder::Status CameraService::addListenerTest(const sp<hardware::ICameraServiceListener>& listener,
+ std::vector<hardware::CameraStatus>* cameraStatuses) {
+ return addListenerHelper(listener, cameraStatuses, false, true);
+}
+
Status CameraService::addListenerHelper(const sp<ICameraServiceListener>& listener,
/*out*/
std::vector<hardware::CameraStatus> *cameraStatuses,
- bool isVendorListener) {
+ bool isVendorListener, bool isProcessLocalTest) {
ATRACE_CALL();
@@ -2131,7 +2203,7 @@
sp<ServiceListener> serviceListener =
new ServiceListener(this, listener, clientUid, clientPid, isVendorListener,
openCloseCallbackAllowed);
- auto ret = serviceListener->initialize();
+ auto ret = serviceListener->initialize(isProcessLocalTest);
if (ret != NO_ERROR) {
String8 msg = String8::format("Failed to initialize service listener: %s (%d)",
strerror(-ret), ret);
@@ -2168,6 +2240,11 @@
return shouldSkipStatusUpdates(deviceKind, isVendorListener, clientPid,
clientUid);}), cameraStatuses->end());
+ // cameraStatuses will have non-eligible camera ids removed.
+ std::set<String16> idsChosenForCallback;
+ for (const auto &s : *cameraStatuses) {
+ idsChosenForCallback.insert(String16(s.cameraId));
+ }
/*
* Immediately signal current torch status to this listener only
@@ -2177,7 +2254,11 @@
Mutex::Autolock al(mTorchStatusMutex);
for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
String16 id = String16(mTorchStatusMap.keyAt(i).string());
- listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+ // The camera id is visible to the client. Fine to send torch
+ // callback.
+ if (idsChosenForCallback.find(id) != idsChosenForCallback.end()) {
+ listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+ }
}
}
@@ -2311,6 +2392,42 @@
return Status::ok();
}
+Status CameraService::injectCamera(
+ const String16& packageName, const String16& internalCamId,
+ const String16& externalCamId,
+ const sp<ICameraInjectionCallback>& callback,
+ /*out*/
+ sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession) {
+ ATRACE_CALL();
+
+ if (!checkCallingPermission(sCameraInjectExternalCameraPermission)) {
+ const int pid = CameraThreadState::getCallingPid();
+ const int uid = CameraThreadState::getCallingUid();
+ ALOGE("Permission Denial: can't inject camera pid=%d, uid=%d", pid, uid);
+ return STATUS_ERROR(ERROR_PERMISSION_DENIED,
+ "Permission Denial: no permission to inject camera");
+ }
+
+ ALOGV(
+ "%s: Package name = %s, Internal camera ID = %s, External camera ID = "
+ "%s",
+ __FUNCTION__, String8(packageName).string(),
+ String8(internalCamId).string(), String8(externalCamId).string());
+
+ binder::Status ret = binder::Status::ok();
+ // TODO: Implement the injection camera function.
+ // ret = internalInjectCamera(...);
+ // if(!ret.isOk()) {
+ // mInjectionStatusListener->notifyInjectionError(...);
+ // return ret;
+ // }
+
+ mInjectionStatusListener->addListener(callback);
+ *cameraInjectionSession = new CameraInjectionSession(this);
+
+ return ret;
+}
+
void CameraService::removeByClient(const BasicClient* client) {
Mutex::Autolock lock(mServiceLock);
for (auto& i : mActiveClientManager.getAll()) {
@@ -2649,6 +2766,11 @@
ATRACE_CALL();
LOG1("playSound(%d)", kind);
+ if (kind < 0 || kind >= NUM_SOUNDS) {
+ ALOGE("%s: Invalid sound id requested: %d", __FUNCTION__, kind);
+ return;
+ }
+
Mutex::Autolock lock(mSoundLock);
loadSoundLocked(kind);
sp<MediaPlayer> player = mSoundPlayer[kind];
@@ -2665,13 +2787,13 @@
const String16& clientPackageName,
const std::optional<String16>& clientFeatureId,
const String8& cameraIdStr,
- int api1CameraId, int cameraFacing,
+ int api1CameraId, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid,
int servicePid) :
CameraService::BasicClient(cameraService,
IInterface::asBinder(cameraClient),
clientPackageName, clientFeatureId,
- cameraIdStr, cameraFacing,
+ cameraIdStr, cameraFacing, sensorOrientation,
clientPid, clientUid,
servicePid),
mCameraId(api1CameraId)
@@ -2701,22 +2823,23 @@
CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
const String16& clientPackageName, const std::optional<String16>& clientFeatureId,
- const String8& cameraIdStr, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid,
int servicePid):
- mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing),
+ mDestructionStarted(false),
+ mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing), mOrientation(sensorOrientation),
mClientPackageName(clientPackageName), mClientFeatureId(clientFeatureId),
mClientPid(clientPid), mClientUid(clientUid),
mServicePid(servicePid),
mDisconnected(false), mUidIsTrusted(false),
mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE),
- mRemoteBinder(remoteCallback)
+ mRemoteBinder(remoteCallback),
+ mOpsActive(false),
+ mOpsStreaming(false)
{
if (sCameraService == nullptr) {
sCameraService = cameraService;
}
- mOpsActive = false;
- mDestructionStarted = false;
// In some cases the calling code has no access to the package it runs under.
// For example, NDK camera API.
@@ -2801,6 +2924,13 @@
return mClientPackageName;
}
+int CameraService::BasicClient::getCameraFacing() const {
+ return mCameraFacing;
+}
+
+int CameraService::BasicClient::getCameraOrientation() const {
+ return mOrientation;
+}
int CameraService::BasicClient::getClientPid() const {
return mClientPid;
@@ -2844,6 +2974,29 @@
}
}
+status_t CameraService::BasicClient::handleAppOpMode(int32_t mode) {
+ if (mode == AppOpsManager::MODE_ERRORED) {
+ ALOGI("Camera %s: Access for \"%s\" has been revoked",
+ mCameraIdStr.string(), String8(mClientPackageName).string());
+ return PERMISSION_DENIED;
+ } else if (!mUidIsTrusted && mode == AppOpsManager::MODE_IGNORED) {
+ // If the calling Uid is trusted (a native service), the AppOpsManager could
+ // return MODE_IGNORED. Do not treat such a case as an error.
+ bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid,
+ mClientPackageName);
+ bool isCameraPrivacyEnabled =
+ sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled(
+ multiuser_get_user_id(mClientUid));
+ if (!isUidActive || !isCameraPrivacyEnabled) {
+ ALOGI("Camera %s: Access for \"%s\" has been restricted",
+ mCameraIdStr.string(), String8(mClientPackageName).string());
+ // Return the same error as for device policy manager rejection
+ return -EACCES;
+ }
+ }
+ return OK;
+}
+
status_t CameraService::BasicClient::startCameraOps() {
ATRACE_CALL();
@@ -2854,26 +3007,16 @@
if (mAppOpsManager != nullptr) {
// Notify app ops that the camera is not available
mOpsCallback = new OpsCallback(this);
- int32_t res;
mAppOpsManager->startWatchingMode(AppOpsManager::OP_CAMERA,
mClientPackageName, mOpsCallback);
- res = mAppOpsManager->startOpNoThrow(AppOpsManager::OP_CAMERA, mClientUid,
- mClientPackageName, /*startIfModeDefault*/ false, mClientFeatureId,
- String16("start camera ") + String16(mCameraIdStr));
- if (res == AppOpsManager::MODE_ERRORED) {
- ALOGI("Camera %s: Access for \"%s\" has been revoked",
- mCameraIdStr.string(), String8(mClientPackageName).string());
- return PERMISSION_DENIED;
- }
-
- // If the calling Uid is trusted (a native service), the AppOpsManager could
- // return MODE_IGNORED. Do not treat such case as error.
- if (!mUidIsTrusted && res == AppOpsManager::MODE_IGNORED) {
- ALOGI("Camera %s: Access for \"%s\" has been restricted",
- mCameraIdStr.string(), String8(mClientPackageName).string());
- // Return the same error as for device policy manager rejection
- return -EACCES;
+ // Just check for camera access here on open - delay startOp until
+ // camera frames start streaming in startCameraStreamingOps
+ int32_t mode = mAppOpsManager->checkOp(AppOpsManager::OP_CAMERA, mClientUid,
+ mClientPackageName);
+ status_t res = handleAppOpMode(mode);
+ if (res != OK) {
+ return res;
}
}
@@ -2890,17 +3033,69 @@
return OK;
}
+status_t CameraService::BasicClient::startCameraStreamingOps() {
+ ATRACE_CALL();
+
+ if (!mOpsActive) {
+ ALOGE("%s: Calling streaming start when not yet active", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if (mOpsStreaming) {
+ ALOGV("%s: Streaming already active!", __FUNCTION__);
+ return OK;
+ }
+
+ ALOGV("%s: Start camera streaming ops, package name = %s, client UID = %d",
+ __FUNCTION__, String8(mClientPackageName).string(), mClientUid);
+
+ if (mAppOpsManager != nullptr) {
+ int32_t mode = mAppOpsManager->startOpNoThrow(AppOpsManager::OP_CAMERA, mClientUid,
+ mClientPackageName, /*startIfModeDefault*/ false, mClientFeatureId,
+ String16("start camera ") + String16(mCameraIdStr));
+ status_t res = handleAppOpMode(mode);
+ if (res != OK) {
+ return res;
+ }
+ }
+
+ mOpsStreaming = true;
+
+ return OK;
+}
+
+status_t CameraService::BasicClient::finishCameraStreamingOps() {
+ ATRACE_CALL();
+
+ if (!mOpsActive) {
+ ALOGE("%s: Calling streaming start when not yet active", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if (!mOpsStreaming) {
+ ALOGV("%s: Streaming not active!", __FUNCTION__);
+ return OK;
+ }
+
+ if (mAppOpsManager != nullptr) {
+ mAppOpsManager->finishOp(AppOpsManager::OP_CAMERA, mClientUid,
+ mClientPackageName, mClientFeatureId);
+ mOpsStreaming = false;
+ }
+
+ return OK;
+}
+
status_t CameraService::BasicClient::finishCameraOps() {
ATRACE_CALL();
+ if (mOpsStreaming) {
+ // Make sure we've notified everyone about camera stopping
+ finishCameraStreamingOps();
+ }
+
// Check if startCameraOps succeeded, and if so, finish the camera op
if (mOpsActive) {
- // Notify app ops that the camera is available again
- if (mAppOpsManager != nullptr) {
- mAppOpsManager->finishOp(AppOpsManager::OP_CAMERA, mClientUid,
- mClientPackageName, mClientFeatureId);
- mOpsActive = false;
- }
+ mOpsActive = false;
+
// This function is called when a client disconnects. This should
// release the camera, but actually only if it was in a proper
// functional state, i.e. with status NOT_AVAILABLE
@@ -2951,15 +3146,22 @@
block();
} else if (res == AppOpsManager::MODE_IGNORED) {
bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid, mClientPackageName);
+ bool isCameraPrivacyEnabled =
+ sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled(
+ multiuser_get_user_id(mClientUid));
ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d",
mCameraIdStr.string(), String8(mClientPackageName).string(),
mUidIsTrusted, isUidActive);
// If the calling Uid is trusted (a native service), or the client Uid is active (WAR for
// b/175320666), the AppOpsManager could return MODE_IGNORED. Do not treat such cases as
// error.
- if (!mUidIsTrusted && !isUidActive) {
+ if (!mUidIsTrusted && isUidActive && isCameraPrivacyEnabled) {
+ setCameraMute(true);
+ } else if (!mUidIsTrusted && !isUidActive) {
block();
}
+ } else if (res == AppOpsManager::MODE_ALLOWED) {
+ setCameraMute(sCameraService->mOverrideCameraMuteMode);
}
}
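For reference, the MODE_IGNORED branch above reduces to a small decision over the trusted/active/privacy flags. A hedged sketch of that decision, with illustrative names only:

```cpp
// Decision sketch for the MODE_IGNORED branch above (illustrative only).
#include <cstdio>

enum class Action { None, Mute, Block };

Action onCameraOpIgnored(bool uidTrusted, bool uidActive, bool cameraPrivacyEnabled) {
    if (uidTrusted) return Action::None;                         // trusted native callers are left alone
    if (uidActive && cameraPrivacyEnabled) return Action::Mute;  // privacy toggle on: feed muted frames
    if (!uidActive) return Action::Block;                        // inactive app: block camera access
    return Action::None;                                         // active app, no privacy toggle (b/175320666 WAR)
}

int main() {
    Action a = onCameraOpIgnored(/*uidTrusted*/ false, /*uidActive*/ true, /*privacy*/ true);
    std::printf("action=%d\n", static_cast<int>(a));             // 1 == Mute
}
```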
@@ -3232,6 +3434,7 @@
if (mRegistered) {
return;
}
+ hasCameraPrivacyFeature(); // Called so the result is cached
mSpm.addSensorPrivacyListener(this);
mSensorPrivacyEnabled = mSpm.isSensorPrivacyEnabled();
status_t res = mSpm.linkToDeath(this);
@@ -3241,39 +3444,6 @@
}
}
-status_t CameraService::SensorPrivacyPolicy::registerSelfForIndividual(int userId) {
- Mutex::Autolock _l(mSensorPrivacyLock);
- if (mRegistered) {
- return OK;
- }
-
- status_t res = mSpm.addIndividualSensorPrivacyListener(userId,
- SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, this);
- if (res != OK) {
- ALOGE("Unable to register camera privacy listener: %s (%d)", strerror(-res), res);
- return res;
- }
-
- res = mSpm.isIndividualSensorPrivacyEnabled(userId,
- SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, mSensorPrivacyEnabled);
- if (res != OK) {
- ALOGE("Unable to check camera privacy: %s (%d)", strerror(-res), res);
- return res;
- }
-
- res = mSpm.linkToDeath(this);
- if (res != OK) {
- ALOGE("Register link to death failed for sensor privacy: %s (%d)", strerror(-res), res);
- return res;
- }
-
- mRegistered = true;
- mIsIndividual = true;
- mUserId = userId;
- ALOGV("SensorPrivacyPolicy: Registered with SensorPrivacyManager");
- return OK;
-}
-
void CameraService::SensorPrivacyPolicy::unregisterSelf() {
Mutex::Autolock _l(mSensorPrivacyLock);
mSpm.removeSensorPrivacyListener(this);
@@ -3287,20 +3457,24 @@
return mSensorPrivacyEnabled;
}
+bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled(userid_t userId) {
+ if (!hasCameraPrivacyFeature()) {
+ return false;
+ }
+ return mSpm.isIndividualSensorPrivacyEnabled(userId,
+ SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA);
+}
+
binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyChanged(bool enabled) {
{
Mutex::Autolock _l(mSensorPrivacyLock);
mSensorPrivacyEnabled = enabled;
}
// if sensor privacy is enabled then block all clients from accessing the camera
- sp<CameraService> service = mService.promote();
- if (service != nullptr) {
- if (mIsIndividual) {
- service->setMuteForAllClients(mUserId, enabled);
- } else {
- if (enabled) {
- service->blockAllClients();
- }
+ if (enabled) {
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ service->blockAllClients();
}
}
return binder::Status::ok();
@@ -3312,6 +3486,10 @@
mRegistered = false;
}
+bool CameraService::SensorPrivacyPolicy::hasCameraPrivacyFeature() {
+ return mSpm.supportsSensorToggle(SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA);
+}
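hasCameraPrivacyFeature() is invoked at registration time so its result is cached. The real caching may live inside SensorPrivacyManager; the sketch below only illustrates a lazy probe-and-cache pattern under that assumption, with hypothetical names.

```cpp
// Hypothetical sketch of a lazily cached feature probe (not the real classes).
#include <cstdio>
#include <mutex>
#include <optional>

class CameraPrivacyFeatureCache {
  public:
    bool hasCameraPrivacyFeature() {
        std::lock_guard<std::mutex> lock(mLock);
        if (!mSupported.has_value()) {
            mSupported = probeSensorToggleSupport();  // expensive IPC in the real service
        }
        return *mSupported;
    }
  private:
    static bool probeSensorToggleSupport() { return true; }  // stand-in for the real support query
    std::mutex mLock;
    std::optional<bool> mSupported;
};

int main() {
    CameraPrivacyFeatureCache cache;
    std::printf("camera privacy toggle supported: %d\n", cache.hasCameraPrivacyFeature());
}
```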
+
// ----------------------------------------------------------------------------
// CameraState
// ----------------------------------------------------------------------------
@@ -3478,6 +3656,66 @@
}
// ----------------------------------------------------------------------------
+// InjectionStatusListener
+// ----------------------------------------------------------------------------
+
+void CameraService::InjectionStatusListener::addListener(
+ const sp<ICameraInjectionCallback>& callback) {
+ Mutex::Autolock lock(mListenerLock);
+ if (mCameraInjectionCallback) return;
+ status_t res = IInterface::asBinder(callback)->linkToDeath(this);
+ if (res == OK) {
+ mCameraInjectionCallback = callback;
+ }
+}
+
+void CameraService::InjectionStatusListener::removeListener() {
+ Mutex::Autolock lock(mListenerLock);
+ if (mCameraInjectionCallback == nullptr) {
+ ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
+ return;
+ }
+ IInterface::asBinder(mCameraInjectionCallback)->unlinkToDeath(this);
+ mCameraInjectionCallback = nullptr;
+}
+
+void CameraService::InjectionStatusListener::notifyInjectionError(
+ int errorCode) {
+ Mutex::Autolock lock(mListenerLock);
+ if (mCameraInjectionCallback == nullptr) {
+ ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
+ return;
+ }
+ mCameraInjectionCallback->onInjectionError(errorCode);
+}
+
+void CameraService::InjectionStatusListener::binderDied(
+ const wp<IBinder>& /*who*/) {
+ Mutex::Autolock lock(mListenerLock);
+ ALOGV("InjectionStatusListener: ICameraInjectionCallback has died");
+ auto parent = mParent.promote();
+ if (parent != nullptr) {
+ parent->stopInjectionImpl();
+ }
+}
+
+// ----------------------------------------------------------------------------
+// CameraInjectionSession
+// ----------------------------------------------------------------------------
+
+binder::Status CameraService::CameraInjectionSession::stopInjection() {
+ Mutex::Autolock lock(mInjectionSessionLock);
+ auto parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("CameraInjectionSession: Parent is gone");
+ return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SERVICE,
+ "Camera service encountered error");
+ }
+ parent->stopInjectionImpl();
+ return binder::Status::ok();
+}
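InjectionStatusListener and CameraInjectionSession follow the common pattern of holding a single remote callback, linking to its death, and tearing the injection down when the remote process dies. A simplified stand-alone model of that pattern (no real binder types; all names are illustrative):

```cpp
// Simplified model of the listener pattern above (assumption: illustrative names).
#include <functional>
#include <iostream>
#include <mutex>

class InjectionListenerModel {
  public:
    using Callback = std::function<void(int /*errorCode*/)>;

    void addListener(Callback cb) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mCallback) return;            // only one callback at a time
        mCallback = std::move(cb);        // the real code also calls linkToDeath() here
    }
    void removeListener() {
        std::lock_guard<std::mutex> lock(mLock);
        mCallback = nullptr;              // the real code does unlinkToDeath() first
    }
    void notifyInjectionError(int errorCode) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mCallback) mCallback(errorCode);
    }
    void onBinderDied() {                 // stand-in for binderDied(): stop the injection
        std::cout << "callback process died, stopping injection\n";
        removeListener();
    }
  private:
    std::mutex mLock;
    Callback mCallback;
};

int main() {
    InjectionListenerModel listener;
    listener.addListener([](int err) { std::cout << "injection error " << err << "\n"; });
    listener.notifyInjectionError(2);     // arbitrary error code for the demo
    listener.onBinderDied();
}
```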
+
+// ----------------------------------------------------------------------------
static const int kDumpLockRetries = 50;
static const int kDumpLockSleep = 60000;
@@ -3747,7 +3985,7 @@
TorchModeStatus::AVAILABLE_OFF :
TorchModeStatus::NOT_AVAILABLE;
if (torchStatus != newTorchStatus) {
- onTorchStatusChangedLocked(cameraId, newTorchStatus);
+ onTorchStatusChangedLocked(cameraId, newTorchStatus, deviceKind);
}
}
}
@@ -3931,19 +4169,6 @@
}
}
-void CameraService::setMuteForAllClients(userid_t userId, bool enabled) {
- const auto clients = mActiveClientManager.getAll();
- for (auto& current : clients) {
- if (current != nullptr) {
- const auto basicClient = current->getValue();
- if (basicClient.get() != nullptr
- && multiuser_get_user_id(basicClient->getClientUid()) == userId) {
- basicClient->setCameraMute(enabled);
- }
- }
- }
-}
-
// NOTE: This is a remote API - make sure all args are validated
status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
if (!checkCallingPermission(sManageCameraPermission, nullptr, nullptr)) {
@@ -4150,16 +4375,10 @@
return mode;
}
-bool CameraService::isUserSensorPrivacyEnabledForUid(uid_t uid) {
- userid_t userId = multiuser_get_user_id(uid);
- if (mCameraSensorPrivacyPolicies.find(userId) == mCameraSensorPrivacyPolicies.end()) {
- sp<SensorPrivacyPolicy> userPolicy = new SensorPrivacyPolicy(this);
- if (userPolicy->registerSelfForIndividual(userId) != OK) {
- return false;
- }
- mCameraSensorPrivacyPolicies[userId] = userPolicy;
- }
- return mCameraSensorPrivacyPolicies[userId]->isSensorPrivacyEnabled();
+void CameraService::stopInjectionImpl() {
+ mInjectionStatusListener->removeListener();
+
+ // TODO: Implement the stop injection function.
}
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 092d916..10e1748 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -20,6 +20,8 @@
#include <android/hardware/BnCameraService.h>
#include <android/hardware/BnSensorPrivacyListener.h>
#include <android/hardware/ICameraServiceListener.h>
+#include <android/hardware/camera2/BnCameraInjectionSession.h>
+#include <android/hardware/camera2/ICameraInjectionCallback.h>
#include <cutils/multiuser.h>
#include <utils/Vector.h>
@@ -167,6 +169,8 @@
virtual binder::Status notifyDeviceStateChange(int64_t newState);
+ virtual binder::Status notifyDisplayConfigurationChange();
+
// OK = supports api of that version, -EOPNOTSUPP = does not support
virtual binder::Status supportsCameraApi(
const String16& cameraId, int32_t apiVersion,
@@ -178,6 +182,13 @@
/*out*/
bool *isSupported);
+ virtual binder::Status injectCamera(
+ const String16& packageName, const String16& internalCamId,
+ const String16& externalCamId,
+ const sp<hardware::camera2::ICameraInjectionCallback>& callback,
+ /*out*/
+ sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession);
+
// Extra permissions checks
virtual status_t onTransact(uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags);
@@ -188,7 +199,8 @@
binder::Status addListenerHelper(const sp<hardware::ICameraServiceListener>& listener,
/*out*/
- std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false);
+ std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false,
+ bool isProcessLocalTest = false);
// Monitored UIDs availability notification
void notifyMonitoredUids();
@@ -217,6 +229,19 @@
int* orientation = nullptr);
/////////////////////////////////////////////////////////////////////
+ // Methods to be used in CameraService class tests only
+ //
+ // CameraService class test method only - clear static variables in the
+ // cameraserver process, which otherwise might affect multiple test runs.
+ void clearCachedVariables();
+
+ // Add test listener, linkToDeath won't be called since this is for process
+ // local testing.
+ binder::Status addListenerTest(const sp<hardware::ICameraServiceListener>& listener,
+ /*out*/
+ std::vector<hardware::CameraStatus>* cameraStatuses);
+
+ /////////////////////////////////////////////////////////////////////
// Shared utilities
static binder::Status filterGetInfoErrorCode(status_t err);
@@ -224,6 +249,7 @@
// CameraClient functionality
class BasicClient : public virtual RefBase {
+ friend class CameraService;
public:
virtual status_t initialize(sp<CameraProviderManager> manager,
const String8& monitorTags) = 0;
@@ -246,6 +272,12 @@
// Return the package name for this client
virtual String16 getPackageName() const;
+ // Return the camera facing for this client
+ virtual int getCameraFacing() const;
+
+ // Return the camera orientation for this client
+ virtual int getCameraOrientation() const;
+
// Notify client about a fatal error
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) = 0;
@@ -292,6 +324,7 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraIdStr,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -308,6 +341,7 @@
static sp<CameraService> sCameraService;
const String8 mCameraIdStr;
const int mCameraFacing;
+ const int mOrientation;
String16 mClientPackageName;
std::optional<String16> mClientFeatureId;
pid_t mClientPid;
@@ -322,9 +356,18 @@
// - The app-side Binder interface to receive callbacks from us
sp<IBinder> mRemoteBinder; // immutable after constructor
- // permissions management
+ // Permissions management methods for camera lifecycle
+
+ // Notify rest of system/apps about camera opening, and check appops
virtual status_t startCameraOps();
+ // Notify rest of system/apps about camera starting to stream data, and confirm appops
+ virtual status_t startCameraStreamingOps();
+ // Notify rest of system/apps about camera stopping streaming data
+ virtual status_t finishCameraStreamingOps();
+ // Notify rest of system/apps about camera closing
virtual status_t finishCameraOps();
+ // Handle errors for start/checkOps
+ virtual status_t handleAppOpMode(int32_t mode);
std::unique_ptr<AppOpsManager> mAppOpsManager = nullptr;
@@ -339,9 +382,12 @@
}; // class OpsCallback
sp<OpsCallback> mOpsCallback;
- // Track whether startCameraOps was called successfully, to avoid
- // finishing what we didn't start.
+ // Track whether checkOps was called successfully, to avoid
+ // finishing what we didn't start, on camera open.
bool mOpsActive;
+ // Track whether startOps was called successfully on start of
+ // camera streaming.
+ bool mOpsStreaming;
// IAppOpsCallback interface, indirected through opListener
virtual void opChanged(int32_t op, const String16& packageName);
@@ -385,6 +431,7 @@
const String8& cameraIdStr,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -638,14 +685,13 @@
public virtual IBinder::DeathRecipient {
public:
explicit SensorPrivacyPolicy(wp<CameraService> service)
- : mService(service), mSensorPrivacyEnabled(false), mRegistered(false),
- mIsIndividual(false), mUserId(0) {}
+ : mService(service), mSensorPrivacyEnabled(false), mRegistered(false) {}
void registerSelf();
- status_t registerSelfForIndividual(int userId);
void unregisterSelf();
bool isSensorPrivacyEnabled();
+ bool isCameraPrivacyEnabled(userid_t userId);
binder::Status onSensorPrivacyChanged(bool enabled);
@@ -658,8 +704,8 @@
Mutex mSensorPrivacyLock;
bool mSensorPrivacyEnabled;
bool mRegistered;
- bool mIsIndividual;
- userid_t mUserId;
+
+ bool hasCameraPrivacyFeature();
};
sp<UidPolicy> mUidPolicy;
@@ -915,7 +961,10 @@
mIsVendorListener(isVendorClient),
mOpenCloseCallbackAllowed(openCloseCallbackAllowed) { }
- status_t initialize() {
+ status_t initialize(bool isProcessLocalTest) {
+ if (isProcessLocalTest) {
+ return OK;
+ }
return IInterface::asBinder(mListener)->linkToDeath(this);
}
@@ -991,7 +1040,8 @@
// handle torch mode status change and invoke callbacks. mTorchStatusMutex
// should be locked.
void onTorchStatusChangedLocked(const String8& cameraId,
- hardware::camera::common::V1_0::TorchModeStatus newStatus);
+ hardware::camera::common::V1_0::TorchModeStatus newStatus,
+ SystemCameraKind systemCameraKind);
// get a camera's torch status. mTorchStatusMutex should be locked.
status_t getTorchStatusLocked(const String8 &cameraId,
@@ -1034,9 +1084,6 @@
// Blocks all active clients.
void blockAllClients();
- // Mutes all active clients for a user.
- void setMuteForAllClients(userid_t userId, bool enabled);
-
// Overrides the UID state as if it is idle
status_t handleSetUidState(const Vector<String16>& args, int err);
@@ -1072,7 +1119,7 @@
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
const std::optional<String16>& featureId, const String8& cameraId, int api1CameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid,
+ int facing, int sensorOrientation, int clientPid, uid_t clientUid, int servicePid,
int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
@@ -1085,7 +1132,8 @@
void broadcastTorchModeStatus(const String8& cameraId,
- hardware::camera::common::V1_0::TorchModeStatus status);
+ hardware::camera::common::V1_0::TorchModeStatus status,
+ SystemCameraKind systemCameraKind);
void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
@@ -1100,7 +1148,7 @@
// Aggregated audio restriction mode for all camera clients
int32_t mAudioRestriction;
- // Current override rotate-and-crop mode
+ // Current rotate-and-crop override set via the shell cmd interface; AUTO means no override
uint8_t mOverrideRotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_AUTO;
// Current image dump mask
@@ -1109,11 +1157,45 @@
// Current camera mute mode
bool mOverrideCameraMuteMode = false;
- // Map from user to sensor privacy policy
- std::map<userid_t, sp<SensorPrivacyPolicy>> mCameraSensorPrivacyPolicies;
+ /**
+ * A listener class that implements the IBinder::DeathRecipient interface.
+ * It is used to report error states from the injected external camera back to
+ * the client, and lets the camera service stop the injection when the binder
+ * signals process death.
+ */
+ class InjectionStatusListener : public virtual IBinder::DeathRecipient {
+ public:
+ InjectionStatusListener(sp<CameraService> parent) : mParent(parent) {}
- // Checks if the sensor privacy is enabled for the uid
- bool isUserSensorPrivacyEnabledForUid(uid_t uid);
+ void addListener(const sp<hardware::camera2::ICameraInjectionCallback>& callback);
+ void removeListener();
+ void notifyInjectionError(int errorCode);
+
+ // IBinder::DeathRecipient implementation
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ Mutex mListenerLock;
+ wp<CameraService> mParent;
+ sp<hardware::camera2::ICameraInjectionCallback> mCameraInjectionCallback;
+ };
+
+ sp<InjectionStatusListener> mInjectionStatusListener;
+
+ /**
+ * A class that implements the hardware::camera2::BnCameraInjectionSession interface
+ */
+ class CameraInjectionSession : public hardware::camera2::BnCameraInjectionSession {
+ public:
+ CameraInjectionSession(sp<CameraService> parent) : mParent(parent) {}
+ virtual ~CameraInjectionSession() {}
+ binder::Status stopInjection() override;
+
+ private:
+ Mutex mInjectionSessionLock;
+ wp<CameraService> mParent;
+ };
+
+ void stopInjectionImpl();
};
} // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 31cfed6..72b3c40 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -55,11 +55,12 @@
const String8& cameraDeviceId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid):
Camera2ClientBase(cameraService, cameraClient, clientPackageName, clientFeatureId,
- cameraDeviceId, api1CameraId, cameraFacing,
+ cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation,
clientPid, clientUid, servicePid),
mParameters(api1CameraId, cameraFacing)
{
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 4d667e3..d16b242 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -101,6 +101,7 @@
const String8& cameraDeviceId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1b65d1a..343f4a7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -61,6 +61,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid) :
@@ -70,6 +71,7 @@
clientFeatureId,
cameraId,
cameraFacing,
+ sensorOrientation,
clientPid,
clientUid,
servicePid),
@@ -86,12 +88,13 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid) :
Camera2ClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
cameraId, /*API1 camera ID*/ -1,
- cameraFacing, clientPid, clientUid, servicePid),
+ cameraFacing, sensorOrientation, clientPid, clientUid, servicePid),
mInputStream(),
mStreamingRequestId(REQUEST_ID_NONE),
mRequestIdCounter(0) {
@@ -1711,7 +1714,8 @@
if (offlineSession.get() != nullptr) {
offlineClient = new CameraOfflineSessionClient(sCameraService,
offlineSession, offlineCompositeStreamMap, cameraCb, mClientPackageName,
- mClientFeatureId, mCameraIdStr, mCameraFacing, mClientPid, mClientUid, mServicePid);
+ mClientFeatureId, mCameraIdStr, mCameraFacing, mOrientation, mClientPid, mClientUid,
+ mServicePid);
ret = sCameraService->addOfflineClient(mCameraIdStr, offlineClient);
}
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index adedf92..44ffeef 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -55,6 +55,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -181,6 +182,7 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 6765c3b..ef15f2d 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -275,12 +275,17 @@
}
}
+status_t CameraOfflineSessionClient::notifyActive() {
+ return startCameraStreamingOps();
+}
+
void CameraOfflineSessionClient::notifyIdle(
int64_t /*requestCount*/, int64_t /*resultErrorCount*/, bool /*deviceError*/,
const std::vector<hardware::CameraStreamStats>& /*streamStats*/) {
if (mRemoteCallback.get() != nullptr) {
mRemoteCallback->onDeviceIdle();
}
+ finishCameraStreamingOps();
}
void CameraOfflineSessionClient::notifyAutoFocus(uint8_t newState, int triggerId) {
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 5c5fcda..b219a4c 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -49,13 +49,13 @@
const sp<ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
const std::optional<String16>& clientFeatureId,
- const String8& cameraIdStr, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid, int servicePid) :
CameraService::BasicClient(
cameraService,
IInterface::asBinder(remoteCallback),
clientPackageName, clientFeatureId,
- cameraIdStr, cameraFacing, clientPid, clientUid, servicePid),
+ cameraIdStr, cameraFacing, sensorOrientation, clientPid, clientUid, servicePid),
mRemoteCallback(remoteCallback), mOfflineSession(session),
mCompositeStreamMap(offlineCompositeStreamMap) {}
@@ -89,6 +89,7 @@
// NotificationListener API
void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras) override;
void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) override;
+ status_t notifyActive() override;
void notifyIdle(int64_t requestCount, int64_t resultErrorCount, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats) override;
void notifyAutoFocus(uint8_t newState, int triggerId) override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 6fd8d45..ce479a1 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -51,11 +51,13 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid):
TClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
- cameraId, api1CameraId, cameraFacing, clientPid, clientUid, servicePid),
+ cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid, clientUid,
+ servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
mDevice(new Camera3Device(cameraId)),
@@ -248,10 +250,32 @@
}
template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::notifyActive() {
+ if (!mDeviceActive) {
+ status_t res = TClientBase::startCameraStreamingOps();
+ if (res != OK) {
+ ALOGE("%s: Camera %s: Error starting camera streaming ops: %d", __FUNCTION__,
+ TClientBase::mCameraIdStr.string(), res);
+ return res;
+ }
+ CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr);
+ }
+ mDeviceActive = true;
+
+ ALOGV("Camera device is now active");
+ return OK;
+}
+
+template <typename TClientBase>
void Camera2ClientBase<TClientBase>::notifyIdle(
int64_t requestCount, int64_t resultErrorCount, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats) {
if (mDeviceActive) {
+ status_t res = TClientBase::finishCameraStreamingOps();
+ if (res != OK) {
+ ALOGE("%s: Camera %s: Error finishing streaming ops: %d", __FUNCTION__,
+ TClientBase::mCameraIdStr.string(), res);
+ }
CameraServiceProxyWrapper::logIdle(TClientBase::mCameraIdStr,
requestCount, resultErrorCount, deviceError, streamStats);
}
@@ -266,11 +290,6 @@
(void)resultExtras;
(void)timestamp;
- if (!mDeviceActive) {
- CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr);
- }
- mDeviceActive = true;
-
ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
__FUNCTION__, resultExtras.requestId, timestamp);
}
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 1ce4393..b3a38a2 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -52,6 +52,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -66,6 +67,7 @@
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
+ virtual status_t notifyActive(); // Returns errors on app ops permission failures
virtual void notifyIdle(int64_t requestCount, int64_t resultErrorCount,
bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats);
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index e02e146..54e42a6 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,10 +40,11 @@
// Required for API 1 and 2
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras &resultExtras) = 0;
-
- // Required only for API2
+ virtual status_t notifyActive() = 0; // May return an error since it checks appops
virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats) = 0;
+
+ // Required only for API2
virtual void notifyShutter(const CaptureResultExtras &resultExtras,
nsecs_t timestamp) = 0;
virtual void notifyPrepared(int streamId) = 0;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 62fc18f..6dffc5d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -474,12 +474,12 @@
hardware::Return<void> CameraProviderManager::onRegistration(
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
- bool /*preexisting*/) {
+ bool preexisting) {
std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- addProviderLocked(name);
+ addProviderLocked(name, preexisting);
}
sp<StatusListener> listener = getStatusListener();
@@ -1230,31 +1230,53 @@
return falseRet;
}
-status_t CameraProviderManager::addProviderLocked(const std::string& newProvider) {
- for (const auto& providerInfo : mProviders) {
- if (providerInfo->mProviderName == newProvider) {
- ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
- newProvider.c_str());
- return ALREADY_EXISTS;
- }
- }
-
+status_t CameraProviderManager::tryToInitializeProviderLocked(
+ const std::string& providerName, const sp<ProviderInfo>& providerInfo) {
sp<provider::V2_4::ICameraProvider> interface;
- interface = mServiceProxy->tryGetService(newProvider);
+ interface = mServiceProxy->tryGetService(providerName);
if (interface == nullptr) {
- ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
- newProvider.c_str());
+ // The interface may not be started yet. In that case, this is not a
+ // fatal error.
+ ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+ providerName.c_str());
return BAD_VALUE;
}
- sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
- status_t res = providerInfo->initialize(interface, mDeviceState);
- if (res != OK) {
- return res;
+ return providerInfo->initialize(interface, mDeviceState);
+}
+
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider,
+ bool preexisting) {
+ // Several camera provider instances can be temporarily present.
+ // Defer initialization of a new instance until the older instance is properly removed.
+ auto providerInstance = newProvider + "-" + std::to_string(mProviderInstanceId);
+ bool providerPresent = false;
+ for (const auto& providerInfo : mProviders) {
+ if (providerInfo->mProviderName == newProvider) {
+ ALOGW("%s: Camera provider HAL with name '%s' already registered",
+ __FUNCTION__, newProvider.c_str());
+ if (preexisting) {
+ return ALREADY_EXISTS;
+ } else {
+ ALOGW("%s: The new provider instance will get initialized immediately after the"
+ " currently present instance is removed!", __FUNCTION__);
+ providerPresent = true;
+ break;
+ }
+ }
+ }
+
+ sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, providerInstance, this);
+ if (!providerPresent) {
+ status_t res = tryToInitializeProviderLocked(newProvider, providerInfo);
+ if (res != OK) {
+ return res;
+ }
}
mProviders.push_back(providerInfo);
+ mProviderInstanceId++;
return OK;
}
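addProviderLocked() now tags each registration with a unique "<name>-<instanceId>" string and defers initialization when an older instance with the same provider name is still registered; removeProvider() then initializes the pending instance. A self-contained sketch of that bookkeeping, using hypothetical types:

```cpp
// Sketch of the deferred-initialization bookkeeping (assumption: hypothetical types).
#include <cstdio>
#include <string>
#include <vector>

struct ProviderEntry {
    std::string name;       // e.g. "legacy/0"
    std::string instance;   // e.g. "legacy/0-1"
    bool initialized = false;
};

class ProviderTableModel {
  public:
    void addProvider(const std::string& name, bool preexisting) {
        bool alreadyPresent = false;
        for (const auto& p : mProviders) {
            if (p.name == name) {
                if (preexisting) return;        // ALREADY_EXISTS in the real code
                alreadyPresent = true;          // defer init until the old instance goes away
                break;
            }
        }
        ProviderEntry entry;
        entry.name = name;
        entry.instance = name + "-" + std::to_string(mNextInstanceId++);
        entry.initialized = !alreadyPresent;    // tryToInitializeProviderLocked() when not deferred
        mProviders.push_back(entry);
    }
    void removeProvider(const std::string& instance) {
        std::string removedName;
        for (auto it = mProviders.begin(); it != mProviders.end(); ++it) {
            if (it->instance == instance) {
                removedName = it->name;
                mProviders.erase(it);
                break;
            }
        }
        // Initialize a deferred instance of the same provider, if one is pending.
        for (auto& p : mProviders) {
            if (p.name == removedName && !p.initialized) {
                p.initialized = true;
                break;
            }
        }
    }
    void dump() const {
        for (const auto& p : mProviders) {
            std::printf("%s initialized=%d\n", p.instance.c_str(), p.initialized);
        }
    }
  private:
    std::vector<ProviderEntry> mProviders;
    size_t mNextInstanceId = 0;
};

int main() {
    ProviderTableModel table;
    table.addProvider("legacy/0", /*preexisting*/ false);   // instance "legacy/0-0", initialized
    table.addProvider("legacy/0", /*preexisting*/ false);   // duplicate registration, deferred
    table.removeProvider("legacy/0-0");                     // death of the old instance
    table.dump();                                           // prints "legacy/0-1 initialized=1"
}
```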
@@ -1264,12 +1286,14 @@
std::unique_lock<std::mutex> lock(mInterfaceMutex);
std::vector<String8> removedDeviceIds;
status_t res = NAME_NOT_FOUND;
+ std::string removedProviderName;
for (auto it = mProviders.begin(); it != mProviders.end(); it++) {
- if ((*it)->mProviderName == provider) {
+ if ((*it)->mProviderInstance == provider) {
removedDeviceIds.reserve((*it)->mDevices.size());
for (auto& deviceInfo : (*it)->mDevices) {
removedDeviceIds.push_back(String8(deviceInfo->mId.c_str()));
}
+ removedProviderName = (*it)->mProviderName;
mProviders.erase(it);
res = OK;
break;
@@ -1279,6 +1303,14 @@
ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
provider.c_str());
} else {
+ // Check if there are any newer camera instances from the same provider and try to
+ // initialize.
+ for (const auto& providerInfo : mProviders) {
+ if (providerInfo->mProviderName == removedProviderName) {
+ return tryToInitializeProviderLocked(removedProviderName, providerInfo);
+ }
+ }
+
// Inform camera service of loss of presence for all the devices from this provider,
// without lock held for reentrancy
sp<StatusListener> listener = getStatusListener();
@@ -1287,7 +1319,9 @@
for (auto& id : removedDeviceIds) {
listener->onDeviceStatusChanged(id, CameraDeviceStatus::NOT_PRESENT);
}
+ lock.lock();
}
+
}
return res;
}
@@ -1301,8 +1335,10 @@
CameraProviderManager::ProviderInfo::ProviderInfo(
const std::string &providerName,
+ const std::string &providerInstance,
CameraProviderManager *manager) :
mProviderName(providerName),
+ mProviderInstance(providerInstance),
mProviderTagid(generateVendorTagId(providerName)),
mUniqueDeviceCount(0),
mManager(manager) {
@@ -1626,7 +1662,7 @@
status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
- mProviderName.c_str(),
+ mProviderInstance.c_str(),
mMinorVersion,
mIsRemote ? "remote" : "passthrough",
mDevices.size());
@@ -1942,12 +1978,12 @@
void CameraProviderManager::ProviderInfo::serviceDied(uint64_t cookie,
const wp<hidl::base::V1_0::IBase>& who) {
(void) who;
- ALOGI("Camera provider '%s' has died; removing it", mProviderName.c_str());
+ ALOGI("Camera provider '%s' has died; removing it", mProviderInstance.c_str());
if (cookie != mId) {
ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
__FUNCTION__, cookie, mId);
}
- mManager->removeProvider(mProviderName);
+ mManager->removeProvider(mProviderInstance);
}
status_t CameraProviderManager::ProviderInfo::setUpVendorTags() {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 12bda9b..5531dd7 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -365,6 +365,7 @@
virtual public hardware::hidl_death_recipient
{
const std::string mProviderName;
+ const std::string mProviderInstance;
const metadata_vendor_id_t mProviderTagid;
int mMinorVersion;
sp<VendorTagDescriptor> mVendorTagDescriptor;
@@ -379,7 +380,7 @@
sp<hardware::camera::provider::V2_4::ICameraProvider> mSavedInterface;
- ProviderInfo(const std::string &providerName,
+ ProviderInfo(const std::string &providerName, const std::string &providerInstance,
CameraProviderManager *manager);
~ProviderInfo();
@@ -657,7 +658,10 @@
hardware::hidl_version minVersion = hardware::hidl_version{0,0},
hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
- status_t addProviderLocked(const std::string& newProvider);
+ status_t addProviderLocked(const std::string& newProvider, bool preexisting = false);
+
+ status_t tryToInitializeProviderLocked(const std::string& providerName,
+ const sp<ProviderInfo>& providerInfo);
bool isLogicalCameraLocked(const std::string& id, std::vector<std::string>* physicalCameraIds);
@@ -666,6 +670,7 @@
bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
+ size_t mProviderInstanceId = 0;
std::vector<sp<ProviderInfo>> mProviders;
void addProviderToMap(
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index bf7e597..d93b9e5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2189,31 +2189,40 @@
std::lock_guard<std::mutex> l(mOutputLock);
listener = mListener.promote();
}
- if (idle && listener != NULL) {
- // Get session stats from the builder, and notify the listener.
- int64_t requestCount, resultErrorCount;
- bool deviceError;
- std::map<int, StreamStats> streamStatsMap;
- mSessionStatsBuilder.buildAndReset(&requestCount, &resultErrorCount,
- &deviceError, &streamStatsMap);
- for (size_t i = 0; i < streamIds.size(); i++) {
- int streamId = streamIds[i];
- auto stats = streamStatsMap.find(streamId);
- if (stats != streamStatsMap.end()) {
- streamStats[i].mRequestCount = stats->second.mRequestedFrameCount;
- streamStats[i].mErrorCount = stats->second.mDroppedFrameCount;
- streamStats[i].mStartLatencyMs = stats->second.mStartLatencyMs;
- streamStats[i].mHistogramType =
- hardware::CameraStreamStats::HISTOGRAM_TYPE_CAPTURE_LATENCY;
- streamStats[i].mHistogramBins.assign(
- stats->second.mCaptureLatencyBins.begin(),
- stats->second.mCaptureLatencyBins.end());
- streamStats[i].mHistogramCounts.assign(
- stats->second.mCaptureLatencyHistogram.begin(),
- stats->second.mCaptureLatencyHistogram.end());
+ status_t res = OK;
+ if (listener != nullptr) {
+ if (idle) {
+ // Get session stats from the builder, and notify the listener.
+ int64_t requestCount, resultErrorCount;
+ bool deviceError;
+ std::map<int, StreamStats> streamStatsMap;
+ mSessionStatsBuilder.buildAndReset(&requestCount, &resultErrorCount,
+ &deviceError, &streamStatsMap);
+ for (size_t i = 0; i < streamIds.size(); i++) {
+ int streamId = streamIds[i];
+ auto stats = streamStatsMap.find(streamId);
+ if (stats != streamStatsMap.end()) {
+ streamStats[i].mRequestCount = stats->second.mRequestedFrameCount;
+ streamStats[i].mErrorCount = stats->second.mDroppedFrameCount;
+ streamStats[i].mStartLatencyMs = stats->second.mStartLatencyMs;
+ streamStats[i].mHistogramType =
+ hardware::CameraStreamStats::HISTOGRAM_TYPE_CAPTURE_LATENCY;
+ streamStats[i].mHistogramBins.assign(
+ stats->second.mCaptureLatencyBins.begin(),
+ stats->second.mCaptureLatencyBins.end());
+ streamStats[i].mHistogramCounts.assign(
+ stats->second.mCaptureLatencyHistogram.begin(),
+ stats->second.mCaptureLatencyHistogram.end());
+ }
}
+ listener->notifyIdle(requestCount, resultErrorCount, deviceError, streamStats);
+ } else {
+ res = listener->notifyActive();
}
- listener->notifyIdle(requestCount, resultErrorCount, deviceError, streamStats);
+ }
+ if (res != OK) {
+ SET_ERR("Camera access permission lost mid-operation: %s (%d)",
+ strerror(-res), res);
}
}
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index c7d7c4b..3d74f0b 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -44,6 +44,7 @@
"libcutils",
"libcameraservice",
"libcamera_client",
+ "liblog",
"libui",
"libgui",
"android.hardware.camera.common@1.0",
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index 54550a5..985b2f8 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -18,8 +18,18 @@
* Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
*/
+#define LOG_TAG "CameraServiceFuzzer"
+//#define LOG_NDEBUG 0
+
#include <CameraService.h>
+#include <device3/Camera3StreamInterface.h>
+#include <android/hardware/BnCameraServiceListener.h>
+#include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
#include <android/hardware/ICameraServiceListener.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include <gui/BufferItemConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
#include <private/android_filesystem_config.h>
@@ -30,6 +40,7 @@
using namespace std;
const int32_t kPreviewThreshold = 8;
+const int32_t kNumRequestsTested = 8;
const nsecs_t kPreviewTimeout = 5000000000; // 5 [s.]
const nsecs_t kEventTimeout = 10000000000; // 10 [s.]
const size_t kMaxNumLines = USHRT_MAX;
@@ -39,6 +50,23 @@
hardware::ICameraService::CAMERA_TYPE_ALL};
const int kCameraApiVersion[] = {android::CameraService::API_VERSION_1,
android::CameraService::API_VERSION_2};
+const uint8_t kSensorPixelModes[] = {ANDROID_SENSOR_PIXEL_MODE_DEFAULT,
+ ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION};
+const int32_t kRequestTemplates[] = {
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_STILL_CAPTURE,
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_RECORD,
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_VIDEO_SNAPSHOT,
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_MANUAL,
+ hardware::camera2::ICameraDeviceUser::TEMPLATE_ZERO_SHUTTER_LAG
+};
+
+const int32_t kRotations[] = {
+ camera3::CAMERA_STREAM_ROTATION_0,
+ camera3::CAMERA_STREAM_ROTATION_90,
+ camera3::CAMERA_STREAM_ROTATION_270
+};
+
const int kLayerMetadata[] = {
0x00100000 /*GRALLOC_USAGE_RENDERSCRIPT*/, 0x00000003 /*GRALLOC_USAGE_SW_READ_OFTEN*/,
0x00000100 /*GRALLOC_USAGE_HW_TEXTURE*/, 0x00000800 /*GRALLOC_USAGE_HW_COMPOSER*/,
@@ -69,15 +97,15 @@
class CameraFuzzer : public ::android::hardware::BnCameraClient {
public:
- CameraFuzzer() = default;
+ CameraFuzzer(sp<CameraService> cs, std::shared_ptr<FuzzedDataProvider> fp) :
+ mCameraService(cs), mFuzzedDataProvider(fp) {};
~CameraFuzzer() { deInit(); }
- bool init();
- void process(const uint8_t *data, size_t size);
+ void process();
void deInit();
private:
- FuzzedDataProvider *mFuzzedDataProvider = nullptr;
sp<CameraService> mCameraService = nullptr;
+ std::shared_ptr<FuzzedDataProvider> mFuzzedDataProvider = nullptr;
sp<SurfaceComposerClient> mComposerClient = nullptr;
int32_t mNumCameras = 0;
size_t mPreviewBufferCount = 0;
@@ -167,19 +195,7 @@
return rc;
}
-bool CameraFuzzer::init() {
- setuid(AID_MEDIA);
- mCameraService = new CameraService();
- if (mCameraService) {
- return true;
- }
- return false;
-}
-
void CameraFuzzer::deInit() {
- if (mCameraService) {
- mCameraService = nullptr;
- }
if (mComposerClient) {
mComposerClient->dispose();
}
@@ -298,12 +314,12 @@
for (int32_t cameraId = 0; cameraId < mNumCameras; ++cameraId) {
getCameraInformation(cameraId);
- const String16 opPackageName("com.fuzzer.poc");
::android::binder::Status rc;
sp<ICamera> cameraDevice;
- rc = mCameraService->connect(this, cameraId, opPackageName, AID_MEDIA, AID_ROOT,
- &cameraDevice);
+ rc = mCameraService->connect(this, cameraId, String16(),
+ android::CameraService::USE_CALLING_UID, android::CameraService::USE_CALLING_PID,
+ &cameraDevice);
if (!rc.isOk()) {
// camera not connected
return;
@@ -405,8 +421,7 @@
}
}
-void CameraFuzzer::process(const uint8_t *data, size_t size) {
- mFuzzedDataProvider = new FuzzedDataProvider(data, size);
+void CameraFuzzer::process() {
getNumCameras();
invokeCameraSound();
if (mNumCameras > 0) {
@@ -415,19 +430,169 @@
invokeDump();
invokeShellCommand();
invokeNotifyCalls();
- delete mFuzzedDataProvider;
+}
+
+class TestCameraServiceListener : public hardware::BnCameraServiceListener {
+public:
+ virtual ~TestCameraServiceListener() {};
+
+ virtual binder::Status onStatusChanged(int32_t , const String16&) {
+ return binder::Status::ok();
+ };
+
+ virtual binder::Status onPhysicalCameraStatusChanged(int32_t /*status*/,
+ const String16& /*cameraId*/, const String16& /*physicalCameraId*/) {
+ // No op
+ return binder::Status::ok();
+ };
+
+ virtual binder::Status onTorchStatusChanged(int32_t /*status*/, const String16& /*cameraId*/) {
+ return binder::Status::ok();
+ };
+
+ virtual binder::Status onCameraAccessPrioritiesChanged() {
+ // No op
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onCameraOpened(const String16& /*cameraId*/,
+ const String16& /*clientPackageName*/) {
+ // No op
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onCameraClosed(const String16& /*cameraId*/) {
+ // No op
+ return binder::Status::ok();
+ }
+};
+
+class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
+public:
+ TestCameraDeviceCallbacks() {}
+
+ virtual ~TestCameraDeviceCallbacks() {}
+
+ virtual binder::Status onDeviceError(int /*errorCode*/,
+ const CaptureResultExtras& /*resultExtras*/) {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onDeviceIdle() {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onCaptureStarted(const CaptureResultExtras& /*resultExtras*/,
+ int64_t /*timestamp*/) {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onResultReceived(const CameraMetadata& /*metadata*/,
+ const CaptureResultExtras& /*resultExtras*/,
+ const std::vector<PhysicalCaptureResultInfo>& /*physicalResultInfos*/) {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onPrepared(int /*streamId*/) {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onRepeatingRequestError(
+ int64_t /*lastFrameNumber*/, int32_t /*stoppedSequenceId*/) {
+ return binder::Status::ok();
+ }
+
+ virtual binder::Status onRequestQueueEmpty() {
+ return binder::Status::ok();
+ }
+};
+
+class Camera2Fuzzer {
+ public:
+ Camera2Fuzzer(sp<CameraService> cs, std::shared_ptr<FuzzedDataProvider> fp) :
+ mCameraService(cs), mFuzzedDataProvider(fp) { };
+ ~Camera2Fuzzer() {}
+ void process();
+ private:
+ sp<CameraService> mCameraService = nullptr;
+ std::shared_ptr<FuzzedDataProvider> mFuzzedDataProvider = nullptr;
+};
+
+void Camera2Fuzzer::process() {
+ sp<TestCameraServiceListener> listener = new TestCameraServiceListener();
+ std::vector<hardware::CameraStatus> statuses;
+ mCameraService->addListenerTest(listener, &statuses);
+ for (auto s : statuses) {
+ sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
+ sp<hardware::camera2::ICameraDeviceUser> device;
+ mCameraService->connectDevice(callbacks, String16(s.cameraId), String16(), {},
+ android::CameraService::USE_CALLING_UID, &device);
+ if (device == nullptr) {
+ continue;
+ }
+ device->beginConfigure();
+ sp<IGraphicBufferProducer> gbProducer;
+ sp<IGraphicBufferConsumer> gbConsumer;
+ BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
+ sp<BufferItemConsumer> opaqueConsumer = new BufferItemConsumer(gbConsumer,
+ GRALLOC_USAGE_SW_READ_NEVER, /*maxImages*/8, /*controlledByApp*/true);
+ opaqueConsumer->setName(String8("Roger"));
+
+ // Set to VGA dimension for default, as that is guaranteed to be present
+ gbConsumer->setDefaultBufferSize(640, 480);
+ gbConsumer->setDefaultBufferFormat(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+
+ sp<Surface> surface(new Surface(gbProducer, /*controlledByApp*/false));
+
+ String16 noPhysicalId;
+ size_t rotations = sizeof(kRotations) / sizeof(int32_t) - 1;
+ OutputConfiguration output(gbProducer,
+ kRotations[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, rotations)],
+ noPhysicalId);
+ int streamId;
+ device->createStream(output, &streamId);
+ CameraMetadata sessionParams;
+ std::vector<int> offlineStreamIds;
+ device->endConfigure(/*isConstrainedHighSpeed*/ mFuzzedDataProvider->ConsumeBool(),
+ sessionParams, ns2ms(systemTime()), &offlineStreamIds);
+
+ CameraMetadata requestTemplate;
+ size_t requestTemplatesSize = sizeof(kRequestTemplates) /sizeof(int32_t) - 1;
+ device->createDefaultRequest(kRequestTemplates[
+ mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, requestTemplatesSize)],
+ /*out*/&requestTemplate);
+ hardware::camera2::CaptureRequest request;
+ request.mSurfaceList.add(surface);
+ request.mIsReprocess = false;
+ hardware::camera2::utils::SubmitInfo info;
+ for (int i = 0; i < kNumRequestsTested; i++) {
+ uint8_t sensorPixelMode =
+ kSensorPixelModes[mFuzzedDataProvider->ConsumeBool() ? 1 : 0];
+ requestTemplate.update(ANDROID_SENSOR_PIXEL_MODE, &sensorPixelMode, 1);
+ request.mPhysicalCameraSettings.clear();
+ request.mPhysicalCameraSettings.push_back({s.cameraId.string(), requestTemplate});
+ device->submitRequest(request, /*streaming*/false, /*out*/&info);
+ ALOGV("%s : camera id %s submit request id %d",__FUNCTION__, s.cameraId.string(),
+ info.mRequestId);
+ }
+ device->disconnect();
+ }
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
if (size < 1) {
return 0;
}
- sp<CameraFuzzer> camerafuzzer = new CameraFuzzer();
+ setuid(AID_CAMERASERVER);
+ std::shared_ptr<FuzzedDataProvider> fp = std::make_shared<FuzzedDataProvider>(data, size);
+ sp<CameraService> cs = new CameraService();
+ cs->clearCachedVariables();
+ sp<CameraFuzzer> camerafuzzer = new CameraFuzzer(cs, fp);
if (!camerafuzzer) {
return 0;
}
- if (camerafuzzer->init()) {
- camerafuzzer->process(data, size);
- }
+ camerafuzzer->process();
+ Camera2Fuzzer camera2fuzzer(cs, fp);
+ camera2fuzzer.process();
return 0;
}
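The fuzzer now constructs CameraService once and hands a shared FuzzedDataProvider to both fuzz modules, so they consume from the same input stream. A generic, minimal harness illustrating that sharing pattern (not the camera fuzzer itself; module names are hypothetical):

```cpp
// Generic illustration of sharing one FuzzedDataProvider across fuzz modules.
#include <fuzzer/FuzzedDataProvider.h>
#include <memory>

struct ModuleA { void process(FuzzedDataProvider& fdp) { (void)fdp.ConsumeBool(); } };
struct ModuleB { void process(FuzzedDataProvider& fdp) { (void)fdp.ConsumeIntegralInRange<int>(0, 7); } };

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1) return 0;
    auto fdp = std::make_shared<FuzzedDataProvider>(data, size);
    ModuleA a;
    ModuleB b;
    a.process(*fdp);   // both modules consume from the same provider,
    b.process(*fdp);   // so one corpus exercises both API surfaces
    return 0;
}
```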
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index 855b5ab..a74fd9d 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -23,7 +23,9 @@
#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
#include <camera_metadata_hidden.h>
+#include <hidl/HidlBinderSupport.h>
#include <gtest/gtest.h>
+#include <utility>
using namespace android;
using namespace android::hardware::camera;
@@ -173,6 +175,25 @@
return hardware::Void();
}
+ virtual ::android::hardware::Return<bool> linkToDeath(
+ const ::android::sp<::android::hardware::hidl_death_recipient>& recipient,
+ uint64_t cookie) {
+ if (mInitialDeathRecipient.get() == nullptr) {
+ mInitialDeathRecipient =
+ std::make_unique<::android::hardware::hidl_binder_death_recipient>(recipient,
+ cookie, this);
+ }
+ return true;
+ }
+
+ void signalInitialBinderDeathRecipient() {
+ if (mInitialDeathRecipient.get() != nullptr) {
+ mInitialDeathRecipient->binderDied(nullptr /*who*/);
+ }
+ }
+
+ std::unique_ptr<::android::hardware::hidl_binder_death_recipient> mInitialDeathRecipient;
+
enum MethodNames {
SET_CALLBACK,
GET_VENDOR_TAGS,
@@ -567,3 +588,47 @@
ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(), testProviderInstanceName) <<
"Incorrect instance requested from service manager";
}
+
+// Test that CameraProviderManager can handle races between provider death notifications and
+// provider registration callbacks
+TEST(CameraProviderManagerTest, BinderDeathRegistrationRaceTest) {
+
+ std::vector<hardware::hidl_string> deviceNames;
+ deviceNames.push_back("device@3.2/test/0");
+ deviceNames.push_back("device@3.2/test/1");
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ status_t res;
+
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+
+ // Not setting up provider in the service proxy yet, to test cases where a
+ // HAL isn't starting right
+ res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ // Now set up provider and trigger a registration
+ serviceProxy.setProvider(provider);
+
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ // Simulate artificial delay of the registration callback which arrives before the
+ // death notification
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ provider->signalInitialBinderDeathRecipient();
+
+ auto deviceCount = static_cast<unsigned> (providerManager->getCameraCount().second);
+ ASSERT_EQ(deviceCount, deviceNames.size()) <<
+ "Unexpected amount of camera devices";
+}
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 4ef87e4..dbc68b2 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -140,7 +140,6 @@
static constexpr const char * const AAudioStreamFields[] {
"mediametrics_aaudiostream_reported",
- "caller_name",
"path",
"direction",
"frames_per_burst",
@@ -156,6 +155,8 @@
"format_app",
"format_device",
"log_session_id",
+ "sample_rate",
+ "content_type",
};
/**
@@ -206,8 +207,10 @@
return { result, ss.str() };
}
-AudioAnalytics::AudioAnalytics()
+AudioAnalytics::AudioAnalytics(const std::shared_ptr<StatsdLog>& statsdLog)
: mDeliverStatistics(property_get_bool(PROP_AUDIO_ANALYTICS_CLOUD_ENABLED, true))
+ , mStatsdLog(statsdLog)
+ , mAudioPowerUsage(this, statsdLog)
{
SetMinimumLogSeverity(android::base::DEBUG); // for LOG().
ALOGD("%s", __func__);
@@ -242,7 +245,7 @@
});
}));
- // Handle legacy aaudio stream statistics
+ // Handle legacy aaudio playback stream statistics
mActions.addAction(
AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "*." AMEDIAMETRICS_PROP_EVENT,
std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
@@ -251,6 +254,15 @@
mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_LEGACY);
}));
+ // Handle legacy aaudio capture stream statistics
+ mActions.addAction(
+ AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "*." AMEDIAMETRICS_PROP_EVENT,
+ std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
+ std::make_shared<AnalyticsActions::Function>(
+ [this](const std::shared_ptr<const android::mediametrics::Item> &item) {
+ mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_LEGACY);
+ }));
+
// Handle mmap aaudio stream statistics
mActions.addAction(
AMEDIAMETRICS_KEY_PREFIX_AUDIO_STREAM "*." AMEDIAMETRICS_PROP_EVENT,
@@ -406,20 +418,6 @@
ll -= l;
}
- if (ll > 0) {
- // Print the statsd atoms we sent out.
- const std::string statsd = mStatsdLog.dumpToString(" " /* prefix */, ll - 1);
- const size_t n = std::count(statsd.begin(), statsd.end(), '\n') + 1; // we control this.
- if ((size_t)ll >= n) {
- if (n == 1) {
- ss << "Statsd atoms: empty or truncated\n";
- } else {
- ss << "Statsd atoms:\n" << statsd;
- }
- ll -= (int32_t)n;
- }
- }
-
if (ll > 0 && prefix == nullptr) {
auto [s, l] = mAudioPowerUsage.dump(ll);
ss << s;
@@ -601,7 +599,8 @@
, logSessionIdForStats.c_str()
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED, str);
}
} break;
case THREAD: {
@@ -649,7 +648,8 @@
, ENUM_EXTRACT(typeForStats)
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED, str);
}
} break;
case TRACK: {
@@ -769,7 +769,8 @@
, logSessionIdForStats.c_str()
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED, str);
}
} break;
}
@@ -845,7 +846,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
}
}
@@ -898,7 +900,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
return;
}
@@ -924,7 +927,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
}
@@ -932,12 +936,6 @@
const std::shared_ptr<const android::mediametrics::Item> &item, CallerPath path) const {
const std::string& key = item->getKey();
- std::string callerNameStr;
- mAudioAnalytics.mAnalyticsState->timeMachine().get(
- key, AMEDIAMETRICS_PROP_CALLERNAME, &callerNameStr);
-
- const auto callerName = types::lookup<types::CALLER_NAME, int32_t>(callerNameStr);
-
std::string directionStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
key, AMEDIAMETRICS_PROP_DIRECTION, &directionStr);
@@ -960,7 +958,8 @@
key, AMEDIAMETRICS_PROP_CHANNELCOUNT, &channelCount);
int64_t totalFramesTransferred = -1;
- // TODO: log and get total frames transferred
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_FRAMESTRANSFERRED, &totalFramesTransferred);
std::string perfModeRequestedStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -968,8 +967,11 @@
const auto perfModeRequested =
types::lookup<types::AAUDIO_PERFORMANCE_MODE, int32_t>(perfModeRequestedStr);
- int32_t perfModeActual = 0;
- // TODO: log and get actual performance mode
+ std::string perfModeActualStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL, &perfModeActualStr);
+ const auto perfModeActual =
+ types::lookup<types::AAUDIO_PERFORMANCE_MODE, int32_t>(perfModeActualStr);
std::string sharingModeStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -983,8 +985,10 @@
std::string serializedDeviceTypes;
// TODO: only routed device id is logged, but no device type
- int32_t formatApp = 0;
- // TODO: log format from app
+ std::string formatAppStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_ENCODINGREQUESTED, &formatAppStr);
+ const auto formatApp = types::lookup<types::ENCODING, int32_t>(formatAppStr);
std::string formatDeviceStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -992,10 +996,19 @@
const auto formatDevice = types::lookup<types::ENCODING, int32_t>(formatDeviceStr);
std::string logSessionId;
- // TODO: log logSessionId
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_LOGSESSIONID, &logSessionId);
+
+ int32_t sampleRate = 0;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_SAMPLERATE, &sampleRate);
+
+ std::string contentTypeStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_CONTENTTYPE, &contentTypeStr);
+ const auto contentType = types::lookup<types::CONTENT_TYPE, int32_t>(contentTypeStr);
LOG(LOG_LEVEL) << "key:" << key
- << " caller_name:" << callerName << "(" << callerNameStr << ")"
<< " path:" << path
<< " direction:" << direction << "(" << directionStr << ")"
<< " frames_per_burst:" << framesPerBurst
@@ -1004,20 +1017,21 @@
<< " channel_count:" << channelCount
<< " total_frames_transferred:" << totalFramesTransferred
<< " perf_mode_requested:" << perfModeRequested << "(" << perfModeRequestedStr << ")"
- << " perf_mode_actual:" << perfModeActual
+ << " perf_mode_actual:" << perfModeActual << "(" << perfModeActualStr << ")"
<< " sharing:" << sharingMode << "(" << sharingModeStr << ")"
<< " xrun_count:" << xrunCount
<< " device_type:" << serializedDeviceTypes
- << " format_app:" << formatApp
+ << " format_app:" << formatApp << "(" << formatAppStr << ")"
<< " format_device: " << formatDevice << "(" << formatDeviceStr << ")"
- << " log_session_id: " << logSessionId;
+ << " log_session_id: " << logSessionId
+ << " sample_rate: " << sampleRate
+ << " content_type: " << contentType << "(" << contentTypeStr << ")";
if (mAudioAnalytics.mDeliverStatistics) {
android::util::BytesField bf_serialized(
serializedDeviceTypes.c_str(), serializedDeviceTypes.size());
const auto result = sendToStatsd(
CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
- , callerName
, path
, direction
, framesPerBurst
@@ -1033,12 +1047,13 @@
, formatApp
, formatDevice
, logSessionId.c_str()
+ , sampleRate
+ , contentType
);
std::stringstream ss;
ss << "result:" << result;
const auto fieldsStr = printFields(AAudioStreamFields,
CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
- , callerName
, path
, direction
, framesPerBurst
@@ -1054,11 +1069,13 @@
, formatApp
, formatDevice
, logSessionId.c_str()
+ , sampleRate
+ , contentType
);
ss << " " << fieldsStr;
std::string str = ss.str();
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED, str);
}
}
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index 07872ef..2b41a95 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -17,10 +17,10 @@
#pragma once
#include <android-base/thread_annotations.h>
-#include <audio_utils/SimpleLog.h>
#include "AnalyticsActions.h"
#include "AnalyticsState.h"
#include "AudioPowerUsage.h"
+#include "StatsdLog.h"
#include "TimedAction.h"
#include "Wrap.h"
@@ -32,7 +32,7 @@
friend AudioPowerUsage;
public:
- AudioAnalytics();
+ explicit AudioAnalytics(const std::shared_ptr<StatsdLog>& statsdLog);
~AudioAnalytics();
/**
@@ -122,8 +122,7 @@
SharedPtrWrap<AnalyticsState> mPreviousAnalyticsState;
TimedAction mTimedAction; // locked internally
-
- SimpleLog mStatsdLog{16 /* log lines */}; // locked internally
+ const std::shared_ptr<StatsdLog> mStatsdLog; // locked internally, ok for multiple threads.
// DeviceUse is a nested class which handles audio device usage accounting.
// We define this class at the end to ensure prior variables all properly constructed.
@@ -212,7 +211,7 @@
AudioAnalytics &mAudioAnalytics;
} mAAudioStreamInfo{*this};
- AudioPowerUsage mAudioPowerUsage{this};
+ AudioPowerUsage mAudioPowerUsage;
};
} // namespace android::mediametrics
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index e584f12..ab74c8e 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -127,14 +127,13 @@
return deviceMask;
}
-/* static */
-void AudioPowerUsage::sendItem(const std::shared_ptr<const mediametrics::Item>& item)
+void AudioPowerUsage::sendItem(const std::shared_ptr<const mediametrics::Item>& item) const
{
int32_t type;
if (!item->getInt32(AUDIO_POWER_USAGE_PROP_TYPE, &type)) return;
- int32_t device;
- if (!item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &device)) return;
+ int32_t audio_device;
+ if (!item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &audio_device)) return;
int64_t duration_ns;
if (!item->getInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, &duration_ns)) return;
@@ -142,11 +141,24 @@
double volume;
if (!item->getDouble(AUDIO_POWER_USAGE_PROP_VOLUME, &volume)) return;
- (void)android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
- device,
- (int32_t)(duration_ns / NANOS_PER_SECOND),
- (float)volume,
+ const int32_t duration_secs = (int32_t)(duration_ns / NANOS_PER_SECOND);
+ const float average_volume = (float)volume;
+ const int result = android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
+ audio_device,
+ duration_secs,
+ average_volume,
type);
+
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audio_power_usage_data_reported:"
+ << android::util::AUDIO_POWER_USAGE_DATA_REPORTED
+ << " audio_device:" << audio_device
+ << " duration_secs:" << duration_secs
+ << " average_volume:" << average_volume
+ << " type:" << type
+ << " }";
+ mStatsdLog->log(android::util::AUDIO_POWER_USAGE_DATA_REPORTED, log.str());
}
bool AudioPowerUsage::saveAsItem_l(
@@ -360,8 +372,10 @@
mPrimaryDevice = device;
}
-AudioPowerUsage::AudioPowerUsage(AudioAnalytics *audioAnalytics)
+AudioPowerUsage::AudioPowerUsage(
+ AudioAnalytics *audioAnalytics, const std::shared_ptr<StatsdLog>& statsdLog)
: mAudioAnalytics(audioAnalytics)
+ , mStatsdLog(statsdLog)
, mDisabled(property_get_bool(PROP_AUDIO_METRICS_DISABLED, AUDIO_METRICS_DISABLED_DEFAULT))
, mIntervalHours(property_get_int32(PROP_AUDIO_METRICS_INTERVAL_HR, INTERVAL_HR_DEFAULT))
{
diff --git a/services/mediametrics/AudioPowerUsage.h b/services/mediametrics/AudioPowerUsage.h
index b705a6a..7021902 100644
--- a/services/mediametrics/AudioPowerUsage.h
+++ b/services/mediametrics/AudioPowerUsage.h
@@ -22,13 +22,15 @@
#include <mutex>
#include <thread>
+#include "StatsdLog.h"
+
namespace android::mediametrics {
class AudioAnalytics;
class AudioPowerUsage {
public:
- explicit AudioPowerUsage(AudioAnalytics *audioAnalytics);
+ AudioPowerUsage(AudioAnalytics *audioAnalytics, const std::shared_ptr<StatsdLog>& statsdLog);
~AudioPowerUsage();
void checkTrackRecord(const std::shared_ptr<const mediametrics::Item>& item, bool isTrack);
@@ -83,12 +85,13 @@
private:
bool saveAsItem_l(int32_t device, int64_t duration, int32_t type, double average_vol)
REQUIRES(mLock);
- static void sendItem(const std::shared_ptr<const mediametrics::Item>& item);
+ void sendItem(const std::shared_ptr<const mediametrics::Item>& item) const;
void collect();
bool saveAsItems_l(int32_t device, int64_t duration, int32_t type, double average_vol)
REQUIRES(mLock);
AudioAnalytics * const mAudioAnalytics;
+ const std::shared_ptr<StatsdLog> mStatsdLog; // mStatsdLog is internally locked
const bool mDisabled;
const int32_t mIntervalHours;
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 44e96ec..838cdd5 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -77,6 +77,7 @@
{"AUDIO_DEVICE_IN_DEFAULT", 1LL << 28},
// R values above.
{"AUDIO_DEVICE_IN_BLE_HEADSET", 1LL << 29},
+ {"AUDIO_DEVICE_IN_HDMI_EARC", 1LL << 30},
};
return map;
}
@@ -123,7 +124,8 @@
{"AUDIO_DEVICE_OUT_DEFAULT", 1LL << 30},
// R values above.
{"AUDIO_DEVICE_OUT_BLE_HEADSET", 1LL << 31},
- {"AUDIO_DEVICE_OUT_BLE_SPAEKER", 1LL << 32},
+ {"AUDIO_DEVICE_OUT_BLE_SPEAKER", 1LL << 32},
+ {"AUDIO_DEVICE_OUT_HDMI_EARC", 1LL << 33},
};
return map;
}
@@ -158,9 +160,9 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
- {"AAUDIO_DIRECTION_OUTPUT", 0},
- {"AAUDIO_DIRECTION_INPUT", 1},
+ // UNKNOWN is 0
+ {"AAUDIO_DIRECTION_OUTPUT", 1 /* AAUDIO_DIRECTION_OUTPUT + 1 */},
+ {"AAUDIO_DIRECTION_INPUT", 2 /* AAUDIO_DIRECTION_INPUT + 1*/},
};
return map;
}
@@ -169,7 +171,7 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
+ // UNKNOWN is 0
{"AAUDIO_PERFORMANCE_MODE_NONE", 10},
{"AAUDIO_PERFORMANCE_MODE_POWER_SAVING", 11},
{"AAUDIO_PERFORMANCE_MODE_LOW_LATENCY", 12},
@@ -181,9 +183,9 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
- {"AAUDIO_SHARING_MODE_EXCLUSIVE", 0},
- {"AAUDIO_SHARING_MODE_SHARED", 1},
+ // UNKNOWN is 0
+ {"AAUDIO_SHARING_MODE_EXCLUSIVE", 1 /* AAUDIO_SHARING_MODE_EXCLUSIVE + 1 */},
+ {"AAUDIO_SHARING_MODE_SHARED", 2 /* AAUDIO_SHARING_MODE_SHARED + 1 */},
};
return map;
}
@@ -484,7 +486,7 @@
auto& map = getAAudioDirection();
auto it = map.find(direction);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
@@ -506,7 +508,7 @@
auto& map = getAAudioPerformanceMode();
auto it = map.find(performanceMode);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
@@ -528,7 +530,7 @@
auto& map = getAAudioSharingMode();
auto it = map.find(sharingMode);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
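The AudioTypes change above shifts the AAudio direction and sharing-mode maps so that 0, the proto default, always means "unknown", and each named constant reports its AAudio value plus one. A minimal sketch of that convention, assuming a hypothetical lookupOrUnknown helper that is not part of this patch:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // 0 is reserved for "unknown"; named constants map to AAudio value + 1.
    static int32_t lookupOrUnknown(const std::unordered_map<std::string, int32_t>& map,
                                   const std::string& name) {
        const auto it = map.find(name);
        return it == map.end() ? 0 : it->second;   // unknown -> 0 (proto default)
    }
    // lookupOrUnknown(getAAudioDirection(), "AAUDIO_DIRECTION_OUTPUT") == 1
    // lookupOrUnknown(getAAudioDirection(), "no-such-direction")       == 0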
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index bfc722e..1d64878 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -23,6 +23,7 @@
#include <pwd.h> //getpwuid
+#include <android-base/stringprintf.h>
#include <android/content/pm/IPackageManagerNative.h> // package info
#include <audio_utils/clock.h> // clock conversions
#include <binder/IPCThreadState.h> // get calling uid
@@ -37,6 +38,7 @@
namespace android {
+using base::StringPrintf;
using mediametrics::Item;
using mediametrics::startsWith;
@@ -204,21 +206,19 @@
(void)mAudioAnalytics.submit(sitem, isTrusted);
- (void)dump2Statsd(sitem); // failure should be logged in function.
+ (void)dump2Statsd(sitem, mStatsdLog); // failure should be logged in function.
saveItem(sitem);
return NO_ERROR;
}
status_t MediaMetricsService::dump(int fd, const Vector<String16>& args)
{
- String8 result;
-
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- result.appendFormat("Permission Denial: "
+ const std::string result = StringPrintf("Permission Denial: "
"can't dump MediaMetricsService from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
- write(fd, result.string(), result.size());
+ write(fd, result.c_str(), result.size());
return NO_ERROR;
}
@@ -250,17 +250,18 @@
// dumpsys media.metrics audiotrack,codec
// or dumpsys media.metrics audiotrack codec
- result.append("Recognized parameters:\n");
- result.append("--all show all records\n");
- result.append("--clear clear out saved records\n");
- result.append("--heap show heap usage (top 100)\n");
- result.append("--help display help\n");
- result.append("--prefix X process records for component X\n");
- result.append("--since X X < 0: records from -X seconds in the past\n");
- result.append(" X = 0: ignore\n");
- result.append(" X > 0: records from X seconds since Unix epoch\n");
- result.append("--unreachable show unreachable memory (leaks)\n");
- write(fd, result.string(), result.size());
+ static constexpr char result[] =
+ "Recognized parameters:\n"
+ "--all show all records\n"
+ "--clear clear out saved records\n"
+ "--heap show heap usage (top 100)\n"
+ "--help display help\n"
+ "--prefix X process records for component X\n"
+ "--since X X < 0: records from -X seconds in the past\n"
+ " X = 0: ignore\n"
+ " X > 0: records from X seconds since Unix epoch\n"
+ "--unreachable show unreachable memory (leaks)\n";
+ write(fd, result, std::size(result));
return NO_ERROR;
} else if (args[i] == prefixOption) {
++i;
@@ -286,7 +287,7 @@
unreachable = true;
}
}
-
+ std::stringstream result;
{
std::lock_guard _l(mLock);
@@ -295,21 +296,27 @@
mItems.clear();
mAudioAnalytics.clear();
} else {
- result.appendFormat("Dump of the %s process:\n", kServiceName);
+ result << StringPrintf("Dump of the %s process:\n", kServiceName);
const char *prefixptr = prefix.size() > 0 ? prefix.c_str() : nullptr;
- dumpHeaders(result, sinceNs, prefixptr);
- dumpQueue(result, sinceNs, prefixptr);
+ result << dumpHeaders(sinceNs, prefixptr);
+ result << dumpQueue(sinceNs, prefixptr);
// TODO: maybe consider a better way of dumping audio analytics info.
const int32_t linesToDump = all ? INT32_MAX : 1000;
auto [ dumpString, lines ] = mAudioAnalytics.dump(linesToDump, sinceNs, prefixptr);
- result.append(dumpString.c_str());
+ result << dumpString;
if (lines == linesToDump) {
- result.append("-- some lines may be truncated --\n");
+ result << "-- some lines may be truncated --\n";
}
+
+ // Dump the statsd atoms we sent out.
+ result << "Statsd atoms:\n"
+ << mStatsdLog->dumpToString(" " /* prefix */,
+ all ? STATSD_LOG_LINES_MAX : STATSD_LOG_LINES_DUMP);
}
}
- write(fd, result.string(), result.size());
+ const std::string str = result.str();
+ write(fd, str.c_str(), str.size());
// Check heap and unreachable memory outside of lock.
if (heap) {
@@ -327,38 +334,37 @@
}
// dump headers
-void MediaMetricsService::dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpHeaders(int64_t sinceNs, const char* prefix)
{
+ std::stringstream result;
if (mediametrics::Item::isEnabled()) {
- result.append("Metrics gathering: enabled\n");
+ result << "Metrics gathering: enabled\n";
} else {
- result.append("Metrics gathering: DISABLED via property\n");
+ result << "Metrics gathering: DISABLED via property\n";
}
- result.appendFormat(
+ result << StringPrintf(
"Since Boot: Submissions: %lld Accepted: %lld\n",
(long long)mItemsSubmitted.load(), (long long)mItemsFinalized);
- result.appendFormat(
+ result << StringPrintf(
"Records Discarded: %lld (by Count: %lld by Expiration: %lld)\n",
(long long)mItemsDiscarded, (long long)mItemsDiscardedCount,
(long long)mItemsDiscardedExpire);
if (prefix != nullptr) {
- result.appendFormat("Restricting to prefix %s", prefix);
+ result << "Restricting to prefix " << prefix << "\n";
}
if (sinceNs != 0) {
- result.appendFormat(
- "Emitting Queue entries more recent than: %lld\n",
- (long long)sinceNs);
+ result << "Emitting Queue entries more recent than: " << sinceNs << "\n";
}
+ return result.str();
}
// TODO: should prefix be a set<string>?
-void MediaMetricsService::dumpQueue(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpQueue(int64_t sinceNs, const char* prefix)
{
if (mItems.empty()) {
- result.append("empty\n");
- return;
+ return "empty\n";
}
-
+ std::stringstream result;
int slot = 0;
for (const auto &item : mItems) { // TODO: consider std::lower_bound() on mItems
if (item->getTimestamp() < sinceNs) { // sinceNs == 0 means all items shown
@@ -369,9 +375,10 @@
__func__, item->getKey().c_str(), prefix);
continue;
}
- result.appendFormat("%5d: %s\n", slot, item->toString().c_str());
+ result << StringPrintf("%5d: %s\n", slot, item->toString().c_str());
slot++;
}
+ return result.str();
}
//
@@ -538,12 +545,13 @@
return AStatsManager_PULL_SKIP;
}
std::lock_guard _l(mLock);
+ bool dumped = false;
for (auto &item : mPullableItems[key]) {
if (const auto sitem = item.lock()) {
- dump2Statsd(sitem, data);
+ dumped |= dump2Statsd(sitem, data, mStatsdLog);
}
}
mPullableItems[key].clear();
- return AStatsManager_PULL_SUCCESS;
+ return dumped ? AStatsManager_PULL_SUCCESS : AStatsManager_PULL_SKIP;
}
} // namespace android
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index 8bc8019..8d0b1cf 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -100,8 +100,8 @@
bool expirations(const std::shared_ptr<const mediametrics::Item>& item) REQUIRES(mLock);
// support for generating output
- void dumpQueue(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
- void dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+ std::string dumpQueue(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+ std::string dumpHeaders(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
// support statsd pushed atoms
static bool isPullable(const std::string &key);
@@ -124,7 +124,14 @@
std::atomic<int64_t> mItemsSubmitted{}; // accessed outside of lock.
- mediametrics::AudioAnalytics mAudioAnalytics; // mAudioAnalytics is locked internally.
+ // mStatsdLog is locked internally (thread-safe) and shows the last atoms logged
+ static constexpr size_t STATSD_LOG_LINES_MAX = 30; // recent log lines to keep
+ static constexpr size_t STATSD_LOG_LINES_DUMP = 4; // normal amount of lines to dump
+ const std::shared_ptr<mediametrics::StatsdLog> mStatsdLog{
+ std::make_shared<mediametrics::StatsdLog>(STATSD_LOG_LINES_MAX)};
+
+ // mAudioAnalytics is locked internally.
+ mediametrics::AudioAnalytics mAudioAnalytics{mStatsdLog};
std::mutex mLock;
// statistics about our analytics
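Taken with the AudioAnalytics and AudioPowerUsage changes above, the service now owns a single StatsdLog and hands the same shared_ptr to every component that emits atoms, so all of them land in one dump buffer. A minimal wiring sketch (the line counts are the two constants declared in this header; dumpToString mirrors the dump() change earlier):

    // One StatsdLog instance shared by the service, AudioAnalytics,
    // AudioPowerUsage, and all statsd pushers/pullers.
    auto statsdLog = std::make_shared<android::mediametrics::StatsdLog>(30 /* lines */);
    android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
    // ... later, in dump():
    const std::string atoms = statsdLog->dumpToString("  " /* prefix */, 4 /* logLines */);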
diff --git a/services/mediametrics/StatsdLog.h b/services/mediametrics/StatsdLog.h
new file mode 100644
index 0000000..e207bac
--- /dev/null
+++ b/services/mediametrics/StatsdLog.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <audio_utils/SimpleLog.h>
+#include <map>
+#include <mutex>
+#include <sstream>
+
+namespace android::mediametrics {
+
+class StatsdLog {
+public:
+ explicit StatsdLog(size_t lines) : mSimpleLog(lines) {}
+
+ void log(int atom, const std::string& string) {
+ {
+ std::lock_guard lock(mLock);
+ ++mCountMap[atom];
+ }
+ mSimpleLog.log("%s", string.c_str());
+ }
+
+ std::string dumpToString(const char *prefix = "", size_t logLines = 0) const {
+ std::stringstream ss;
+
+ { // first print out the atom counts
+ std::lock_guard lock(mLock);
+
+ size_t col = 0;
+ for (const auto& count : mCountMap) {
+ if (col == 8) {
+ col = 0;
+ ss << "\n" << prefix;
+ } else {
+ ss << " ";
+ }
+ ss << "[ " << count.first << " : " << count.second << " ]";
+ ++col;
+ }
+ ss << "\n";
+ }
+
+ // then print out the log lines
+ ss << mSimpleLog.dumpToString(prefix, logLines);
+ return ss.str();
+ }
+
+private:
+ SimpleLog mSimpleLog; // internally locked
+ std::map<int /* atom */, size_t /* count */> mCountMap GUARDED_BY(mLock); // sorted
+ mutable std::mutex mLock;
+};
+
+} // namespace android::mediametrics
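A short usage sketch for the class above: log() bumps the per-atom count and appends the human-readable line to the internal SimpleLog, and dumpToString() prints the counts first, then the recent lines. The atom constant here is only illustrative.

    auto log = std::make_shared<android::mediametrics::StatsdLog>(30 /* lines kept */);
    log->log(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED, "result:0 { ... }");
    log->log(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED, "result:0 { ... }");
    // First the counts, e.g. "[ <atom id> : 2 ]", then the two log lines.
    std::cout << log->dumpToString("  " /* prefix */, 10 /* logLines */);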
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/StringUtils.h
index 37ed173..01034d9 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/StringUtils.h
@@ -16,6 +16,8 @@
#pragma once
+#include <iomanip>
+#include <sstream>
#include <string>
#include <vector>
@@ -146,4 +148,23 @@
return {}; // if not a logSessionId, return an empty string.
}
+inline std::string bytesToString(const std::vector<uint8_t>& bytes, size_t maxSize = SIZE_MAX) {
+ if (bytes.size() == 0) {
+ return "{}";
+ }
+ std::stringstream ss;
+ ss << "{";
+ ss << std::hex << std::setfill('0');
+ maxSize = std::min(maxSize, bytes.size());
+ for (size_t i = 0; i < maxSize; ++i) {
+ ss << " " << std::setw(2) << (int)bytes[i];
+ }
+ if (maxSize != bytes.size()) {
+ ss << " ... }";
+ } else {
+ ss << " }";
+ }
+ return ss.str();
+}
+
} // namespace android::mediametrics::stringutils
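Example outputs for the new bytesToString() helper (values illustrative): each byte prints as two lowercase hex digits, and truncation is marked with an ellipsis.

    const std::vector<uint8_t> bytes{0x01, 0xab, 0xff};
    bytesToString(bytes);       // "{ 01 ab ff }"
    bytesToString(bytes, 2);    // "{ 01 ab ... }"
    bytesToString({});          // "{}"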
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
index 0cb2594..8b0b479 100644
--- a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -320,7 +320,9 @@
void MediaMetricsServiceFuzzer::invokeAudioAnalytics(const uint8_t *data, size_t size) {
FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<android::mediametrics::StatsdLog> statsdLog =
+ std::make_shared<android::mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
while (fdp.remaining_bytes()) {
auto item = std::make_shared<mediametrics::Item>(fdp.ConsumeRandomLengthString().c_str());
diff --git a/services/mediametrics/iface_statsd.cpp b/services/mediametrics/iface_statsd.cpp
index b7c5296..776f878 100644
--- a/services/mediametrics/iface_statsd.cpp
+++ b/services/mediametrics/iface_statsd.cpp
@@ -48,10 +48,7 @@
// has its own routine to handle this.
//
-bool enabled_statsd = true;
-
-using statsd_pusher = bool (*)(const mediametrics::Item *);
-using statsd_puller = bool (*)(const mediametrics::Item *, AStatsEventList *);
+static bool enabled_statsd = true;
namespace {
template<typename Handler, typename... Args>
@@ -68,15 +65,17 @@
}
if (handlers.count(key)) {
- return (handlers.at(key))(item.get(), args...);
+ return (handlers.at(key))(item, args...);
}
return false;
}
} // namespace
// give me a record, I'll look at the type and upload appropriately
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item) {
- static const std::map<std::string, statsd_pusher> statsd_pushers =
+bool dump2Statsd(
+ const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
+ static const std::map<std::string, statsd_pusher*> statsd_pushers =
{
{ "audiopolicy", statsd_audiopolicy },
{ "audiorecord", statsd_audiorecord },
@@ -91,15 +90,16 @@
{ "nuplayer2", statsd_nuplayer },
{ "recorder", statsd_recorder },
};
- return dump2StatsdInternal(statsd_pushers, item);
+ return dump2StatsdInternal(statsd_pushers, item, statsdLog);
}
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out) {
- static const std::map<std::string, statsd_puller> statsd_pullers =
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
+ static const std::map<std::string, statsd_puller*> statsd_pullers =
{
{ "mediadrm", statsd_mediadrm_puller },
};
- return dump2StatsdInternal(statsd_pullers, item, out);
+ return dump2StatsdInternal(statsd_pullers, item, out, statsdLog);
}
} // namespace android
diff --git a/services/mediametrics/iface_statsd.h b/services/mediametrics/iface_statsd.h
index 1b6c79a..c2a8b3c 100644
--- a/services/mediametrics/iface_statsd.h
+++ b/services/mediametrics/iface_statsd.h
@@ -22,26 +22,29 @@
class Item;
}
-extern bool enabled_statsd;
-
+using statsd_pusher = bool (const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
// component specific dumpers
-extern bool statsd_audiopolicy(const mediametrics::Item *);
-extern bool statsd_audiorecord(const mediametrics::Item *);
-extern bool statsd_audiothread(const mediametrics::Item *);
-extern bool statsd_audiotrack(const mediametrics::Item *);
-extern bool statsd_codec(const mediametrics::Item *);
-extern bool statsd_extractor(const mediametrics::Item *);
-extern bool statsd_mediaparser(const mediametrics::Item *);
-extern bool statsd_nuplayer(const mediametrics::Item *);
-extern bool statsd_recorder(const mediametrics::Item *);
+extern statsd_pusher statsd_audiopolicy;
+extern statsd_pusher statsd_audiorecord;
+extern statsd_pusher statsd_audiothread;
+extern statsd_pusher statsd_audiotrack;
+extern statsd_pusher statsd_codec;
+extern statsd_pusher statsd_extractor;
+extern statsd_pusher statsd_mediaparser;
-extern bool statsd_mediadrm(const mediametrics::Item *);
-extern bool statsd_drmmanager(const mediametrics::Item *);
+extern statsd_pusher statsd_nuplayer;
+extern statsd_pusher statsd_recorder;
+extern statsd_pusher statsd_mediadrm;
+extern statsd_pusher statsd_drmmanager;
+using statsd_puller = bool (const std::shared_ptr<const mediametrics::Item>& item,
+ AStatsEventList *, const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
// component specific pullers
-extern bool statsd_mediadrm_puller(const mediametrics::Item *, AStatsEventList *);
+extern statsd_puller statsd_mediadrm_puller;
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item);
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out);
-
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
} // namespace android
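The header now declares every pusher and puller through a function type instead of repeating the full prototype, so all dumpers are pinned to one signature at compile time. A generic sketch of the idiom, with handler_t/handleA as illustrative names rather than anything in this patch:

    using handler_t = bool (int value);   // a function type, not a pointer
    extern handler_t handleA;             // same as: bool handleA(int value);
    extern handler_t handleB;             // a mismatching definition fails to compile

    bool handleA(int value) { return value > 0; }   // definition must match exactly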
diff --git a/services/mediametrics/statsd_audiopolicy.cpp b/services/mediametrics/statsd_audiopolicy.cpp
index 6ef2f2c..f44b7c4 100644
--- a/services/mediametrics/statsd_audiopolicy.cpp
+++ b/services/mediametrics/statsd_audiopolicy.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_audiopolicy(const mediametrics::Item *item)
+bool statsd_audiopolicy(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -60,35 +60,35 @@
metrics_proto.set_status(status);
}
//string char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
- std::string rqst_src;
- if (item->getString("android.media.audiopolicy.rqst.src", &rqst_src)) {
- metrics_proto.set_request_source(std::move(rqst_src));
+ std::string request_source;
+ if (item->getString("android.media.audiopolicy.rqst.src", &request_source)) {
+ metrics_proto.set_request_source(request_source);
}
//string char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
- std::string rqst_pkg;
- if (item->getString("android.media.audiopolicy.rqst.pkg", &rqst_pkg)) {
- metrics_proto.set_request_package(std::move(rqst_pkg));
+ std::string request_package;
+ if (item->getString("android.media.audiopolicy.rqst.pkg", &request_package)) {
+ metrics_proto.set_request_package(request_package);
}
//int32 char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
- int32_t rqst_session = -1;
- if (item->getInt32("android.media.audiopolicy.rqst.session", &rqst_session)) {
- metrics_proto.set_request_session(rqst_session);
+ int32_t request_session = -1;
+ if (item->getInt32("android.media.audiopolicy.rqst.session", &request_session)) {
+ metrics_proto.set_request_session(request_session);
}
//string char kAudioPolicyRqstDevice[] = "android.media.audiopolicy.rqst.device";
- std::string rqst_device;
- if (item->getString("android.media.audiopolicy.rqst.device", &rqst_device)) {
- metrics_proto.set_request_device(std::move(rqst_device));
+ std::string request_device;
+ if (item->getString("android.media.audiopolicy.rqst.device", &request_device)) {
+ metrics_proto.set_request_device(request_device);
}
//string char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
- std::string active_src;
- if (item->getString("android.media.audiopolicy.active.src", &active_src)) {
- metrics_proto.set_active_source(std::move(active_src));
+ std::string active_source;
+ if (item->getString("android.media.audiopolicy.active.src", &active_source)) {
+ metrics_proto.set_active_source(active_source);
}
//string char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
- std::string active_pkg;
- if (item->getString("android.media.audiopolicy.active.pkg", &active_pkg)) {
- metrics_proto.set_active_package(std::move(active_pkg));
+ std::string active_package;
+ if (item->getString("android.media.audiopolicy.active.pkg", &active_package)) {
+ metrics_proto.set_active_package(active_package);
}
//int32 char kAudioPolicyActiveSession[] = "android.media.audiopolicy.active.session";
int32_t active_session = -1;
@@ -98,27 +98,40 @@
//string char kAudioPolicyActiveDevice[] = "android.media.audiopolicy.active.device";
std::string active_device;
if (item->getString("android.media.audiopolicy.active.device", &active_device)) {
- metrics_proto.set_active_device(std::move(active_device));
+ metrics_proto.set_active_device(active_device);
}
-
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize audipolicy metrics");
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiopolicy_reported:"
+ << android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
-
+ << " status:" << status
+ << " request_source:" << request_source
+ << " request_package:" << request_package
+ << " request_session:" << request_session
+ << " request_device:" << request_device
+ << " active_source:" << active_source
+ << " active_package:" << active_package
+ << " active_session:" << active_session
+ << " active_device:" << active_device
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED, log.str());
return true;
}
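Every pusher now follows the shape shown in statsd_audiopolicy above: call stats_write unconditionally, then record a readable key:value dump of the same fields in the shared StatsdLog. A self-contained sketch of that shape, where statsWrite and kExampleAtom are stand-ins for android::util::stats_write and a real atom id:

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    static int statsWrite(int /* atom */, const char* /* name */, int64_t /* version */) {
        return 0;  // stand-in for android::util::stats_write
    }
    constexpr int kExampleAtom = 12345;  // hypothetical atom id

    bool pushExample(const std::string& package_name, int64_t package_version_code) {
        const int result = statsWrite(kExampleAtom, package_name.c_str(), package_version_code);
        std::stringstream log;
        log << "result:" << result << " {"
            << " package_name:" << package_name
            << " package_version_code:" << package_version_code
            << " }";
        std::cout << log.str() << "\n";  // the real code calls statsdLog->log(atom, log.str())
        return true;
    }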
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index db809dc..70a67ae 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -38,16 +38,15 @@
namespace android {
-bool statsd_audiorecord(const mediametrics::Item *item)
-{
+bool statsd_audiorecord(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -57,12 +56,12 @@
//
std::string encoding;
if (item->getString("android.media.audiorecord.encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
std::string source;
if (item->getString("android.media.audiorecord.source", &source)) {
- metrics_proto.set_source(std::move(source));
+ metrics_proto.set_source(source);
}
int32_t latency = -1;
@@ -80,14 +79,14 @@
metrics_proto.set_channels(channels);
}
- int64_t createdMs = -1;
- if (item->getInt64("android.media.audiorecord.createdMs", &createdMs)) {
- metrics_proto.set_created_millis(createdMs);
+ int64_t created_millis = -1;
+ if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
+ metrics_proto.set_created_millis(created_millis);
}
- int64_t durationMs = -1;
- if (item->getInt64("android.media.audiorecord.durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
int32_t count = -1;
@@ -95,44 +94,43 @@
metrics_proto.set_count(count);
}
- int32_t errcode = -1;
- if (item->getInt32("android.media.audiorecord.errcode", &errcode)) {
- metrics_proto.set_error_code(errcode);
- } else if (item->getInt32("android.media.audiorecord.lastError.code", &errcode)) {
- metrics_proto.set_error_code(errcode);
+ int32_t error_code = -1;
+ if (item->getInt32("android.media.audiorecord.errcode", &error_code)) {
+ metrics_proto.set_error_code(error_code);
+ } else if (item->getInt32("android.media.audiorecord.lastError.code", &error_code)) {
+ metrics_proto.set_error_code(error_code);
}
- std::string errfunc;
- if (item->getString("android.media.audiorecord.errfunc", &errfunc)) {
- metrics_proto.set_error_function(std::move(errfunc));
- } else if (item->getString("android.media.audiorecord.lastError.at", &errfunc)) {
- metrics_proto.set_error_function(std::move(errfunc));
+ std::string error_function;
+ if (item->getString("android.media.audiorecord.errfunc", &error_function)) {
+ metrics_proto.set_error_function(error_function);
+ } else if (item->getString("android.media.audiorecord.lastError.at", &error_function)) {
+ metrics_proto.set_error_function(error_function);
}
- // portId (int32)
int32_t port_id = -1;
if (item->getInt32("android.media.audiorecord.portId", &port_id)) {
metrics_proto.set_port_id(port_id);
}
- // frameCount (int32)
- int32_t frameCount = -1;
- if (item->getInt32("android.media.audiorecord.frameCount", &frameCount)) {
- metrics_proto.set_frame_count(frameCount);
+
+ int32_t frame_count = -1;
+ if (item->getInt32("android.media.audiorecord.frameCount", &frame_count)) {
+ metrics_proto.set_frame_count(frame_count);
}
- // attributes (string)
+
std::string attributes;
if (item->getString("android.media.audiorecord.attributes", &attributes)) {
- metrics_proto.set_attributes(std::move(attributes));
+ metrics_proto.set_attributes(attributes);
}
- // channelMask (int64)
- int64_t channelMask = -1;
- if (item->getInt64("android.media.audiorecord.channelMask", &channelMask)) {
- metrics_proto.set_channel_mask(channelMask);
+
+ int64_t channel_mask = -1;
+ if (item->getInt64("android.media.audiorecord.channelMask", &channel_mask)) {
+ metrics_proto.set_channel_mask(channel_mask);
}
- // startcount (int64)
- int64_t startcount = -1;
- if (item->getInt64("android.media.audiorecord.startcount", &startcount)) {
- metrics_proto.set_start_count(startcount);
+
+ int64_t start_count = -1;
+ if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+ metrics_proto.set_start_count(start_count);
}
std::string serialized;
@@ -145,21 +143,44 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
- const auto logSessionIdForStats =
+ const auto log_session_id =
mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized,
- logSessionIdForStats.c_str());
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized,
+ log_session_id.c_str());
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiorecord_reported:"
+ << android::util::MEDIAMETRICS_AUDIORECORD_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " encoding:" << encoding
+ << " source:" << source
+ << " latency:" << latency
+ << " samplerate:" << samplerate
+ << " channels:" << channels
+ << " created_millis:" << created_millis
+ << " duration_millis:" << duration_millis
+ << " count:" << count
+ << " error_code:" << error_code
+ << " error_function:" << error_function
+ << " port_id:" << port_id
+ << " frame_count:" << frame_count
+ << " attributes:" << attributes
+ << " channel_mask:" << channel_mask
+ << " start_count:" << start_count
+
+ << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_audiothread.cpp b/services/mediametrics/statsd_audiothread.cpp
index 2ad2562..34cc923 100644
--- a/services/mediametrics/statsd_audiothread.cpp
+++ b/services/mediametrics/statsd_audiothread.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_audiothread(const mediametrics::Item *item)
+bool statsd_audiothread(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -68,17 +68,17 @@
if (item->getInt32(MM_PREFIX "samplerate", &samplerate)) {
metrics_proto.set_samplerate(samplerate);
}
- std::string workhist;
- if (item->getString(MM_PREFIX "workMs.hist", &workhist)) {
- metrics_proto.set_work_millis_hist(std::move(workhist));
+ std::string work_millis_hist;
+ if (item->getString(MM_PREFIX "workMs.hist", &work_millis_hist)) {
+ metrics_proto.set_work_millis_hist(work_millis_hist);
}
- std::string latencyhist;
- if (item->getString(MM_PREFIX "latencyMs.hist", &latencyhist)) {
- metrics_proto.set_latency_millis_hist(std::move(latencyhist));
+ std::string latency_millis_hist;
+ if (item->getString(MM_PREFIX "latencyMs.hist", &latency_millis_hist)) {
+ metrics_proto.set_latency_millis_hist(latency_millis_hist);
}
- std::string warmuphist;
- if (item->getString(MM_PREFIX "warmupMs.hist", &warmuphist)) {
- metrics_proto.set_warmup_millis_hist(std::move(warmuphist));
+ std::string warmup_millis_hist;
+ if (item->getString(MM_PREFIX "warmupMs.hist", &warmup_millis_hist)) {
+ metrics_proto.set_warmup_millis_hist(warmup_millis_hist);
}
int64_t underruns = -1;
if (item->getInt64(MM_PREFIX "underruns", &underruns)) {
@@ -88,101 +88,99 @@
if (item->getInt64(MM_PREFIX "overruns", &overruns)) {
metrics_proto.set_overruns(overruns);
}
- int64_t activeMs = -1;
- if (item->getInt64(MM_PREFIX "activeMs", &activeMs)) {
- metrics_proto.set_active_millis(activeMs);
+ int64_t active_millis = -1;
+ if (item->getInt64(MM_PREFIX "activeMs", &active_millis)) {
+ metrics_proto.set_active_millis(active_millis);
}
- int64_t durationMs = -1;
- if (item->getInt64(MM_PREFIX "durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64(MM_PREFIX "durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
- // item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
int32_t id = -1;
if (item->getInt32(MM_PREFIX "id", &id)) {
metrics_proto.set_id(id);
}
- // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
+
int32_t port_id = -1;
- if (item->getInt32(MM_PREFIX "portId", &id)) {
+ if (item->getInt32(MM_PREFIX "portId", &port_id)) {
metrics_proto.set_port_id(port_id);
}
// item->setCString(MM_PREFIX "type", threadTypeToString(mType));
std::string type;
if (item->getString(MM_PREFIX "type", &type)) {
- metrics_proto.set_type(std::move(type));
+ metrics_proto.set_type(type);
}
- // item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
+
int32_t sample_rate = -1;
if (item->getInt32(MM_PREFIX "sampleRate", &sample_rate)) {
metrics_proto.set_sample_rate(sample_rate);
}
- // item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
+
int32_t channel_mask = -1;
if (item->getInt32(MM_PREFIX "channelMask", &channel_mask)) {
metrics_proto.set_channel_mask(channel_mask);
}
- // item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
+
std::string encoding;
if (item->getString(MM_PREFIX "encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
- // item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
+
int32_t frame_count = -1;
if (item->getInt32(MM_PREFIX "frameCount", &frame_count)) {
metrics_proto.set_frame_count(frame_count);
}
- // item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
- std::string outDevice;
- if (item->getString(MM_PREFIX "outDevice", &outDevice)) {
- metrics_proto.set_output_device(std::move(outDevice));
- }
- // item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
- std::string inDevice;
- if (item->getString(MM_PREFIX "inDevice", &inDevice)) {
- metrics_proto.set_input_device(std::move(inDevice));
- }
- // item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
- double iojitters_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "ioJitterMs.mean", &iojitters_ms_mean)) {
- metrics_proto.set_io_jitter_mean_millis(iojitters_ms_mean);
- }
- // item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
- double iojitters_ms_std = -1;
- if (item->getDouble(MM_PREFIX "ioJitterMs.std", &iojitters_ms_std)) {
- metrics_proto.set_io_jitter_stddev_millis(iojitters_ms_std);
- }
- // item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
- double process_time_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "processTimeMs.mean", &process_time_ms_mean)) {
- metrics_proto.set_process_time_mean_millis(process_time_ms_mean);
- }
- // item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
- double process_time_ms_std = -1;
- if (item->getDouble(MM_PREFIX "processTimeMs.std", &process_time_ms_std)) {
- metrics_proto.set_process_time_stddev_millis(process_time_ms_std);
- }
- // item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
- double timestamp_jitter_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "timestampJitterMs.mean", ×tamp_jitter_ms_mean)) {
- metrics_proto.set_timestamp_jitter_mean_millis(timestamp_jitter_ms_mean);
- }
- // item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
- double timestamp_jitter_ms_stddev = -1;
- if (item->getDouble(MM_PREFIX "timestampJitterMs.std", ×tamp_jitter_ms_stddev)) {
- metrics_proto.set_timestamp_jitter_stddev_millis(timestamp_jitter_ms_stddev);
- }
- // item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
- double latency_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "latencyMs.mean", &latency_ms_mean)) {
- metrics_proto.set_latency_mean_millis(latency_ms_mean);
- }
- // item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
- double latency_ms_stddev = -1;
- if (item->getDouble(MM_PREFIX "latencyMs.std", &latency_ms_stddev)) {
- metrics_proto.set_latency_stddev_millis(latency_ms_stddev);
+
+ std::string output_device;
+ if (item->getString(MM_PREFIX "outDevice", &output_device)) {
+ metrics_proto.set_output_device(output_device);
}
+ std::string input_device;
+ if (item->getString(MM_PREFIX "inDevice", &input_device)) {
+ metrics_proto.set_input_device(input_device);
+ }
+
+ double io_jitter_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "ioJitterMs.mean", &io_jitter_mean_millis)) {
+ metrics_proto.set_io_jitter_mean_millis(io_jitter_mean_millis);
+ }
+
+ double io_jitter_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "ioJitterMs.std", &io_jitter_stddev_millis)) {
+ metrics_proto.set_io_jitter_stddev_millis(io_jitter_stddev_millis);
+ }
+
+ double process_time_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "processTimeMs.mean", &process_time_mean_millis)) {
+ metrics_proto.set_process_time_mean_millis(process_time_mean_millis);
+ }
+
+ double process_time_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "processTimeMs.std", &process_time_stddev_millis)) {
+ metrics_proto.set_process_time_stddev_millis(process_time_stddev_millis);
+ }
+
+ double timestamp_jitter_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "timestampJitterMs.mean", ×tamp_jitter_mean_millis)) {
+ metrics_proto.set_timestamp_jitter_mean_millis(timestamp_jitter_mean_millis);
+ }
+
+ double timestamp_jitter_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "timestampJitterMs.std", ×tamp_jitter_stddev_millis)) {
+ metrics_proto.set_timestamp_jitter_stddev_millis(timestamp_jitter_stddev_millis);
+ }
+
+ double latency_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "latencyMs.mean", &latency_mean_millis)) {
+ metrics_proto.set_latency_mean_millis(latency_mean_millis);
+ }
+
+ double latency_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "latencyMs.std", &latency_stddev_millis)) {
+ metrics_proto.set_latency_stddev_millis(latency_stddev_millis);
+ }
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
@@ -190,17 +188,50 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiothread_reported:"
+ << android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " type:" << type
+ << " framecount:" << framecount
+ << " samplerate:" << samplerate
+ << " work_millis_hist:" << work_millis_hist
+ << " latency_millis_hist:" << latency_millis_hist
+ << " warmup_millis_hist:" << warmup_millis_hist
+ << " underruns:" << underruns
+ << " overruns:" << overruns
+ << " active_millis:" << active_millis
+ << " duration_millis:" << duration_millis
+ << " id:" << id
+ << " port_id:" << port_id
+ << " sample_rate:" << sample_rate
+ << " channel_mask:" << channel_mask
+ << " encoding:" << encoding
+ << " frame_count:" << frame_count
+ << " output_device:" << output_device
+ << " input_device:" << input_device
+ << " io_jitter_mean_millis:" << io_jitter_mean_millis
+ << " io_jitter_stddev_millis:" << io_jitter_stddev_millis
+
+ << " process_time_mean_millis:" << process_time_mean_millis
+ << " process_time_stddev_millis:" << process_time_stddev_millis
+ << " timestamp_jitter_mean_millis:" << timestamp_jitter_mean_millis
+ << " timestamp_jitter_stddev_millis:" << timestamp_jitter_stddev_millis
+ << " latency_mean_millis:" << latency_mean_millis
+ << " latency_stddev_millis:" << latency_stddev_millis
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index fd809c8..fe269a1 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -38,16 +38,16 @@
namespace android {
-bool statsd_audiotrack(const mediametrics::Item *item)
+bool statsd_audiotrack(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -58,52 +58,52 @@
// static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
// optional string streamType;
- std::string streamtype;
- if (item->getString("android.media.audiotrack.streamtype", &streamtype)) {
- metrics_proto.set_stream_type(std::move(streamtype));
+ std::string stream_type;
+ if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
+ metrics_proto.set_stream_type(stream_type);
}
// static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
// optional string contentType;
- std::string contenttype;
- if (item->getString("android.media.audiotrack.type", &contenttype)) {
- metrics_proto.set_content_type(std::move(contenttype));
+ std::string content_type;
+ if (item->getString("android.media.audiotrack.type", &content_type)) {
+ metrics_proto.set_content_type(content_type);
}
// static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
// optional string trackUsage;
- std::string trackusage;
- if (item->getString("android.media.audiotrack.usage", &trackusage)) {
- metrics_proto.set_track_usage(std::move(trackusage));
+ std::string track_usage;
+ if (item->getString("android.media.audiotrack.usage", &track_usage)) {
+ metrics_proto.set_track_usage(track_usage);
}
// static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
// optional int32 samplerate;
- int32_t samplerate = -1;
- if (item->getInt32("android.media.audiotrack.samplerate", &samplerate)) {
- metrics_proto.set_sample_rate(samplerate);
+ int32_t sample_rate = -1;
+ if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+ metrics_proto.set_sample_rate(sample_rate);
}
// static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
// optional int64 channelMask;
- int64_t channelMask = -1;
- if (item->getInt64("android.media.audiotrack.channelmask", &channelMask)) {
- metrics_proto.set_channel_mask(channelMask);
+ int64_t channel_mask = -1;
+ if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+ metrics_proto.set_channel_mask(channel_mask);
}
// NB: These are not yet exposed as public Java API constants.
// static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
// optional int32 underrunframes;
- int32_t underrunframes = -1;
- if (item->getInt32("android.media.audiotrack.underrunframes", &underrunframes)) {
- metrics_proto.set_underrun_frames(underrunframes);
+ int32_t underrun_frames = -1;
+ if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+ metrics_proto.set_underrun_frames(underrun_frames);
}
// static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
// optional int32 startupglitch;
- int32_t startupglitch = -1;
- if (item->getInt32("android.media.audiotrack.glitch.startup", &startupglitch)) {
- metrics_proto.set_startup_glitch(startupglitch);
+ int32_t startup_glitch = -1;
+ if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
+ metrics_proto.set_startup_glitch(startup_glitch);
}
// portId (int32)
@@ -114,7 +114,7 @@
// encoding (string)
std::string encoding;
if (item->getString("android.media.audiotrack.encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
// frameCount (int32)
int32_t frame_count = -1;
@@ -124,7 +124,7 @@
// attributes (string)
std::string attributes;
if (item->getString("android.media.audiotrack.attributes", &attributes)) {
- metrics_proto.set_attributes(std::move(attributes));
+ metrics_proto.set_attributes(attributes);
}
std::string serialized;
@@ -137,21 +137,40 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
- const auto logSessionIdForStats =
+ const auto log_session_id =
mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized,
- logSessionIdForStats.c_str());
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized,
+ log_session_id.c_str());
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiotrack_reported:"
+ << android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " stream_type:" << stream_type
+ << " content_type:" << content_type
+ << " track_usage:" << track_usage
+ << " sample_rate:" << sample_rate
+ << " channel_mask:" << channel_mask
+ << " underrun_frames:" << underrun_frames
+ << " startup_glitch:" << startup_glitch
+ << " port_id:" << port_id
+ << " encoding:" << encoding
+ << " frame_count:" << frame_count
+ << " attributes:" << attributes
+
+ << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 1c5ab77..4539ad5 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -33,186 +33,317 @@
#include "cleaner.h"
#include "MediaMetricsService.h"
-#include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
+#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
namespace android {
-bool statsd_codec(const mediametrics::Item *item)
+bool statsd_codec(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
- ::android::stats::mediametrics::CodecData metrics_proto;
+ ::android::stats::mediametrics_message::CodecData metrics_proto;
// flesh out the protobuf we'll hand off with our data
//
// android.media.mediacodec.codec string
std::string codec;
if (item->getString("android.media.mediacodec.codec", &codec)) {
- metrics_proto.set_codec(std::move(codec));
+ metrics_proto.set_codec(codec);
}
- // android.media.mediacodec.mime string
+
std::string mime;
if (item->getString("android.media.mediacodec.mime", &mime)) {
- metrics_proto.set_mime(std::move(mime));
+ metrics_proto.set_mime(mime);
}
- // android.media.mediacodec.mode string
+
std::string mode;
if ( item->getString("android.media.mediacodec.mode", &mode)) {
- metrics_proto.set_mode(std::move(mode));
+ metrics_proto.set_mode(mode);
}
- // android.media.mediacodec.encoder int32
+
int32_t encoder = -1;
if ( item->getInt32("android.media.mediacodec.encoder", &encoder)) {
metrics_proto.set_encoder(encoder);
}
- // android.media.mediacodec.secure int32
+
int32_t secure = -1;
if ( item->getInt32("android.media.mediacodec.secure", &secure)) {
metrics_proto.set_secure(secure);
}
- // android.media.mediacodec.width int32
+
int32_t width = -1;
if ( item->getInt32("android.media.mediacodec.width", &width)) {
metrics_proto.set_width(width);
}
- // android.media.mediacodec.height int32
+
int32_t height = -1;
if ( item->getInt32("android.media.mediacodec.height", &height)) {
metrics_proto.set_height(height);
}
- // android.media.mediacodec.rotation-degrees int32
+
int32_t rotation = -1;
if ( item->getInt32("android.media.mediacodec.rotation-degrees", &rotation)) {
metrics_proto.set_rotation(rotation);
}
- // android.media.mediacodec.crypto int32 (although missing if not needed
+ // android.media.mediacodec.crypto int32 (although missing if not needed)
int32_t crypto = -1;
if ( item->getInt32("android.media.mediacodec.crypto", &crypto)) {
metrics_proto.set_crypto(crypto);
}
- // android.media.mediacodec.profile int32
+
int32_t profile = -1;
if ( item->getInt32("android.media.mediacodec.profile", &profile)) {
metrics_proto.set_profile(profile);
}
- // android.media.mediacodec.level int32
+
int32_t level = -1;
if ( item->getInt32("android.media.mediacodec.level", &level)) {
metrics_proto.set_level(level);
}
- // android.media.mediacodec.maxwidth int32
- int32_t maxwidth = -1;
- if ( item->getInt32("android.media.mediacodec.maxwidth", &maxwidth)) {
- metrics_proto.set_max_width(maxwidth);
+
+ int32_t max_width = -1;
+ if ( item->getInt32("android.media.mediacodec.maxwidth", &max_width)) {
+ metrics_proto.set_max_width(max_width);
}
- // android.media.mediacodec.maxheight int32
- int32_t maxheight = -1;
- if ( item->getInt32("android.media.mediacodec.maxheight", &maxheight)) {
- metrics_proto.set_max_height(maxheight);
+
+ int32_t max_height = -1;
+ if ( item->getInt32("android.media.mediacodec.maxheight", &max_height)) {
+ metrics_proto.set_max_height(max_height);
}
- // android.media.mediacodec.errcode int32
- int32_t errcode = -1;
- if ( item->getInt32("android.media.mediacodec.errcode", &errcode)) {
- metrics_proto.set_error_code(errcode);
+
+ int32_t error_code = -1;
+ if ( item->getInt32("android.media.mediacodec.errcode", &error_code)) {
+ metrics_proto.set_error_code(error_code);
}
- // android.media.mediacodec.errstate string
- std::string errstate;
- if ( item->getString("android.media.mediacodec.errstate", &errstate)) {
- metrics_proto.set_error_state(std::move(errstate));
+
+ std::string error_state;
+ if ( item->getString("android.media.mediacodec.errstate", &error_state)) {
+ metrics_proto.set_error_state(error_state);
}
- // android.media.mediacodec.latency.max int64
+
int64_t latency_max = -1;
if ( item->getInt64("android.media.mediacodec.latency.max", &latency_max)) {
metrics_proto.set_latency_max(latency_max);
}
- // android.media.mediacodec.latency.min int64
+
int64_t latency_min = -1;
if ( item->getInt64("android.media.mediacodec.latency.min", &latency_min)) {
metrics_proto.set_latency_min(latency_min);
}
- // android.media.mediacodec.latency.avg int64
+
int64_t latency_avg = -1;
if ( item->getInt64("android.media.mediacodec.latency.avg", &latency_avg)) {
metrics_proto.set_latency_avg(latency_avg);
}
- // android.media.mediacodec.latency.n int64
+
int64_t latency_count = -1;
if ( item->getInt64("android.media.mediacodec.latency.n", &latency_count)) {
metrics_proto.set_latency_count(latency_count);
}
- // android.media.mediacodec.latency.unknown int64
+
int64_t latency_unknown = -1;
if ( item->getInt64("android.media.mediacodec.latency.unknown", &latency_unknown)) {
metrics_proto.set_latency_unknown(latency_unknown);
}
- // android.media.mediacodec.queueSecureInputBufferError int32
- if (int32_t queueSecureInputBufferError = -1;
- item->getInt32("android.media.mediacodec.queueSecureInputBufferError",
- &queueSecureInputBufferError)) {
- metrics_proto.set_queue_secure_input_buffer_error(queueSecureInputBufferError);
+
+ int32_t queue_secure_input_buffer_error = -1;
+ if (item->getInt32("android.media.mediacodec.queueSecureInputBufferError",
+ &queue_secure_input_buffer_error)) {
+ metrics_proto.set_queue_secure_input_buffer_error(queue_secure_input_buffer_error);
}
- // android.media.mediacodec.queueInputBufferError int32
- if (int32_t queueInputBufferError = -1;
- item->getInt32("android.media.mediacodec.queueInputBufferError",
- &queueInputBufferError)) {
- metrics_proto.set_queue_input_buffer_error(queueInputBufferError);
+
+ int32_t queue_input_buffer_error = -1;
+ if (item->getInt32("android.media.mediacodec.queueInputBufferError",
+ &queue_input_buffer_error)) {
+ metrics_proto.set_queue_input_buffer_error(queue_input_buffer_error);
}
// android.media.mediacodec.latency.hist NOT EMITTED
- // android.media.mediacodec.bitrate_mode string
std::string bitrate_mode;
if (item->getString("android.media.mediacodec.bitrate_mode", &bitrate_mode)) {
- metrics_proto.set_bitrate_mode(std::move(bitrate_mode));
+ metrics_proto.set_bitrate_mode(bitrate_mode);
}
- // android.media.mediacodec.bitrate int32
+
int32_t bitrate = -1;
if (item->getInt32("android.media.mediacodec.bitrate", &bitrate)) {
metrics_proto.set_bitrate(bitrate);
}
- // android.media.mediacodec.lifetimeMs int64
- int64_t lifetimeMs = -1;
- if ( item->getInt64("android.media.mediacodec.lifetimeMs", &lifetimeMs)) {
- lifetimeMs = mediametrics::bucket_time_minutes(lifetimeMs);
- metrics_proto.set_lifetime_millis(lifetimeMs);
+
+ int64_t lifetime_millis = -1;
+ if (item->getInt64("android.media.mediacodec.lifetimeMs", &lifetime_millis)) {
+ lifetime_millis = mediametrics::bucket_time_minutes(lifetime_millis);
+ metrics_proto.set_lifetime_millis(lifetime_millis);
}
- // new for S; need to plumb through to westworld
- // android.media.mediacodec.channelCount int32
- // android.media.mediacodec.sampleRate int32
- // new for S; need to plumb through to westworld
+ // android.media.mediacodec.channelCount
+ int32_t channelCount = -1;
+ if ( item->getInt32("android.media.mediacodec.channelCount", &channelCount)) {
+ metrics_proto.set_channel_count(channelCount);
+ }
+ // android.media.mediacodec.sampleRate
+ int32_t sampleRate = -1;
+ if ( item->getInt32("android.media.mediacodec.sampleRate", &sampleRate)) {
+ metrics_proto.set_sample_rate(sampleRate);
+ }
+
// TODO PWG may want these fuzzed up a bit to obscure some precision
- // android.media.mediacodec.vencode.bytes int64
- // android.media.mediacodec.vencode.frames int64
- // android.media.mediacodec.vencode.durationUs int64
+ // android.media.mediacodec.vencode.bytes
+ int64_t bytes = -1;
+ if ( item->getInt64("android.media.mediacodec.vencode.bytes", &bytes)) {
+ metrics_proto.set_video_encode_bytes(bytes);
+ }
+
+ // android.media.mediacodec.vencode.frames
+ int64_t frames = -1;
+ if ( item->getInt64("android.media.mediacodec.vencode.frames", &frames)) {
+ metrics_proto.set_video_encode_frames(frames);
+ }
+
+ // android.media.mediacodec.vencode.durationUs
+ int64_t durationUs = -1;
+ if ( item->getInt64("android.media.mediacodec.vencode.durationUs", &durationUs)) {
+ metrics_proto.set_video_encode_duration_us(durationUs);
+ }
+
+ // android.media.mediacodec.color-format
+ int32_t colorFormat = -1;
+ if ( item->getInt32("android.media.mediacodec.color-format", &colorFormat)) {
+ metrics_proto.set_color_format(colorFormat);
+ }
+
+ // android.media.mediacodec.frame-rate
+ double frameRate = -1.0;
+ if ( item->getDouble("android.media.mediacodec.frame-rate", &frameRate)) {
+ metrics_proto.set_frame_rate(frameRate);
+ }
+
+ // android.media.mediacodec.capture-rate
+ double captureRate = -1.0;
+ if ( item->getDouble("android.media.mediacodec.capture-rate", &captureRate)) {
+ metrics_proto.set_capture_rate(captureRate);
+ }
+
+ // android.media.mediacodec.operating-rate
+ double operatingRate = -1.0;
+ if ( item->getDouble("android.media.mediacodec.operating-rate", &operatingRate)) {
+ metrics_proto.set_operating_rate(operatingRate);
+ }
+
+ // android.media.mediacodec.priority
+ int32_t priority = -1;
+ if ( item->getInt32("android.media.mediacodec.priority", &priority)) {
+ metrics_proto.set_priority(priority);
+ }
+
+ // android.media.mediacodec.video-qp-i-min
+ int32_t qpIMin = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-i-min", &qpIMin)) {
+ metrics_proto.set_video_qp_i_min(qpIMin);
+ }
+
+ // android.media.mediacodec.video-qp-i-max
+ int32_t qpIMax = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-i-max", &qpIMax)) {
+ metrics_proto.set_video_qp_i_max(qpIMax);
+ }
+
+ // android.media.mediacodec.video-qp-p-min
+ int32_t qpPMin = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-p-min", &qpPMin)) {
+ metrics_proto.set_video_qp_p_min(qpPMin);
+ }
+
+ // android.media.mediacodec.video-qp-p-max
+ int32_t qpPMax = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-p-max", &qpPMax)) {
+ metrics_proto.set_video_qp_p_max(qpPMax);
+ }
+
+ // android.media.mediacodec.video-qp-b-min
+ int32_t qpBMin = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-b-min", &qpBMin)) {
+ metrics_proto.set_video_qp_b_min(qpBMin);
+ }
+
+ // android.media.mediacodec.video-qp-b-max
+ int32_t qpBMax = -1;
+ if ( item->getInt32("android.media.mediacodec.video-qp-b-max", &qpBMax)) {
+ metrics_proto.set_video_qp_b_max(qpBMax);
+ }
+
+ // android.media.mediacodec.video.input.bytes
+ int64_t inputBytes = -1;
+ if ( item->getInt64("android.media.mediacodec.video.input.bytes", &inputBytes)) {
+ metrics_proto.set_video_input_bytes(inputBytes);
+ }
+
+ // android.media.mediacodec.video.input.frames
+ int64_t inputFrames = -1;
+ if ( item->getInt64("android.media.mediacodec.video.input.frames", &inputFrames)) {
+ metrics_proto.set_video_input_frames(inputFrames);
+ }
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize codec metrics");
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_CODEC_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_CODEC_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_codec_reported:"
+ << android::util::MEDIAMETRICS_CODEC_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+ << " codec:" << codec
+ << " mime:" << mime
+ << " mode:" << mode
+ << " encoder:" << encoder
+ << " secure:" << secure
+ << " width:" << width
+ << " height:" << height
+ << " rotation:" << rotation
+ << " crypto:" << crypto
+ << " profile:" << profile
+ << " level:" << level
+ << " max_width:" << max_width
+ << " max_height:" << max_height
+ << " error_code:" << error_code
+ << " error_state:" << error_state
+ << " latency_max:" << latency_max
+ << " latency_min:" << latency_min
+ << " latency_avg:" << latency_avg
+ << " latency_count:" << latency_count
+ << " latency_unknown:" << latency_unknown
+ << " queue_input_buffer_error:" << queue_input_buffer_error
+ << " queue_secure_input_buffer_error:" << queue_secure_input_buffer_error
+ << " bitrate_mode:" << bitrate_mode
+ << " bitrate:" << bitrate
+ << " lifetime_millis:" << lifetime_millis
+ // TODO: add when log_session_id is merged.
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_CODEC_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 071c549..287fb8d 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -32,6 +32,7 @@
#include <pwd.h>
#include "MediaMetricsService.h"
+#include "StringUtils.h"
#include "iface_statsd.h"
#include <statslog.h>
@@ -43,53 +44,67 @@
namespace android {
// mediadrm
-bool statsd_mediadrm(const mediametrics::Item *item)
+bool statsd_mediadrm(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
std::string vendor;
(void) item->getString("vendor", &vendor);
std::string description;
(void) item->getString("description", &description);
- if (enabled_statsd) {
- // This field is left here for backward compatibility.
- // This field is not used anymore.
- const std::string kUnusedField("unused");
- android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
- android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- vendor.c_str(),
- description.c_str(),
- bf_serialized);
- } else {
- ALOGV("NOT sending: mediadrm data(%s, %s)", vendor.c_str(), description.c_str());
+ std::string serialized_metrics;
+ (void) item->getString("serialized_metrics", &serialized_metrics);
+ if (serialized_metrics.empty()) {
+ ALOGD("statsd_mediadrm skipping empty entry");
+ return false;
}
+ // This field is kept only for backward compatibility; it is no longer used.
+ const std::string kUnusedField("");
+ android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ vendor.c_str(),
+ description.c_str(),
+ bf_serialized);
+
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_mediadrm_reported:"
+ << android::util::MEDIAMETRICS_MEDIADRM_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+
+ << " vendor:" << vendor
+ << " description:" << description
+ // omitting serialized
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_MEDIADRM_REPORTED, log.str());
return true;
}
// drmmanager
-bool statsd_drmmanager(const mediametrics::Item *item)
+bool statsd_drmmanager(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
using namespace std::string_literals;
if (item == nullptr) return false;
- if (!enabled_statsd) {
- ALOGV("NOT sending: drmmanager data");
- return true;
- }
-
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
std::string plugin_id;
(void) item->getString("plugin_id", &plugin_id);
@@ -107,8 +122,9 @@
item->getInt64(("method"s + std::to_string(i)).c_str(), &methodCounts[i]);
}
- android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode, mediaApexVersion,
+ const int result = android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
plugin_id.c_str(), description.c_str(),
method_id, mime_types.c_str(),
methodCounts[0], methodCounts[1], methodCounts[2],
@@ -117,6 +133,25 @@
methodCounts[9], methodCounts[10], methodCounts[11],
methodCounts[12]);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_drmmanager_reported:"
+ << android::util::MEDIAMETRICS_DRMMANAGER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+
+ << " plugin_id:" << plugin_id
+ << " description:" << description
+ << " method_id:" << method_id
+ << " mime_types:" << mime_types;
+
+ for (size_t i = 0; i < methodCounts.size(); ++i) {
+ log << " method_" << i << ":" << methodCounts[i];
+ }
+ log << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED, log.str());
return true;
}
@@ -144,17 +179,14 @@
} // namespace
// |out| and its contents are memory-managed by statsd.
-bool statsd_mediadrm_puller(const mediametrics::Item* item, AStatsEventList* out)
+bool statsd_mediadrm_puller(
+ const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) {
return false;
}
- if (!enabled_statsd) {
- ALOGV("NOT pulling: mediadrm activity");
- return true;
- }
-
std::string serialized_metrics;
(void) item->getString("serialized_metrics", &serialized_metrics);
const auto framework_raw(base64DecodeNoPad(serialized_metrics));
@@ -163,6 +195,11 @@
(void) item->getString("plugin_metrics", &plugin_metrics);
const auto plugin_raw(base64DecodeNoPad(plugin_metrics));
+ if (serialized_metrics.size() == 0 && plugin_metrics.size() == 0) {
+ ALOGD("statsd_mediadrm_puller skipping empty entry");
+ return false;
+ }
+
std::string vendor;
(void) item->getString("vendor", &vendor);
std::string description;
@@ -178,6 +215,19 @@
AStatsEvent_writeByteArray(event, framework_raw.data(), framework_raw.size());
AStatsEvent_writeByteArray(event, plugin_raw.data(), plugin_raw.size());
AStatsEvent_build(event);
+
+ std::stringstream log;
+ log << "pulled:" << " {"
+ << " media_drm_activity_info:"
+ << android::util::MEDIA_DRM_ACTIVITY_INFO
+ << " package_name:" << item->getPkgName()
+ << " package_version_code:" << item->getPkgVersionCode()
+ << " vendor:" << vendor
+ << " description:" << description
+ << " framework_metrics:" << mediametrics::stringutils::bytesToString(framework_raw, 8)
+ << " vendor_metrics:" << mediametrics::stringutils::bytesToString(plugin_raw, 8)
+ << " }";
+ statsdLog->log(android::util::MEDIA_DRM_ACTIVITY_INFO, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index 4180e0c..e228f07 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_extractor(const mediametrics::Item *item)
+bool statsd_extractor(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -55,26 +55,25 @@
// flesh out the protobuf we'll hand off with our data
//
- // android.media.mediaextractor.fmt string
- std::string fmt;
- if (item->getString("android.media.mediaextractor.fmt", &fmt)) {
- metrics_proto.set_format(std::move(fmt));
- }
- // android.media.mediaextractor.mime string
- std::string mime;
- if (item->getString("android.media.mediaextractor.mime", &mime)) {
- metrics_proto.set_mime(std::move(mime));
- }
- // android.media.mediaextractor.ntrk int32
- int32_t ntrk = -1;
- if (item->getInt32("android.media.mediaextractor.ntrk", &ntrk)) {
- metrics_proto.set_tracks(ntrk);
+ std::string format;
+ if (item->getString("android.media.mediaextractor.fmt", &format)) {
+ metrics_proto.set_format(format);
}
- // android.media.mediaextractor.entry string
+ std::string mime;
+ if (item->getString("android.media.mediaextractor.mime", &mime)) {
+ metrics_proto.set_mime(mime);
+ }
+
+ int32_t tracks = -1;
+ if (item->getInt32("android.media.mediaextractor.ntrk", &tracks)) {
+ metrics_proto.set_tracks(tracks);
+ }
+
std::string entry_point_string;
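+ // Default to OTHER so entry_point stays well-defined when the entry key is absent or unrecognized.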
+ stats::mediametrics::ExtractorData::EntryPoint entry_point =
+ stats::mediametrics::ExtractorData_EntryPoint_OTHER;
if (item->getString("android.media.mediaextractor.entry", &entry_point_string)) {
- stats::mediametrics::ExtractorData::EntryPoint entry_point;
if (entry_point_string == "sdk") {
entry_point = stats::mediametrics::ExtractorData_EntryPoint_SDK;
} else if (entry_point_string == "ndk-with-jvm") {
@@ -93,17 +92,30 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_extractor_reported:"
+ << android::util::MEDIAMETRICS_EXTRACTOR_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+ << " format:" << format
+ << " mime:" << mime
+ << " tracks:" << tracks
+ << " entry_point:" << entry_point_string << "(" << entry_point << ")"
+ // TODO: Add MediaExtractor log_session_id
+ // << " log_session_id:" << log_session_id
+
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 262b2ae..6cceb06 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -36,16 +36,15 @@
namespace android {
-bool statsd_mediaparser(const mediametrics::Item *item)
+bool statsd_mediaparser(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
- if (item == nullptr) {
- return false;
- }
+ static constexpr bool enabled_statsd = true; // TODO: Remove, dup with dump2StatsdInternal().
+ if (item == nullptr) return false;
- // statsd wrapper data.
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
std::string parserName;
item->getString("android.media.mediaparser.parserName", &parserName);
@@ -80,11 +79,14 @@
int32_t videoHeight = -1;
item->getInt32("android.media.mediaparser.videoHeight", &videoHeight);
+ std::string logSessionId;
+ item->getString("android.media.mediaparser.logSessionId", &logSessionId);
+
if (enabled_statsd) {
(void) android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
- timestamp,
- pkgName.c_str(),
- pkgVersionCode,
+ timestamp_nanos,
+ package_name.c_str(),
+ package_version_code,
parserName.c_str(),
createdByName,
parserPool.c_str(),
@@ -95,11 +97,32 @@
trackCodecs.c_str(),
alteredParameters.c_str(),
videoWidth,
- videoHeight);
+ videoHeight,
+ logSessionId.c_str());
} else {
ALOGV("NOT sending MediaParser media metrics.");
}
-
+ std::stringstream log;
+ log << "result:" << "(result)" << " {"
+ << " mediametrics_mediaparser_reported:"
+ << android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " parser_name:" << parserName
+ << " created_by_name:" << createdByName
+ << " parser_pool:" << parserPool
+ << " last_exception:" << lastException
+ << " resource_byte_count:" << resourceByteCount
+ << " duration_millis:" << durationMillis
+ << " track_mime_types:" << trackMimeTypes
+ << " track_codecs:" << trackCodecs
+ << " altered_parameters:" << alteredParameters
+ << " video_width:" << videoWidth
+ << " video_height:" << videoHeight
+ << " log_session_id:" << logSessionId
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_nuplayer.cpp b/services/mediametrics/statsd_nuplayer.cpp
index a8d0f55..33da81e 100644
--- a/services/mediametrics/statsd_nuplayer.cpp
+++ b/services/mediametrics/statsd_nuplayer.cpp
@@ -41,16 +41,16 @@
* handles nuplayer AND nuplayer2
* checks for the union of what the two players generate
*/
-bool statsd_nuplayer(const mediametrics::Item *item)
+bool statsd_nuplayer(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -60,15 +60,16 @@
//
// differentiate between nuplayer and nuplayer2
- metrics_proto.set_whichplayer(item->getKey().c_str());
+ std::string whichPlayer = item->getKey();
+ metrics_proto.set_whichplayer(whichPlayer.c_str());
std::string video_mime;
if (item->getString("android.media.mediaplayer.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(std::move(video_mime));
+ metrics_proto.set_video_mime(video_mime);
}
std::string video_codec;
if (item->getString("android.media.mediaplayer.video.codec", &video_codec)) {
- metrics_proto.set_video_codec(std::move(video_codec));
+ metrics_proto.set_video_codec(video_codec);
}
int32_t width = -1;
@@ -92,32 +93,32 @@
if (item->getInt64("android.media.mediaplayer.startupdropped", &frames_dropped_startup)) {
metrics_proto.set_frames_dropped_startup(frames_dropped_startup);
}
- double fps = -1.0;
- if (item->getDouble("android.media.mediaplayer.fps", &fps)) {
- metrics_proto.set_framerate(fps);
+ double framerate = -1.0;
+ if (item->getDouble("android.media.mediaplayer.fps", &framerate)) {
+ metrics_proto.set_framerate(framerate);
}
std::string audio_mime;
if (item->getString("android.media.mediaplayer.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(std::move(audio_mime));
+ metrics_proto.set_audio_mime(audio_mime);
}
std::string audio_codec;
if (item->getString("android.media.mediaplayer.audio.codec", &audio_codec)) {
- metrics_proto.set_audio_codec(std::move(audio_codec));
+ metrics_proto.set_audio_codec(audio_codec);
}
- int64_t duration_ms = -1;
- if (item->getInt64("android.media.mediaplayer.durationMs", &duration_ms)) {
- metrics_proto.set_duration_millis(duration_ms);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
- int64_t playing_ms = -1;
- if (item->getInt64("android.media.mediaplayer.playingMs", &playing_ms)) {
- metrics_proto.set_playing_millis(playing_ms);
+ int64_t playing_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.playingMs", &playing_millis)) {
+ metrics_proto.set_playing_millis(playing_millis);
}
- int32_t err = -1;
- if (item->getInt32("android.media.mediaplayer.err", &err)) {
- metrics_proto.set_error(err);
+ int32_t error = -1;
+ if (item->getInt32("android.media.mediaplayer.err", &error)) {
+ metrics_proto.set_error(error);
}
int32_t error_code = -1;
if (item->getInt32("android.media.mediaplayer.errcode", &error_code)) {
@@ -125,45 +126,74 @@
}
std::string error_state;
if (item->getString("android.media.mediaplayer.errstate", &error_state)) {
- metrics_proto.set_error_state(std::move(error_state));
+ metrics_proto.set_error_state(error_state);
}
std::string data_source_type;
if (item->getString("android.media.mediaplayer.dataSource", &data_source_type)) {
- metrics_proto.set_data_source_type(std::move(data_source_type));
+ metrics_proto.set_data_source_type(data_source_type);
}
- int64_t rebufferingMs = -1;
- if (item->getInt64("android.media.mediaplayer.rebufferingMs", &rebufferingMs)) {
- metrics_proto.set_rebuffering_millis(rebufferingMs);
+ int64_t rebuffering_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.rebufferingMs", &rebuffering_millis)) {
+ metrics_proto.set_rebuffering_millis(rebuffering_millis);
}
int32_t rebuffers = -1;
if (item->getInt32("android.media.mediaplayer.rebuffers", &rebuffers)) {
metrics_proto.set_rebuffers(rebuffers);
}
- int32_t rebufferExit = -1;
- if (item->getInt32("android.media.mediaplayer.rebufferExit", &rebufferExit)) {
- metrics_proto.set_rebuffer_at_exit(rebufferExit);
+ int32_t rebuffer_at_exit = -1;
+ if (item->getInt32("android.media.mediaplayer.rebufferExit", &rebuffer_at_exit)) {
+ metrics_proto.set_rebuffer_at_exit(rebuffer_at_exit);
}
-
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize nuplayer metrics");
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_NUPLAYER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_NUPLAYER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_nuplayer_reported:"
+ << android::util::MEDIAMETRICS_NUPLAYER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+ << " whichPlayer:" << whichPlayer
+ << " video_mime:" << video_mime
+ << " video_codec:" << video_codec
+ << " width:" << width
+ << " height:" << height
+ << " frames:" << frames
+ << " frames_dropped:" << frames_dropped
+ << " framerate:" << framerate
+ << " audio_mime:" << audio_mime
+ << " audio_codec:" << media_apex_version
+
+ << " duration_millis:" << duration_millis
+ << " playing_millis:" << playing_millis
+ << " error:" << error
+ << " error_code:" << error_code
+ << " error_state:" << error_state
+ << " data_source_type:" << data_source_type
+ << " rebuffering_millis:" << rebuffering_millis
+ << " rebuffers:" << rebuffers
+ << " rebuffer_at_exit:" << rebuffer_at_exit
+ << " frames_dropped_startup:" << frames_dropped_startup
+
+ // TODO NuPlayer - add log_session_id
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_NUPLAYER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index 2e5ada4..23b884f 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_recorder(const mediametrics::Item *item)
+bool statsd_recorder(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -58,22 +58,22 @@
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
std::string audio_mime;
if (item->getString("android.media.mediarecorder.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(std::move(audio_mime));
+ metrics_proto.set_audio_mime(audio_mime);
}
// string kRecorderVideoMime = "android.media.mediarecorder.video.mime";
std::string video_mime;
if (item->getString("android.media.mediarecorder.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(std::move(video_mime));
+ metrics_proto.set_video_mime(video_mime);
}
// int32 kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
- int32_t videoProfile = -1;
- if (item->getInt32("android.media.mediarecorder.video-encoder-profile", &videoProfile)) {
- metrics_proto.set_video_profile(videoProfile);
+ int32_t video_profile = -1;
+ if (item->getInt32("android.media.mediarecorder.video-encoder-profile", &video_profile)) {
+ metrics_proto.set_video_profile(video_profile);
}
// int32 kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
- int32_t videoLevel = -1;
- if (item->getInt32("android.media.mediarecorder.video-encoder-level", &videoLevel)) {
- metrics_proto.set_video_level(videoLevel);
+ int32_t video_level = -1;
+ if (item->getInt32("android.media.mediarecorder.video-encoder-level", &video_level)) {
+ metrics_proto.set_video_level(video_level);
}
// int32 kRecorderWidth = "android.media.mediarecorder.width";
int32_t width = -1;
@@ -97,73 +97,73 @@
}
// int32 kRecorderCaptureFps = "android.media.mediarecorder.capture-fps";
- int32_t captureFps = -1;
- if (item->getInt32("android.media.mediarecorder.capture-fps", &captureFps)) {
- metrics_proto.set_capture_fps(captureFps);
+ int32_t capture_fps = -1;
+ if (item->getInt32("android.media.mediarecorder.capture-fps", &capture_fps)) {
+ metrics_proto.set_capture_fps(capture_fps);
}
// double kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
- double captureFpsEnable = -1;
- if (item->getDouble("android.media.mediarecorder.capture-fpsenable", &captureFpsEnable)) {
- metrics_proto.set_capture_fps_enable(captureFpsEnable);
+ double capture_fps_enable = -1;
+ if (item->getDouble("android.media.mediarecorder.capture-fpsenable", &capture_fps_enable)) {
+ metrics_proto.set_capture_fps_enable(capture_fps_enable);
}
// int64 kRecorderDurationMs = "android.media.mediarecorder.durationMs";
- int64_t durationMs = -1;
- if (item->getInt64("android.media.mediarecorder.durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.mediarecorder.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
// int64 kRecorderPaused = "android.media.mediarecorder.pausedMs";
- int64_t pausedMs = -1;
- if (item->getInt64("android.media.mediarecorder.pausedMs", &pausedMs)) {
- metrics_proto.set_paused_millis(pausedMs);
+ int64_t paused_millis = -1;
+ if (item->getInt64("android.media.mediarecorder.pausedMs", &paused_millis)) {
+ metrics_proto.set_paused_millis(paused_millis);
}
// int32 kRecorderNumPauses = "android.media.mediarecorder.NPauses";
- int32_t pausedCount = -1;
- if (item->getInt32("android.media.mediarecorder.NPauses", &pausedCount)) {
- metrics_proto.set_paused_count(pausedCount);
+ int32_t paused_count = -1;
+ if (item->getInt32("android.media.mediarecorder.NPauses", &paused_count)) {
+ metrics_proto.set_paused_count(paused_count);
}
// int32 kRecorderAudioBitrate = "android.media.mediarecorder.audio-bitrate";
- int32_t audioBitrate = -1;
- if (item->getInt32("android.media.mediarecorder.audio-bitrate", &audioBitrate)) {
- metrics_proto.set_audio_bitrate(audioBitrate);
+ int32_t audio_bitrate = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-bitrate", &audio_bitrate)) {
+ metrics_proto.set_audio_bitrate(audio_bitrate);
}
// int32 kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
- int32_t audioChannels = -1;
- if (item->getInt32("android.media.mediarecorder.audio-channels", &audioChannels)) {
- metrics_proto.set_audio_channels(audioChannels);
+ int32_t audio_channels = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-channels", &audio_channels)) {
+ metrics_proto.set_audio_channels(audio_channels);
}
// int32 kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
- int32_t audioSampleRate = -1;
- if (item->getInt32("android.media.mediarecorder.audio-samplerate", &audioSampleRate)) {
- metrics_proto.set_audio_samplerate(audioSampleRate);
+ int32_t audio_samplerate = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-samplerate", &audio_samplerate)) {
+ metrics_proto.set_audio_samplerate(audio_samplerate);
}
// int32 kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
- int32_t movieTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.movie-timescale", &movieTimescale)) {
- metrics_proto.set_movie_timescale(movieTimescale);
+ int32_t movie_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.movie-timescale", &movie_timescale)) {
+ metrics_proto.set_movie_timescale(movie_timescale);
}
// int32 kRecorderAudioTimescale = "android.media.mediarecorder.audio-timescale";
- int32_t audioTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.audio-timescale", &audioTimescale)) {
- metrics_proto.set_audio_timescale(audioTimescale);
+ int32_t audio_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-timescale", &audio_timescale)) {
+ metrics_proto.set_audio_timescale(audio_timescale);
}
// int32 kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
- int32_t videoTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.video-timescale", &videoTimescale)) {
- metrics_proto.set_video_timescale(videoTimescale);
+ int32_t video_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.video-timescale", &video_timescale)) {
+ metrics_proto.set_video_timescale(video_timescale);
}
// int32 kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
- int32_t videoBitRate = -1;
- if (item->getInt32("android.media.mediarecorder.video-bitrate", &videoBitRate)) {
- metrics_proto.set_video_bitrate(videoBitRate);
+ int32_t video_bitrate = -1;
+ if (item->getInt32("android.media.mediarecorder.video-bitrate", &video_bitrate)) {
+ metrics_proto.set_video_bitrate(video_bitrate);
}
// int32 kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
- int32_t iFrameInterval = -1;
- if (item->getInt32("android.media.mediarecorder.video-iframe-interval", &iFrameInterval)) {
- metrics_proto.set_iframe_interval(iFrameInterval);
+ int32_t iframe_interval = -1;
+ if (item->getInt32("android.media.mediarecorder.video-iframe-interval", &iframe_interval)) {
+ metrics_proto.set_iframe_interval(iframe_interval);
}
std::string serialized;
@@ -172,17 +172,47 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_RECORDER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_RECORDER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_recorder_reported:"
+ << android::util::MEDIAMETRICS_RECORDER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+ << " audio_mime:" << audio_mime
+ << " video_mime:" << video_mime
+ << " video_profile:" << video_profile
+ << " video_level:" << video_level
+ << " width:" << width
+ << " height:" << height
+ << " rotation:" << rotation
+ << " framerate:" << framerate
+ << " capture_fps:" << capture_fps
+ << " capture_fps_enable:" << capture_fps_enable
+ << " duration_millis:" << duration_millis
+ << " paused_millis:" << paused_millis
+ << " paused_count:" << paused_count
+ << " audio_bitrate:" << audio_bitrate
+ << " audio_channels:" << audio_channels
+ << " audio_samplerate:" << audio_samplerate
+ << " movie_timescale:" << movie_timescale
+ << " audio_timescale:" << audio_timescale
+ << " video_timescale:" << video_timescale
+ << " video_bitrate:" << video_bitrate
+
+ << " iframe_interval:" << iframe_interval
+ // TODO Recorder - add log_session_id
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_RECORDER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index ac9c7fa..2336d6f 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -809,7 +809,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
// untrusted entities cannot create a new key.
ASSERT_EQ(PERMISSION_DENIED, audioAnalytics.submit(item, false /* isTrusted */));
@@ -817,7 +819,7 @@
// TODO: Verify contents of AudioAnalytics.
// Currently there is no getter API in AudioAnalytics besides dump.
- ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
+ ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
@@ -845,7 +847,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
// untrusted entities cannot create a new key.
ASSERT_EQ(PERMISSION_DENIED, audioAnalytics.submit(item, false /* isTrusted */));
@@ -853,7 +857,7 @@
// TODO: Verify contents of AudioAnalytics.
// Currently there is no getter API in AudioAnalytics besides dump.
- ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
+ ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
@@ -877,7 +881,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 926de3e..db61061 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -74,6 +74,9 @@
"ResourceManagerService.cpp",
"ResourceObserverService.cpp",
"ServiceLog.cpp",
+
+ // TODO: convert to AIDL?
+ "IMediaResourceMonitor.cpp",
],
shared_libs: [
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.cpp b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
new file mode 100644
index 0000000..42d7feb
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IMediaResourceMonitor.h"
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <sys/types.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
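+// Client-side proxy: packages the pid and resource type into a Parcel and sends
+// NOTIFY_RESOURCE_GRANTED to the remote service as a one-way transaction.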
+class BpMediaResourceMonitor : public BpInterface<IMediaResourceMonitor> {
+public:
+ explicit BpMediaResourceMonitor(const sp<IBinder>& impl)
+ : BpInterface<IMediaResourceMonitor>(impl) {}
+
+ virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaResourceMonitor::getInterfaceDescriptor());
+ data.writeInt32(pid);
+ data.writeInt32(type);
+ remote()->transact(NOTIFY_RESOURCE_GRANTED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+};
+
+IMPLEMENT_META_INTERFACE(MediaResourceMonitor, "android.media.IMediaResourceMonitor")
+
+// ----------------------------------------------------------------------
+
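+// Server-side stub: unpacks the Parcel and dispatches to the local notifyResourceGranted().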
+// NOLINTNEXTLINE(google-default-arguments)
+status_t BnMediaResourceMonitor::onTransact( uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags) {
+ switch(code) {
+ case NOTIFY_RESOURCE_GRANTED: {
+ CHECK_INTERFACE(IMediaResourceMonitor, data, reply);
+ int32_t pid = data.readInt32();
+ const int32_t type = data.readInt32();
+ notifyResourceGranted(/*in*/ pid, /*in*/ type);
+ return NO_ERROR;
+ } break;
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+// ----------------------------------------------------------------------
+
+} // namespace android
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.h b/services/mediaresourcemanager/IMediaResourceMonitor.h
new file mode 100644
index 0000000..f92d557
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef __ANDROID_VNDK__
+
+#include <binder/IInterface.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
+class IMediaResourceMonitor : public IInterface {
+public:
+ DECLARE_META_INTERFACE(MediaResourceMonitor)
+
+ // Values should be in sync with Intent.EXTRA_MEDIA_RESOURCE_TYPE_XXX.
+ enum {
+ TYPE_VIDEO_CODEC = 0,
+ TYPE_AUDIO_CODEC = 1,
+ };
+
+ virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type) = 0;
+
+ enum {
+ NOTIFY_RESOURCE_GRANTED = IBinder::FIRST_CALL_TRANSACTION,
+ };
+};
+
+// ----------------------------------------------------------------------
+
+class BnMediaResourceMonitor : public BnInterface<IMediaResourceMonitor> {
+public:
+ // NOLINTNEXTLINE(google-default-arguments)
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------
+
+} // namespace android
+
+#else // __ANDROID_VNDK__
+#error "This header is not visible to vendors"
+#endif // __ANDROID_VNDK__
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 289cffd..953686b 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -21,7 +21,6 @@
#include <android/binder_manager.h>
#include <android/binder_process.h>
-#include <binder/IMediaResourceMonitor.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <cutils/sched_policy.h>
@@ -36,6 +35,7 @@
#include <sys/time.h>
#include <unistd.h>
+#include "IMediaResourceMonitor.h"
#include "ResourceManagerService.h"
#include "ResourceObserverService.h"
#include "ServiceLog.h"
diff --git a/services/mediatranscoding/MediaTranscodingService.cpp b/services/mediatranscoding/MediaTranscodingService.cpp
index 8b64134..b80fe57 100644
--- a/services/mediatranscoding/MediaTranscodingService.cpp
+++ b/services/mediatranscoding/MediaTranscodingService.cpp
@@ -131,10 +131,10 @@
void MediaTranscodingService::instantiate() {
std::shared_ptr<MediaTranscodingService> service =
::ndk::SharedRefBase::make<MediaTranscodingService>();
- binder_status_t status =
- AServiceManager_addService(service->asBinder().get(), getServiceName());
- if (status != STATUS_OK) {
- return;
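+ // Register as a lazy service so servicemanager starts it on demand rather than at boot.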
+ if (__builtin_available(android __TRANSCODING_MIN_API__, *)) {
+ // Once the service is started, we want it to stay up even if the client side has perished.
+ AServiceManager_forceLazyServicesPersist(true /*persist*/);
+ (void)AServiceManager_registerLazyService(service->asBinder().get(), getServiceName());
}
}
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index 4df5a9f..cb180ec 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -25,6 +25,7 @@
],
shared_libs: [
+ "libactivitymanager_aidl",
"libbinder",
"libbinder_ndk",
"liblog",
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 3f7d8d6..0cb2fad 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -434,6 +434,34 @@
session.request.destinationFilePath == destinationFilePath));
}
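+ // Asks the service to add another client uid to an existing session and checks
+ // that the result matches the expectation template parameter.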
+ template <bool expectation = success>
+ bool addClientUid(int32_t sessionId, uid_t clientUid) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ Status status = mClient->addClientUid(sessionId, clientUid, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+
+ return status.isOk() && (result == shouldSucceed);
+ }
+
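+ // Queries the uids attached to a session; a null return from the service indicates failure.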
+ template <bool expectation = success>
+ bool getClientUids(int32_t sessionId, std::vector<int32_t>* clientUids) {
+ constexpr bool shouldSucceed = (expectation == success);
+ std::optional<std::vector<int32_t>> aidl_return;
+ Status status = mClient->getClientUids(sessionId, &aidl_return);
+
+ EXPECT_TRUE(status.isOk());
+ bool success = (aidl_return != std::nullopt);
+ if (success) {
+ *clientUids = *aidl_return;
+ }
+ EXPECT_EQ(success, shouldSucceed);
+
+ return status.isOk() && (success == shouldSucceed);
+ }
+
int32_t mClientId;
pid_t mClientPid;
uid_t mClientUid;
@@ -453,7 +481,7 @@
// Need thread pool to receive callbacks, otherwise oneway callbacks are
// silently ignored.
ABinderProcess_startThreadPool();
- ::ndk::SpAIBinder binder(AServiceManager_getService("media.transcoding"));
+ ::ndk::SpAIBinder binder(AServiceManager_waitForService("media.transcoding"));
mService = IMediaTranscodingService::fromBinder(binder);
if (mService == nullptr) {
ALOGE("Failed to connect to the media.trascoding service.");
@@ -500,8 +528,24 @@
EXPECT_TRUE(mClient3->unregisterClient().isOk());
}
+ const char* prepareOutputFile(const char* path) {
+ deleteFile(path);
+ return path;
+ }
+
void deleteFile(const char* path) { unlink(path); }
+ void dismissKeyguard() {
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ }
+
+ void stopAppPackages() {
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ }
+
std::shared_ptr<IMediaTranscodingService> mService;
std::shared_ptr<TestClientCallback> mClient1;
std::shared_ptr<TestClientCallback> mClient2;
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
index 0550d77..e9eebe2 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
@@ -33,7 +33,7 @@
namespace media {
-constexpr int64_t kPaddingUs = 400000;
+constexpr int64_t kPaddingUs = 1000000;
constexpr int64_t kSessionWithPaddingUs = 10000000 + kPaddingUs;
constexpr int32_t kBitRate = 8 * 1000 * 1000; // 8Mbs
@@ -56,8 +56,7 @@
registerMultipleClients();
const char* srcPath = "bad_file_uri";
- const char* dstPath = OUTPATH(TestInvalidSource);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestInvalidSource));
// Submit one session.
EXPECT_TRUE(
@@ -73,8 +72,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestPassthru) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestPassthru);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestPassthru));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath));
@@ -89,8 +87,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideo) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestTranscodeVideo);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideo));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -106,8 +103,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideoProgress) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestTranscodeVideoProgress);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideoProgress));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -134,11 +130,9 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestCancelImmediately_Session0);
- const char* dstPath1 = OUTPATH(TestCancelImmediately_Session1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session1));
- deleteFile(dstPath0);
- deleteFile(dstPath1);
// Submit one session, should start immediately.
EXPECT_TRUE(
mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -166,11 +160,9 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestCancelWhileRunning_Session0);
- const char* dstPath1 = OUTPATH(TestCancelWhileRunning_Session1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session1));
- deleteFile(dstPath0);
- deleteFile(dstPath1);
// Submit two sessions, session 0 should start immediately, session 1 should be queued.
EXPECT_TRUE(
mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -197,10 +189,8 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestPauseResumeSingleClient_Session0);
- const char* dstPath1 = OUTPATH(TestPauseResumeSingleClient_Session1);
- deleteFile(dstPath0);
- deleteFile(dstPath1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session1));
// Submit one offline session, should start immediately.
EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kUnspecified,
@@ -244,20 +234,15 @@
TEST_F(MediaTranscodingServiceRealTest, TestPauseResumeMultiClients) {
ALOGD("TestPauseResumeMultiClients starting...");
- EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
- EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ dismissKeyguard();
+ stopAppPackages();
registerMultipleClients();
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
- const char* dstPath1 = OUTPATH(TestPauseResumeMultiClients_Client1);
- deleteFile(dstPath0);
- deleteFile(dstPath1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client1));
ALOGD("Moving app A to top...");
EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
@@ -294,12 +279,177 @@
unregisterMultipleClients();
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ stopAppPackages();
ALOGD("TestPauseResumeMultiClients finished.");
}
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForeground) {
+ ALOGD("TestUidGoneForeground starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+ // Test kill foreground app, using only 1 uid.
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit sessions to Client1 (app A).
+ ALOGD("Submitting sessions to client1 (app A) ...");
+ EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::NoEvent);
+
+ // Kill app A, expect both of its sessions to be cancelled with error code
+ // kUidGoneCancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneForeground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForegroundMultiUids) {
+ ALOGD("TestUidGoneForegroundMultiUids starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+ // Test kill foreground app, using two uids.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+ // Make app A also request session 1.
+ EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+ // Kill app A, CLIENT(2)'s session 1 should continue because it's also requested by app B.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+ // Kill app B, sessions should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneForegroundMultiUids finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackground) {
+ ALOGD("TestUidGoneBackground starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundSession1));
+
+ // Test kill background app, using two uids.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Kill app B, all its sessions should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneBackground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackgroundMultiUids) {
+ ALOGD("TestUidGoneBackgroundMultiUids starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundMultiUidsSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundMultiUidsSession1));
+
+ // Test kill background app, using two uids.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+ // Make app A also request session 1.
+ EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+ // Kill app B, CLIENT(2)'s session 1 should continue to run, session 0 on
+ // the other hand should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneBackgroundMultiUids finished.");
+}
+
} // namespace media
} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
index c8994ac..cb354f4 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
@@ -36,6 +36,7 @@
#include <iostream>
#include <list>
+#include <unordered_set>
#include "MediaTranscodingServiceTestHelper.h"
#include "SimulatedTranscoder.h"
@@ -255,6 +256,54 @@
unregisterMultipleClients();
}
+TEST_F(MediaTranscodingServiceSimulatedTest, TestAddGetClientUids) {
+ registerMultipleClients();
+
+ std::vector<int32_t> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Submit one real-time session.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file"));
+
+ // Should have mClientUid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(0, &clientUids));
+ EXPECT_EQ(clientUids.size(), 1u);
+ EXPECT_EQ(clientUids[0], (int32_t)mClient1->mClientUid);
+
+ // Adding invalid client uid should fail.
+ EXPECT_TRUE(mClient1->addClientUid<fail>(0, kInvalidClientUid));
+
+ // Adding mClientUid again should fail.
+ EXPECT_TRUE(mClient1->addClientUid<fail>(0, mClient1->mClientUid));
+
+ // Submit one offline session.
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1",
+ TranscodingSessionPriority::kUnspecified));
+
+ // Should not have any uids in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ EXPECT_EQ(clientUids.size(), 0u);
+
+ // Add own uid (with IMediaTranscodingService::USE_CALLING_UID), should succeed.
+ EXPECT_TRUE(mClient1->addClientUid(1, IMediaTranscodingService::USE_CALLING_UID));
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ EXPECT_EQ(clientUids.size(), 1u);
+ EXPECT_EQ(clientUids[0], (int32_t)ownUid);
+
+ // Adding mClientUid should succeed.
+ EXPECT_TRUE(mClient1->addClientUid(1, mClient1->mClientUid));
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids.begin(), clientUids.end());
+ EXPECT_EQ(uidSet.size(), 2u);
+ EXPECT_EQ(uidSet.count(ownUid), 1u);
+ EXPECT_EQ(uidSet.count(mClient1->mClientUid), 1u);
+
+ unregisterMultipleClients();
+}
+
TEST_F(MediaTranscodingServiceSimulatedTest, TestSubmitCancelWithOfflineSessions) {
registerMultipleClients();
@@ -378,6 +427,53 @@
ALOGD("TestTranscodingUidPolicy finished.");
}
+TEST_F(MediaTranscodingServiceSimulatedTest, TestTranscodingUidPolicyWithMultipleClientUids) {
+ ALOGD("TestTranscodingUidPolicyWithMultipleClientUids starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ registerMultipleClients();
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit 3 requests.
+ ALOGD("Submitting session to client1 (app A)...");
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+
+ // mClient1's Session 0 should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Add client2 (app B)'s uid to mClient1's session 1.
+ EXPECT_TRUE(mClient1->addClientUid(1, mClient2->mClientUid));
+
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+
+ // mClient1's session 0 should pause, session 1 should start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ ALOGD("Moving app A back to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ ALOGD("TestTranscodingUidPolicyWithMultipleClientUids finished.");
+}
+
TEST_F(MediaTranscodingServiceSimulatedTest, TestTranscodingThermalPolicy) {
ALOGD("TestTranscodingThermalPolicy starting...");
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index faea58f..13dd3d3 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -38,6 +38,10 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
+AAudioServiceEndpoint::~AAudioServiceEndpoint() {
+ ALOGD("%s() called", __func__);
+}
+
std::string AAudioServiceEndpoint::dump() const NO_THREAD_SAFETY_ANALYSIS {
std::stringstream result;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 72090c2..a7f63d3 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -43,7 +43,7 @@
, public AAudioStreamParameters {
public:
- virtual ~AAudioServiceEndpoint() = default;
+ virtual ~AAudioServiceEndpoint();
virtual std::string dump() const;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 556710d..7294a58 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -51,8 +51,6 @@
: mMmapStream(nullptr)
, mAAudioService(audioService) {}
-AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}
-
std::string AAudioServiceEndpointMMAP::dump() const {
std::stringstream result;
@@ -357,7 +355,10 @@
// This is called by AudioFlinger when it wants to destroy a stream.
void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
ALOGD("%s(portHandle = %d) called", __func__, portHandle);
- std::thread asyncTask(&AAudioServiceEndpointMMAP::handleTearDownAsync, this, portHandle);
+ android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ std::thread asyncTask([holdEndpoint, portHandle]() {
+ holdEndpoint->handleTearDownAsync(portHandle);
+ });
asyncTask.detach();
}
@@ -378,9 +379,11 @@
ALOGD("%s() called with dev %d, old = %d", __func__, deviceId, getDeviceId());
if (getDeviceId() != deviceId) {
if (getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
- std::thread asyncTask([this, deviceId]() {
- disconnectRegisteredStreams();
- setDeviceId(deviceId);
+ android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ std::thread asyncTask([holdEndpoint, deviceId]() {
+ ALOGD("onRoutingChanged() asyncTask launched");
+ holdEndpoint->disconnectRegisteredStreams();
+ holdEndpoint->setDeviceId(deviceId);
});
asyncTask.detach();
} else {
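The onTearDown() and onRoutingChanged() hunks above switch the detached worker threads from capturing a raw this pointer to capturing an android::sp<>, so the endpoint cannot be freed while the asynchronous task is still running. A minimal sketch of the pattern, with an illustrative Endpoint class standing in for AAudioServiceEndpointMMAP:

    #include <thread>
    #include <utils/RefBase.h>

    class Endpoint : public android::RefBase {
    public:
        void onEvent(int arg) {
            // Capture a strong reference; the lambda keeps the object alive
            // until the detached thread has finished its work.
            android::sp<Endpoint> holdEndpoint(this);
            std::thread asyncTask([holdEndpoint, arg]() {
                holdEndpoint->handleAsync(arg);
            });
            asyncTask.detach();
        }

    private:
        void handleAsync(int /*arg*/) {
            // Long-running work; *this is guaranteed to outlive the thread body.
        }
    };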
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 24b161d..5a53885 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -44,7 +44,7 @@
public:
explicit AAudioServiceEndpointMMAP(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpointMMAP();
+ virtual ~AAudioServiceEndpointMMAP() = default;
std::string dump() const override;
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 501e8c0..0d453cf 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -111,7 +111,7 @@
if (!endpoint->isConnected()) {
ALOGD("%s() call safeReleaseCloseFromCallback()", __func__);
// Release and close under a lock with no check for callback collisions.
- endpoint->getStreamInternal()->safeReleaseCloseFromCallback();
+ endpoint->getStreamInternal()->safeReleaseCloseInternal();
}
return result;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 694094c..dbacd75 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -67,8 +67,7 @@
// If the stream is deleted when OPEN or in use then audio resources will leak.
// This would indicate an internal error. So we want to find this ASAP.
LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
- || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
- || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
+ || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
"service stream %p still open, state = %d",
this, getState());
}
@@ -229,7 +228,7 @@
aaudio_result_t result = AAUDIO_OK;
if (auto state = getState();
- state == AAUDIO_STREAM_STATE_CLOSED || state == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ state == AAUDIO_STREAM_STATE_CLOSED || isDisconnected_l()) {
ALOGW("%s() already CLOSED, returns INVALID_STATE, handle = %d",
__func__, getHandle());
return AAUDIO_ERROR_INVALID_STATE;
@@ -261,8 +260,14 @@
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
setState(AAUDIO_STREAM_STATE_STARTED);
mThreadEnabled.store(true);
+ // Make sure this object does not get deleted before the run() method
+ // can protect it by making a strong pointer.
+ incStrong(nullptr); // See run() method.
result = mTimestampThread.start(this);
- if (result != AAUDIO_OK) goto error;
+ if (result != AAUDIO_OK) {
+ decStrong(nullptr); // run() can't do it so we have to do it here.
+ goto error;
+ }
return result;
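The incStrong()/decStrong() pair added above manually pins the stream across the hand-off to the timestamp thread: the reference is taken before mTimestampThread.start(), balanced inside run() once that thread holds its own sp<>, and balanced on the error path when the thread never starts. A compact, illustrative sketch of the same hand-off (the Stream class and startWorker() name are not part of the change):

    #include <thread>
    #include <utils/RefBase.h>

    class Stream : public android::RefBase {
    public:
        void startWorker() {
            incStrong(nullptr);                   // pin *this; balanced in run()
            std::thread worker([this]() { run(); });
            worker.detach();
            // If the launch failed we would call decStrong(nullptr) here instead,
            // because run() would never execute to balance the reference.
        }

    private:
        void run() {
            android::sp<Stream> holdStream(this); // thread now owns a strong ref
            holdStream->decStrong(nullptr);       // balance startWorker()'s incStrong()
            // ... periodic work; *this stays alive until run() returns ...
        }
    };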
@@ -291,10 +296,6 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
.record(); });
- // Send it now because the timestamp gets rounded up when stopStream() is called below.
- // Also we don't need the timestamps while we are shutting down.
- sendCurrentTimestamp();
-
result = stopTimestampThread();
if (result != AAUDIO_OK) {
disconnect_l();
@@ -340,10 +341,12 @@
setState(AAUDIO_STREAM_STATE_STOPPING);
- // Send it now because the timestamp gets rounded up when stopStream() is called below.
- // Also we don't need the timestamps while we are shutting down.
- sendCurrentTimestamp(); // warning - this calls a virtual function
+ // Temporarily unlock because we are joining the timestamp thread and it may try
+ // to acquire mLock.
+ mLock.unlock();
result = stopTimestampThread();
+ mLock.lock();
+
if (result != AAUDIO_OK) {
disconnect_l();
return result;
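Releasing mLock around stopTimestampThread() above avoids a self-deadlock: the timestamp thread can itself block on mLock (for example when sending a timestamp or disconnecting), so joining it while holding the lock could wait forever. A generic sketch of the pattern, assuming a std::mutex held by the caller and a worker thread that also takes it:

    #include <mutex>
    #include <thread>

    std::mutex gLock;
    std::thread gWorker;   // periodically locks gLock on its own

    // Precondition: the caller holds gLock (mirrors the _l suffix convention).
    void stopWorker_l() {
        // Temporarily drop the lock so the worker can make progress and exit,
        // then re-acquire it before returning to the locked context.
        gLock.unlock();
        if (gWorker.joinable()) {
            gWorker.join();
        }
        gLock.lock();
    }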
@@ -403,15 +406,21 @@
__attribute__((no_sanitize("integer")))
void AAudioServiceStreamBase::run() {
ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+ // Hold onto the ref counted stream until the end.
+ android::sp<AAudioServiceStreamBase> holdStream(this);
TimestampScheduler timestampScheduler;
+ // Balance the incStrong from when the thread was launched.
+ holdStream->decStrong(nullptr);
+
timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
timestampScheduler.start(AudioClock::getNanoseconds());
int64_t nextTime = timestampScheduler.nextAbsoluteTime();
int32_t loopCount = 0;
+ aaudio_result_t result = AAUDIO_OK;
while(mThreadEnabled.load()) {
loopCount++;
if (AudioClock::getNanoseconds() >= nextTime) {
- aaudio_result_t result = sendCurrentTimestamp();
+ result = sendCurrentTimestamp();
if (result != AAUDIO_OK) {
ALOGE("%s() timestamp thread got result = %d", __func__, result);
break;
@@ -423,6 +432,11 @@
AudioClock::sleepUntilNanoTime(nextTime);
}
}
+ // This was moved from the calls in stop_l() and pause_l(), which could cause a deadlock
+ // if it resulted in a call to disconnect.
+ if (result == AAUDIO_OK) {
+ (void) sendCurrentTimestamp();
+ }
ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
__func__, getTypeText(), loopCount);
}
@@ -433,8 +447,7 @@
}
void AAudioServiceStreamBase::disconnect_l() {
- if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED
- && getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ if (!isDisconnected_l() && getState() != AAUDIO_STREAM_STATE_CLOSED) {
mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT)
@@ -442,7 +455,7 @@
.record();
sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
- setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ setDisconnected_l(true);
}
}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 06c9f21..c42df0f 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -80,7 +80,7 @@
// because we had to wait until we generated the handle.
void logOpen(aaudio_handle_t streamHandle);
- aaudio_result_t close();
+ aaudio_result_t close() EXCLUDES(mLock);
/**
* Start the flow of audio data.
@@ -88,7 +88,7 @@
* This is not guaranteed to be synchronous but it currently is.
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
- aaudio_result_t start();
+ aaudio_result_t start() EXCLUDES(mLock);
/**
* Stop the flow of data so that start() can resume without loss of data.
@@ -96,7 +96,7 @@
* This is not guaranteed to be synchronous but it currently is.
* An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
*/
- aaudio_result_t pause();
+ aaudio_result_t pause() EXCLUDES(mLock);
/**
* Stop the flow of data after the currently queued data has finished playing.
@@ -105,14 +105,14 @@
* An AAUDIO_SERVICE_EVENT_STOPPED will be sent to the client when complete.
*
*/
- aaudio_result_t stop();
+ aaudio_result_t stop() EXCLUDES(mLock);
/**
* Discard any data held by the underlying HAL or Service.
*
* An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
*/
- aaudio_result_t flush();
+ aaudio_result_t flush() EXCLUDES(mLock);
virtual aaudio_result_t startClient(const android::AudioClient& client,
const audio_attributes_t *attr __unused,
@@ -126,9 +126,9 @@
return AAUDIO_ERROR_UNAVAILABLE;
}
- aaudio_result_t registerAudioThread(pid_t clientThreadId, int priority);
+ aaudio_result_t registerAudioThread(pid_t clientThreadId, int priority) EXCLUDES(mLock);
- aaudio_result_t unregisterAudioThread(pid_t clientThreadId);
+ aaudio_result_t unregisterAudioThread(pid_t clientThreadId) EXCLUDES(mLock);
bool isRunning() const {
return mState == AAUDIO_STREAM_STATE_STARTED;
@@ -137,7 +137,7 @@
/**
* Fill in a parcelable description of stream.
*/
- aaudio_result_t getDescription(AudioEndpointParcelable &parcelable);
+ aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) EXCLUDES(mLock);
void setRegisteredThread(pid_t pid) {
mRegisteredClientThread = pid;
@@ -153,7 +153,7 @@
void run() override; // to implement Runnable
- void disconnect();
+ void disconnect() EXCLUDES(mLock);
const android::AudioClient &getAudioClient() {
return mMmapClient;
@@ -248,7 +248,7 @@
aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
- aaudio_result_t sendCurrentTimestamp();
+ aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
aaudio_result_t sendXRunCount(int32_t xRunCount);
@@ -265,6 +265,13 @@
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ bool isDisconnected_l() const REQUIRES(mLock) {
+ return mDisconnected;
+ }
+ void setDisconnected_l(bool flag) REQUIRES(mLock) {
+ mDisconnected = flag;
+ }
+
pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
std::mutex mUpMessageQueueLock;
@@ -322,6 +329,8 @@
// for example a full message queue. Note that this atomic is unrelated to mCloseNeeded.
std::atomic<bool> mSuspended{false};
+ bool mDisconnected GUARDED_BY(mLock) {false};
+
protected:
// Locking order is important.
// Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
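The annotations added above are Clang thread-safety attributes: EXCLUDES(mLock) marks public entry points that acquire mLock themselves, REQUIRES(mLock) marks the _l helpers that must be called with the lock already held, and GUARDED_BY(mLock) ties mDisconnected to the lock so unlocked access is flagged at compile time. A small self-contained illustration, assuming the usual android-base/thread_annotations.h macros and clang's -Wthread-safety:

    #include <mutex>
    #include <android-base/thread_annotations.h>

    class Stream {
    public:
        void disconnect() EXCLUDES(mLock) {
            std::lock_guard<std::mutex> guard(mLock);
            disconnect_l();
        }

    private:
        void disconnect_l() REQUIRES(mLock) {
            setDisconnected_l(true);
        }
        void setDisconnected_l(bool flag) REQUIRES(mLock) {
            mDisconnected = flag;   // only legal while mLock is held
        }

        std::mutex mLock;
        bool mDisconnected GUARDED_BY(mLock) = false;
    };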
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 6ba1725..667465a 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -73,7 +73,8 @@
aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
- aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+ aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
+ int64_t *timeNanos) EXCLUDES(mLock) override;
aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index 16338db..b7ae167 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -67,8 +67,9 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->addPid(getHidlDemuxPid(pid),
- static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+ sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+ ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+ Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
if (res != Result::SUCCESS) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
}
@@ -82,8 +83,9 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->removePid(getHidlDemuxPid(pid),
- static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+ sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+ ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+ Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
if (res != Result::SUCCESS) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
}
@@ -109,11 +111,11 @@
DemuxPid hidlPid;
switch (pid.getTag()) {
case TunerDemuxPid::tPid: {
- hidlPid.tPid((uint16_t)pid.tPid);
+ hidlPid.tPid((uint16_t)pid.get<TunerDemuxPid::tPid>());
break;
}
case TunerDemuxPid::mmtpPid: {
- hidlPid.mmtpPid((uint16_t)pid.mmtpPid);
+ hidlPid.mmtpPid((uint16_t)pid.get<TunerDemuxPid::mmtpPid>());
break;
}
}
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 39a6723..039fd31 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -57,10 +57,10 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- MQDesc dvrMQDesc;
+ MQDesc filterMQDesc;
Result res;
mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
- dvrMQDesc = desc;
+ filterMQDesc = desc;
res = r;
});
if (res != Result::SUCCESS) {
@@ -69,7 +69,7 @@
AidlMQDesc aidlMQDesc;
unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
- dvrMQDesc, &aidlMQDesc);
+ filterMQDesc, &aidlMQDesc);
*_aidl_return = move(aidlMQDesc);
return Status::ok();
}
@@ -471,7 +471,7 @@
res = r;
if (res == Result::SUCCESS) {
TunerFilterSharedHandleInfo info{
- .handle = dupToAidl(hidl_handle(avMemory.getNativeHandle())),
+ .handle = dupToAidl(avMemory),
.size = static_cast<int64_t>(avMemSize),
};
*_aidl_return = move(info);
@@ -480,7 +480,10 @@
}
});
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
}
Status TunerFilter::releaseAvHandle(
@@ -497,7 +500,6 @@
return Status::ok();
}
-
Status TunerFilter::start() {
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
@@ -683,7 +685,7 @@
DemuxFilterMediaEvent mediaEvent = e.media();
TunerFilterMediaEvent tunerMedia;
- tunerMedia.streamId = static_cast<int>(mediaEvent.streamId);
+ tunerMedia.streamId = static_cast<char16_t>(mediaEvent.streamId);
tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
tunerMedia.pts = static_cast<long>(mediaEvent.pts);
tunerMedia.dataLength = static_cast<int>(mediaEvent.dataLength);
@@ -730,10 +732,10 @@
DemuxFilterSectionEvent sectionEvent = e.section();
TunerFilterSectionEvent tunerSection;
- tunerSection.tableId = static_cast<char>(sectionEvent.tableId);
- tunerSection.version = static_cast<char>(sectionEvent.version);
- tunerSection.sectionNum = static_cast<char>(sectionEvent.sectionNum);
- tunerSection.dataLength = static_cast<char>(sectionEvent.dataLength);
+ tunerSection.tableId = static_cast<char16_t>(sectionEvent.tableId);
+ tunerSection.version = static_cast<char16_t>(sectionEvent.version);
+ tunerSection.sectionNum = static_cast<char16_t>(sectionEvent.sectionNum);
+ tunerSection.dataLength = static_cast<char16_t>(sectionEvent.dataLength);
TunerFilterEvent tunerEvent;
tunerEvent.set<TunerFilterEvent::section>(move(tunerSection));
@@ -747,8 +749,8 @@
DemuxFilterPesEvent pesEvent = e.pes();
TunerFilterPesEvent tunerPes;
- tunerPes.streamId = static_cast<char>(pesEvent.streamId);
- tunerPes.dataLength = static_cast<int>(pesEvent.dataLength);
+ tunerPes.streamId = static_cast<char16_t>(pesEvent.streamId);
+ tunerPes.dataLength = static_cast<char16_t>(pesEvent.dataLength);
tunerPes.mpuSequenceNumber = static_cast<int>(pesEvent.mpuSequenceNumber);
TunerFilterEvent tunerEvent;
@@ -775,9 +777,9 @@
}
if (tsRecordEvent.pid.getDiscriminator() == DemuxPid::hidl_discriminator::tPid) {
- tunerTsRecord.pid = static_cast<char>(tsRecordEvent.pid.tPid());
+ tunerTsRecord.pid = static_cast<char16_t>(tsRecordEvent.pid.tPid());
} else {
- tunerTsRecord.pid = static_cast<char>(Constant::INVALID_TS_PID);
+ tunerTsRecord.pid = static_cast<char16_t>(Constant::INVALID_TS_PID);
}
tunerTsRecord.scIndexMask = scIndexMask;
@@ -837,7 +839,7 @@
tunerDownload.itemFragmentIndex = static_cast<int>(downloadEvent.itemFragmentIndex);
tunerDownload.mpuSequenceNumber = static_cast<int>(downloadEvent.mpuSequenceNumber);
tunerDownload.lastItemFragmentIndex = static_cast<int>(downloadEvent.lastItemFragmentIndex);
- tunerDownload.dataLength = static_cast<char>(downloadEvent.dataLength);
+ tunerDownload.dataLength = static_cast<char16_t>(downloadEvent.dataLength);
TunerFilterEvent tunerEvent;
tunerEvent.set<TunerFilterEvent::download>(move(tunerDownload));
@@ -851,7 +853,7 @@
DemuxFilterIpPayloadEvent ipPayloadEvent = e.ipPayload();
TunerFilterIpPayloadEvent tunerIpPayload;
- tunerIpPayload.dataLength = static_cast<char>(ipPayloadEvent.dataLength);
+ tunerIpPayload.dataLength = static_cast<char16_t>(ipPayloadEvent.dataLength);
TunerFilterEvent tunerEvent;
tunerEvent.set<TunerFilterEvent::ipPayload>(move(tunerIpPayload));
diff --git a/services/tuner/TunerLnb.cpp b/services/tuner/TunerLnb.cpp
index 4a5acf5..77248d4 100644
--- a/services/tuner/TunerLnb.cpp
+++ b/services/tuner/TunerLnb.cpp
@@ -48,7 +48,10 @@
sp<ILnbCallback> lnbCallback = new LnbCallback(tunerLnbCallback);
Result status = mLnb->setCallback(lnbCallback);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setVoltage(int voltage) {
@@ -58,7 +61,10 @@
}
Result status = mLnb->setVoltage(static_cast<LnbVoltage>(voltage));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setTone(int tone) {
@@ -68,7 +74,10 @@
}
Result status = mLnb->setTone(static_cast<LnbTone>(tone));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setSatellitePosition(int position) {
@@ -78,7 +87,10 @@
}
Result status = mLnb->setSatellitePosition(static_cast<LnbPosition>(position));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
@@ -88,7 +100,10 @@
}
Result status = mLnb->sendDiseqcMessage(diseqcMessage);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::close() {
diff --git a/services/tuner/TunerTimeFilter.cpp b/services/tuner/TunerTimeFilter.cpp
index 25e1ad9..ea9da30 100644
--- a/services/tuner/TunerTimeFilter.cpp
+++ b/services/tuner/TunerTimeFilter.cpp
@@ -38,7 +38,10 @@
}
Result status = mTimeFilter->setTimeStamp(timeStamp);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerTimeFilter::clearTimeStamp() {
@@ -48,7 +51,10 @@
}
Result status = mTimeFilter->clearTimeStamp();
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
@@ -66,8 +72,9 @@
});
if (status != Result::SUCCESS) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return Status::ok();
}
Status TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
@@ -85,8 +92,9 @@
});
if (status != Result::SUCCESS) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return Status::ok();
}
Status TunerTimeFilter::close() {
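The TunerLnb and TunerTimeFilter changes above fix a common bug: every call used to return Status::fromServiceSpecificError() even on Result::SUCCESS, so successful HAL calls were reported to AIDL clients as service-specific errors. Each method now returns Status::ok() on success. If the pattern keeps repeating, a small conversion helper could centralize it; a hypothetical sketch (the toStatus() helper is illustrative, not part of this change):

    #include <android/binder_auto_utils.h>

    // Assumes the HIDL tuner Result enum is in scope, as in the files above.
    ::ndk::ScopedAStatus toStatus(Result res) {
        if (res != Result::SUCCESS) {
            return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
        }
        return ::ndk::ScopedAStatus::ok();
    }

    // Example use in a setter:
    //   return toStatus(mLnb->setVoltage(static_cast<LnbVoltage>(voltage)));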
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
index 51c6378..8b238b6 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxPid.aidl
@@ -22,7 +22,7 @@
* {@hide}
*/
union TunerDemuxPid {
- int tPid;
+ char tPid;
- int mmtpPid;
+ char mmtpPid;
}
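TunerDemuxPid here, and the TunerFilter*/TunerFrontend* fields changed below, move from int to char because the underlying HAL values are 16-bit unsigned quantities (TS/MMTP PIDs, stream IDs, data lengths). AIDL has no unsigned types, but char is a 16-bit code unit that maps to char16_t in the C++/NDK backends, so the full 0..65535 range survives without sign extension; this is also why the casts in TunerFilter.cpp above move from static_cast<char> (8-bit truncation) to static_cast<char16_t>. A tiny illustration of the difference:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint16_t pid = 0x1FFF;                       // a 13-bit TS PID
        char narrow = static_cast<char>(pid);        // truncates to 8 bits
        char16_t wide = static_cast<char16_t>(pid);  // keeps all 16 bits
        std::printf("narrow=0x%02X wide=0x%04X\n",
                    static_cast<unsigned>(narrow) & 0xFFu,
                    static_cast<unsigned>(wide));
        return 0;
    }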
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
index 5842c0d..c3dbce9 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
@@ -25,7 +25,7 @@
* {@hide}
*/
parcelable TunerFilterMediaEvent {
- int streamId;
+ char streamId;
/**
* true if PTS is present in PES header.
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
index f7ee286..dc1ecc6 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
@@ -27,7 +27,7 @@
/**
* Data size in bytes of PES data
*/
- int dataLength;
+ char dataLength;
/**
* MPU sequence number of filtered data
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
index 0923868..9a11fd5 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbs3Settings.aidl
@@ -27,7 +27,7 @@
*/
int frequency;
- int streamId;
+ char streamId;
int streamIdType;
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
index 2ae9092..dff9f4a 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendIsdbsSettings.aidl
@@ -27,7 +27,7 @@
*/
int frequency;
- int streamId;
+ char streamId;
int streamIdType;