Merge "Revert "Deprecate support for legacy effect config file audio_effects.conf"" into udc-dev
diff --git a/camera/Android.bp b/camera/Android.bp
index f27eb31..b3f70f4 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -142,14 +142,15 @@
filegroup {
name: "libcamera_client_aidl",
srcs: [
+ "aidl/android/hardware/CameraExtensionSessionStats.aidl",
"aidl/android/hardware/ICameraService.aidl",
"aidl/android/hardware/ICameraServiceListener.aidl",
"aidl/android/hardware/ICameraServiceProxy.aidl",
"aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
"aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
- "aidl/android/hardware/camera2/ICameraOfflineSession.aidl",
"aidl/android/hardware/camera2/ICameraInjectionCallback.aidl",
"aidl/android/hardware/camera2/ICameraInjectionSession.aidl",
+ "aidl/android/hardware/camera2/ICameraOfflineSession.aidl",
],
path: "aidl",
}
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index 26c612a..9e9793d 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -278,7 +278,9 @@
mRequestCount(0),
mResultErrorCount(0),
mDeviceError(false),
- mVideoStabilizationMode(-1) {}
+ mVideoStabilizationMode(-1),
+ mSessionIndex(0),
+ mCameraExtensionSessionStats() {}
CameraSessionStats::CameraSessionStats(const String16& cameraId,
int facing, int newCameraState, const String16& clientName,
@@ -297,7 +299,9 @@
mRequestCount(0),
mResultErrorCount(0),
mDeviceError(0),
- mVideoStabilizationMode(-1) {}
+ mVideoStabilizationMode(-1),
+ mSessionIndex(0),
+ mCameraExtensionSessionStats() {}
status_t CameraSessionStats::readFromParcel(const android::Parcel* parcel) {
if (parcel == NULL) {
@@ -409,6 +413,18 @@
return err;
}
+ int32_t sessionIdx;
+ if ((err = parcel->readInt32(&sessionIdx)) != OK) {
+ ALOGE("%s: Failed to read session index from parcel", __FUNCTION__);
+ return err;
+ }
+
+ CameraExtensionSessionStats extStats{};
+ if ((err = extStats.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read extension session stats from parcel", __FUNCTION__);
+ return err;
+ }
+
mCameraId = id;
mFacing = facing;
mNewCameraState = newCameraState;
@@ -426,6 +442,8 @@
mStreamStats = std::move(streamStats);
mUserTag = userTag;
mVideoStabilizationMode = videoStabilizationMode;
+ mSessionIndex = sessionIdx;
+ mCameraExtensionSessionStats = extStats;
return OK;
}
@@ -523,6 +541,16 @@
return err;
}
+ if ((err = parcel->writeInt32(mSessionIndex)) != OK) {
+ ALOGE("%s: Failed to write session index!", __FUNCTION__);
+ return err;
+ }
+
+ if ((err = mCameraExtensionSessionStats.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write extension sessions stats!", __FUNCTION__);
+ return err;
+ }
+
return OK;
}
diff --git a/camera/aidl/android/hardware/CameraExtensionSessionStats.aidl b/camera/aidl/android/hardware/CameraExtensionSessionStats.aidl
new file mode 100644
index 0000000..1c81831
--- /dev/null
+++ b/camera/aidl/android/hardware/CameraExtensionSessionStats.aidl
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/**
+ * Metrics specific to Extension Sessions (see CameraExtensionSession) for logging.
+ *
+ * Each Extension Session is mapped to one camera session internally, and will be sent to
+ * CameraServiceProxy with IDLE/CLOSE calls.
+ * @hide
+ */
+parcelable CameraExtensionSessionStats {
+ /**
+ * Value should match {@code CameraExtensionCharacteristics#EXTENSION_*}
+ */
+ @Backing(type="int")
+ enum Type {
+ EXTENSION_NONE = -1,
+ EXTENSION_AUTOMATIC = 0,
+ EXTENSION_FACE_RETOUCH = 1,
+ EXTENSION_BOKEH = 2,
+ EXTENSION_HDR = 3,
+ EXTENSION_NIGHT = 4
+ }
+
+ /**
+ * Key to uniquely identify the session this stat is associated with. The first call to
+ * 'ICameraService.reportExtensionSessionStats' should set this to an empty string.
+ * 'ICameraService.reportExtensionSessionStats' will return the key which should be used with
+ * the next calls.
+ */
+ String key;
+
+ /**
+ * Camera ID for which the stats is being reported.
+ */
+ String cameraId;
+
+ /**
+ * Package name of the client using the camera
+ */
+ String clientName;
+
+
+ /**
+ * Type of extension session requested by the app. Note that EXTENSION_AUTOMATIC is reported
+ * as such.
+ */
+ Type type = Type.EXTENSION_NONE;
+
+ /**
+ * true if advanced extensions are being used, false otherwise
+ */
+ boolean isAdvanced = false;
+}
\ No newline at end of file
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 9f32595..f8e1631 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -30,6 +30,7 @@
import android.hardware.ICameraServiceListener;
import android.hardware.CameraInfo;
import android.hardware.CameraStatus;
+import android.hardware.CameraExtensionSessionStats;
/**
* Binder interface for the native camera service running in mediaserver.
@@ -214,6 +215,26 @@
*/
oneway void notifyDeviceStateChange(long newState);
+ /**
+ * Report Extension specific metrics to camera service for logging. This should only be called
+ * by CameraExtensionSession to log extension metrics. All calls after the first must set
+ * CameraExtensionSessionStats.key to the value returned by this function.
+ *
+ * Each subsequent call fully overwrites the existing CameraExtensionSessionStats for the
+ * current session, so the caller is responsible for keeping the stats complete.
+ *
+ * Due to cameraservice and cameraservice_proxy architecture, there is no guarantee that
+ * {@code stats} will be logged immediately (or at all). CameraService will log whatever
+ * extension stats it has at the time of camera session closing which may be before the app
+ * process receives a session/device closed callback; so CameraExtensionSession
+ * should send metrics to the cameraservice periodically, and cameraservice must handle calls
+ * to this function from sessions that have not been logged yet and from sessions that have
+ * already been closed.
+ *
+ * @return the key that must be used to report updates to previously reported stats.
+ */
+ String reportExtensionSessionStats(in CameraExtensionSessionStats stats);
+
// Bitfield constants for notifyDeviceStateChange
// All bits >= 32 are for custom vendor states
// Written as ints since AIDL does not support long constants.
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index be8a00f..4faa6b4 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -17,6 +17,7 @@
package android.hardware;
import android.hardware.CameraSessionStats;
+import android.hardware.CameraExtensionSessionStats;
/**
* Binder interface for the camera service proxy running in system_server.
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index 091a7ff..071bc73 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -20,6 +20,7 @@
#include <binder/Parcelable.h>
#include <camera/CameraMetadata.h>
+#include <android/hardware/CameraExtensionSessionStats.h>
namespace android {
namespace hardware {
@@ -158,6 +159,9 @@
std::vector<CameraStreamStats> mStreamStats;
String16 mUserTag;
int mVideoStabilizationMode;
+ int mSessionIndex;
+
+ CameraExtensionSessionStats mCameraExtensionSessionStats;
// Constructors
CameraSessionStats();
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index cd4932d..f53fc0a 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -122,7 +122,7 @@
static uint32_t gBitRate = 20000000; // 20Mbps
static uint32_t gTimeLimitSec = kMaxTimeLimitSec;
static uint32_t gBframes = 0;
-static PhysicalDisplayId gPhysicalDisplayId;
+static std::optional<PhysicalDisplayId> gPhysicalDisplayId;
// Set by signal handler to stop recording.
static volatile bool gStopRequested = false;
@@ -336,6 +336,24 @@
}
/*
+ * Gets the physical id of the display to record. If the user specified a physical
+ * display id, then that id will be set. Otherwise, the default display will be set.
+ */
+static status_t getPhysicalDisplayId(PhysicalDisplayId& outDisplayId) {
+ if (gPhysicalDisplayId) {
+ outDisplayId = *gPhysicalDisplayId;
+ return NO_ERROR;
+ }
+
+ const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
+ if (ids.empty()) {
+ return INVALID_OPERATION;
+ }
+ outDisplayId = ids.front();
+ return NO_ERROR;
+}
+
+/*
* Configures the virtual display. When this completes, virtual display
* frames will start arriving from the buffer producer.
*/
@@ -350,7 +368,12 @@
setDisplayProjection(t, dpy, displayState);
ui::LayerStack layerStack = ui::LayerStack::fromValue(std::rand());
t.setDisplayLayerStack(dpy, layerStack);
- *mirrorRoot = SurfaceComposerClient::getDefault()->mirrorDisplay(gPhysicalDisplayId);
+ PhysicalDisplayId displayId;
+ status_t err = getPhysicalDisplayId(displayId);
+ if (err != NO_ERROR) {
+ return err;
+ }
+ *mirrorRoot = SurfaceComposerClient::getDefault()->mirrorDisplay(displayId);
if (*mirrorRoot == nullptr) {
ALOGE("Failed to create a mirror for screenrecord");
return UNKNOWN_ERROR;
@@ -486,6 +509,43 @@
}
/*
+ * Update the display projection if size or orientation have changed.
+ */
+void updateDisplayProjection(const sp<IBinder>& virtualDpy, ui::DisplayState& displayState) {
+ ATRACE_NAME("updateDisplayProjection");
+
+ PhysicalDisplayId displayId;
+ if (getPhysicalDisplayId(displayId) != NO_ERROR) {
+ fprintf(stderr, "ERROR: Failed to get display id\n");
+ return;
+ }
+
+ sp<IBinder> displayToken = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
+ if (!displayToken) {
+ fprintf(stderr, "ERROR: failed to get display token\n");
+ return;
+ }
+
+ ui::DisplayState currentDisplayState;
+ if (SurfaceComposerClient::getDisplayState(displayToken, &currentDisplayState) != NO_ERROR) {
+ ALOGW("ERROR: failed to get display state\n");
+ return;
+ }
+
+ if (currentDisplayState.orientation != displayState.orientation ||
+ currentDisplayState.layerStackSpaceRect != displayState.layerStackSpaceRect) {
+ displayState = currentDisplayState;
+ ALOGD("display state changed, now has orientation %s, size (%d, %d)",
+ toCString(displayState.orientation), displayState.layerStackSpaceRect.getWidth(),
+ displayState.layerStackSpaceRect.getHeight());
+
+ SurfaceComposerClient::Transaction t;
+ setDisplayProjection(t, virtualDpy, currentDisplayState);
+ t.apply();
+ }
+}
+
+/*
* Runs the MediaCodec encoder, sending the output to the MediaMuxer. The
* input frames are coming from the virtual display as fast as SurfaceFlinger
* wants to send them.
@@ -494,9 +554,8 @@
*
* The muxer must *not* have been started before calling.
*/
-static status_t runEncoder(const sp<MediaCodec>& encoder,
- AMediaMuxer *muxer, FILE* rawFp, const sp<IBinder>& display,
- const sp<IBinder>& virtualDpy, ui::Rotation orientation) {
+static status_t runEncoder(const sp<MediaCodec>& encoder, AMediaMuxer* muxer, FILE* rawFp,
+ const sp<IBinder>& virtualDpy, ui::DisplayState displayState) {
static int kTimeout = 250000; // be responsive on signal
status_t err;
ssize_t trackIdx = -1;
@@ -555,24 +614,7 @@
ALOGV("Got data in buffer %zu, size=%zu, pts=%" PRId64,
bufIndex, size, ptsUsec);
- { // scope
- ATRACE_NAME("orientation");
- // Check orientation, update if it has changed.
- //
- // Polling for changes is inefficient and wrong, but the
- // useful stuff is hard to get at without a Dalvik VM.
- ui::DisplayState displayState;
- err = SurfaceComposerClient::getDisplayState(display, &displayState);
- if (err != NO_ERROR) {
- ALOGW("getDisplayState() failed: %d", err);
- } else if (orientation != displayState.orientation) {
- ALOGD("orientation changed, now %s", toCString(displayState.orientation));
- SurfaceComposerClient::Transaction t;
- setDisplayProjection(t, virtualDpy, displayState);
- t.apply();
- orientation = displayState.orientation;
- }
- }
+ updateDisplayProjection(virtualDpy, displayState);
// If the virtual display isn't providing us with timestamps,
// use the current time. This isn't great -- we could get
@@ -764,6 +806,38 @@
};
/*
+ * Computes the maximum width and height across all physical displays.
+ */
+static ui::Size getMaxDisplaySize() {
+ const std::vector<PhysicalDisplayId> physicalDisplayIds =
+ SurfaceComposerClient::getPhysicalDisplayIds();
+ if (physicalDisplayIds.empty()) {
+ fprintf(stderr, "ERROR: Failed to get physical display ids\n");
+ return {};
+ }
+
+ ui::Size result;
+ for (auto& displayId : physicalDisplayIds) {
+ sp<IBinder> displayToken = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
+ if (!displayToken) {
+ fprintf(stderr, "ERROR: failed to get display token\n");
+ continue;
+ }
+
+ ui::DisplayState displayState;
+ status_t err = SurfaceComposerClient::getDisplayState(displayToken, &displayState);
+ if (err != NO_ERROR) {
+ fprintf(stderr, "ERROR: failed to get display state\n");
+ continue;
+ }
+
+ result.height = std::max(result.height, displayState.layerStackSpaceRect.getHeight());
+ result.width = std::max(result.width, displayState.layerStackSpaceRect.getWidth());
+ }
+ return result;
+}
+
+/*
* Main "do work" start point.
*
* Configures codec, muxer, and virtual display, then starts moving bits
@@ -781,9 +855,15 @@
sp<ProcessState> self = ProcessState::self();
self->startThreadPool();
+ PhysicalDisplayId displayId;
+ err = getPhysicalDisplayId(displayId);
+ if (err != NO_ERROR) {
+ fprintf(stderr, "ERROR: Failed to get display id\n");
+ return err;
+ }
+
// Get main display parameters.
- sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(
- gPhysicalDisplayId);
+ sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
if (display == nullptr) {
fprintf(stderr, "ERROR: no display\n");
return NAME_NOT_FOUND;
@@ -808,7 +888,8 @@
return INVALID_OPERATION;
}
- const ui::Size& layerStackSpaceRect = displayState.layerStackSpaceRect;
+ const ui::Size layerStackSpaceRect =
+ gPhysicalDisplayId ? displayState.layerStackSpaceRect : getMaxDisplaySize();
if (gVerbose) {
printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
@@ -973,8 +1054,7 @@
}
} else {
// Main encoder loop.
- err = runEncoder(recordingData.encoder, muxer, rawFp, display, recordingData.dpy,
- displayState.orientation);
+ err = runEncoder(recordingData.encoder, muxer, rawFp, recordingData.dpy, displayState);
if (err != NO_ERROR) {
fprintf(stderr, "Encoder failed (err=%d)\n", err);
// fall through to cleanup
@@ -1175,14 +1255,6 @@
{ NULL, 0, NULL, 0 }
};
- const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
- if (ids.empty()) {
- fprintf(stderr, "Failed to get ID for any displays\n");
- return 1;
- }
-
- gPhysicalDisplayId = ids.front();
-
while (true) {
int optionIndex = 0;
int ic = getopt_long(argc, argv, "", longOptions, &optionIndex);
diff --git a/media/audioaidlconversion/include/media/AidlConversionCppNdk-impl.h b/media/audioaidlconversion/include/media/AidlConversionCppNdk-impl.h
index ec1f75c..bc9d4d5 100644
--- a/media/audioaidlconversion/include/media/AidlConversionCppNdk-impl.h
+++ b/media/audioaidlconversion/include/media/AidlConversionCppNdk-impl.h
@@ -288,6 +288,11 @@
ConversionResult<media::audio::common::AudioOutputFlags>
legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
+ConversionResult<audio_stream_type_t>
+aidl2legacy_AudioStreamType_audio_stream_type_t(media::audio::common::AudioStreamType aidl);
+ConversionResult<media::audio::common::AudioStreamType>
+legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
+
// This type is unnamed in the original definition, thus we name it here.
using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
ConversionResult<audio_port_config_mix_ext_usecase>
diff --git a/media/codec2/components/aom/C2SoftAomEnc.cpp b/media/codec2/components/aom/C2SoftAomEnc.cpp
index 59cad9d..e08bf43 100644
--- a/media/codec2/components/aom/C2SoftAomEnc.cpp
+++ b/media/codec2/components/aom/C2SoftAomEnc.cpp
@@ -542,15 +542,15 @@
mCodecConfiguration->kf_max_dist = 3000;
// Encoder determines optimal key frame placement automatically.
mCodecConfiguration->kf_mode = AOM_KF_AUTO;
- // Initial value of the buffer level in ms.
- mCodecConfiguration->rc_buf_initial_sz = 500;
- // Amount of data that the encoder should try to maintain in ms.
- mCodecConfiguration->rc_buf_optimal_sz = 600;
// The amount of data that may be buffered by the decoding
// application in ms.
mCodecConfiguration->rc_buf_sz = 1000;
if (mBitrateControlMode == AOM_CBR) {
+ // Initial value of the buffer level in ms.
+ mCodecConfiguration->rc_buf_initial_sz = 500;
+ // Amount of data that the encoder should try to maintain in ms.
+ mCodecConfiguration->rc_buf_optimal_sz = 600;
// Maximum amount of bits that can be subtracted from the target
// bitrate - expressed as percentage of the target bitrate.
mCodecConfiguration->rc_undershoot_pct = 100;
@@ -563,7 +563,7 @@
mCodecConfiguration->rc_undershoot_pct = 100;
// Maximum amount of bits that can be added to the target
// bitrate - expressed as percentage of the target bitrate.
- mCodecConfiguration->rc_overshoot_pct = 25;
+ mCodecConfiguration->rc_overshoot_pct = 100;
}
if (mIntf->getSyncFramePeriod() >= 0) {
@@ -576,6 +576,12 @@
}
if (mMaxQuantizer > 0) {
mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
+ } else {
+ if (mBitrateControlMode == AOM_VBR) {
+ // For VBR we are limiting MaxQP to 52 (down 11 steps) to maintain quality
+ // 52 comes from experiments done on libaom standalone app
+ mCodecConfiguration->rc_max_quantizer = 52;
+ }
}
mCodecContext = new aom_codec_ctx_t;
@@ -630,11 +636,11 @@
return;
}
- std::shared_ptr<const C2GraphicView> rView;
+ std::shared_ptr<C2GraphicView> rView;
std::shared_ptr<C2Buffer> inputBuffer;
if (!work->input.buffers.empty()) {
inputBuffer = work->input.buffers[0];
- rView = std::make_shared<const C2GraphicView>(
+ rView = std::make_shared<C2GraphicView>(
inputBuffer->data().graphicBlocks().front().map().get());
if (rView->error() != C2_OK) {
ALOGE("graphic view map err = %d", rView->error());
@@ -672,6 +678,10 @@
return;
}
+ //(b/279387842)
+ //workaround for incorrect crop size in view when using surface mode
+ rView->setCrop_be(C2Rect(mSize->width, mSize->height));
+
if (!mHeadersReceived) {
Av1Config av1_config;
constexpr uint32_t header_length = 2048;
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 417b261..9a3399d 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -2016,7 +2016,8 @@
constexpr char C2_PARAMKEY_MAX_CODED_CHANNEL_COUNT[] = "coded.max-channel-count";
/**
- * Audio channel mask. Used by decoder to express audio channel mask of decoded content.
+ * Audio channel mask. Used by decoder to express audio channel mask of decoded content,
+ * or by encoder for the channel mask of the encoded content once decoded.
* Channel representation is specified according to the Java android.media.AudioFormat
* CHANNEL_OUT_* constants.
*/
diff --git a/media/codec2/hal/client/client.cpp b/media/codec2/hal/client/client.cpp
index 9359e29..97c0806 100644
--- a/media/codec2/hal/client/client.cpp
+++ b/media/codec2/hal/client/client.cpp
@@ -610,16 +610,9 @@
// Codec2Client
Codec2Client::Codec2Client(sp<Base> const& base,
+ sp<IConfigurable> const& configurable,
size_t serviceIndex)
- : Configurable{
- [base]() -> sp<IConfigurable> {
- Return<sp<IConfigurable>> transResult =
- base->getConfigurable();
- return transResult.isOk() ?
- static_cast<sp<IConfigurable>>(transResult) :
- nullptr;
- }()
- },
+ : Configurable{configurable},
mBase1_0{base},
mBase1_1{Base1_1::castFrom(base)},
mBase1_2{Base1_2::castFrom(base)},
@@ -1003,7 +996,11 @@
CHECK(baseStore) << "Codec2 service \"" << name << "\""
" inaccessible for unknown reasons.";
LOG(VERBOSE) << "Client to Codec2 service \"" << name << "\" created";
- return std::make_shared<Codec2Client>(baseStore, index);
+ Return<sp<IConfigurable>> transResult = baseStore->getConfigurable();
+ CHECK(transResult.isOk()) << "Codec2 service \"" << name << "\""
+ "does not have IConfigurable.";
+ sp<IConfigurable> configurable = static_cast<sp<IConfigurable>>(transResult);
+ return std::make_shared<Codec2Client>(baseStore, configurable, index);
}
c2_status_t Codec2Client::ForAllServices(
@@ -1523,8 +1520,8 @@
uint64_t consumerUsage = kDefaultConsumerUsage;
{
if (surface) {
- int usage = 0;
- status_t err = surface->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+ uint64_t usage = 0;
+ status_t err = surface->getConsumerUsage(&usage);
if (err != NO_ERROR) {
ALOGD("setOutputSurface -- failed to get consumer usage bits (%d/%s). ignoring",
err, asString(err));
@@ -1537,8 +1534,7 @@
// they do not exist inside of C2 scope. Any buffer usage shall be communicated
// through the sideband channel.
- // do an unsigned conversion as bit-31 may be 1
- consumerUsage = (uint32_t)usage | kDefaultConsumerUsage;
+ consumerUsage = usage | kDefaultConsumerUsage;
}
}
@@ -1562,6 +1558,8 @@
static_cast<uint64_t>(blockPoolId),
bqId == 0 ? nullHgbp : igbp);
+ mOutputBufferQueue->expireOldWaiters();
+
if (!transStatus.isOk()) {
LOG(ERROR) << "setOutputSurface -- transaction failed.";
return C2_TRANSACTION_FAILED;
@@ -1607,6 +1605,7 @@
<< status << ".";
}
}
+ mOutputBufferQueue->expireOldWaiters();
}
c2_status_t Codec2Client::Component::connectToInputSurface(
diff --git a/media/codec2/hal/client/include/codec2/hidl/client.h b/media/codec2/hal/client/include/codec2/hidl/client.h
index efbf179..5267394 100644
--- a/media/codec2/hal/client/include/codec2/hidl/client.h
+++ b/media/codec2/hal/client/include/codec2/hidl/client.h
@@ -146,6 +146,8 @@
typedef ::android::hardware::media::c2::V1_2::IComponentStore Base1_2;
typedef Base1_0 Base;
+ typedef ::android::hardware::media::c2::V1_0::IConfigurable IConfigurable;
+
struct Listener;
typedef Codec2ConfigurableClient Configurable;
@@ -230,8 +232,11 @@
static std::shared_ptr<InputSurface> CreateInputSurface(
char const* serviceName = nullptr);
- // base cannot be null.
- Codec2Client(sp<Base> const& base, size_t serviceIndex);
+ // base and/or configurable cannot be null.
+ Codec2Client(
+ sp<Base> const& base,
+ sp<IConfigurable> const& configurable,
+ size_t serviceIndex);
protected:
sp<Base1_0> mBase1_0;
diff --git a/media/codec2/hal/client/include/codec2/hidl/output.h b/media/codec2/hal/client/include/codec2/hidl/output.h
index 35a0224..2e89c3b 100644
--- a/media/codec2/hal/client/include/codec2/hidl/output.h
+++ b/media/codec2/hal/client/include/codec2/hidl/output.h
@@ -51,6 +51,10 @@
int maxDequeueBufferCount,
std::shared_ptr<V1_2::SurfaceSyncObj> *syncObj);
+ // If there are waiters to allocate from the old surface, wake up and expire
+ // them.
+ void expireOldWaiters();
+
// Stop using the current output surface. Pending buffer opeations will not
// perform anymore.
void stop();
@@ -90,6 +94,8 @@
std::weak_ptr<_C2BlockPoolData> mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
std::shared_ptr<C2SurfaceSyncMemory> mSyncMem;
bool mStopped;
+ std::mutex mOldMutex;
+ std::shared_ptr<C2SurfaceSyncMemory> mOldMem;
bool registerBuffer(const C2ConstGraphicBlock& block);
};
diff --git a/media/codec2/hal/client/output.cpp b/media/codec2/hal/client/output.cpp
index ce706cc..4eebd1c 100644
--- a/media/codec2/hal/client/output.cpp
+++ b/media/codec2/hal/client/output.cpp
@@ -217,6 +217,7 @@
sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
std::weak_ptr<_C2BlockPoolData>
poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+ std::shared_ptr<C2SurfaceSyncMemory> oldMem;
{
std::scoped_lock<std::mutex> l(mMutex);
bool stopped = mStopped;
@@ -238,7 +239,7 @@
}
return false;
}
- std::shared_ptr<C2SurfaceSyncMemory> oldMem = mSyncMem;
+ oldMem = mSyncMem;
C2SyncVariables *oldSync = mSyncMem ? mSyncMem->mem() : nullptr;
if (oldSync) {
oldSync->lock();
@@ -314,11 +315,26 @@
newSync->unlock();
}
}
+ {
+ std::scoped_lock<std::mutex> l(mOldMutex);
+ mOldMem = oldMem;
+ }
ALOGD("remote graphic buffer migration %zu/%zu",
success, tryNum);
return true;
}
+void OutputBufferQueue::expireOldWaiters() {
+ std::scoped_lock<std::mutex> l(mOldMutex);
+ if (mOldMem) {
+ C2SyncVariables *oldSync = mOldMem->mem();
+ if (oldSync) {
+ oldSync->notifyAll();
+ }
+ mOldMem.reset();
+ }
+}
+
void OutputBufferQueue::stop() {
std::scoped_lock<std::mutex> l(mMutex);
mStopped = true;
diff --git a/media/codec2/hal/plugin/samples/SampleFilterPlugin.cpp b/media/codec2/hal/plugin/samples/SampleFilterPlugin.cpp
index c77eb22..b5383ad 100644
--- a/media/codec2/hal/plugin/samples/SampleFilterPlugin.cpp
+++ b/media/codec2/hal/plugin/samples/SampleFilterPlugin.cpp
@@ -710,10 +710,6 @@
layerSettings.source.buffer.fence = Fence::NO_FENCE;
layerSettings.source.buffer.textureName = textureName;
layerSettings.source.buffer.usePremultipliedAlpha = false;
- layerSettings.source.buffer.isY410BT2020 =
- (layerSettings.sourceDataspace == ui::Dataspace::BT2020_ITU_PQ ||
- layerSettings.sourceDataspace == ui::Dataspace::BT2020_ITU_HLG) &&
- format == HAL_PIXEL_FORMAT_RGBA_1010102;
layerSettings.source.buffer.maxMasteringLuminance =
(hdrStaticInfo && *hdrStaticInfo &&
hdrStaticInfo->mastering.maxLuminance > 0 &&
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index eb1b4b5..5e53acc 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1828,6 +1828,7 @@
mCallback->onError(err2, ACTION_CODE_FATAL);
return;
}
+
err2 = mChannel->start(inputFormat, outputFormat, buffersBoundToCodec);
if (err2 != OK) {
mCallback->onError(err2, ACTION_CODE_FATAL);
@@ -2131,6 +2132,25 @@
RevertOutputFormatIfNeeded(outputFormat, config->mOutputFormat);
}
+ std::map<size_t, sp<MediaCodecBuffer>> clientInputBuffers;
+ status_t err = mChannel->prepareInitialInputBuffers(&clientInputBuffers);
+ if (err != OK) {
+ if (err == NO_MEMORY) {
+ // NO_MEMORY happens here when all the buffers are still
+ // with the codec. That is not an error as it is momentarily
+ // and the buffers are send to the client as soon as the codec
+ // releases them
+ ALOGI("Resuming with all input buffers still with codec");
+ } else {
+ ALOGE("Resume request for Input Buffers failed");
+ mCallback->onError(err, ACTION_CODE_FATAL);
+ return;
+ }
+ }
+
+ // channel start should be called after prepareInitialBuffers
+ // Calling before can cause a failure during prepare when
+ // buffers are sent to the client before preparation from onWorkDone
(void)mChannel->start(nullptr, nullptr, [&]{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
@@ -2148,14 +2168,6 @@
state->set(RUNNING);
}
- std::map<size_t, sp<MediaCodecBuffer>> clientInputBuffers;
- status_t err = mChannel->prepareInitialInputBuffers(&clientInputBuffers);
- // FIXME(b/237656746)
- if (err != OK && err != NO_MEMORY) {
- ALOGE("Resume request for Input Buffers failed");
- mCallback->onError(err, ACTION_CODE_FATAL);
- return;
- }
mChannel->requestInitialInputBuffers(std::move(clientInputBuffers));
}
@@ -2557,43 +2569,6 @@
}
void CCodec::initiateReleaseIfStuck() {
- bool tunneled = false;
- bool isMediaTypeKnown = false;
- {
- static const std::set<std::string> kKnownMediaTypes{
- MIMETYPE_VIDEO_VP8,
- MIMETYPE_VIDEO_VP9,
- MIMETYPE_VIDEO_AV1,
- MIMETYPE_VIDEO_AVC,
- MIMETYPE_VIDEO_HEVC,
- MIMETYPE_VIDEO_MPEG4,
- MIMETYPE_VIDEO_H263,
- MIMETYPE_VIDEO_MPEG2,
- MIMETYPE_VIDEO_RAW,
- MIMETYPE_VIDEO_DOLBY_VISION,
-
- MIMETYPE_AUDIO_AMR_NB,
- MIMETYPE_AUDIO_AMR_WB,
- MIMETYPE_AUDIO_MPEG,
- MIMETYPE_AUDIO_AAC,
- MIMETYPE_AUDIO_QCELP,
- MIMETYPE_AUDIO_VORBIS,
- MIMETYPE_AUDIO_OPUS,
- MIMETYPE_AUDIO_G711_ALAW,
- MIMETYPE_AUDIO_G711_MLAW,
- MIMETYPE_AUDIO_RAW,
- MIMETYPE_AUDIO_FLAC,
- MIMETYPE_AUDIO_MSGSM,
- MIMETYPE_AUDIO_AC3,
- MIMETYPE_AUDIO_EAC3,
-
- MIMETYPE_IMAGE_ANDROID_HEIC,
- };
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- tunneled = config->mTunneled;
- isMediaTypeKnown = (kKnownMediaTypes.count(config->mCodingMediaType) != 0);
- }
std::string name;
bool pendingDeadline = false;
{
@@ -2605,16 +2580,6 @@
pendingDeadline = true;
}
}
- if (!tunneled && isMediaTypeKnown && name.empty()) {
- constexpr std::chrono::steady_clock::duration kWorkDurationThreshold = 3s;
- std::chrono::steady_clock::duration elapsed = mChannel->elapsed();
- if (elapsed >= kWorkDurationThreshold) {
- name = "queue";
- }
- if (elapsed > 0s) {
- pendingDeadline = true;
- }
- }
if (name.empty()) {
// We're not stuck.
if (pendingDeadline) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index da33b0d..1c86ba9 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1801,8 +1801,6 @@
output->buffers->flushStash();
}
}
- // reset the frames that are being tracked for onFrameRendered callbacks
- mTrackedFrames.clear();
}
void CCodecBufferChannel::onWorkDone(
@@ -2113,7 +2111,10 @@
outBuffer->meta()->setInt32("flags", BUFFER_FLAG_CODEC_CONFIG);
ALOGV("[%s] onWorkDone: csd index = %zu [%p]", mName, index, outBuffer.get());
- output.unlock();
+ // TRICKY: we want popped buffers reported in order, so sending
+ // the callback while holding the lock here. This assumes that
+ // onOutputBufferAvailable() does not block. onOutputBufferAvailable()
+ // callbacks are always sent with the Output lock held.
mCallback->onOutputBufferAvailable(index, outBuffer);
} else {
ALOGD("[%s] onWorkDone: unable to register csd", mName);
@@ -2203,7 +2204,10 @@
case OutputBuffers::DISCARD:
break;
case OutputBuffers::NOTIFY_CLIENT:
- output.unlock();
+ // TRICKY: we want popped buffers reported in order, so sending
+ // the callback while holding the lock here. This assumes that
+ // onOutputBufferAvailable() does not block. onOutputBufferAvailable()
+ // callbacks are always sent with the Output lock held.
mCallback->onOutputBufferAvailable(index, outBuffer);
break;
case OutputBuffers::REALLOCATE:
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index a893bc0..6c10549 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -940,6 +940,9 @@
add(ConfigMapper(KEY_CHANNEL_MASK, C2_PARAMKEY_CHANNEL_MASK, "value")
.limitTo(D::AUDIO & D::DECODER & D::READ));
+ add(ConfigMapper(KEY_CHANNEL_MASK, C2_PARAMKEY_CHANNEL_MASK, "value")
+ .limitTo(D::AUDIO & D::ENCODER & D::CONFIG));
+
add(ConfigMapper(KEY_AAC_SBR_MODE, C2_PARAMKEY_AAC_SBR_MODE, "value")
.limitTo(D::AUDIO & D::ENCODER & (D::CONFIG | D::PARAM | D::READ))
.withMapper([](C2Value v) -> C2Value {
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 5bd0114..b0d48b7 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -72,11 +72,6 @@
media::audio::common::AudioPortDeviceExt* aidl,
media::AudioPortDeviceExtSys* aidlDeviceExt);
-ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
- media::audio::common::AudioStreamType aidl);
-ConversionResult<media::audio::common::AudioStreamType>
-legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
-
ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
const media::AudioPortMixExtSys& aidlMixExt);
diff --git a/media/libaudiohal/impl/ConversionHelperAidl.h b/media/libaudiohal/impl/ConversionHelperAidl.h
index db6b6cf..5534d13 100644
--- a/media/libaudiohal/impl/ConversionHelperAidl.h
+++ b/media/libaudiohal/impl/ConversionHelperAidl.h
@@ -20,6 +20,9 @@
#include <string_view>
#include <vector>
+#include <android-base/expected.h>
+#include <error/Result.h>
+#include <media/AudioParameter.h>
#include <utils/String16.h>
#include <utils/Vector.h>
@@ -51,4 +54,24 @@
const std::string mClassName;
};
+// Looks up 'key' in 'parameters'; if present, removes it and invokes 'action'
+// with the parsed value.
+// 'action' must accept a value of type 'T' and return 'status_t'.
+// The function returns 'true' if the parameter was found, and the action has succeeded.
+// The function returns 'false' if the parameter was not found.
+// Any errors get propagated, if there are errors it means the parameter was found.
+// NOTE(review): when 'action' fails, the key has already been removed from
+// 'parameters' -- callers must not rely on it still being present afterwards.
+template<typename T, typename F>
+error::Result<bool> filterOutAndProcessParameter(
+        AudioParameter& parameters, const String8& key, const F& action) {
+    if (parameters.containsKey(key)) {
+        T value;
+        status_t status = parameters.get(key, value);
+        if (status == OK) {
+            parameters.remove(key);  // consume the key before running the action
+            status = action(value);
+            if (status == OK) return true;
+        }
+        return base::unexpected(status);
+    }
+    return false;
+}
+
} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalAidl.cpp b/media/libaudiohal/impl/DeviceHalAidl.cpp
index 865d1d6..3125e311 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalAidl.cpp
@@ -35,6 +35,7 @@
#include "StreamHalAidl.h"
using aidl::android::aidl_utils::statusTFromBinderStatus;
+using aidl::android::media::audio::common::Boolean;
using aidl::android::media::audio::common::AudioChannelLayout;
using aidl::android::media::audio::common::AudioConfig;
using aidl::android::media::audio::common::AudioDevice;
@@ -68,6 +69,9 @@
using aidl::android::hardware::audio::common::RecordTrackMetadata;
using aidl::android::hardware::audio::core::AudioPatch;
using aidl::android::hardware::audio::core::AudioRoute;
+using aidl::android::hardware::audio::core::IBluetooth;
+using aidl::android::hardware::audio::core::IBluetoothA2dp;
+using aidl::android::hardware::audio::core::IBluetoothLe;
using aidl::android::hardware::audio::core::IModule;
using aidl::android::hardware::audio::core::ITelephony;
using aidl::android::hardware::audio::core::ModuleDebug;
@@ -124,7 +128,10 @@
DeviceHalAidl::DeviceHalAidl(const std::string& instance, const std::shared_ptr<IModule>& module)
: ConversionHelperAidl("DeviceHalAidl"),
mInstance(instance), mModule(module),
- mTelephony(retrieveSubInterface<ITelephony>(module, &IModule::getTelephony)) {
+ mTelephony(retrieveSubInterface<ITelephony>(module, &IModule::getTelephony)),
+ mBluetooth(retrieveSubInterface<IBluetooth>(module, &IModule::getBluetooth)),
+ mBluetoothA2dp(retrieveSubInterface<IBluetoothA2dp>(module, &IModule::getBluetoothA2dp)),
+ mBluetoothLe(retrieveSubInterface<IBluetoothLe>(module, &IModule::getBluetoothLe)) {
}
status_t DeviceHalAidl::getAudioPorts(std::vector<media::audio::common::AudioPort> *ports) {
@@ -265,15 +272,32 @@
return statusTFromBinderStatus(mModule->getMasterMute(state));
}
-status_t DeviceHalAidl::setParameters(const String8& kvPairs __unused) {
-    TIME_CHECK();
+// Parses the legacy key/value parameter string and forwards the recognized
+// Bluetooth (A2DP / HFP / LE / SCO) keys to the corresponding AIDL
+// sub-interfaces. Per-category failures are logged but do not fail the call;
+// keys left over after filtering are logged and dropped.
+status_t DeviceHalAidl::setParameters(const String8& kvPairs) {
     if (!mModule) return NO_INIT;
-    ALOGE("%s not implemented yet", __func__);
+    AudioParameter parameters(kvPairs);
+    ALOGD("%s: parameters: \"%s\"", __func__, parameters.toString().c_str());
+
+    if (status_t status = filterAndUpdateBtA2dpParameters(parameters); status != OK) {
+        ALOGW("%s: filtering or updating BT A2DP parameters failed: %d", __func__, status);
+    }
+    if (status_t status = filterAndUpdateBtHfpParameters(parameters); status != OK) {
+        ALOGW("%s: filtering or updating BT HFP parameters failed: %d", __func__, status);
+    }
+    if (status_t status = filterAndUpdateBtLeParameters(parameters); status != OK) {
+        ALOGW("%s: filtering or updating BT LE parameters failed: %d", __func__, status);
+    }
+    if (status_t status = filterAndUpdateBtScoParameters(parameters); status != OK) {
+        ALOGW("%s: filtering or updating BT SCO parameters failed: %d", __func__, status);
+    }
+
+    // Each filter above removed the keys it handled; anything remaining is unsupported.
+    ALOGW_IF(parameters.size() != 0, "%s: unknown parameters, ignored: \"%s\"",
+            __func__, parameters.toString().c_str());
     return OK;
 }
status_t DeviceHalAidl::getParameters(const String8& keys __unused, String8 *values) {
TIME_CHECK();
+ // FIXME(b/278976019): Support keyReconfigA2dpSupported via vendor plugin
values->clear();
if (!mModule) return NO_INIT;
ALOGE("%s not implemented yet", __func__);
@@ -1088,6 +1112,150 @@
return OK;
}
+// Translates the legacy "A2dpSuspended=true/false" key into a call to
+// IBluetoothA2dp::setEnabled ('suspended' is the inverse of 'enabled').
+status_t DeviceHalAidl::filterAndUpdateBtA2dpParameters(AudioParameter &parameters) {
+    TIME_CHECK();
+    std::optional<bool> a2dpEnabled;
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtA2dpSuspended),
+            [&a2dpEnabled](const String8& trueOrFalse) {
+                if (trueOrFalse == AudioParameter::valueTrue) {
+                    a2dpEnabled = false;  // 'suspended' == true
+                    return OK;
+                } else if (trueOrFalse == AudioParameter::valueFalse) {
+                    a2dpEnabled = true;  // 'suspended' == false
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtA2dpSuspended, trueOrFalse.c_str());
+                return BAD_VALUE;
+            }));
+    // FIXME(b/278976019): Support keyReconfigA2dp via vendor plugin
+    if (mBluetoothA2dp != nullptr && a2dpEnabled.has_value()) {
+        return statusTFromBinderStatus(mBluetoothA2dp->setEnabled(a2dpEnabled.value()));
+    }
+    return OK;
+}
+
+// Gathers the legacy HFP enable / sampling-rate / volume keys into a single
+// HfpConfig and applies it via IBluetooth::setHfpConfig.
+status_t DeviceHalAidl::filterAndUpdateBtHfpParameters(AudioParameter &parameters) {
+    TIME_CHECK();
+    IBluetooth::HfpConfig hfpConfig;
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtHfpEnable),
+            [&hfpConfig](const String8& trueOrFalse) {
+                if (trueOrFalse == AudioParameter::valueTrue) {
+                    hfpConfig.isEnabled = Boolean{ .value = true };
+                    return OK;
+                } else if (trueOrFalse == AudioParameter::valueFalse) {
+                    hfpConfig.isEnabled = Boolean{ .value = false };
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtHfpEnable, trueOrFalse.c_str());
+                return BAD_VALUE;
+            }));
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
+            parameters, String8(AudioParameter::keyBtHfpSamplingRate),
+            [&hfpConfig](int sampleRate) {
+                // Rewritten from a comma-operator ternary for readability;
+                // behavior is identical: only positive rates are accepted.
+                if (sampleRate <= 0) return BAD_VALUE;
+                hfpConfig.sampleRate = Int{ .value = sampleRate };
+                return OK;
+            }));
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
+            parameters, String8(AudioParameter::keyBtHfpVolume),
+            [&hfpConfig](int volume0to15) {
+                if (volume0to15 >= 0 && volume0to15 <= 15) {
+                    // Map the legacy 0..15 integer volume onto [0.0, 1.0].
+                    hfpConfig.volume = Float{ .value = volume0to15 / 15.0f };
+                    return OK;
+                }
+                return BAD_VALUE;
+            }));
+    // Only call into the HAL when at least one field was actually set.
+    if (mBluetooth != nullptr && hfpConfig != IBluetooth::HfpConfig{}) {
+        IBluetooth::HfpConfig newHfpConfig;
+        return statusTFromBinderStatus(mBluetooth->setHfpConfig(hfpConfig, &newHfpConfig));
+    }
+    return OK;
+}
+
+// Translates the legacy "LeAudioSuspended=true/false" key into a call to
+// IBluetoothLe::setEnabled ('suspended' is the inverse of 'enabled').
+status_t DeviceHalAidl::filterAndUpdateBtLeParameters(AudioParameter &parameters) {
+    TIME_CHECK();
+    std::optional<bool> leEnabled;
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtLeSuspended),
+            [&leEnabled](const String8& trueOrFalse) {
+                if (trueOrFalse == AudioParameter::valueTrue) {
+                    leEnabled = false;  // 'suspended' == true
+                    return OK;
+                } else if (trueOrFalse == AudioParameter::valueFalse) {
+                    leEnabled = true;  // 'suspended' == false
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtLeSuspended, trueOrFalse.c_str());
+                return BAD_VALUE;
+            }));
+    if (mBluetoothLe != nullptr && leEnabled.has_value()) {
+        return statusTFromBinderStatus(mBluetoothLe->setEnabled(leEnabled.value()));
+    }
+    return OK;
+}
+
+// Collects the legacy BT SCO keys (on/off, headset name, NREC, wideband)
+// into a single ScoConfig and pushes it via IBluetooth::setScoConfig.
+status_t DeviceHalAidl::filterAndUpdateBtScoParameters(AudioParameter &parameters) {
+    TIME_CHECK();
+    IBluetooth::ScoConfig scoConfig;
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtSco),
+            [&scoConfig](const String8& onOrOff) {
+                if (onOrOff == AudioParameter::valueOn) {
+                    scoConfig.isEnabled = Boolean{ .value = true };
+                    return OK;
+                } else if (onOrOff == AudioParameter::valueOff) {
+                    scoConfig.isEnabled = Boolean{ .value = false };
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtSco, onOrOff.c_str());
+                return BAD_VALUE;
+            }));
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtScoHeadsetName),
+            [&scoConfig](const String8& name) {
+                scoConfig.debugName = name;
+                return OK;
+            }));
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtNrec),
+            [&scoConfig](const String8& onOrOff) {
+                if (onOrOff == AudioParameter::valueOn) {
+                    scoConfig.isNrecEnabled = Boolean{ .value = true };
+                    return OK;
+                } else if (onOrOff == AudioParameter::valueOff) {
+                    scoConfig.isNrecEnabled = Boolean{ .value = false };
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtNrec, onOrOff.c_str());
+                return BAD_VALUE;
+            }));
+    (void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
+            parameters, String8(AudioParameter::keyBtScoWb),
+            [&scoConfig](const String8& onOrOff) {
+                if (onOrOff == AudioParameter::valueOn) {
+                    scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO_WB;
+                    return OK;
+                } else if (onOrOff == AudioParameter::valueOff) {
+                    scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO;
+                    return OK;
+                }
+                ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
+                        AudioParameter::keyBtScoWb, onOrOff.c_str());
+                return BAD_VALUE;
+            }));
+    // Only call into the HAL when at least one field was actually set.
+    if (mBluetooth != nullptr && scoConfig != IBluetooth::ScoConfig{}) {
+        IBluetooth::ScoConfig newScoConfig;
+        return statusTFromBinderStatus(mBluetooth->setScoConfig(scoConfig, &newScoConfig));
+    }
+    return OK;
+}
+
status_t DeviceHalAidl::findOrCreatePatch(
const AudioPatch& requestedPatch, AudioPatch* patch, bool* created) {
std::set<int32_t> sourcePortConfigIds(requestedPatch.sourcePortConfigIds.begin(),
diff --git a/media/libaudiohal/impl/DeviceHalAidl.h b/media/libaudiohal/impl/DeviceHalAidl.h
index 5c9950b..37d800b 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.h
+++ b/media/libaudiohal/impl/DeviceHalAidl.h
@@ -214,6 +214,10 @@
status_t createOrUpdatePortConfig(
const ::aidl::android::media::audio::common::AudioPortConfig& requestedPortConfig,
PortConfigs::iterator* result, bool *created);
+    // Each filter removes the keys it recognizes from 'parameters' and pushes
+    // them to the matching Bluetooth AIDL sub-interface.
+    status_t filterAndUpdateBtA2dpParameters(AudioParameter &parameters);
+    status_t filterAndUpdateBtHfpParameters(AudioParameter &parameters);
+    status_t filterAndUpdateBtLeParameters(AudioParameter &parameters);
+    status_t filterAndUpdateBtScoParameters(AudioParameter &parameters);
status_t findOrCreatePatch(
const std::set<int32_t>& sourcePortConfigIds,
const std::set<int32_t>& sinkPortConfigIds,
@@ -288,6 +292,9 @@
const std::string mInstance;
const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mModule;
const std::shared_ptr<::aidl::android::hardware::audio::core::ITelephony> mTelephony;
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IBluetooth> mBluetooth;
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IBluetoothA2dp> mBluetoothA2dp;
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IBluetoothLe> mBluetoothLe;
std::shared_ptr<::aidl::android::hardware::audio::core::sounddose::ISoundDose>
mSoundDose = nullptr;
Ports mPorts;
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 0dcb8ee..7b9088e 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -40,6 +40,8 @@
using ::aidl::android::hardware::audio::effect::Descriptor;
using ::aidl::android::hardware::audio::effect::IFactory;
using ::aidl::android::hardware::audio::effect::Processing;
+using ::aidl::android::media::audio::common::AudioSource;
+using ::aidl::android::media::audio::common::AudioStreamType;
using ::aidl::android::media::audio::common::AudioUuid;
using ::android::base::unexpected;
using ::android::detail::AudioHalVersionInfo;
@@ -96,7 +98,13 @@
return list;
}()),
mEffectCount(mNonProxyDescList.size() + mProxyDescList.size()),
- mEffectProcessings(nullptr /* TODO: add AIDL implementation */) {
+ mAidlProcessings([this]() -> std::vector<Processing> {
+ std::vector<Processing> processings;
+ if (!mFactory || !mFactory->queryProcessing(std::nullopt, &processings).isOk()) {
+ ALOGE("%s queryProcessing failed", __func__);
+ }
+ return processings;
+ }()) {
ALOG_ASSERT(mFactory != nullptr, "Provided IEffectsFactory service is NULL");
ALOGI("%s with %zu nonProxyEffects and %zu proxyEffects", __func__, mNonProxyDescList.size(),
mProxyDescList.size());
@@ -274,15 +282,79 @@
}
std::shared_ptr<const effectsConfig::Processings> EffectsFactoryHalAidl::getProcessings() const {
- return mEffectProcessings;
+
+ auto getConfigEffectWithDescriptor =
+ [](const auto& desc) -> std::shared_ptr<const effectsConfig::Effect> {
+ effectsConfig::Effect effect = {.name = desc.common.name, .isProxy = false};
+ if (const auto uuid =
+ ::aidl::android::aidl2legacy_AudioUuid_audio_uuid_t(desc.common.id.uuid);
+ uuid.ok()) {
+ static_cast<effectsConfig::EffectImpl>(effect).uuid = uuid.value();
+ return std::make_shared<const effectsConfig::Effect>(effect);
+ } else {
+ return nullptr;
+ }
+ };
+
+ auto getConfigProcessingWithAidlProcessing =
+ [&](const auto& aidlProcess, std::vector<effectsConfig::InputStream>& preprocess,
+ std::vector<effectsConfig::OutputStream>& postprocess) {
+ if (aidlProcess.type.getTag() == Processing::Type::streamType) {
+ AudioStreamType aidlType =
+ aidlProcess.type.template get<Processing::Type::streamType>();
+ const auto type =
+ ::aidl::android::aidl2legacy_AudioStreamType_audio_stream_type_t(
+ aidlType);
+ if (!type.ok()) {
+ return;
+ }
+
+ std::vector<std::shared_ptr<const effectsConfig::Effect>> effects;
+ std::transform(aidlProcess.ids.begin(), aidlProcess.ids.end(),
+ std::back_inserter(effects), getConfigEffectWithDescriptor);
+ effectsConfig::OutputStream stream = {.type = type.value(),
+ .effects = std::move(effects)};
+ postprocess.emplace_back(stream);
+ } else if (aidlProcess.type.getTag() == Processing::Type::source) {
+ AudioSource aidlType =
+ aidlProcess.type.template get<Processing::Type::source>();
+ const auto type =
+ ::aidl::android::aidl2legacy_AudioSource_audio_source_t(aidlType);
+ if (!type.ok()) {
+ return;
+ }
+
+ std::vector<std::shared_ptr<const effectsConfig::Effect>> effects;
+ std::transform(aidlProcess.ids.begin(), aidlProcess.ids.end(),
+ std::back_inserter(effects), getConfigEffectWithDescriptor);
+ effectsConfig::InputStream stream = {.type = type.value(),
+ .effects = std::move(effects)};
+ preprocess.emplace_back(stream);
+ }
+ };
+
+ static std::shared_ptr<const effectsConfig::Processings> processings(
+ [&]() -> std::shared_ptr<const effectsConfig::Processings> {
+ std::vector<effectsConfig::InputStream> preprocess;
+ std::vector<effectsConfig::OutputStream> postprocess;
+ for (const auto& processing : mAidlProcessings) {
+ getConfigProcessingWithAidlProcessing(processing, preprocess, postprocess);
+ }
+
+ if (0 == preprocess.size() && 0 == postprocess.size()) {
+ return nullptr;
+ }
+
+ return std::make_shared<const effectsConfig::Processings>(
+ effectsConfig::Processings({.preprocess = std::move(preprocess),
+ .postprocess = std::move(postprocess)}));
+ }());
+
+ return processings;
}
+// Return 0 for AIDL, as the AIDL interface is not aware of the configuration file.
::android::error::Result<size_t> EffectsFactoryHalAidl::getSkippedElements() const {
- if (!mEffectProcessings) {
- return ::android::base::unexpected(BAD_VALUE);
- }
-
- // Only return 0 for AIDL, because the AIDL interface doesn't aware of configuration file
return 0;
}
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.h b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
index 70a7012..39beea2 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
@@ -21,6 +21,7 @@
#include <mutex>
#include <aidl/android/hardware/audio/effect/IFactory.h>
+#include <aidl/android/hardware/audio/effect/Processing.h>
#include <android-base/thread_annotations.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <system/thread_defs.h>
@@ -82,7 +83,7 @@
// total number of effects including proxy effects
const size_t mEffectCount;
// Query result of pre and post processing from effect factory
- const std::shared_ptr<const effectsConfig::Processings> mEffectProcessings;
+ const std::vector<Processing> mAidlProcessings;
std::mutex mLock;
uint64_t mEffectIdCounter GUARDED_BY(mLock) = 0; // Align with HIDL (0 is INVALID_ID)
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
index eccdfe8..d1044dc 100644
--- a/media/libaudiohal/impl/StreamHalAidl.cpp
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -122,30 +122,6 @@
return OK;
}
-namespace {
-
-// 'action' must accept a value of type 'T' and return 'status_t'.
-// The function returns 'true' if the parameter was found, and the action has succeeded.
-// The function returns 'false' if the parameter was not found.
-// Any errors get propagated, if there are errors it means the parameter was found.
-template<typename T, typename F>
-error::Result<bool> filterOutAndProcessParameter(
- AudioParameter& parameters, const String8& key, const F& action) {
- if (parameters.containsKey(key)) {
- T value;
- status_t status = parameters.get(key, value);
- if (status == OK) {
- parameters.remove(key);
- status = action(value);
- if (status == OK) return true;
- }
- return base::unexpected(status);
- }
- return false;
-}
-
-} // namespace
-
status_t StreamHalAidl::setParameters(const String8& kvPairs) {
TIME_CHECK();
if (!mStream) return NO_INIT;
@@ -579,10 +555,10 @@
if (!mStream) return NO_INIT;
AudioParameter parameters(kvPairs);
- ALOGD("%s parameters: %s", __func__, parameters.toString().c_str());
+ ALOGD("%s: parameters: \"%s\"", __func__, parameters.toString().c_str());
if (status_t status = filterAndUpdateOffloadMetadata(parameters); status != OK) {
- ALOGW("%s filtering or updating offload metadata failed: %d", __func__, status);
+ ALOGW("%s: filtering or updating offload metadata failed: %d", __func__, status);
}
return StreamHalAidl::setParameters(parameters.toString());
diff --git a/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h b/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h
index 6e09463..2323ed6 100644
--- a/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h
+++ b/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h
@@ -30,6 +30,8 @@
minor = halMinor;
}
+    // Convenience check: true when this version describes the HIDL transport.
+    bool isHidl() const { return type == Type::HIDL; }
+
Type getType() const { return type; }
int getMajorVersion() const { return major; }
diff --git a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
index c076ccc..63f895f 100644
--- a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
+++ b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
@@ -15,6 +15,7 @@
*/
//#define LOG_NDEBUG 0
+#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -92,6 +93,47 @@
}
}
+// Verifies that any processings reported by the factory are well-formed:
+// every non-empty process chain contains at least one effect, and every
+// effect is non-null, named, non-proxy, and carries no sw/hw library entry.
+// Skips when the HAL reports no processings at all.
+TEST(libAudioHalTest, getProcessings) {
+    auto factory = EffectsFactoryHalInterface::create();
+    ASSERT_NE(nullptr, factory);
+
+    const auto &processings = factory->getProcessings();
+    if (processings) {
+        EXPECT_NE(0UL, processings->preprocess.size() + processings->postprocess.size() +
+                               processings->deviceprocess.size());
+
+        // Shared checker applied to each of the three processing categories.
+        auto processingChecker = [](const auto& processings) {
+            if (processings.size() != 0) {
+                // any process need at least 1 effect inside
+                std::for_each(processings.begin(), processings.end(), [](const auto& process) {
+                    EXPECT_NE(0ul, process.effects.size());
+                    // any effect should have a valid name string, and not proxy
+                    for (const auto& effect : process.effects) {
+                        SCOPED_TRACE("Effect: {" +
+                                     (effect == nullptr
+                                              ? "NULL}"
+                                              : ("{name: " + effect->name + ", isproxy: " +
+                                                 (effect->isProxy ? "true" : "false") + ", sw: " +
+                                                 (effect->libSw ? "non-null" : "null") + ", hw: " +
+                                                 (effect->libHw ? "non-null" : "null") + "}")));
+                        EXPECT_NE(nullptr, effect);
+                        EXPECT_NE("", effect->name);
+                        EXPECT_EQ(false, effect->isProxy);
+                        EXPECT_EQ(nullptr, effect->libSw);
+                        EXPECT_EQ(nullptr, effect->libHw);
+                    }
+                });
+            }
+        };
+
+        processingChecker(processings->preprocess);
+        processingChecker(processings->postprocess);
+        processingChecker(processings->deviceprocess);
+    } else {
+        GTEST_SKIP() << "no processing found, skipping the test";
+    }
+}
+
TEST(libAudioHalTest, getHalVersion) {
auto factory = EffectsFactoryHalInterface::create();
ASSERT_NE(nullptr, factory);
diff --git a/media/libeffects/config/include/media/EffectsConfig.h b/media/libeffects/config/include/media/EffectsConfig.h
index a9730e5..09a060d 100644
--- a/media/libeffects/config/include/media/EffectsConfig.h
+++ b/media/libeffects/config/include/media/EffectsConfig.h
@@ -49,26 +49,27 @@
std::string name;
std::string path;
};
-using Libraries = std::vector<Library>;
+using Libraries = std::vector<std::shared_ptr<const Library>>;
struct EffectImpl {
- Library* library; //< Only valid as long as the associated library vector is unmodified
+ //< Only valid as long as the associated library vector is unmodified
+ std::shared_ptr<const Library> library;
effect_uuid_t uuid;
};
struct Effect : public EffectImpl {
std::string name;
bool isProxy;
- EffectImpl libSw; //< Only valid if isProxy
- EffectImpl libHw; //< Only valid if isProxy
+ std::shared_ptr<EffectImpl> libSw; //< Only valid if isProxy
+ std::shared_ptr<EffectImpl> libHw; //< Only valid if isProxy
};
-using Effects = std::vector<Effect>;
+using Effects = std::vector<std::shared_ptr<const Effect>>;
template <class Type>
struct Stream {
Type type;
- std::vector<std::reference_wrapper<Effect>> effects;
+ Effects effects;
};
using OutputStream = Stream<audio_stream_type_t>;
using InputStream = Stream<audio_source_t>;
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 3096659..2ff057e 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -19,6 +19,7 @@
#include <algorithm>
#include <cstdint>
#include <functional>
+#include <memory>
#include <string>
#include <unistd.h>
@@ -149,7 +150,10 @@
ALOGE("library must have a name and a path: %s", dump(xmlLibrary));
return false;
}
- libraries->push_back({name, path});
+
+ // need this temp variable because `struct Library` doesn't have a constructor
+ Library lib({.name = name, .path = path});
+ libraries->push_back(std::make_shared<const Library>(lib));
return true;
}
@@ -157,10 +161,10 @@
* @return nullptr if not found, the element address if found.
*/
template <class T>
-T* findByName(const char* name, std::vector<T>& collection) {
+T findByName(const char* name, std::vector<T>& collection) {
auto it = find_if(begin(collection), end(collection),
- [name] (auto& item) { return item.name == name; });
- return it != end(collection) ? &*it : nullptr;
+ [name](auto& item) { return item && item->name == name; });
+ return it != end(collection) ? *it : nullptr;
}
/** Parse an effect from an xml element describing it.
@@ -187,7 +191,7 @@
}
// Convert library name to a pointer to the previously loaded library
- auto* library = findByName(libraryName, libraries);
+ auto library = findByName(libraryName, libraries);
if (library == nullptr) {
ALOGE("Could not find library referenced in: %s", dump(xmlImpl));
return false;
@@ -211,20 +215,25 @@
effect.isProxy = true;
// Function to parse libhw and libsw
- auto parseProxy = [&xmlEffect, &parseImpl](const char* tag, EffectImpl& proxyLib) {
+ auto parseProxy = [&xmlEffect, &parseImpl](const char* tag,
+ const std::shared_ptr<EffectImpl>& proxyLib) {
auto* xmlProxyLib = xmlEffect.FirstChildElement(tag);
if (xmlProxyLib == nullptr) {
ALOGE("effectProxy must contain a <%s>: %s", tag, dump(xmlEffect));
return false;
}
- return parseImpl(*xmlProxyLib, proxyLib);
+ return parseImpl(*xmlProxyLib, *proxyLib);
};
+ effect.libSw = std::make_shared<EffectImpl>();
+ effect.libHw = std::make_shared<EffectImpl>();
if (!parseProxy("libhw", effect.libHw) || !parseProxy("libsw", effect.libSw)) {
+ effect.libSw.reset();
+ effect.libHw.reset();
return false;
}
}
- effects->push_back(std::move(effect));
+ effects->push_back(std::make_shared<const Effect>(effect));
return true;
}
@@ -250,12 +259,12 @@
ALOGE("<stream|device>/apply must have reference an effect: %s", dump(xmlApply));
return false;
}
- auto* effect = findByName(effectName, effects);
+ auto effect = findByName(effectName, effects);
if (effect == nullptr) {
ALOGE("Could not find effect referenced in: %s", dump(xmlApply));
return false;
}
- stream.effects.emplace_back(*effect);
+ stream.effects.emplace_back(effect);
}
streams->push_back(std::move(stream));
return true;
diff --git a/media/libeffects/factory/EffectsXmlConfigLoader.cpp b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
index 30a9007..9bff136 100644
--- a/media/libeffects/factory/EffectsXmlConfigLoader.cpp
+++ b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
@@ -64,7 +64,7 @@
std::string absolutePath;
if (!resolveLibrary(relativePath, &absolutePath)) {
- ALOGE("Could not find library in effect directories: %s", relativePath);
+ ALOGE("%s Could not find library in effect directories: %s", __func__, relativePath);
libEntry->path = strdup(relativePath);
return false;
}
@@ -75,20 +75,20 @@
std::unique_ptr<void, decltype(dlclose)*> libHandle(dlopen(path, RTLD_NOW),
dlclose);
if (libHandle == nullptr) {
- ALOGE("Could not dlopen library %s: %s", path, dlerror());
+ ALOGE("%s Could not dlopen library %s: %s", __func__, path, dlerror());
return false;
}
auto* description = static_cast<audio_effect_library_t*>(
dlsym(libHandle.get(), AUDIO_EFFECT_LIBRARY_INFO_SYM_AS_STR));
if (description == nullptr) {
- ALOGE("Invalid effect library, failed not find symbol '%s' in %s: %s",
+ ALOGE("%s Invalid effect library, failed not find symbol '%s' in %s: %s", __func__,
AUDIO_EFFECT_LIBRARY_INFO_SYM_AS_STR, path, dlerror());
return false;
}
if (description->tag != AUDIO_EFFECT_LIBRARY_TAG) {
- ALOGE("Bad tag %#08x in description structure, expected %#08x for library %s",
+ ALOGE("%s Bad tag %#08x in description structure, expected %#08x for library %s", __func__,
description->tag, AUDIO_EFFECT_LIBRARY_TAG, path);
return false;
}
@@ -96,8 +96,8 @@
uint32_t majorVersion = EFFECT_API_VERSION_MAJOR(description->version);
uint32_t expectedMajorVersion = EFFECT_API_VERSION_MAJOR(EFFECT_LIBRARY_API_VERSION_CURRENT);
if (majorVersion != expectedMajorVersion) {
- ALOGE("Unsupported major version %#08x, expected %#08x for library %s",
- majorVersion, expectedMajorVersion, path);
+ ALOGE("%s Unsupported major version %#08x, expected %#08x for library %s",
+ __func__, majorVersion, expectedMajorVersion, path);
return false;
}
@@ -155,14 +155,13 @@
{
size_t nbSkippedElement = 0;
for (auto& library : libs) {
-
// Construct a lib entry
auto libEntry = makeUniqueC<lib_entry_t>();
- libEntry->name = strdup(library.name.c_str());
+ libEntry->name = strdup(library->name.c_str());
libEntry->effects = nullptr;
pthread_mutex_init(&libEntry->lock, nullptr);
- if (!loadLibrary(library.path.c_str(), libEntry.get())) {
+ if (!loadLibrary(library->path.c_str(), libEntry.get())) {
// Register library load failure
listPush(std::move(libEntry), libFailedList);
++nbSkippedElement;
@@ -209,24 +208,24 @@
UniqueCPtr<effect_descriptor_t> effectDesc;
};
-LoadEffectResult loadEffect(const EffectImpl& effect, const std::string& name,
- list_elem_t* libList) {
+LoadEffectResult loadEffect(const std::shared_ptr<const EffectImpl>& effect,
+ const std::string& name, list_elem_t* libList) {
LoadEffectResult result;
// Find the effect library
- result.lib = findLibrary(effect.library->name.c_str(), libList);
+ result.lib = findLibrary(effect->library->name.c_str(), libList);
if (result.lib == nullptr) {
- ALOGE("Could not find library %s to load effect %s",
- effect.library->name.c_str(), name.c_str());
+ ALOGE("%s Could not find library %s to load effect %s",
+ __func__, effect->library->name.c_str(), name.c_str());
return result;
}
result.effectDesc = makeUniqueC<effect_descriptor_t>();
// Get the effect descriptor
- if (result.lib->desc->get_descriptor(&effect.uuid, result.effectDesc.get()) != 0) {
+ if (result.lib->desc->get_descriptor(&effect->uuid, result.effectDesc.get()) != 0) {
ALOGE("Error querying effect %s on lib %s",
- uuidToString(effect.uuid), result.lib->name);
+ uuidToString(effect->uuid), result.lib->name);
result.effectDesc.reset();
return result;
}
@@ -241,14 +240,15 @@
// Check effect is supported
uint32_t expectedMajorVersion = EFFECT_API_VERSION_MAJOR(EFFECT_CONTROL_API_VERSION);
if (EFFECT_API_VERSION_MAJOR(result.effectDesc->apiVersion) != expectedMajorVersion) {
- ALOGE("Bad API version %#08x for effect %s in lib %s, expected major %#08x",
+ ALOGE("%s Bad API version %#08x for effect %s in lib %s, expected major %#08x", __func__,
result.effectDesc->apiVersion, name.c_str(), result.lib->name, expectedMajorVersion);
return result;
}
lib_entry_t *_;
- if (findEffect(nullptr, &effect.uuid, &_, nullptr) == 0) {
- ALOGE("Effect %s uuid %s already exist", uuidToString(effect.uuid), name.c_str());
+ if (findEffect(nullptr, &effect->uuid, &_, nullptr) == 0) {
+ ALOGE("%s Effect %s uuid %s already exist", __func__, uuidToString(effect->uuid),
+ name.c_str());
return result;
}
@@ -261,8 +261,11 @@
size_t nbSkippedElement = 0;
for (auto& effect : effects) {
+ if (!effect) {
+ continue;
+ }
- auto effectLoadResult = loadEffect(effect, effect.name, libList);
+ auto effectLoadResult = loadEffect(effect, effect->name, libList);
if (!effectLoadResult.success) {
if (effectLoadResult.effectDesc != nullptr) {
listPush(std::move(effectLoadResult.effectDesc), skippedEffects);
@@ -271,9 +274,9 @@
continue;
}
- if (effect.isProxy) {
- auto swEffectLoadResult = loadEffect(effect.libSw, effect.name + " libsw", libList);
- auto hwEffectLoadResult = loadEffect(effect.libHw, effect.name + " libhw", libList);
+ if (effect->isProxy) {
+ auto swEffectLoadResult = loadEffect(effect->libSw, effect->name + " libsw", libList);
+ auto hwEffectLoadResult = loadEffect(effect->libHw, effect->name + " libhw", libList);
if (!swEffectLoadResult.success || !hwEffectLoadResult.success) {
// Push the main effect in the skipped list even if only a subeffect is invalid
// as the main effect is not usable without its subeffects.
@@ -287,7 +290,7 @@
// get_descriptor call, we replace it with the corresponding
// sw effect descriptor, but keep the Proxy UUID
*effectLoadResult.effectDesc = *swEffectLoadResult.effectDesc;
- effectLoadResult.effectDesc->uuid = effect.uuid;
+ effectLoadResult.effectDesc->uuid = effect->uuid;
effectLoadResult.effectDesc->flags |= EFFECT_FLAG_OFFLOAD_SUPPORTED;
@@ -326,8 +329,8 @@
loadEffects(result.parsedConfig->effects, gLibraryList,
&gSkippedEffects, &gSubEffectList);
- ALOGE_IF(result.nbSkippedElement != 0, "%zu errors during loading of configuration: %s",
- result.nbSkippedElement,
+ ALOGE_IF(result.nbSkippedElement != 0, "%s %zu errors during loading of configuration: %s",
+ __func__, result.nbSkippedElement,
result.configPath.empty() ? "No config file found" : result.configPath.c_str());
return result.nbSkippedElement;
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index c66861b..649f813 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -49,8 +49,9 @@
"liblog",
],
header_libs: [
- "libmedia_helper_headers",
"libaudio_system_headers",
+ "libhardware_headers",
+ "libmedia_helper_headers",
],
export_header_lib_headers: [
"libmedia_helper_headers",
diff --git a/media/libmediahelper/AudioParameter.cpp b/media/libmediahelper/AudioParameter.cpp
index e25f9b7..a61a1bc 100644
--- a/media/libmediahelper/AudioParameter.cpp
+++ b/media/libmediahelper/AudioParameter.cpp
@@ -20,6 +20,7 @@
#include <utils/Log.h>
#include <media/AudioParameter.h>
+#include <hardware/audio.h>
#include <system/audio.h>
namespace android {
@@ -34,7 +35,13 @@
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
const char * const AudioParameter::keyClosing = AUDIO_PARAMETER_KEY_CLOSING;
const char * const AudioParameter::keyExiting = AUDIO_PARAMETER_KEY_EXITING;
+const char * const AudioParameter::keyBtSco = AUDIO_PARAMETER_KEY_BT_SCO;
+const char * const AudioParameter::keyBtScoHeadsetName = AUDIO_PARAMETER_KEY_BT_SCO_HEADSET_NAME;
const char * const AudioParameter::keyBtNrec = AUDIO_PARAMETER_KEY_BT_NREC;
+const char * const AudioParameter::keyBtScoWb = AUDIO_PARAMETER_KEY_BT_SCO_WB;
+const char * const AudioParameter::keyBtHfpEnable = AUDIO_PARAMETER_KEY_HFP_ENABLE;
+const char * const AudioParameter::keyBtHfpSamplingRate = AUDIO_PARAMETER_KEY_HFP_SET_SAMPLING_RATE;
+const char * const AudioParameter::keyBtHfpVolume = AUDIO_PARAMETER_KEY_HFP_VOLUME;
const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
const char * const AudioParameter::keyPresentationId = AUDIO_PARAMETER_STREAM_PRESENTATION_ID;
const char * const AudioParameter::keyProgramId = AUDIO_PARAMETER_STREAM_PROGRAM_ID;
@@ -52,9 +59,13 @@
AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES;
const char * const AudioParameter::valueOn = AUDIO_PARAMETER_VALUE_ON;
const char * const AudioParameter::valueOff = AUDIO_PARAMETER_VALUE_OFF;
+const char * const AudioParameter::valueTrue = AUDIO_PARAMETER_VALUE_TRUE;
+const char * const AudioParameter::valueFalse = AUDIO_PARAMETER_VALUE_FALSE;
const char * const AudioParameter::valueListSeparator = AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+const char * const AudioParameter::keyBtA2dpSuspended = AUDIO_PARAMETER_KEY_BT_A2DP_SUSPENDED;
const char * const AudioParameter::keyReconfigA2dp = AUDIO_PARAMETER_RECONFIG_A2DP;
const char * const AudioParameter::keyReconfigA2dpSupported = AUDIO_PARAMETER_A2DP_RECONFIG_SUPPORTED;
+const char * const AudioParameter::keyBtLeSuspended = AUDIO_PARAMETER_KEY_BT_LE_SUSPENDED;
// const char * const AudioParameter::keyDeviceSupportedEncapsulationModes =
// AUDIO_PARAMETER_DEVICE_SUP_ENCAPSULATION_MODES;
// const char * const AudioParameter::keyDeviceSupportedEncapsulationMetadataTypes =
diff --git a/media/libmediahelper/include/media/AudioParameter.h b/media/libmediahelper/include/media/AudioParameter.h
index 6c34a4f..70f8af3 100644
--- a/media/libmediahelper/include/media/AudioParameter.h
+++ b/media/libmediahelper/include/media/AudioParameter.h
@@ -55,11 +55,22 @@
static const char * const keyClosing;
static const char * const keyExiting;
+ // keyBtSco: Whether BT SCO is 'on' or 'off'
+ // keyBtScoHeadsetName: BT SCO headset name (for debugging)
// keyBtNrec: BT SCO Noise Reduction + Echo Cancellation parameters
+ // keyBtScoWb: BT SCO NR wideband mode
+ // keyHfp...: Parameters of the Hands-Free Profile
+ static const char * const keyBtSco;
+ static const char * const keyBtScoHeadsetName;
+ static const char * const keyBtNrec;
+ static const char * const keyBtScoWb;
+ static const char * const keyBtHfpEnable;
+ static const char * const keyBtHfpSamplingRate;
+ static const char * const keyBtHfpVolume;
+
// keyHwAvSync: get HW synchronization source identifier from a device
// keyMonoOutput: Enable mono audio playback
// keyStreamHwAvSync: set HW synchronization source identifier on a stream
- static const char * const keyBtNrec;
static const char * const keyHwAvSync;
static const char * const keyMonoOutput;
static const char * const keyStreamHwAvSync;
@@ -90,13 +101,19 @@
static const char * const valueOn;
static const char * const valueOff;
+ static const char * const valueTrue;
+ static const char * const valueFalse;
static const char * const valueListSeparator;
+ // keyBtA2dpSuspended: 'true' or 'false'
// keyReconfigA2dp: Ask HwModule to reconfigure A2DP offloaded codec
// keyReconfigA2dpSupported: Query if HwModule supports A2DP offload codec config
+ // keyBtLeSuspended: 'true' or 'false'
+ static const char * const keyBtA2dpSuspended;
static const char * const keyReconfigA2dp;
static const char * const keyReconfigA2dpSupported;
+ static const char * const keyBtLeSuspended;
// For querying device supported encapsulation capabilities. All returned values are integer,
// which are bit fields composed from using encapsulation capability values as position bits.
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index a3c2f1a..2240223 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -86,6 +86,11 @@
if (item != NULL) item->setRate(attr, count, duration);
}
+void mediametrics_setString(mediametrics_handle_t handle, attr_t attr,
+ const std::string &string) {
+ mediametrics_setCString(handle, attr, string.c_str());
+}
+
void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
const char *value) {
Item *item = (Item *) handle;
@@ -152,6 +157,14 @@
return item->getRate(attr, count, duration, rate);
}
+bool mediametrics_getString(mediametrics_handle_t handle, attr_t attr,
+ std::string *string) {
+ Item *item = (Item *) handle;
+ if (item == NULL) return false;
+
+ return item->getString(attr, string);
+}
+
// NB: caller owns the string that comes back, is responsible for freeing it
bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
char **value) {
diff --git a/media/libmediametrics/include/media/MediaMetrics.h b/media/libmediametrics/include/media/MediaMetrics.h
index 76abe86..58612a3 100644
--- a/media/libmediametrics/include/media/MediaMetrics.h
+++ b/media/libmediametrics/include/media/MediaMetrics.h
@@ -50,7 +50,7 @@
void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
int64_t count, int64_t duration);
void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
- const char * value);
+ const char * value);
// fused get/add/set; if attr wasn't there, it's a simple set.
// these do not provide atomicity or mutual exclusion, only simpler code sequences.
@@ -95,4 +95,11 @@
__END_DECLS
+#ifdef __cplusplus
+#include <string>
+void mediametrics_setString(mediametrics_handle_t handle, attr_t attr,
+ const std::string &value);
+bool mediametrics_getString(mediametrics_handle_t handle, attr_t attr, std::string *value);
+#endif // __cplusplus
+
#endif
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index de56665..03834d4 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -1048,6 +1048,9 @@
}
return true;
}
+ bool getString(const char *key, std::string *value) const {
+ return get(key, value);
+ }
// Caller owns the returned string
bool getCString(const char *key, char **value) const {
std::string s;
@@ -1057,9 +1060,6 @@
}
return false;
}
- bool getString(const char *key, std::string *value) const {
- return get(key, value);
- }
const Prop::Elem* get(const char *key) const {
const Prop *prop = findProp(key);
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 569a25f..f1534c9 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -270,6 +270,7 @@
"SurfaceUtils.cpp",
"ThrottledSource.cpp",
"Utils.cpp",
+ "VideoRenderQualityTracker.cpp",
"VideoFrameSchedulerBase.cpp",
"VideoFrameScheduler.cpp",
],
diff --git a/media/libstagefright/FrameCaptureLayer.cpp b/media/libstagefright/FrameCaptureLayer.cpp
index d2cfd41..4e71943 100644
--- a/media/libstagefright/FrameCaptureLayer.cpp
+++ b/media/libstagefright/FrameCaptureLayer.cpp
@@ -64,14 +64,6 @@
return updatedDataspace;
}
-bool isHdrY410(const BufferItem &bi) {
- ui::Dataspace dataspace = translateDataspace(static_cast<ui::Dataspace>(bi.mDataSpace));
- // pixel format is HDR Y410 masquerading as RGBA_1010102
- return ((dataspace == ui::Dataspace::BT2020_ITU_PQ ||
- dataspace == ui::Dataspace::BT2020_ITU_HLG) &&
- bi.mGraphicBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
-}
-
struct FrameCaptureLayer::BufferLayer : public FrameCaptureProcessor::Layer {
BufferLayer(const BufferItem &bi) : mBufferItem(bi) {}
void getLayerSettings(
@@ -95,7 +87,6 @@
layerSettings->source.buffer.fence = mBufferItem.mFence;
layerSettings->source.buffer.textureName = textureName;
layerSettings->source.buffer.usePremultipliedAlpha = false;
- layerSettings->source.buffer.isY410BT2020 = isHdrY410(mBufferItem);
bool hasSmpte2086 = mBufferItem.mHdrMetadata.validTypes & HdrMetadata::SMPTE2086;
bool hasCta861_3 = mBufferItem.mHdrMetadata.validTypes & HdrMetadata::CTA861_3;
layerSettings->source.buffer.maxMasteringLuminance = hasSmpte2086
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 080c3d0..c02573e 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -22,7 +22,6 @@
#include <set>
#include <random>
#include <stdlib.h>
-
#include <inttypes.h>
#include <stdlib.h>
#include <dlfcn.h>
@@ -30,7 +29,6 @@
#include <C2Buffer.h>
#include "include/SoftwareRenderer.h"
-#include "PlaybackDurationAccumulator.h"
#include <android/binder_manager.h>
#include <android/content/pm/IPackageManagerNative.h>
@@ -109,7 +107,9 @@
static const char *kCodecModeImage = "image";
static const char *kCodecModeUnknown = "unknown";
static const char *kCodecEncoder = "android.media.mediacodec.encoder"; /* 0,1 */
+static const char *kCodecHardware = "android.media.mediacodec.hardware"; /* 0,1 */
static const char *kCodecSecure = "android.media.mediacodec.secure"; /* 0, 1 */
+static const char *kCodecTunneled = "android.media.mediacodec.tunneled"; /* 0,1 */
static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
@@ -173,9 +173,9 @@
static const char *kCodecParsedColorStandard = "android.media.mediacodec.parsed-color-standard";
static const char *kCodecParsedColorRange = "android.media.mediacodec.parsed-color-range";
static const char *kCodecParsedColorTransfer = "android.media.mediacodec.parsed-color-transfer";
-static const char *kCodecHDRStaticInfo = "android.media.mediacodec.hdr-static-info";
-static const char *kCodecHDR10PlusInfo = "android.media.mediacodec.hdr10-plus-info";
-static const char *kCodecHDRFormat = "android.media.mediacodec.hdr-format";
+static const char *kCodecHdrStaticInfo = "android.media.mediacodec.hdr-static-info";
+static const char *kCodecHdr10PlusInfo = "android.media.mediacodec.hdr10-plus-info";
+static const char *kCodecHdrFormat = "android.media.mediacodec.hdr-format";
// array/sync/async/block modes
static const char *kCodecArrayMode = "android.media.mediacodec.array-mode";
static const char *kCodecOperationMode = "android.media.mediacodec.operation-mode";
@@ -196,13 +196,44 @@
static const char *kCodecRecentLatencyAvg = "android.media.mediacodec.recent.avg"; /* in us */
static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist"; /* in us */
-static const char *kCodecPlaybackDurationSec =
- "android.media.mediacodec.playback-duration-sec"; /* in sec */
/* -1: shaper disabled
>=0: number of fields changed */
static const char *kCodecShapingEnhanced = "android.media.mediacodec.shaped";
+// Render metrics
+static const char *kCodecPlaybackDurationSec = "android.media.mediacodec.playback-duration-sec";
+static const char *kCodecFirstRenderTimeUs = "android.media.mediacodec.first-render-time-us";
+static const char *kCodecFramesReleased = "android.media.mediacodec.frames-released";
+static const char *kCodecFramesRendered = "android.media.mediacodec.frames-rendered";
+static const char *kCodecFramesDropped = "android.media.mediacodec.frames-dropped";
+static const char *kCodecFramesSkipped = "android.media.mediacodec.frames-skipped";
+static const char *kCodecFramerateContent = "android.media.mediacodec.framerate-content";
+static const char *kCodecFramerateDesired = "android.media.mediacodec.framerate-desired";
+static const char *kCodecFramerateActual = "android.media.mediacodec.framerate-actual";
+static const char *kCodecFreezeCount = "android.media.mediacodec.freeze-count";
+static const char *kCodecFreezeScore = "android.media.mediacodec.freeze-score";
+static const char *kCodecFreezeRate = "android.media.mediacodec.freeze-rate";
+static const char *kCodecFreezeDurationMsAvg = "android.media.mediacodec.freeze-duration-ms-avg";
+static const char *kCodecFreezeDurationMsMax = "android.media.mediacodec.freeze-duration-ms-max";
+static const char *kCodecFreezeDurationMsHistogram =
+ "android.media.mediacodec.freeze-duration-ms-histogram";
+static const char *kCodecFreezeDurationMsHistogramBuckets =
+ "android.media.mediacodec.freeze-duration-ms-histogram-buckets";
+static const char *kCodecFreezeDistanceMsAvg = "android.media.mediacodec.freeze-distance-ms-avg";
+static const char *kCodecFreezeDistanceMsHistogram =
+ "android.media.mediacodec.freeze-distance-ms-histogram";
+static const char *kCodecFreezeDistanceMsHistogramBuckets =
+ "android.media.mediacodec.freeze-distance-ms-histogram-buckets";
+static const char *kCodecJudderCount = "android.media.mediacodec.judder-count";
+static const char *kCodecJudderScore = "android.media.mediacodec.judder-score";
+static const char *kCodecJudderRate = "android.media.mediacodec.judder-rate";
+static const char *kCodecJudderScoreAvg = "android.media.mediacodec.judder-score-avg";
+static const char *kCodecJudderScoreMax = "android.media.mediacodec.judder-score-max";
+static const char *kCodecJudderScoreHistogram = "android.media.mediacodec.judder-score-histogram";
+static const char *kCodecJudderScoreHistogramBuckets =
+ "android.media.mediacodec.judder-score-histogram-buckets";
+
// XXX suppress until we get our representation right
static bool kEmitHistogram = false;
@@ -960,8 +991,7 @@
mHaveInputSurface(false),
mHavePendingInputBuffers(false),
mCpuBoostRequested(false),
- mPlaybackDurationAccumulator(new PlaybackDurationAccumulator()),
- mIsSurfaceToScreen(false),
+ mIsSurfaceToDisplay(false),
mLatencyUnknown(0),
mBytesEncoded(0),
mEarliestEncodedPtsUs(INT64_MAX),
@@ -1096,6 +1126,50 @@
mediametrics_setInt32(mMetricsHandle, kCodecResolutionChangeCount,
mReliabilityContextMetrics.resolutionChangeCount);
+ // Video rendering quality metrics
+ {
+ const VideoRenderQualityMetrics &m = mVideoRenderQualityTracker.getMetrics();
+ if (m.frameRenderedCount > 0) {
+ mediametrics_setInt64(mMetricsHandle, kCodecFirstRenderTimeUs, m.firstRenderTimeUs);
+ mediametrics_setInt64(mMetricsHandle, kCodecFramesReleased, m.frameReleasedCount);
+ mediametrics_setInt64(mMetricsHandle, kCodecFramesRendered, m.frameRenderedCount);
+ mediametrics_setInt64(mMetricsHandle, kCodecFramesSkipped, m.frameSkippedCount);
+ mediametrics_setInt64(mMetricsHandle, kCodecFramesDropped, m.frameDroppedCount);
+ mediametrics_setDouble(mMetricsHandle, kCodecFramerateContent, m.contentFrameRate);
+ mediametrics_setDouble(mMetricsHandle, kCodecFramerateDesired, m.desiredFrameRate);
+ mediametrics_setDouble(mMetricsHandle, kCodecFramerateActual, m.actualFrameRate);
+ }
+ if (m.freezeDurationMsHistogram.getCount() >= 1) {
+ const MediaHistogram<int32_t> &h = m.freezeDurationMsHistogram;
+ mediametrics_setInt64(mMetricsHandle, kCodecFreezeScore, m.freezeScore);
+ mediametrics_setDouble(mMetricsHandle, kCodecFreezeRate, m.freezeRate);
+ mediametrics_setInt64(mMetricsHandle, kCodecFreezeCount, h.getCount());
+ mediametrics_setInt32(mMetricsHandle, kCodecFreezeDurationMsAvg, h.getAvg());
+ mediametrics_setInt32(mMetricsHandle, kCodecFreezeDurationMsMax, h.getMax());
+ mediametrics_setString(mMetricsHandle, kCodecFreezeDurationMsHistogram, h.emit());
+ mediametrics_setString(mMetricsHandle, kCodecFreezeDurationMsHistogramBuckets,
+ h.emitBuckets());
+ }
+ if (m.freezeDistanceMsHistogram.getCount() >= 1) {
+ const MediaHistogram<int32_t> &h = m.freezeDistanceMsHistogram;
+ mediametrics_setInt32(mMetricsHandle, kCodecFreezeDistanceMsAvg, h.getAvg());
+ mediametrics_setString(mMetricsHandle, kCodecFreezeDistanceMsHistogram, h.emit());
+ mediametrics_setString(mMetricsHandle, kCodecFreezeDistanceMsHistogramBuckets,
+ h.emitBuckets());
+ }
+ if (m.judderScoreHistogram.getCount() >= 1) {
+ const MediaHistogram<int32_t> &h = m.judderScoreHistogram;
+ mediametrics_setInt64(mMetricsHandle, kCodecJudderScore, m.judderScore);
+ mediametrics_setDouble(mMetricsHandle, kCodecJudderRate, m.judderRate);
+ mediametrics_setInt64(mMetricsHandle, kCodecJudderCount, h.getCount());
+ mediametrics_setInt32(mMetricsHandle, kCodecJudderScoreAvg, h.getAvg());
+ mediametrics_setInt32(mMetricsHandle, kCodecJudderScoreMax, h.getMax());
+ mediametrics_setString(mMetricsHandle, kCodecJudderScoreHistogram, h.emit());
+ mediametrics_setString(mMetricsHandle, kCodecJudderScoreHistogramBuckets,
+ h.emitBuckets());
+ }
+ }
+
if (mLatencyHist.getCount() != 0 ) {
mediametrics_setInt64(mMetricsHandle, kCodecLatencyMax, mLatencyHist.getMax());
mediametrics_setInt64(mMetricsHandle, kCodecLatencyMin, mLatencyHist.getMin());
@@ -1111,7 +1185,7 @@
if (mLatencyUnknown > 0) {
mediametrics_setInt64(mMetricsHandle, kCodecLatencyUnknown, mLatencyUnknown);
}
- int64_t playbackDurationSec = mPlaybackDurationAccumulator->getDurationInSeconds();
+ int64_t playbackDurationSec = mPlaybackDurationAccumulator.getDurationInSeconds();
if (playbackDurationSec > 0) {
mediametrics_setInt64(mMetricsHandle, kCodecPlaybackDurationSec, playbackDurationSec);
}
@@ -1174,14 +1248,14 @@
&& ColorUtils::isHDRStaticInfoValid(&info)) {
mHdrInfoFlags |= kFlagHasHdrStaticInfo;
}
- mediametrics_setInt32(mMetricsHandle, kCodecHDRStaticInfo,
+ mediametrics_setInt32(mMetricsHandle, kCodecHdrStaticInfo,
(mHdrInfoFlags & kFlagHasHdrStaticInfo) ? 1 : 0);
sp<ABuffer> hdr10PlusInfo;
if (mOutputFormat->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
&& hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
mHdrInfoFlags |= kFlagHasHdr10PlusInfo;
}
- mediametrics_setInt32(mMetricsHandle, kCodecHDR10PlusInfo,
+ mediametrics_setInt32(mMetricsHandle, kCodecHdr10PlusInfo,
(mHdrInfoFlags & kFlagHasHdr10PlusInfo) ? 1 : 0);
// hdr format
@@ -1194,7 +1268,7 @@
&& codedFormat->findInt32(KEY_PROFILE, &profile)
&& colorTransfer != -1) {
hdr_format hdrFormat = getHdrFormat(mime, profile, colorTransfer);
- mediametrics_setInt32(mMetricsHandle, kCodecHDRFormat, static_cast<int>(hdrFormat));
+ mediametrics_setInt32(mMetricsHandle, kCodecHdrFormat, static_cast<int>(hdrFormat));
}
}
@@ -1302,16 +1376,15 @@
return;
}
- Histogram recentHist;
-
// build an empty histogram
+ MediaHistogram<int64_t> recentHist;
recentHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
// stuff it with the samples in the ring buffer
{
Mutex::Autolock al(mRecentLock);
- for (int i=0; i<kRecentLatencyFrames; i++) {
+ for (int i = 0; i < kRecentLatencyFrames; i++) {
if (mRecentSamples[i] != kRecentSampleInvalid) {
recentHist.insert(mRecentSamples[i]);
}
@@ -1319,7 +1392,7 @@
}
// spit the data (if any) into the supplied analytics record
- if (recentHist.getCount()!= 0 ) {
+ if (recentHist.getCount() != 0 ) {
mediametrics_setInt64(item, kCodecRecentLatencyMax, recentHist.getMax());
mediametrics_setInt64(item, kCodecRecentLatencyMin, recentHist.getMin());
mediametrics_setInt64(item, kCodecRecentLatencyAvg, recentHist.getAvg());
@@ -1436,116 +1509,34 @@
ALOGV("TunnelPeekState: %s -> %s", asString(previousState), asString(mTunnelPeekState));
}
-void MediaCodec::updatePlaybackDuration(const sp<AMessage> &msg) {
+void MediaCodec::processRenderedFrames(const sp<AMessage> &msg) {
int what = 0;
msg->findInt32("what", &what);
if (msg->what() != kWhatCodecNotify && what != kWhatOutputFramesRendered) {
static bool logged = false;
if (!logged) {
logged = true;
- ALOGE("updatePlaybackDuration: expected kWhatOuputFramesRendered (%d)", msg->what());
+ ALOGE("processRenderedFrames: expected kWhatOutputFramesRendered (%d)", msg->what());
}
return;
}
- // Playback duration only counts if the buffers are going to the screen.
- if (!mIsSurfaceToScreen) {
- return;
- }
- int64_t renderTimeNs;
- size_t index = 0;
- while (msg->findInt64(AStringPrintf("%zu-system-nano", index++).c_str(), &renderTimeNs)) {
- mPlaybackDurationAccumulator->processRenderTime(renderTimeNs);
- }
-}
-
-bool MediaCodec::Histogram::setup(int nbuckets, int64_t width, int64_t floor)
-{
- if (nbuckets <= 0 || width <= 0) {
- return false;
- }
-
- // get histogram buckets
- if (nbuckets == mBucketCount && mBuckets != NULL) {
- // reuse our existing buffer
- memset(mBuckets, 0, sizeof(*mBuckets) * mBucketCount);
- } else {
- // get a new pre-zeroed buffer
- int64_t *newbuckets = (int64_t *)calloc(nbuckets, sizeof (*mBuckets));
- if (newbuckets == NULL) {
- goto bad;
+ // Rendered frames only matter if they're being sent to the display
+ if (mIsSurfaceToDisplay) {
+ int64_t renderTimeNs;
+ for (size_t index = 0;
+ msg->findInt64(AStringPrintf("%zu-system-nano", index).c_str(), &renderTimeNs);
+ index++) {
+ // Capture metrics for playback duration
+ mPlaybackDurationAccumulator.onFrameRendered(renderTimeNs);
+ // Capture metrics for quality
+ int64_t mediaTimeUs = 0;
+ if (!msg->findInt64(AStringPrintf("%zu-media-time-us", index).c_str(), &mediaTimeUs)) {
+ ALOGE("processRenderedFrames: no media time found");
+ continue;
+ }
+ mVideoRenderQualityTracker.onFrameRendered(mediaTimeUs, renderTimeNs);
}
- if (mBuckets != NULL)
- free(mBuckets);
- mBuckets = newbuckets;
}
-
- mWidth = width;
- mFloor = floor;
- mCeiling = floor + nbuckets * width;
- mBucketCount = nbuckets;
-
- mMin = INT64_MAX;
- mMax = INT64_MIN;
- mSum = 0;
- mCount = 0;
- mBelow = mAbove = 0;
-
- return true;
-
- bad:
- if (mBuckets != NULL) {
- free(mBuckets);
- mBuckets = NULL;
- }
-
- return false;
-}
-
-void MediaCodec::Histogram::insert(int64_t sample)
-{
- // histogram is not set up
- if (mBuckets == NULL) {
- return;
- }
-
- mCount++;
- mSum += sample;
- if (mMin > sample) mMin = sample;
- if (mMax < sample) mMax = sample;
-
- if (sample < mFloor) {
- mBelow++;
- } else if (sample >= mCeiling) {
- mAbove++;
- } else {
- int64_t slot = (sample - mFloor) / mWidth;
- CHECK(slot < mBucketCount);
- mBuckets[slot]++;
- }
- return;
-}
-
-std::string MediaCodec::Histogram::emit()
-{
- std::string value;
- char buffer[64];
-
- // emits: width,Below{bucket0,bucket1,...., bucketN}above
- // unconfigured will emit: 0,0{}0
- // XXX: is this best representation?
- snprintf(buffer, sizeof(buffer), "%" PRId64 ",%" PRId64 ",%" PRId64 "{",
- mFloor, mWidth, mBelow);
- value = buffer;
- for (int i = 0; i < mBucketCount; i++) {
- if (i != 0) {
- value = value + ",";
- }
- snprintf(buffer, sizeof(buffer), "%" PRId64, mBuckets[i]);
- value = value + buffer;
- }
- snprintf(buffer, sizeof(buffer), "}%" PRId64 , mAbove);
- value = value + buffer;
- return value;
}
// when we send a buffer to the codec;
@@ -3622,8 +3613,7 @@
setState(UNINITIALIZED);
} else {
- setState(
- (mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
+ setState((mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
}
break;
}
@@ -3748,6 +3738,9 @@
mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
}
+ mediametrics_setInt32(mMetricsHandle, kCodecHardware,
+ MediaCodecList::isSoftwareCodec(mComponentName) ? 0 : 1);
+
mResourceManagerProxy->addResource(MediaResource::CodecResource(
mFlags & kFlagIsSecure, toMediaResourceSubType(mDomain)));
@@ -3964,7 +3957,7 @@
asString(previousState),
asString(TunnelPeekState::kBufferRendered));
}
- updatePlaybackDuration(msg);
+ processRenderedFrames(msg);
// check that we have a notification set
if (mOnFrameRenderedNotification != NULL) {
sp<AMessage> notify = mOnFrameRenderedNotification->dup();
@@ -4158,6 +4151,11 @@
mState, stateString(mState).c_str());
break;
}
+
+ if (mIsSurfaceToDisplay) {
+ mVideoRenderQualityTracker.resetForDiscontinuity();
+ }
+
// Notify the RM that the codec has been stopped.
ClientConfigParcel clientConfig;
initClientConfigParcel(clientConfig);
@@ -4213,6 +4211,10 @@
break;
}
+ if (mIsSurfaceToDisplay) {
+ mVideoRenderQualityTracker.resetForDiscontinuity();
+ }
+
if (mFlags & kFlagIsAsync) {
setState(FLUSHED);
} else {
@@ -4464,6 +4466,7 @@
} else {
mTunneled = false;
}
+ mediametrics_setInt32(mMetricsHandle, kCodecTunneled, mTunneled ? 1 : 0);
int32_t background = 0;
if (format->findInt32("android._background-mode", &background) && background) {
@@ -5927,7 +5930,9 @@
// If rendering to the screen, then schedule a time in the future to poll to see if this
// frame was ever rendered to seed onFrameRendered callbacks.
- if (mIsSurfaceToScreen) {
+ if (mIsSurfaceToDisplay) {
+ noRenderTime ? mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs)
+ : mVideoRenderQualityTracker.onFrameReleased(mediaTimeUs, renderTimeNs);
// can't initialize this in the constructor because the Looper parent class needs to be
// initialized first
if (mMsgPollForRenderedBuffers == nullptr) {
@@ -5957,6 +5962,11 @@
ALOGI("rendring output error %d", err);
}
} else {
+ if (mIsSurfaceToDisplay) {
+ int64_t mediaTimeUs = -1;
+ buffer->meta()->findInt64("timeUs", &mediaTimeUs);
+ mVideoRenderQualityTracker.onFrameSkipped(mediaTimeUs);
+ }
mBufferChannel->discardBuffer(buffer);
}
@@ -6023,7 +6033,7 @@
// in case we don't connect, ensure that we don't signal the surface is
// connected to the screen
- mIsSurfaceToScreen = false;
+ mIsSurfaceToDisplay = false;
err = nativeWindowConnect(surface.get(), "connectToSurface");
if (err == OK) {
@@ -6053,7 +6063,7 @@
// keep track whether or not the buffers of the connected surface go to the screen
int result = 0;
surface->query(NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &result);
- mIsSurfaceToScreen = result != 0;
+ mIsSurfaceToDisplay = result != 0;
}
}
// do not return ALREADY_EXISTS unless surfaces are the same
@@ -6071,7 +6081,7 @@
}
// assume disconnected even on error
mSurface.clear();
- mIsSurfaceToScreen = false;
+ mIsSurfaceToDisplay = false;
}
return err;
}
diff --git a/media/libstagefright/VideoRenderQualityTracker.cpp b/media/libstagefright/VideoRenderQualityTracker.cpp
new file mode 100644
index 0000000..1072cdd
--- /dev/null
+++ b/media/libstagefright/VideoRenderQualityTracker.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VideoRenderQualityTracker"
+#include <utils/Log.h>
+
+#include <media/stagefright/VideoRenderQualityTracker.h>
+
+#include <assert.h>
+#include <cmath>
+#include <sys/time.h>
+
+namespace android {
+
+static constexpr float FRAME_RATE_UNDETERMINED = VideoRenderQualityMetrics::FRAME_RATE_UNDETERMINED;
+static constexpr float FRAME_RATE_24_3_2_PULLDOWN =
+ VideoRenderQualityMetrics::FRAME_RATE_24_3_2_PULLDOWN;
+
+VideoRenderQualityMetrics::VideoRenderQualityMetrics() {
+ clear();
+}
+
+void VideoRenderQualityMetrics::clear() {
+ firstRenderTimeUs = 0;
+ frameReleasedCount = 0;
+ frameRenderedCount = 0;
+ frameDroppedCount = 0;
+ frameSkippedCount = 0;
+ contentFrameRate = FRAME_RATE_UNDETERMINED;
+ desiredFrameRate = FRAME_RATE_UNDETERMINED;
+ actualFrameRate = FRAME_RATE_UNDETERMINED;
+ freezeDurationMsHistogram.clear();
+ freezeDistanceMsHistogram.clear();
+ judderScoreHistogram.clear();
+}
+
+VideoRenderQualityTracker::Configuration::Configuration() {
+ enabled = true;
+
+ // Assume that the app is skipping frames because it's detected that the frame couldn't be
+ // rendered in time.
+ areSkippedFramesDropped = true;
+
+ // 400ms is 8 frames at 20 frames per second and 24 frames at 60 frames per second
+ maxExpectedContentFrameDurationUs = 400 * 1000;
+
+ // Allow for 2 milliseconds of deviation when detecting frame rates
+ frameRateDetectionToleranceUs = 2 * 1000;
+
+ // Allow for a tolerance of 200 milliseconds for determining if we moved forward in content time
+ // because of frame drops for live content, or because the user is seeking.
+ contentTimeAdvancedForLiveContentToleranceUs = 200 * 1000;
+
+ // Freeze configuration
+ freezeDurationMsHistogramBuckets = {1, 20, 40, 60, 80, 100, 120, 150, 175, 225, 300, 400, 500};
+ freezeDurationMsHistogramToScore = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+ freezeDistanceMsHistogramBuckets = {0, 20, 100, 400, 1000, 2000, 3000, 4000, 8000, 15000, 30000,
+ 60000};
+
+ // Judder configuration
+ judderErrorToleranceUs = 2000;
+ judderScoreHistogramBuckets = {1, 4, 5, 9, 11, 20, 30, 40, 50, 60, 70, 80};
+ judderScoreHistogramToScore = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+}
+
+VideoRenderQualityTracker::VideoRenderQualityTracker() : mConfiguration(Configuration()) {
+ configureHistograms(mMetrics, mConfiguration);
+ clear();
+}
+
+VideoRenderQualityTracker::VideoRenderQualityTracker(const Configuration &configuration) :
+ mConfiguration(configuration) {
+ configureHistograms(mMetrics, mConfiguration);
+ clear();
+}
+
+void VideoRenderQualityTracker::onFrameSkipped(int64_t contentTimeUs) {
+ if (!mConfiguration.enabled) {
+ return;
+ }
+
+ // Frames skipped at the beginning shouldn't really be counted as skipped frames, since the
+ // app might be seeking to a starting point that isn't the first key frame.
+ if (mLastRenderTimeUs == -1) {
+ return;
+ }
+ // Frames skipped at the end of playback shouldn't be counted as skipped frames, since the
+ // app could be terminating the playback. The pending count will be added to the metrics if and
+ // when the next frame is rendered.
+ mPendingSkippedFrameContentTimeUsList.push_back(contentTimeUs);
+}
+
+void VideoRenderQualityTracker::onFrameReleased(int64_t contentTimeUs) {
+ onFrameReleased(contentTimeUs, nowUs() * 1000);
+}
+
+void VideoRenderQualityTracker::onFrameReleased(int64_t contentTimeUs,
+ int64_t desiredRenderTimeNs) {
+ if (!mConfiguration.enabled) {
+ return;
+ }
+
+ int64_t desiredRenderTimeUs = desiredRenderTimeNs / 1000;
+ resetIfDiscontinuity(contentTimeUs, desiredRenderTimeUs);
+ mMetrics.frameReleasedCount++;
+ mNextExpectedRenderedFrameQueue.push({contentTimeUs, desiredRenderTimeUs});
+ mLastContentTimeUs = contentTimeUs;
+}
+
+void VideoRenderQualityTracker::onFrameRendered(int64_t contentTimeUs, int64_t actualRenderTimeNs) {
+ if (!mConfiguration.enabled) {
+ return;
+ }
+
+ int64_t actualRenderTimeUs = actualRenderTimeNs / 1000;
+
+ if (mLastRenderTimeUs != -1) {
+ mRenderDurationMs += (actualRenderTimeUs - mLastRenderTimeUs) / 1000;
+ }
+ // Now that a frame has been rendered, the previously skipped frames can be processed as skipped
+ // frames since the app is not skipping them to terminate playback.
+ for (int64_t contentTimeUs : mPendingSkippedFrameContentTimeUsList) {
+ processMetricsForSkippedFrame(contentTimeUs);
+ }
+ mPendingSkippedFrameContentTimeUsList = {};
+
+ static const FrameInfo noFrame = {-1, -1};
+ FrameInfo nextExpectedFrame = noFrame;
+ while (!mNextExpectedRenderedFrameQueue.empty()) {
+ nextExpectedFrame = mNextExpectedRenderedFrameQueue.front();
+ mNextExpectedRenderedFrameQueue.pop();
+ // Happy path - the rendered frame is what we expected it to be
+ if (contentTimeUs == nextExpectedFrame.contentTimeUs) {
+ break;
+ }
+ // This isn't really supposed to happen - the next rendered frame should be the expected
+ // frame, or, if there's frame drops, it will be a frame later in the content stream
+ if (contentTimeUs < nextExpectedFrame.contentTimeUs) {
+ ALOGW("Rendered frame is earlier than the next expected frame (%lld, %lld)",
+ (long long) contentTimeUs, (long long) nextExpectedFrame.contentTimeUs);
+ break;
+ }
+ processMetricsForDroppedFrame(nextExpectedFrame.contentTimeUs,
+ nextExpectedFrame.desiredRenderTimeUs);
+ }
+ processMetricsForRenderedFrame(nextExpectedFrame.contentTimeUs,
+ nextExpectedFrame.desiredRenderTimeUs, actualRenderTimeUs);
+ mLastRenderTimeUs = actualRenderTimeUs;
+}
+
+const VideoRenderQualityMetrics &VideoRenderQualityTracker::getMetrics() {
+ if (!mConfiguration.enabled) {
+ return mMetrics;
+ }
+
+ mMetrics.freezeScore = 0;
+ if (mConfiguration.freezeDurationMsHistogramToScore.size() ==
+ mMetrics.freezeDurationMsHistogram.size()) {
+ for (int i = 0; i < mMetrics.freezeDurationMsHistogram.size(); ++i) {
+ int32_t count = 0;
+ for (int j = i; j < mMetrics.freezeDurationMsHistogram.size(); ++j) {
+ count += mMetrics.freezeDurationMsHistogram[j];
+ }
+ mMetrics.freezeScore += count / mConfiguration.freezeDurationMsHistogramToScore[i];
+ }
+ }
+ mMetrics.freezeRate = float(double(mMetrics.freezeDurationMsHistogram.getSum()) /
+ mRenderDurationMs);
+
+ mMetrics.judderScore = 0;
+ if (mConfiguration.judderScoreHistogramToScore.size() == mMetrics.judderScoreHistogram.size()) {
+ for (int i = 0; i < mMetrics.judderScoreHistogram.size(); ++i) {
+ int32_t count = 0;
+ for (int j = i; j < mMetrics.judderScoreHistogram.size(); ++j) {
+ count += mMetrics.judderScoreHistogram[j];
+ }
+ mMetrics.judderScore += count / mConfiguration.judderScoreHistogramToScore[i];
+ }
+ }
+ mMetrics.judderRate = float(double(mMetrics.judderScoreHistogram.getCount()) /
+ (mMetrics.frameReleasedCount + mMetrics.frameSkippedCount));
+
+ return mMetrics;
+}
+
+void VideoRenderQualityTracker::clear() {
+ mRenderDurationMs = 0;
+ mMetrics.clear();
+ resetForDiscontinuity();
+}
+
+void VideoRenderQualityTracker::resetForDiscontinuity() {
+ mLastContentTimeUs = -1;
+ mLastRenderTimeUs = -1;
+ mLastFreezeEndTimeUs = -1;
+
+ // Don't worry about tracking frame rendering times from now up until playback catches up to the
+ // discontinuity. While stuttering or freezing could be found in the next few frames, the impact
+ // to the user is minimal, so better to just keep things simple and don't bother.
+ mNextExpectedRenderedFrameQueue = {};
+
+ // Ignore any frames that were skipped just prior to the discontinuity.
+ mPendingSkippedFrameContentTimeUsList = {};
+
+ // All frame durations can now be ignored since all bets are off on what the render
+ // durations should be after the discontinuity.
+ for (int i = 0; i < FrameDurationUs::SIZE; ++i) {
+ mActualFrameDurationUs[i] = -1;
+ mDesiredFrameDurationUs[i] = -1;
+ mContentFrameDurationUs[i] = -1;
+ }
+}
+
+bool VideoRenderQualityTracker::resetIfDiscontinuity(int64_t contentTimeUs,
+ int64_t desiredRenderTimeUs) {
+ if (mLastContentTimeUs == -1) {
+ resetForDiscontinuity();
+ return true;
+ }
+ if (contentTimeUs < mLastContentTimeUs) {
+ ALOGI("Video playback jumped %d ms backwards in content time (%d -> %d)",
+ int((mLastContentTimeUs - contentTimeUs) / 1000), int(mLastContentTimeUs / 1000),
+ int(contentTimeUs / 1000));
+ resetForDiscontinuity();
+ return true;
+ }
+ if (contentTimeUs - mLastContentTimeUs > mConfiguration.maxExpectedContentFrameDurationUs) {
+ // The content frame duration could be long due to frame drops for live content. This can be
+ // detected by looking at the app's desired rendering duration. If the app's rendered frame
+ // duration is roughly the same as the content's frame duration, then it is assumed that
+ // the forward discontinuity is due to frame drops for live content. A false positive can
+ // occur if the time the user spends seeking is equal to the duration of the seek. This is
+ // very unlikely to occur in practice but CAN occur - the user starts seeking forward, gets
+ // distracted, and then returns to seeking forward.
+ int64_t contentFrameDurationUs = contentTimeUs - mLastContentTimeUs;
+ int64_t desiredFrameDurationUs = desiredRenderTimeUs - mLastRenderTimeUs;
+ bool skippedForwardDueToLiveContentFrameDrops =
+ abs(contentFrameDurationUs - desiredFrameDurationUs) <
+ mConfiguration.contentTimeAdvancedForLiveContentToleranceUs;
+ if (!skippedForwardDueToLiveContentFrameDrops) {
+ ALOGI("Video playback jumped %d ms forward in content time (%d -> %d) ",
+ int((contentTimeUs - mLastContentTimeUs) / 1000), int(mLastContentTimeUs / 1000),
+ int(contentTimeUs / 1000));
+ resetForDiscontinuity();
+ return true;
+ }
+ }
+ return false;
+}
+
+void VideoRenderQualityTracker::processMetricsForSkippedFrame(int64_t contentTimeUs) {
+ mMetrics.frameSkippedCount++;
+ if (mConfiguration.areSkippedFramesDropped) {
+ processMetricsForDroppedFrame(contentTimeUs, -1);
+ return;
+ }
+ updateFrameDurations(mContentFrameDurationUs, contentTimeUs);
+ updateFrameDurations(mDesiredFrameDurationUs, -1);
+ updateFrameDurations(mActualFrameDurationUs, -1);
+ updateFrameRate(mMetrics.contentFrameRate, mContentFrameDurationUs, mConfiguration);
+}
+
+void VideoRenderQualityTracker::processMetricsForDroppedFrame(int64_t contentTimeUs,
+ int64_t desiredRenderTimeUs) {
+ mMetrics.frameDroppedCount++;
+ updateFrameDurations(mContentFrameDurationUs, contentTimeUs);
+ updateFrameDurations(mDesiredFrameDurationUs, desiredRenderTimeUs);
+ updateFrameDurations(mActualFrameDurationUs, -1);
+ updateFrameRate(mMetrics.contentFrameRate, mContentFrameDurationUs, mConfiguration);
+ updateFrameRate(mMetrics.desiredFrameRate, mDesiredFrameDurationUs, mConfiguration);
+}
+
+void VideoRenderQualityTracker::processMetricsForRenderedFrame(int64_t contentTimeUs,
+ int64_t desiredRenderTimeUs,
+ int64_t actualRenderTimeUs) {
+ // Capture the timestamp at which the first frame was rendered
+ if (mMetrics.firstRenderTimeUs == 0) {
+ mMetrics.firstRenderTimeUs = actualRenderTimeUs;
+ }
+
+ mMetrics.frameRenderedCount++;
+
+ // The content time is -1 when it was rendered after a discontinuity (e.g. seek) was detected.
+ // So, even though a frame was rendered, it's impact on the user is insignificant, so don't do
+ // anything other than count it as a rendered frame.
+ if (contentTimeUs == -1) {
+ return;
+ }
+ updateFrameDurations(mContentFrameDurationUs, contentTimeUs);
+ updateFrameDurations(mDesiredFrameDurationUs, desiredRenderTimeUs);
+ updateFrameDurations(mActualFrameDurationUs, actualRenderTimeUs);
+ updateFrameRate(mMetrics.contentFrameRate, mContentFrameDurationUs, mConfiguration);
+ updateFrameRate(mMetrics.desiredFrameRate, mDesiredFrameDurationUs, mConfiguration);
+ updateFrameRate(mMetrics.actualFrameRate, mActualFrameDurationUs, mConfiguration);
+
+ // If the previous frame was dropped, there was a freeze if we've already rendered a frame
+ if (mActualFrameDurationUs[1] == -1 && mLastRenderTimeUs != -1) {
+ processFreeze(actualRenderTimeUs, mLastRenderTimeUs, mLastFreezeEndTimeUs, mMetrics);
+ mLastFreezeEndTimeUs = actualRenderTimeUs;
+ }
+
+ // Judder is computed on the prior video frame, not the current video frame
+ int64_t judderScore = computePreviousJudderScore(mActualFrameDurationUs,
+ mContentFrameDurationUs,
+ mConfiguration);
+ if (judderScore != 0) {
+ mMetrics.judderScoreHistogram.insert(judderScore);
+ }
+}
+
+void VideoRenderQualityTracker::processFreeze(int64_t actualRenderTimeUs, int64_t lastRenderTimeUs,
+ int64_t lastFreezeEndTimeUs,
+ VideoRenderQualityMetrics &m) {
+ int64_t freezeDurationMs = (actualRenderTimeUs - lastRenderTimeUs) / 1000;
+ m.freezeDurationMsHistogram.insert(freezeDurationMs);
+ if (lastFreezeEndTimeUs != -1) {
+ int64_t distanceSinceLastFreezeMs = (lastRenderTimeUs - lastFreezeEndTimeUs) / 1000;
+ m.freezeDistanceMsHistogram.insert(distanceSinceLastFreezeMs);
+ }
+}
+
+int64_t VideoRenderQualityTracker::computePreviousJudderScore(
+ const FrameDurationUs &actualFrameDurationUs,
+ const FrameDurationUs &contentFrameDurationUs,
+ const Configuration &c) {
+ // If the frame before or after was dropped, then don't generate a judder score, since any
+ // problems with frame drops are scored as a freeze instead.
+ if (actualFrameDurationUs[0] == -1 || actualFrameDurationUs[1] == -1 ||
+ actualFrameDurationUs[2] == -1) {
+ return 0;
+ }
+
+ // Don't score judder for when playback is paused or rebuffering (long frame duration), or if
+ // the player is intentionally playing each frame at a slow rate (e.g. half-rate). If the long
+ // frame duration was unintentional, it is assumed that this will be coupled with a later frame
+ // drop, and be scored as a freeze instead of judder.
+ if (actualFrameDurationUs[1] >= 2 * contentFrameDurationUs[1]) {
+ return 0;
+ }
+
+ // The judder score is based on the error of this frame
+ int64_t errorUs = actualFrameDurationUs[1] - contentFrameDurationUs[1];
+ // Don't score judder if the previous frame has high error, but this frame has low error
+ if (abs(errorUs) < c.judderErrorToleranceUs) {
+ return 0;
+ }
+
+ // Add a penalty if this frame has judder that amplifies the problem introduced by previous
+ // judder, instead of catching up for the previous judder (50, 16, 16, 50) vs (50, 16, 50, 16)
+ int64_t previousErrorUs = actualFrameDurationUs[2] - contentFrameDurationUs[2];
+ // Don't add the penalty for errors from the previous frame if the previous frame has low error
+ if (abs(previousErrorUs) >= c.judderErrorToleranceUs) {
+ errorUs = abs(errorUs) + abs(errorUs + previousErrorUs);
+ }
+
+ // Avoid scoring judder for 3:2 pulldown or other minimally-small frame duration errors
+ if (abs(errorUs) < contentFrameDurationUs[1] / 4) {
+ return 0;
+ }
+
+ return abs(errorUs) / 1000; // error in millis to keep numbers small
+}
+
+void VideoRenderQualityTracker::configureHistograms(VideoRenderQualityMetrics &m,
+ const Configuration &c) {
+ m.freezeDurationMsHistogram.setup(c.freezeDurationMsHistogramBuckets);
+ m.freezeDistanceMsHistogram.setup(c.freezeDistanceMsHistogramBuckets);
+ m.judderScoreHistogram.setup(c.judderScoreHistogramBuckets);
+}
+
+int64_t VideoRenderQualityTracker::nowUs() {
+ struct timespec t;
+ t.tv_sec = t.tv_nsec = 0;
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ return (t.tv_sec * 1000000000LL + t.tv_nsec) / 1000LL;
+}
+
+void VideoRenderQualityTracker::updateFrameDurations(FrameDurationUs &durationUs,
+ int64_t newTimestampUs) {
+ for (int i = FrameDurationUs::SIZE - 1; i > 0; --i ) {
+ durationUs[i] = durationUs[i - 1];
+ }
+ if (newTimestampUs == -1) {
+ durationUs[0] = -1;
+ } else {
+ durationUs[0] = durationUs.priorTimestampUs == -1 ? -1 :
+ newTimestampUs - durationUs.priorTimestampUs;
+ durationUs.priorTimestampUs = newTimestampUs;
+ }
+}
+
+void VideoRenderQualityTracker::updateFrameRate(float &frameRate, const FrameDurationUs &durationUs,
+ const Configuration &c) {
+ float newFrameRate = detectFrameRate(durationUs, c);
+ if (newFrameRate != FRAME_RATE_UNDETERMINED) {
+ frameRate = newFrameRate;
+ }
+}
+
+float VideoRenderQualityTracker::detectFrameRate(const FrameDurationUs &durationUs,
+ const Configuration &c) {
+ // At least 3 frames are necessary to detect stable frame rates
+ assert(FrameDurationUs::SIZE >= 3);
+ if (durationUs[0] == -1 || durationUs[1] == -1 || durationUs[2] == -1) {
+ return FRAME_RATE_UNDETERMINED;
+ }
+ // Only determine frame rate if the render durations are stable across 3 frames
+ if (abs(durationUs[0] - durationUs[1]) > c.frameRateDetectionToleranceUs ||
+ abs(durationUs[0] - durationUs[2]) > c.frameRateDetectionToleranceUs) {
+ return is32pulldown(durationUs, c) ? FRAME_RATE_24_3_2_PULLDOWN : FRAME_RATE_UNDETERMINED;
+ }
+ return 1000.0 * 1000.0 / durationUs[0];
+}
+
+bool VideoRenderQualityTracker::is32pulldown(const FrameDurationUs &durationUs,
+ const Configuration &c) {
+ // At least 5 frames are necessary to detect stable 3:2 pulldown
+ assert(FrameDurationUs::SIZE >= 5);
+ if (durationUs[0] == -1 || durationUs[1] == -1 || durationUs[2] == -1 || durationUs[3] == -1 ||
+ durationUs[4] == -1) {
+ return false;
+ }
+ // 3:2 pulldown expects that every other frame has identical duration...
+ if (abs(durationUs[0] - durationUs[2]) > c.frameRateDetectionToleranceUs ||
+ abs(durationUs[1] - durationUs[3]) > c.frameRateDetectionToleranceUs ||
+ abs(durationUs[0] - durationUs[4]) > c.frameRateDetectionToleranceUs) {
+ return false;
+ }
+ // ... for either 2 vsyncs or 3 vsyncs
+ if ((abs(durationUs[0] - 33333) < c.frameRateDetectionToleranceUs &&
+ abs(durationUs[1] - 50000) < c.frameRateDetectionToleranceUs) ||
+ (abs(durationUs[0] - 50000) < c.frameRateDetectionToleranceUs &&
+ abs(durationUs[1] - 33333) < c.frameRateDetectionToleranceUs)) {
+ return true;
+ }
+ return false;
+}
+
+} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 3d4b6f8..52d7d3d 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -30,6 +30,9 @@
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/CodecErrorLog.h>
#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaHistogram.h>
+#include <media/stagefright/PlaybackDurationAccumulator.h>
+#include <media/stagefright/VideoRenderQualityTracker.h>
#include <utils/Vector.h>
class C2Buffer;
@@ -63,7 +66,6 @@
struct PersistentSurface;
class SoftwareRenderer;
class Surface;
-class PlaybackDurationAccumulator;
namespace hardware {
namespace cas {
namespace native {
@@ -459,7 +461,7 @@
void onGetMetrics(const sp<AMessage>& msg);
constexpr const char *asString(TunnelPeekState state, const char *default_string="?");
void updateTunnelPeek(const sp<AMessage> &msg);
- void updatePlaybackDuration(const sp<AMessage> &msg);
+ void processRenderedFrames(const sp<AMessage> &msg);
inline void initClientConfigParcel(ClientConfigParcel& clientConfig);
@@ -569,8 +571,9 @@
sp<CryptoAsync> mCryptoAsync;
sp<ALooper> mCryptoLooper;
- std::unique_ptr<PlaybackDurationAccumulator> mPlaybackDurationAccumulator;
- bool mIsSurfaceToScreen;
+ bool mIsSurfaceToDisplay;
+ PlaybackDurationAccumulator mPlaybackDurationAccumulator;
+ VideoRenderQualityTracker mVideoRenderQualityTracker;
MediaCodec(
const sp<ALooper> &looper, pid_t pid, uid_t uid,
@@ -712,31 +715,8 @@
int mRecentHead;
Mutex mRecentLock;
- class Histogram {
- public:
- Histogram() : mFloor(0), mWidth(0), mBelow(0), mAbove(0),
- mMin(INT64_MAX), mMax(INT64_MIN), mSum(0), mCount(0),
- mBucketCount(0), mBuckets(NULL) {};
- ~Histogram() { clear(); };
- void clear() { if (mBuckets != NULL) free(mBuckets); mBuckets = NULL; };
- bool setup(int nbuckets, int64_t width, int64_t floor = 0);
- void insert(int64_t sample);
- int64_t getMin() const { return mMin; }
- int64_t getMax() const { return mMax; }
- int64_t getCount() const { return mCount; }
- int64_t getSum() const { return mSum; }
- int64_t getAvg() const { return mSum / (mCount == 0 ? 1 : mCount); }
- std::string emit();
- private:
- int64_t mFloor, mCeiling, mWidth;
- int64_t mBelow, mAbove;
- int64_t mMin, mMax, mSum, mCount;
+ MediaHistogram<int64_t> mLatencyHist;
- int mBucketCount;
- int64_t *mBuckets;
- };
-
- Histogram mLatencyHist;
// An unique ID for the codec - Used by the metrics.
uint64_t mCodecId = 0;
diff --git a/media/libstagefright/include/media/stagefright/MediaHistogram.h b/media/libstagefright/include/media/stagefright/MediaHistogram.h
new file mode 100644
index 0000000..da8415a
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaHistogram.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HISTOGRAM_H_
+#define MEDIA_HISTOGRAM_H_
+
+#include <limits>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace android {
+
+template<typename T>
+class MediaHistogram {
+public:
+ MediaHistogram();
+ void clear();
+ bool setup(int bucketCount, T width, T floor = 0);
+ bool setup(const std::vector<T> &bucketLimits);
+ void insert(T sample);
+ size_t size();
+ int64_t operator[](int);
+ T getMin() const { return mMin; }
+ T getMax() const { return mMax; }
+ T getCount() const { return mCount; }
+ T getSum() const { return mSum; }
+ T getAvg() const { return mSum / (mCount == 0 ? 1 : mCount); }
+ T getPercentile(int) const;
+ std::string emit() const;
+ std::string emitBuckets() const;
+private:
+ MediaHistogram(const MediaHistogram &); // disallow
+
+ bool allocate(int bucketCount, bool withBucketLimits);
+
+ T mFloor, mCeiling, mWidth;
+ T mMin, mMax, mSum;
+ int64_t mBelow, mAbove, mCount;
+ std::vector<T> mBuckets;
+ std::vector<T> mBucketLimits;
+};
+
+template<typename T>
+MediaHistogram<T>::MediaHistogram() {
+ mWidth = mCeiling = mFloor = -1;
+ clear();
+}
+
+template<typename T>
+void MediaHistogram<T>::clear() {
+ for (int i = 0; i < mBuckets.size(); ++i) {
+ mBuckets[i] = 0;
+ }
+ mMin = std::numeric_limits<T>::max();
+ mMax = std::numeric_limits<T>::min();
+ mSum = 0;
+ mCount = 0;
+ mBelow = mAbove = 0;
+}
+
+template<typename T>
+bool MediaHistogram<T>::setup(int bucketCount, T width, T floor) {
+ if (bucketCount <= 0 || width <= 0) {
+ return false;
+ }
+ if (!allocate(bucketCount, false)) {
+ return false;
+ }
+ mWidth = width;
+ mFloor = floor;
+ mCeiling = floor + bucketCount * width;
+ clear();
+ return true;
+}
+
+template<typename T>
+bool MediaHistogram<T>::setup(const std::vector<T> &bucketLimits) {
+ if (bucketLimits.size() <= 1) {
+ return false;
+ }
+ int bucketCount = bucketLimits.size() - 1;
+ if (!allocate(bucketCount, true)) {
+ return false;
+ }
+
+ mWidth = -1;
+ mFloor = bucketLimits[0];
+ for (int i = 0; i < bucketCount; ++i) {
+ mBucketLimits[i] = bucketLimits[i + 1];
+ }
+ mCeiling = bucketLimits[bucketCount];
+ clear();
+ return true;
+}
+
+template<typename T>
+bool MediaHistogram<T>::allocate(int bucketCount, bool withBucketLimits) {
+ assert(bucketCount > 0);
+ if (bucketCount != mBuckets.size()) {
+ mBuckets = std::vector<T>(bucketCount, 0);
+ }
+ if (withBucketLimits && mBucketLimits.size() != bucketCount) {
+ mBucketLimits = std::vector<T>(bucketCount, 0);
+ }
+ return true;
+}
+
+template<typename T>
+void MediaHistogram<T>::insert(T sample) {
+ // histogram is not set up
+ if (mBuckets.size() == 0) {
+ return;
+ }
+
+ mCount++;
+ mSum += sample;
+ if (mMin > sample) mMin = sample;
+ if (mMax < sample) mMax = sample;
+
+ if (sample < mFloor) {
+ mBelow++;
+ } else if (sample >= mCeiling) {
+ mAbove++;
+ } else if (mWidth == -1) {
+ // A binary search might be more efficient for large number of buckets, but it is expected
+ // that there will never be a large amount of buckets, so keep the code simple.
+ for (int slot = 0; slot < mBucketLimits.size(); ++slot) {
+ if (sample < mBucketLimits[slot]) {
+ mBuckets[slot]++;
+ break;
+ }
+ }
+ } else {
+ int64_t slot = (sample - mFloor) / mWidth;
+ assert(slot < mBuckets.size());
+ mBuckets[slot]++;
+ }
+ return;
+}
+
+template<typename T>
+size_t MediaHistogram<T>::size() {
+ return mBuckets.size() + 1;
+}
+
+template<typename T>
+int64_t MediaHistogram<T>::operator[](int i) {
+ assert(i >= 0);
+ assert(i <= mBuckets.size());
+ if (i == mBuckets.size()) {
+ return mAbove;
+ }
+ return mBuckets[i];
+}
+
+template<typename T>
+std::string MediaHistogram<T>::emit() const {
+ // emits: floor,width,below{bucket0,bucket1,...., bucketN}above
+ // or.. emits: below{bucket0,bucket1,...., bucketN}above
+ // unconfigured will emit: 0{}0
+ // XXX: is this best representation?
+ std::stringstream ss("");
+ if (mWidth == -1) {
+ ss << mBelow << "{";
+ } else {
+ ss << mFloor << "," << mWidth << "," << mBelow << "{";
+ }
+ for (int i = 0; i < mBuckets.size(); i++) {
+ if (i != 0) {
+ ss << ",";
+ }
+ ss << mBuckets[i];
+ }
+ ss << "}" << mAbove;
+ return ss.str();
+}
+
+template<typename T>
+std::string MediaHistogram<T>::emitBuckets() const {
+ std::stringstream ss("");
+ if (mWidth == -1) {
+ ss << mFloor;
+ for (int i = 0; i < mBucketLimits.size(); ++i) {
+ ss << ',' << mBucketLimits[i];
+ }
+ ss << ',' << mCeiling;
+ } else {
+ ss << mFloor;
+ for (int i = 0; i < mBuckets.size(); ++i) {
+ ss << ',' << (mFloor + i * mWidth);
+ }
+ ss << ',' << mCeiling;
+ }
+ return ss.str();
+}
+
+} // android
+
+#endif // MEDIA_HISTOGRAM_H_
\ No newline at end of file
diff --git a/media/libstagefright/PlaybackDurationAccumulator.h b/media/libstagefright/include/media/stagefright/PlaybackDurationAccumulator.h
similarity index 95%
rename from media/libstagefright/PlaybackDurationAccumulator.h
rename to media/libstagefright/include/media/stagefright/PlaybackDurationAccumulator.h
index cb5f0c4..bdf1171 100644
--- a/media/libstagefright/PlaybackDurationAccumulator.h
+++ b/media/libstagefright/include/media/stagefright/PlaybackDurationAccumulator.h
@@ -33,7 +33,7 @@
}
// Process a render time expressed in nanoseconds.
- void processRenderTime(int64_t newRenderTimeNs) {
+ void onFrameRendered(int64_t newRenderTimeNs) {
// If we detect wrap-around or out of order frames, just ignore the duration for this
// and the next frame.
if (newRenderTimeNs < mPreviousRenderTimeNs) {
@@ -59,7 +59,7 @@
int64_t mPreviousRenderTimeNs;
};
-}
+} // android
-#endif
+#endif // PLAYBACK_DURATION_ACCUMULATOR_H_
diff --git a/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h b/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h
new file mode 100644
index 0000000..ec25a36
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/VideoRenderQualityTracker.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_RENDER_QUALITY_TRACKER_H_
+
+#define VIDEO_RENDER_QUALITY_TRACKER_H_
+
+#include <assert.h>
+#include <list>
+#include <queue>
+
+#include <media/stagefright/MediaHistogram.h>
+
+namespace android {
+
+// A variety of video rendering quality metrics.
+struct VideoRenderQualityMetrics {
+ static constexpr float FRAME_RATE_UNDETERMINED = -1.0f;
+ static constexpr float FRAME_RATE_24_3_2_PULLDOWN = -2.0f;
+
+ VideoRenderQualityMetrics();
+
+ void clear();
+
+ // The render time of the first video frame.
+ int64_t firstRenderTimeUs;
+
+ // The number of frames released to be rendered.
+ int64_t frameReleasedCount;
+
+ // The number of frames actually rendered.
+ int64_t frameRenderedCount;
+
+ // The number of frames dropped - frames that were released but never rendered.
+ int64_t frameDroppedCount;
+
+ // The number of frames that were intentionally dropped/skipped by the app.
+ int64_t frameSkippedCount;
+
+ // The frame rate as detected by looking at the position timestamp from the content stream.
+ float contentFrameRate;
+
+ // The frame rate as detected by looking at the desired render time passed in by the app.
+ float desiredFrameRate;
+
+ // The frame rate as detected by looking at the actual render time, as returned by the system
+ // post-render.
+ float actualFrameRate;
+
+ // A histogram of the durations of freezes due to dropped/skipped frames.
+ MediaHistogram<int32_t> freezeDurationMsHistogram;
+ // The computed overall freeze score using the above histogram and score conversion table.
+ int32_t freezeScore;
+ // The computed percentage of total playback duration that was frozen.
+ float freezeRate;
+
+ // A histogram of the durations between each freeze.
+ MediaHistogram<int32_t> freezeDistanceMsHistogram;
+
+ // A histogram of the judder scores.
+ MediaHistogram<int32_t> judderScoreHistogram;
+ // The computed overall judder score using the above histogram and score conversion table.
+ int32_t judderScore;
+ // The computed percentage of total frames that had judder.
+ float judderRate;
+};
+
+///////////////////////////////////////////////////////
+// This class analyzes various timestamps related to video rendering to compute a set of metrics
+// that attempt to capture the quality of the user experience during video playback.
+//
+// The following timestamps (in microseconds) are analyzed to compute these metrics:
+// * The content timestamp found in the content stream, indicating the position of each video
+// frame.
+// * The desired timestamp passed in by the app, indicating at what point in time in the future
+// the app would like the frame to be rendered.
+// * The actual timestamp passed in by the display subsystem, indicating the point in time at
+// which the frame was actually rendered.
+//
+// Core to the algorithms are deriving frame durations based on these timestamps and determining
+// the result of each video frame in the content stream:
+// * skipped: the app didn't want to render the frame
+// * dropped: the display subsystem could not render the frame in time
+// * rendered: the display subsystem rendered the frame
+//
+class VideoRenderQualityTracker {
+public:
+ // Configurable elements of the metrics algorithms
+ class Configuration {
+ public:
+ Configuration();
+
+ // Whether or not frame render quality is tracked.
+ bool enabled;
+
+ // Whether or not frames that are intentionally not rendered by the app should be considered
+ // as dropped.
+ bool areSkippedFramesDropped;
+
+ // How large of a jump forward in content time is allowed before it is considered a
+ // discontinuity (seek/playlist) and various internal states are reset.
+ int32_t maxExpectedContentFrameDurationUs;
+
+ // How much tolerance in frame duration when considering whether or not two frames have the
+ // same frame rate.
+ int32_t frameRateDetectionToleranceUs;
+
+ // A skip forward in content time could occur during frame drops of live content. Therefore
+ // the content frame duration and the app-desired frame duration are compared using this
+ // tolerance to determine whether the app is intentionally seeking forward or whether the
+ // skip forward in content time is due to frame drops. If the app-desired frame duration is
+ // short, but the content frame duration is large, it is assumed the app is intentionally
+ // seeking forward.
+ int32_t contentTimeAdvancedForLiveContentToleranceUs;
+
+ // Freeze configuration
+ //
+ // The values used to distribute freeze durations across a histogram.
+ std::vector<int32_t> freezeDurationMsHistogramBuckets;
+ // The values used to compare against freeze duration counts when determining an overall
+ // score.
+ std::vector<int64_t> freezeDurationMsHistogramToScore;
+ // The values used to distribute distances between freezes across a histogram.
+ std::vector<int32_t> freezeDistanceMsHistogramBuckets;
+
+ // Judder configuration
+ //
+ // A judder error lower than this value is not scored as judder.
+ int32_t judderErrorToleranceUs;
+ // The values used to distribute judder scores across a histogram.
+ std::vector<int32_t> judderScoreHistogramBuckets;
+ // The values used to compare against judder score histogram counts when determining an
+ // overall score.
+ std::vector<int32_t> judderScoreHistogramToScore;
+ };
+
+ VideoRenderQualityTracker();
+ VideoRenderQualityTracker(const Configuration &configuration);
+
+ // Called when the app has intentionally decided not to render this frame.
+ void onFrameSkipped(int64_t contentTimeUs);
+
+ // Called when the app has requested the frame to be rendered as soon as possible.
+ void onFrameReleased(int64_t contentTimeUs);
+
+ // Called when the app has requested the frame to be rendered at a specific point in time in the
+ // future.
+ void onFrameReleased(int64_t contentTimeUs, int64_t desiredRenderTimeNs);
+
+ // Called when the system has detected that the frame has actually been rendered to the display.
+ void onFrameRendered(int64_t contentTimeUs, int64_t actualRenderTimeNs);
+
+ // Retrieve the metrics.
+ const VideoRenderQualityMetrics &getMetrics();
+
+ // Called when a change in codec state will result in a content discontinuity - e.g. flush.
+ void resetForDiscontinuity();
+
+ // Clear out all metrics and tracking - e.g. codec reconfigured.
+ void clear();
+
+private:
+ // Tracking of frames that are pending to be rendered to the display.
+ struct FrameInfo {
+ int64_t contentTimeUs;
+ int64_t desiredRenderTimeUs;
+ };
+
+ // Historic tracking of frame durations
+ struct FrameDurationUs {
+ static const int SIZE = 5;
+
+ FrameDurationUs() {
+ for (int i = 0; i < SIZE; ++i) {
+ durationUs[i] = -1;
+ }
+ priorTimestampUs = -1;
+ }
+
+ int32_t &operator[](int index) {
+ assert(index < SIZE);
+ return durationUs[index];
+ }
+
+ const int32_t &operator[](int index) const {
+ assert(index < SIZE);
+ return durationUs[index];
+ }
+
+ // The duration of the past N frames.
+ int32_t durationUs[SIZE];
+
+ // The timestamp of the previous frame.
+ int64_t priorTimestampUs;
+ };
+
+ // Configure histograms for the metrics.
+ static void configureHistograms(VideoRenderQualityMetrics &m, const Configuration &c);
+
+ // The current time in microseconds.
+ static int64_t nowUs();
+
+ // A new frame has been processed, so update the frame durations based on the new frame
+ // timestamp.
+ static void updateFrameDurations(FrameDurationUs &durationUs, int64_t newTimestampUs);
+
+ // Update a frame rate if, and only if, one can be detected.
+ static void updateFrameRate(float &frameRate, const FrameDurationUs &durationUs,
+ const Configuration &c);
+
+ // Examine the past few frames to detect the frame rate based on each frame's render duration.
+ static float detectFrameRate(const FrameDurationUs &durationUs, const Configuration &c);
+
+ // Determine whether or not 3:2 pulldown for displaying 24fps content on 60Hz displays is
+ // occurring.
+ static bool is32pulldown(const FrameDurationUs &durationUs, const Configuration &c);
+
+ // Process a frame freeze.
+ static void processFreeze(int64_t actualRenderTimeUs, int64_t lastRenderTimeUs,
+ int64_t lastFreezeEndTimeUs, VideoRenderQualityMetrics &m);
+
+ // Compute a judder score for the previously-rendered frame.
+ static int64_t computePreviousJudderScore(const FrameDurationUs &actualRenderDurationUs,
+ const FrameDurationUs &contentRenderDurationUs,
+ const Configuration &c);
+
+ // Check to see if a discontinuity has occurred by examining the content time and the
+ // app-desired render time. If so, reset some internal state.
+ bool resetIfDiscontinuity(int64_t contentTimeUs, int64_t desiredRenderTimeUs);
+
+ // Update the metrics because a skipped frame was detected.
+ void processMetricsForSkippedFrame(int64_t contentTimeUs);
+
+ // Update the metrics because a dropped frame was detected.
+ void processMetricsForDroppedFrame(int64_t contentTimeUs, int64_t desiredRenderTimeUs);
+
+ // Update the metrics because a rendered frame was detected.
+ void processMetricsForRenderedFrame(int64_t contentTimeUs, int64_t desiredRenderTimeUs,
+ int64_t actualRenderTimeUs);
+
+ // Configurable elements of the metrics algorithms.
+ const Configuration mConfiguration;
+
+ // Metrics are updated every time a frame event occurs - skipped, dropped, rendered.
+ VideoRenderQualityMetrics mMetrics;
+
+ // The most recently processed timestamp referring to the position in the content stream.
+ int64_t mLastContentTimeUs;
+
+ // The most recently processed timestamp referring to the wall clock time a frame was rendered.
+ int64_t mLastRenderTimeUs;
+
+ // The most recent timestamp of the first frame rendered after the freeze.
+ int64_t mLastFreezeEndTimeUs;
+
+ // The render duration of the playback.
+ int64_t mRenderDurationMs;
+
+ // Frames skipped at the end of playback shouldn't really be considered skipped, therefore keep
+ // a list of the frames, and process them as skipped frames the next time a frame is rendered.
+ std::list<int64_t> mPendingSkippedFrameContentTimeUsList;
+
+ // Since the system only signals when a frame is rendered, dropped frames are detected by
+ // checking to see if the next expected frame is rendered. If not, it is considered dropped.
+ std::queue<FrameInfo> mNextExpectedRenderedFrameQueue;
+
+ // Frame durations derived from timestamps encoded into the content stream. These are the
+ // durations that each frame is supposed to be rendered for.
+ FrameDurationUs mContentFrameDurationUs;
+
+ // Frame durations derived from timestamps passed in by the app, indicating the wall clock time
+ // at which the app would like to have the frame rendered.
+ FrameDurationUs mDesiredFrameDurationUs;
+
+ // Frame durations derived from timestamps captured by the display subsystem, indicating the
+ // wall clock time at which the frame is actually rendered.
+ FrameDurationUs mActualFrameDurationUs;
+};
+
+} // namespace android
+
+#endif // VIDEO_RENDER_QUALITY_TRACKER_H_
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index e6b67ce..7f89605 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -55,3 +55,20 @@
"-Wall",
],
}
+
+cc_test {
+ name: "VideoRenderQualityTracker_test",
+ srcs: ["VideoRenderQualityTracker_test.cpp"],
+
+ // TODO(b/234833109): Figure out why shared_libs linkage causes stack corruption
+ static_libs: [
+ "libstagefright",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+}
diff --git a/media/libstagefright/tests/VideoRenderQualityTracker_test.cpp b/media/libstagefright/tests/VideoRenderQualityTracker_test.cpp
new file mode 100644
index 0000000..9f14663
--- /dev/null
+++ b/media/libstagefright/tests/VideoRenderQualityTracker_test.cpp
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "VideoRenderQualityTracker_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/VideoRenderQualityTracker.h>
+
+namespace android {
+
+using Metrics = VideoRenderQualityMetrics;
+using Configuration = VideoRenderQualityTracker::Configuration;
+
+static constexpr float FRAME_RATE_UNDETERMINED = VideoRenderQualityMetrics::FRAME_RATE_UNDETERMINED;
+static constexpr float FRAME_RATE_24_3_2_PULLDOWN =
+ VideoRenderQualityMetrics::FRAME_RATE_24_3_2_PULLDOWN;
+
+class Helper {
+public:
+ Helper(double contentFrameDurationMs, const Configuration &configuration) :
+ mVideoRenderQualityTracker(configuration) {
+ mContentFrameDurationUs = int64_t(contentFrameDurationMs * 1000);
+ mMediaTimeUs = 0;
+ mClockTimeNs = 0;
+ }
+
+ void changeContentFrameDuration(double contentFrameDurationMs) {
+ mContentFrameDurationUs = int64_t(contentFrameDurationMs * 1000);
+ }
+
+ template<typename T>
+ void render(std::initializer_list<T> renderDurationMsList) {
+ for (auto renderDurationMs : renderDurationMsList) {
+ mVideoRenderQualityTracker.onFrameReleased(mMediaTimeUs);
+ mVideoRenderQualityTracker.onFrameRendered(mMediaTimeUs, mClockTimeNs);
+ mMediaTimeUs += mContentFrameDurationUs;
+ mClockTimeNs += int64_t(renderDurationMs * 1000 * 1000);
+ }
+ }
+
+ void render(int numFrames, float durationMs = -1) {
+ int64_t durationUs = durationMs < 0 ? mContentFrameDurationUs : durationMs * 1000;
+ for (int i = 0; i < numFrames; ++i) {
+ mVideoRenderQualityTracker.onFrameReleased(mMediaTimeUs);
+ mVideoRenderQualityTracker.onFrameRendered(mMediaTimeUs, mClockTimeNs);
+ mMediaTimeUs += mContentFrameDurationUs;
+ mClockTimeNs += durationUs * 1000;
+ }
+ }
+
+ void skip(int numFrames) {
+ for (int i = 0; i < numFrames; ++i) {
+ mVideoRenderQualityTracker.onFrameSkipped(mMediaTimeUs);
+ mMediaTimeUs += mContentFrameDurationUs;
+ mClockTimeNs += mContentFrameDurationUs * 1000;
+ }
+ }
+
+ void drop(int numFrames) {
+ for (int i = 0; i < numFrames; ++i) {
+ mVideoRenderQualityTracker.onFrameReleased(mMediaTimeUs);
+ mMediaTimeUs += mContentFrameDurationUs;
+ mClockTimeNs += mContentFrameDurationUs * 1000;
+ }
+ }
+
+ const Metrics & getMetrics() {
+ return mVideoRenderQualityTracker.getMetrics();
+ }
+
+private:
+ VideoRenderQualityTracker mVideoRenderQualityTracker;
+ int64_t mContentFrameDurationUs;
+ int64_t mMediaTimeUs;
+ int64_t mClockTimeNs;
+};
+
+class VideoRenderQualityTrackerTest : public ::testing::Test {
+public:
+ VideoRenderQualityTrackerTest() {}
+};
+
+TEST_F(VideoRenderQualityTrackerTest, countsReleasedFrames) {
+ Configuration c;
+ Helper h(16.66, c);
+ h.drop(10);
+ h.render({16.66, 16.66, 16.66});
+ h.skip(10); // skipped frames aren't released so they are not counted
+ h.render({16.66, 16.66, 16.66, 16.66});
+ h.drop(10);
+ EXPECT_EQ(27, h.getMetrics().frameReleasedCount);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, countsSkippedFrames) {
+ Configuration c;
+ Helper h(16.66, c);
+ h.drop(10); // dropped frames are not counted
+ h.skip(10); // frames skipped before rendering a frame are not counted
+ h.render({16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.drop(10); // dropped frames are not counted
+ h.skip(10);
+ h.render({16.66, 16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.skip(10); // frames skipped at the end of playback are not counted
+ h.drop(10); // dropped frames are not counted
+ EXPECT_EQ(10, h.getMetrics().frameSkippedCount);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenSkippedFramesAreDropped_countsDroppedFrames) {
+ Configuration c;
+ c.areSkippedFramesDropped = true;
+ Helper h(16.66, c);
+ h.skip(10); // skipped frames at the beginning of playback are not counted
+ h.drop(10);
+ h.skip(10); // skipped frames at the beginning of playback after dropped frames are not counted
+ h.render({16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.drop(10);
+ h.skip(10);
+ h.render({16.66, 16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.drop(10); // dropped frames at the end of playback are not counted
+ h.skip(10); // skipped frames at the end of playback are not counted
+ EXPECT_EQ(30, h.getMetrics().frameDroppedCount);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenNotSkippedFramesAreDropped_countsDroppedFrames) {
+ Configuration c;
+ c.areSkippedFramesDropped = false;
+ Helper h(16.66, c);
+ h.skip(10); // skipped frames at the beginning of playback are not counted
+ h.drop(10);
+ h.skip(10); // skipped frames at the beginning of playback after dropped frames are not counted
+ h.render({16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.drop(10);
+ h.skip(10); // skipped frames are not counted
+ h.render({16.66, 16.66, 16.66, 16.66}); // rendered frames are not counted
+ h.drop(10); // dropped frames at the end of playback are not counted
+ h.skip(10); // skipped frames at the end of playback are not counted
+ EXPECT_EQ(20, h.getMetrics().frameDroppedCount);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, countsRenderedFrames) {
+ Configuration c;
+ Helper h(16.66, c);
+ h.drop(10); // dropped frames are not counted
+ h.render({16.66, 16.66, 16.66});
+ h.skip(10); // skipped frames are not counted
+ h.render({16.66, 16.66, 16.66, 16.66});
+ h.drop(10); // dropped frames are not counted
+ EXPECT_EQ(7, h.getMetrics().frameRenderedCount);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, detectsFrameRate) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(16.66, c);
+ h.render({16.6, 16.7, 16.6, 16.7});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 60.0, 0.5);
+ EXPECT_NEAR(h.getMetrics().actualFrameRate, 60.0, 0.5);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenLowTolerance_doesntDetectFrameRate) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 0;
+ Helper h(16.66, c);
+ h.render({16.6, 16.7, 16.6, 16.7});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 60.0, 0.5);
+ EXPECT_EQ(h.getMetrics().actualFrameRate, FRAME_RATE_UNDETERMINED);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenFrameRateDestabilizes_detectsFrameRate) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(16.66, c);
+ h.render({16.6, 16.7, 16.6, 16.7});
+ h.render({30.0, 16.6, 30.0, 16.6});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 60.0, 0.5);
+ EXPECT_NEAR(h.getMetrics().actualFrameRate, 60.0, 0.5);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, detects32Pulldown) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(41.66, c);
+ h.render({49.9, 33.2, 50.0, 33.4, 50.1, 33.2});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 24.0, 0.5);
+ EXPECT_EQ(h.getMetrics().actualFrameRate, FRAME_RATE_24_3_2_PULLDOWN);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenBad32Pulldown_doesntDetect32Pulldown) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(41.66, c);
+ h.render({50.0, 33.33, 33.33, 50.00, 33.33, 50.00});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 24.0, 0.5);
+ EXPECT_EQ(h.getMetrics().actualFrameRate, FRAME_RATE_UNDETERMINED);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenFrameRateChanges_detectsMostRecentFrameRate) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(16.66, c);
+ h.render({16.6, 16.7, 16.6, 16.7});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 60.0, 0.5);
+ EXPECT_NEAR(h.getMetrics().actualFrameRate, 60.0, 0.5);
+ h.changeContentFrameDuration(41.66);
+ h.render({50.0, 33.33, 50.0, 33.33, 50.0, 33.33});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 24.0, 0.5);
+ EXPECT_EQ(h.getMetrics().actualFrameRate, FRAME_RATE_24_3_2_PULLDOWN);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenFrameRateIsUnstable_doesntDetectFrameRate) {
+ Configuration c;
+ c.frameRateDetectionToleranceUs = 2 * 1000; // 2 ms
+ Helper h(16.66, c);
+ h.render({16.66, 30.0, 16.66, 30.0, 16.66});
+ EXPECT_NEAR(h.getMetrics().contentFrameRate, 60.0, 0.5);
+ EXPECT_EQ(h.getMetrics().actualFrameRate, FRAME_RATE_UNDETERMINED);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, capturesFreezeDurationHistogram) {
+ Configuration c;
+ // +17 because freeze durations include the render time of the previous frame
+ c.freezeDurationMsHistogramBuckets = {2 * 17 + 17, 3 * 17 + 17, 6 * 17 + 17};
+ Helper h(17, c);
+ h.render(1);
+ h.drop(1); // below
+ h.render(1);
+ h.drop(3); // bucket 1
+ h.render(1);
+ h.drop(2); // bucket 0
+ h.render(1);
+ h.drop(4); // bucket 1
+ h.render(1);
+ h.drop(2); // bucket 0
+ h.render(1);
+ h.drop(5); // bucket 1
+ h.render(1);
+ h.drop(10); // above
+ h.render(1);
+ h.drop(15); // above
+ h.render(1);
+ EXPECT_EQ(h.getMetrics().freezeDurationMsHistogram.emit(), "1{2,3}2");
+ EXPECT_EQ(h.getMetrics().freezeDurationMsHistogram.getCount(), 8);
+ // the smallest frame drop was 1, +17 because it includes the previous frame render time
+ EXPECT_EQ(h.getMetrics().freezeDurationMsHistogram.getMin(), 1 * 17 + 17);
+ // the largest frame drop was 10, +17 because it includes the previous frame render time
+ EXPECT_EQ(h.getMetrics().freezeDurationMsHistogram.getMax(), 15 * 17 + 17);
+ // total frame drop count, multiplied by 17, plus 17 for each occurrence, divided by occurrences
+ EXPECT_EQ(h.getMetrics().freezeDurationMsHistogram.getAvg(), ((1 + 3 + 2 + 4 + 2 + 5 + 10 + 15)
+ * 17 + 8 * 17) / 8);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, capturesFreezeDistanceHistogram) {
+ Configuration c;
+ c.freezeDistanceMsHistogramBuckets = {1 * 17, 5 * 17, 6 * 17};
+ Helper h(17, c);
+ h.render(1);
+ h.drop(1);
+ h.render(5); // bucket 0
+ h.drop(3);
+ h.render(3); // bucket 0
+ h.drop(2);
+ h.render(9); // above
+ h.drop(5);
+ h.render(1); // below
+ h.drop(2);
+ h.render(6); // bucket 1
+ h.drop(4);
+ h.render(12); // above
+ h.drop(2);
+ h.render(1);
+ EXPECT_EQ(h.getMetrics().freezeDistanceMsHistogram.emit(), "1{2,1}2");
+ EXPECT_EQ(h.getMetrics().freezeDistanceMsHistogram.getCount(), 6);
+ // the smallest render between drops was 1, -17 because the last frame rendered also froze
+ EXPECT_EQ(h.getMetrics().freezeDistanceMsHistogram.getMin(), 1 * 17 - 17);
+ // the largest render between drops was 12, -17 because the last frame rendered also froze
+ EXPECT_EQ(h.getMetrics().freezeDistanceMsHistogram.getMax(), 12 * 17 - 17);
+ // total render count between, multiplied by 17, minus 17 for each occurrence, divided by
+ // occurrences
+ EXPECT_EQ(h.getMetrics().freezeDistanceMsHistogram.getAvg(), ((5 + 3 + 9 + 1 + 6 + 12) * 17 -
+ 6 * 17) / 6);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when60hz_hasNoJudder) {
+ Configuration c;
+ Helper h(16.66, c); // ~60Hz
+ h.render({16.66, 16.66, 16.66, 16.66, 16.66, 16.66, 16.66});
+ EXPECT_LE(h.getMetrics().judderScoreHistogram.getMax(), 0);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenSmallVariance60hz_hasNoJudder) {
+ Configuration c;
+ Helper h(16.66, c); // ~60Hz
+ h.render({14, 18, 14, 18, 14, 18, 14, 18});
+ EXPECT_LE(h.getMetrics().judderScoreHistogram.getMax(), 0);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenBadSmallVariance60Hz_hasJudder) {
+ Configuration c;
+ Helper h(16.66, c); // ~60Hz
+ h.render({14, 18, 14, /* no 18 between 14s */ 14, 18, 14, 18});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 1);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when30Hz_hasNoJudder) {
+ Configuration c;
+ Helper h(33.33, c);
+ h.render({33.33, 33.33, 33.33, 33.33, 33.33, 33.33});
+ EXPECT_LE(h.getMetrics().judderScoreHistogram.getMax(), 0);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenSmallVariance30Hz_hasNoJudder) {
+ Configuration c;
+ Helper h(33.33, c);
+ h.render({29.0, 35.0, 29.0, 35.0, 29.0, 35.0});
+ EXPECT_LE(h.getMetrics().judderScoreHistogram.getMax(), 0);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenBadSmallVariance30Hz_hasJudder) {
+ Configuration c;
+ Helper h(33.33, c);
+ h.render({29.0, 35.0, 29.0, /* no 35 between 29s */ 29.0, 35.0, 29.0, 35.0});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 1);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenBad30HzTo60Hz_hasJudder) {
+ Configuration c;
+ Helper h(33.33, c);
+ h.render({33.33, 33.33, 50.0, /* frame stayed 1 vsync too long */ 16.66, 33.33, 33.33});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 2); // note: 2 counts of judder
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when24HzTo60Hz_hasNoJudder) {
+ Configuration c;
+ Helper h(41.66, c);
+ h.render({50.0, 33.33, 50.0, 33.33, 50.0, 33.33});
+ EXPECT_LE(h.getMetrics().judderScoreHistogram.getMax(), 0);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when25HzTo60Hz_hasJudder) {
+ Configuration c;
+ Helper h(40, c);
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ EXPECT_GT(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when50HzTo60Hz_hasJudder) {
+ Configuration c;
+ Helper h(20, c);
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ EXPECT_GT(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, when30HzTo50Hz_hasJudder) {
+ Configuration c;
+ Helper h(33.33, c);
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ EXPECT_GT(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenSmallVariancePulldown24HzTo60Hz_hasNoJudder) {
+ Configuration c;
+ Helper h(41.66, c);
+ h.render({52.0, 31.33, 52.0, 31.33, 52.0, 31.33});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 0);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, whenBad24HzTo60Hz_hasJudder) {
+ Configuration c;
+ Helper h(41.66, c);
+ h.render({50.0, 33.33, 50.0, 33.33, /* no 50 between 33s */ 33.33, 50.0, 33.33});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 1);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, capturesJudderScoreHistogram) {
+ Configuration c;
+ c.judderErrorToleranceUs = 2000;
+ c.judderScoreHistogramBuckets = {1, 5, 8};
+ Helper h(16, c);
+ h.render({16, 16, 23, 16, 16, 10, 16, 4, 16, 20, 16, 16});
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.emit(), "0{1,2}1");
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getCount(), 4);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getMin(), 4);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getMax(), 12);
+ EXPECT_EQ(h.getMetrics().judderScoreHistogram.getAvg(), (7 + 6 + 12 + 4) / 4);
+}
+
+TEST_F(VideoRenderQualityTrackerTest, ranksJudderScoresInOrder) {
+ // Each rendering is ranked from best to worst from a user experience
+ Configuration c;
+ c.judderErrorToleranceUs = 2000;
+ c.judderScoreHistogramBuckets = {0, 1000};
+ int64_t previousScore = 0;
+
+ // 30fps poorly displayed at 60Hz
+ {
+ Helper h(33.33, c);
+ h.render({33.33, 33.33, 16.66, 50.0, 33.33, 33.33});
+ int64_t scoreBad30fpsTo60Hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(scoreBad30fpsTo60Hz, previousScore);
+ previousScore = scoreBad30fpsTo60Hz;
+ }
+
+ // 25fps displayed at 60hz
+ {
+ Helper h(40, c);
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ h.render({33.33, 33.33, 50.0});
+ int64_t score25fpsTo60hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(score25fpsTo60hz, previousScore);
+ previousScore = score25fpsTo60hz;
+ }
+
+ // 50fps displayed at 60hz
+ {
+ Helper h(20, c);
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ h.render({16.66, 16.66, 16.66, 33.33});
+ int64_t score50fpsTo60hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(score50fpsTo60hz, previousScore);
+ previousScore = score50fpsTo60hz;
+ }
+
+ // 24fps poorly displayed at 60Hz
+ {
+ Helper h(41.66, c);
+ h.render({50.0, 33.33, 50.0, 33.33, 33.33, 50.0, 33.33});
+ int64_t scoreBad24HzTo60Hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(scoreBad24HzTo60Hz, previousScore);
+ previousScore = scoreBad24HzTo60Hz;
+ }
+
+ // 30fps displayed at 50hz
+ {
+ Helper h(33.33, c);
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ h.render({40.0, 40.0, 40.0, 60.0});
+ int64_t score30fpsTo50hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(score30fpsTo50hz, previousScore);
+ previousScore = score30fpsTo50hz;
+ }
+
+ // 24fps displayed at 50Hz
+ {
+ Helper h(41.66, c);
+ h.render(40.0, 11);
+ h.render(60.0, 1);
+ h.render(40.0, 11);
+ h.render(60.0, 1);
+ h.render(40.0, 11);
+ int64_t score24HzTo50Hz = h.getMetrics().judderScoreHistogram.getMax();
+ EXPECT_GT(score24HzTo50Hz, previousScore);
+ previousScore = score24HzTo50Hz;
+ }
+}
+
+} // android
diff --git a/media/libstagefright/tests/mediacodec/Android.bp b/media/libstagefright/tests/mediacodec/Android.bp
index 9cdc6d4..23882ea 100644
--- a/media/libstagefright/tests/mediacodec/Android.bp
+++ b/media/libstagefright/tests/mediacodec/Android.bp
@@ -70,4 +70,4 @@
test_suites: [
"general-tests",
],
-}
+}
\ No newline at end of file
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ab197f8..325adfa 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -58,6 +58,7 @@
#include <audiomanager/IAudioManager.h>
#include "AudioFlinger.h"
+#include "EffectConfiguration.h"
#include "NBAIO_Tee.h"
#include "PropertyUtils.h"
@@ -372,7 +373,7 @@
BatteryNotifier::getInstance().noteResetAudio();
mDevicesFactoryHal = DevicesFactoryHalInterface::create();
- mEffectsFactoryHal = EffectsFactoryHalInterface::create();
+ mEffectsFactoryHal = audioflinger::EffectConfiguration::getEffectsFactoryHal();
mMediaLogNotifier->run("MediaLogNotifier");
std::vector<pid_t> halPids;
@@ -841,6 +842,8 @@
for (const auto& vibratorInfo : mAudioVibratorInfos) {
dprintf(fd, " - %s\n", vibratorInfo.toString().c_str());
}
+ dprintf(fd, "Bluetooth latency modes are %senabled\n",
+ mBluetoothLatencyModesEnabled ? "" : "not ");
}
void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args __unused)
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 2f61a01..4fb6138 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -23,6 +23,7 @@
#include <audio_utils/primitives.h>
#include "AudioFlinger.h"
+#include "EffectConfiguration.h"
#include <media/audiohal/EffectsFactoryHalInterface.h>
// ----------------------------------------------------------------------------
@@ -111,14 +112,16 @@
status_t AudioFlinger::DeviceEffectManager::checkEffectCompatibility(
const effect_descriptor_t *desc) {
- sp<EffectsFactoryHalInterface> effectsFactory = mAudioFlinger.getEffectsFactory();
+ const sp<EffectsFactoryHalInterface> effectsFactory =
+ audioflinger::EffectConfiguration::getEffectsFactoryHal();
if (effectsFactory == nullptr) {
return BAD_VALUE;
}
- static AudioHalVersionInfo sMinDeviceEffectHalVersion =
+ static const AudioHalVersionInfo sMinDeviceEffectHalVersion =
AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 6, 0);
- AudioHalVersionInfo halVersion = effectsFactory->getHalVersion();
+ static const AudioHalVersionInfo halVersion =
+ audioflinger::EffectConfiguration::getAudioHalVersionInfo();
// We can trust AIDL generated AudioHalVersionInfo comparison operator (based on std::tie) as
// long as the type, major and minor sequence doesn't change in the definition.
@@ -137,7 +140,8 @@
const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t deviceId,
sp<EffectHalInterface> *effect) {
status_t status = NO_INIT;
- sp<EffectsFactoryHalInterface> effectsFactory = mAudioFlinger.getEffectsFactory();
+ const sp<EffectsFactoryHalInterface> effectsFactory =
+ audioflinger::EffectConfiguration::getEffectsFactoryHal();
if (effectsFactory != 0) {
status = effectsFactory->createEffect(
pEffectUuid, sessionId, AUDIO_IO_HANDLE_NONE, deviceId, effect);
diff --git a/services/audioflinger/EffectConfiguration.h b/services/audioflinger/EffectConfiguration.h
new file mode 100644
index 0000000..2f07fa2
--- /dev/null
+++ b/services/audioflinger/EffectConfiguration.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+namespace android::audioflinger {
+
+/**
+ * Effect Configuration abstraction and helper class.
+ */
+class EffectConfiguration {
+public:
+ static bool isHidl() {
+ static const bool isHidl = getAudioHalVersionInfo().isHidl();
+ return isHidl;
+ }
+
+ static const sp<EffectsFactoryHalInterface>& getEffectsFactoryHal() {
+ static const auto effectsFactoryHal = EffectsFactoryHalInterface::create();
+ return effectsFactoryHal;
+ }
+
+ static const detail::AudioHalVersionInfo& getAudioHalVersionInfo() {
+ static const auto audioHalVersionInfo = getEffectsFactoryHal() ?
+ getEffectsFactoryHal()->getHalVersion() : detail::AudioHalVersionInfo{
+ detail::AudioHalVersionInfo::Type::HIDL, 0 /* major */, 0 /* minor */ };
+ return audioHalVersionInfo;
+ }
+};
+
+} // namespace android::audioflinger
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index f324408..77aa804 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -44,6 +44,7 @@
#include <mediautils/TimeCheck.h>
#include "AudioFlinger.h"
+#include "EffectConfiguration.h"
// ----------------------------------------------------------------------------
@@ -65,6 +66,7 @@
namespace android {
using aidl_utils::statusTFromBinderStatus;
+using audioflinger::EffectConfiguration;
using binder::Status;
namespace {
@@ -982,6 +984,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
+ EffectConfiguration::isHidl() && // only HIDL effects support channel conversion
mIsOutput &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
@@ -1012,7 +1015,8 @@
mSupportsFloat = true;
}
- if (status != NO_ERROR) {
+ // only HIDL effects support integer conversion.
+ if (status != NO_ERROR && EffectConfiguration::isHidl()) {
ALOGV("EFFECT_CMD_SET_CONFIG failed with float format, retry with int16_t.");
mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -3032,7 +3036,8 @@
const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t deviceId,
sp<EffectHalInterface> *effect) {
status_t status = NO_INIT;
- sp<EffectsFactoryHalInterface> effectsFactory = mAudioFlinger.getEffectsFactory();
+ const sp<EffectsFactoryHalInterface> effectsFactory =
+ EffectConfiguration::getEffectsFactoryHal();
if (effectsFactory != 0) {
status = effectsFactory->createEffect(pEffectUuid, sessionId, io(), deviceId, effect);
}
@@ -3375,8 +3380,18 @@
ALOGV("%s type %d device type %d address %s device ID %d patch.isSoftware() %d",
__func__, port->type, port->ext.device.type,
port->ext.device.address, port->id, patch.isSoftware());
- if (port->type != AUDIO_PORT_TYPE_DEVICE || port->ext.device.type != mDevice.mType
- || port->ext.device.address != mDevice.address()) {
+ if (port->type != AUDIO_PORT_TYPE_DEVICE || port->ext.device.type != mDevice.mType ||
+ port->ext.device.address != mDevice.address()) {
+ return NAME_NOT_FOUND;
+ }
+ if (((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) &&
+ (audio_port_config_has_input_direction(port))) {
+ ALOGI("%s don't create postprocessing effect on record port", __func__);
+ return NAME_NOT_FOUND;
+ }
+ if (((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) &&
+ (!audio_port_config_has_input_direction(port))) {
+ ALOGI("%s don't create preprocessing effect on playback port", __func__);
return NAME_NOT_FOUND;
}
status_t status = NAME_NOT_FOUND;
@@ -3407,20 +3422,12 @@
} else if (patch.isSoftware() || patch.thread().promote() != nullptr) {
sp <ThreadBase> thread;
if (audio_port_config_has_input_direction(port)) {
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGI("%s don't create postprocessing effect on record thread", __func__);
- return NAME_NOT_FOUND;
- }
if (patch.isSoftware()) {
thread = patch.mRecord.thread();
} else {
thread = patch.thread().promote();
}
} else {
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
- ALOGI("%s don't create preprocessing effect on playback thread", __func__);
- return NAME_NOT_FOUND;
- }
if (patch.isSoftware()) {
thread = patch.mPlayback.thread();
} else {
@@ -3436,6 +3443,7 @@
} else {
status = BAD_VALUE;
}
+
if (status == NO_ERROR || status == ALREADY_EXISTS) {
Status bs;
if (isEnabled()) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 8d0c648..293117f 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6320,6 +6320,12 @@
} else {
dprintf(fd, " No FastMixer\n");
}
+
+ dprintf(fd, "Bluetooth latency modes are %senabled\n",
+ mBluetoothLatencyModesEnabled ? "" : "not ");
+ dprintf(fd, "HAL does %ssupport Bluetooth latency modes\n", mOutput != nullptr &&
+ mOutput->audioHwDev->supportsBluetoothVariableLatency() ? "" : "not ");
+ dprintf(fd, "Supported latency modes: %s\n", toString(mSupportedLatencyModes).c_str());
}
uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -9985,6 +9991,9 @@
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
audio_io_handle_t io = mId;
+ AttributionSourceState adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ client.attributionSource);
+
if (isOutput()) {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = mSampleRate;
@@ -10000,7 +10009,7 @@
ret = AudioSystem::getOutputForAttr(&mAttr, &io,
mSessionId,
&stream,
- client.attributionSource,
+ adjAttributionSource,
&config,
flags,
&deviceId,
@@ -10019,7 +10028,7 @@
ret = AudioSystem::getInputForAttr(&mAttr, &io,
RECORD_RIID_INVALID,
mSessionId,
- client.attributionSource,
+ adjAttributionSource,
&config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ,
&deviceId,
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index dad663e..2eb0177 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -92,7 +92,7 @@
template <typename Key>
Element<Key> *Engine::getFromCollection(const Key &key) const
{
- const Collection<Key> collection = getCollection<Key>();
+ const Collection<Key> &collection = getCollection<Key>();
return collection.get(key);
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ab1a050..f093e68 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -4373,6 +4373,11 @@
ALOGE("%s the requested device is currently unavailable", __func__);
return BAD_VALUE;
}
+ if (!audio_is_usb_out_device(deviceDescriptor->type())) {
+ ALOGE("%s the requested device(type=%#x) is not usb device", __func__,
+ deviceDescriptor->type());
+ return BAD_VALUE;
+ }
for (const auto& hwModule : mHwModules) {
for (const auto& curProfile : hwModule->getOutputProfiles()) {
if (curProfile->supportsDevice(deviceDescriptor)) {
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index b111865..70a1785 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -924,8 +924,7 @@
for (auto& stream : processingChain) {
auto effectDescs = std::make_unique<EffectDescVector>();
for (auto& effect : stream.effects) {
- effectDescs->mEffects.add(
- new EffectDesc{effect.get().name.c_str(), effect.get().uuid});
+ effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
}
streams.add(stream.type, effectDescs.release());
}
@@ -935,8 +934,7 @@
for (auto& deviceProcess : processingChain) {
auto effectDescs = std::make_unique<EffectDescVector>();
for (auto& effect : deviceProcess.effects) {
- effectDescs->mEffects.add(
- new EffectDesc{effect.get().name.c_str(), effect.get().uuid});
+ effectDescs->mEffects.add(new EffectDesc{effect->name.c_str(), effect->uuid});
}
auto deviceEffects = std::make_unique<DeviceEffects>(
std::move(effectDescs), deviceProcess.type, deviceProcess.address);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 0d12060..2e7b3ff 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -818,8 +818,29 @@
Mutex::Autolock _l(mLock);
+ ALOGW_IF(client->silenced, "startInput on silenced input for port %d, uid %d. Unsilencing.",
+ portIdAidl,
+ client->attributionSource.uid);
+
+ if (client->active) {
+ ALOGE("Client should never be active before startInput. Uid %d port %d",
+ client->attributionSource.uid, portId);
+ finishRecording(client->attributionSource, client->attributes.source);
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+
+ // Force the possibly silenced client to be unsilenced since we just called
+ // startRecording (i.e. we have assumed it is unsilenced).
+ // At this point in time, the client is inactive, so no calls to appops are sent in
+ // setAppState_l.
+ // This ensures existing clients have the same behavior as new clients (starting unsilenced).
+ // TODO(b/282076713)
+ setAppState_l(client, APP_STATE_TOP);
+
client->active = true;
client->startTimeNs = systemTime();
+ // This call updates the silenced state, and since we are active, appropriately notifies appops
+ // if we silence the track.
updateUidStates_l();
status_t status;
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index bc3c199..9367949 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -1843,14 +1843,12 @@
void AudioPolicyService::SensorPrivacyPolicy::registerSelf() {
SensorPrivacyManager spm;
mSensorPrivacyEnabled = spm.isSensorPrivacyEnabled();
- (void)spm.addToggleSensorPrivacyListener(this);
spm.addSensorPrivacyListener(this);
}
void AudioPolicyService::SensorPrivacyPolicy::unregisterSelf() {
SensorPrivacyManager spm;
spm.removeSensorPrivacyListener(this);
- spm.removeToggleSensorPrivacyListener(this);
}
bool AudioPolicyService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
@@ -1919,6 +1917,7 @@
// since it controls the mic permission for legacy apps.
mAppOpsManager.startWatchingMode(mAppOp, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
mAttributionSource.packageName.value_or(""))),
+ AppOpsManager::WATCH_FOREGROUND_CHANGES,
mOpCallback);
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index cecced4..668a51a 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -3074,6 +3074,13 @@
return binder::Status::ok();
}
+Status CameraService::reportExtensionSessionStats(
+ const hardware::CameraExtensionSessionStats& stats, String16* sessionKey /*out*/) {
+ ALOGV("%s: reported %s", __FUNCTION__, stats.toString().c_str());
+ *sessionKey = mCameraServiceProxyWrapper->updateExtensionStats(stats);
+ return Status::ok();
+}
+
void CameraService::removeByClient(const BasicClient* client) {
Mutex::Autolock lock(mServiceLock);
for (auto& i : mActiveClientManager.getAll()) {
@@ -3910,18 +3917,21 @@
void CameraService::UidPolicy::registerWithActivityManager() {
Mutex::Autolock _l(mUidLock);
+ int32_t emptyUidArray[] = { };
if (mRegistered) return;
status_t res = mAm.linkToDeath(this);
- mAm.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
+ mAm.registerUidObserverForUids(this, ActivityManager::UID_OBSERVER_GONE
| ActivityManager::UID_OBSERVER_IDLE
| ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE
| ActivityManager::UID_OBSERVER_PROC_OOM_ADJ,
ActivityManager::PROCESS_STATE_UNKNOWN,
- String16("cameraserver"));
+ String16("cameraserver"), emptyUidArray, 0, mObserverToken);
if (res == OK) {
mRegistered = true;
ALOGV("UidPolicy: Registered with ActivityManager");
+ } else {
+ ALOGE("UidPolicy: Failed to register with ActivityManager: 0x%08x", res);
}
}
@@ -4019,13 +4029,16 @@
if (it->second.hasCamera) {
for (auto &monitoredUid : mMonitoredUids) {
if (monitoredUid.first != uid && adj > monitoredUid.second.procAdj) {
+ ALOGV("%s: notify uid %d", __FUNCTION__, monitoredUid.first);
notifyUidSet.emplace(monitoredUid.first);
}
}
+ ALOGV("%s: notify uid %d", __FUNCTION__, uid);
notifyUidSet.emplace(uid);
} else {
for (auto &monitoredUid : mMonitoredUids) {
if (monitoredUid.second.hasCamera && adj < monitoredUid.second.procAdj) {
+ ALOGV("%s: notify uid %d", __FUNCTION__, uid);
notifyUidSet.emplace(uid);
}
}
@@ -4056,6 +4069,10 @@
monitoredUid.procAdj = resource_policy::UNKNOWN_ADJ;
monitoredUid.refCount = 1;
it = mMonitoredUids.emplace(std::pair<uid_t, MonitoredUid>(uid, monitoredUid)).first;
+ status_t res = mAm.addUidToObserver(mObserverToken, String16("cameraserver"), uid);
+ if (res != OK) {
+ ALOGE("UidPolicy: Failed to add uid to observer: 0x%08x", res);
+ }
}
if (openCamera) {
@@ -4073,6 +4090,10 @@
it->second.refCount--;
if (it->second.refCount == 0) {
mMonitoredUids.erase(it);
+ status_t res = mAm.removeUidFromObserver(mObserverToken, String16("cameraserver"), uid);
+ if (res != OK) {
+ ALOGE("UidPolicy: Failed to remove uid from observer: 0x%08x", res);
+ }
} else if (closeCamera) {
it->second.hasCamera = false;
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 8c57d26..3214d4c 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -218,6 +218,9 @@
/*out*/
sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession);
+ virtual binder::Status reportExtensionSessionStats(
+ const hardware::CameraExtensionSessionStats& stats, String16* sessionKey /*out*/);
+
// Extra permissions checks
virtual status_t onTransact(uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags);
@@ -798,6 +801,7 @@
// Monitored uid map
std::unordered_map<uid_t, MonitoredUid> mMonitoredUids;
std::unordered_map<uid_t, bool> mOverrideUids;
+ sp<IBinder> mObserverToken;
}; // class UidPolicy
// If sensor privacy is enabled then all apps, including those that are active, should be
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.cpp b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
index 28dff7d..1c1bd24 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.cpp
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
@@ -43,8 +43,7 @@
mTidMap[currentThreadId].cycles++;
if (mTidMap[currentThreadId].cycles >= mMaxCycles) {
- std::string abortMessage = getAbortMessage(getpid(), currentThreadId,
- mTidMap[currentThreadId].functionName);
+ std::string abortMessage = getAbortMessage(mTidMap[currentThreadId].functionName);
android_set_abort_message(abortMessage.c_str());
ALOGW("CameraServiceWatchdog triggering abort for pid: %d tid: %d", getpid(),
currentThreadId);
@@ -60,10 +59,9 @@
return true;
}
-std::string CameraServiceWatchdog::getAbortMessage(int pid, int tid, std::string functionName) {
+std::string CameraServiceWatchdog::getAbortMessage(const std::string& functionName) {
std::string res = "CameraServiceWatchdog triggering abort during "
- + functionName + " | pid: " + std::to_string(pid)
- + " tid: " + std::to_string(tid);
+ + functionName;
return res;
}
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index 6f8858a..de6ac9e 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -144,7 +144,7 @@
*/
void stop(uint32_t tid);
- std::string getAbortMessage(int pid, int tid, std::string functionName);
+ std::string getAbortMessage(const std::string& functionName);
virtual bool threadLoop();
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
index 8223371..6588470 100644
--- a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
@@ -658,6 +658,13 @@
return res;
}
+ if ((res = native_window_set_usage(mOutputSurface.get(),
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN)) != OK) {
+ ALOGE("%s: Unable to configure stream buffer usage for stream %d", __FUNCTION__,
+ mP010StreamId);
+ return res;
+ }
+
int maxProducerBuffers;
ANativeWindow *anw = mP010Surface.get();
if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 4225366..3aff2ac 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -26,8 +26,14 @@
namespace android {
-using hardware::ICameraServiceProxy;
+using hardware::CameraExtensionSessionStats;
using hardware::CameraSessionStats;
+using hardware::ICameraServiceProxy;
+
+namespace {
+// Sentinel value to be returned when extension session stats with a stale or invalid key are reported.
+const String16 POISON_EXT_STATS_KEY("poisoned_stats");
+} // anonymous namespace
/**
* CameraSessionStatsWrapper functions
@@ -53,6 +59,7 @@
mSessionStats.mNewCameraState = CameraSessionStats::CAMERA_STATE_CLOSED;
mSessionStats.mLatencyMs = latencyMs;
mSessionStats.mDeviceError = deviceError;
+ mSessionStats.mSessionIndex = 0;
updateProxyDeviceState(proxyBinder);
}
@@ -74,6 +81,7 @@
mSessionStats.mNewCameraState = CameraSessionStats::CAMERA_STATE_ACTIVE;
mSessionStats.mMaxPreviewFps = maxPreviewFps;
+ mSessionStats.mSessionIndex++;
updateProxyDeviceState(proxyBinder);
// Reset mCreationDuration to -1 to distinguish between 1st session
@@ -95,10 +103,12 @@
mSessionStats.mUserTag = String16(userTag.c_str());
mSessionStats.mVideoStabilizationMode = videoStabilizationMode;
mSessionStats.mStreamStats = streamStats;
+
updateProxyDeviceState(proxyBinder);
mSessionStats.mInternalReconfigure = 0;
mSessionStats.mStreamStats.clear();
+ mSessionStats.mCameraExtensionSessionStats = {};
}
int64_t CameraServiceProxyWrapper::CameraSessionStatsWrapper::getLogId() {
@@ -106,6 +116,65 @@
return mSessionStats.mLogId;
}
+String16 CameraServiceProxyWrapper::CameraSessionStatsWrapper::updateExtensionSessionStats(
+ const hardware::CameraExtensionSessionStats& extStats) {
+ Mutex::Autolock l(mLock);
+ CameraExtensionSessionStats& currStats = mSessionStats.mCameraExtensionSessionStats;
+ if (currStats.key != extStats.key) {
+ // Mismatched keys. Extensions stats likely reported for a closed session
+ ALOGW("%s: mismatched extensions stats key: current='%s' reported='%s'. Dropping stats.",
+ __FUNCTION__, String8(currStats.key).c_str(), String8(extStats.key).c_str());
+ return POISON_EXT_STATS_KEY; // return poisoned key so future calls are
+ // definitely dropped.
+ }
+
+ // Matching keys...
+ if (currStats.key.size()) {
+ // non-empty matching keys. overwrite.
+ ALOGV("%s: Overwriting extension session stats: %s", __FUNCTION__,
+ extStats.toString().c_str());
+ currStats = extStats;
+ return currStats.key;
+ }
+
+ // Matching empty keys...
+ if (mSessionStats.mClientName != extStats.clientName) {
+ ALOGW("%s: extension stats reported for unexpected package: current='%s' reported='%s'. "
+ "Dropping stats.", __FUNCTION__,
+ String8(mSessionStats.mClientName).c_str(),
+ String8(extStats.clientName).c_str());
+ return POISON_EXT_STATS_KEY;
+ }
+
+ // Matching empty keys for the current client...
+ if (mSessionStats.mNewCameraState == CameraSessionStats::CAMERA_STATE_OPEN ||
+ mSessionStats.mNewCameraState == CameraSessionStats::CAMERA_STATE_IDLE) {
+ // Camera is open, but not active. It is possible that the active callback hasn't
+ // occurred yet. Keep the stats, but don't associate it with any session.
+ ALOGV("%s: extension stat reported for an open, but not active camera. "
+ "Saving stats, but not generating key.", __FUNCTION__);
+ currStats = extStats;
+ return {}; // Subsequent calls will handle setting the correct key.
+ }
+
+ if (mSessionStats.mNewCameraState == CameraSessionStats::CAMERA_STATE_ACTIVE) {
+ // camera is active. First call for the session!
+ currStats = extStats;
+
+ // Generate a new key from logId and sessionIndex.
+ std::ostringstream key;
+ key << mSessionStats.mSessionIndex << '/' << mSessionStats.mLogId;
+ currStats.key = String16(key.str().c_str());
+ ALOGV("%s: New extension session stats: %s", __FUNCTION__, currStats.toString().c_str());
+ return currStats.key;
+ }
+
+ // Camera is closed. Probably a stale call.
+ ALOGW("%s: extension stats reported for closed camera id '%s'. Dropping stats.",
+ __FUNCTION__, String8(mSessionStats.mCameraId).c_str());
+ return {};
+}
+
/**
* CameraServiceProxyWrapper functions
*/
@@ -335,4 +404,21 @@
return ret;
}
+String16 CameraServiceProxyWrapper::updateExtensionStats(
+ const hardware::CameraExtensionSessionStats& extStats) {
+ std::shared_ptr<CameraSessionStatsWrapper> stats;
+ String8 cameraId = String8(extStats.cameraId);
+ {
+ Mutex::Autolock _l(mLock);
+ if (mSessionStatsMap.count(cameraId) == 0) {
+ ALOGE("%s CameraExtensionSessionStats reported for camera id that isn't open: %s",
+ __FUNCTION__, cameraId.c_str());
+ return {};
+ }
+
+ stats = mSessionStatsMap[cameraId];
+ return stats->updateExtensionSessionStats(extStats);
+ }
+}
+
} // namespace android
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index d47c738..e32580c 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -65,6 +65,8 @@
const std::string& userTag, int32_t videoStabilizationMode,
const std::vector<hardware::CameraStreamStats>& streamStats);
+ String16 updateExtensionSessionStats(const hardware::CameraExtensionSessionStats& extStats);
+
// Returns the logId associated with this event.
int64_t getLogId();
};
@@ -127,6 +129,9 @@
// frameworks/av/camera/include/camera/CameraSessionStats.h for more details about this
// identifier. Returns a non-0 value on success.
int64_t getCurrentLogIdForCamera(const String8& cameraId);
+
+ // Update the stored extension stats to the latest values
+ String16 updateExtensionStats(const hardware::CameraExtensionSessionStats& extStats);
};
} // android
diff --git a/services/mediametrics/StringUtils.cpp b/services/mediametrics/StringUtils.cpp
index d1c7a18..5766f1c 100644
--- a/services/mediametrics/StringUtils.cpp
+++ b/services/mediametrics/StringUtils.cpp
@@ -15,11 +15,13 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaMetricsService::stringutils"
+#define LOG_TAG "mediametrics::stringutils"
#include <utils/Log.h>
#include "StringUtils.h"
+#include <charconv>
+
#include "AudioTypes.h"
namespace android::mediametrics::stringutils {
@@ -54,6 +56,26 @@
}
}
+bool parseVector(const std::string &str, std::vector<int32_t> *vector) {
+ std::vector<int32_t> values;
+ const char *p = str.c_str();
+ const char *last = p + str.size();
+ while (p != last) {
+ if (*p == ',' || *p == '{' || *p == '}') {
+ p++;
+ }
+ int32_t value = -1;
+ auto [ptr, error] = std::from_chars(p, last, value);
+ if (error == std::errc::invalid_argument || error == std::errc::result_out_of_range) {
+ return false;
+ }
+ p = ptr;
+ values.push_back(value);
+ }
+ *vector = std::move(values);
+ return true;
+}
+
std::vector<std::pair<std::string, std::string>> getDeviceAddressPairs(const std::string& devices)
{
std::vector<std::pair<std::string, std::string>> result;
diff --git a/services/mediametrics/include/mediametricsservice/StatsdLog.h b/services/mediametrics/include/mediametricsservice/StatsdLog.h
index e207bac..5d5009e 100644
--- a/services/mediametrics/include/mediametricsservice/StatsdLog.h
+++ b/services/mediametrics/include/mediametricsservice/StatsdLog.h
@@ -16,11 +16,13 @@
#pragma once
-#include <audio_utils/SimpleLog.h>
#include <map>
#include <mutex>
#include <sstream>
+#include <android-base/thread_annotations.h>
+#include <audio_utils/SimpleLog.h>
+
namespace android::mediametrics {
class StatsdLog {
@@ -61,9 +63,9 @@
}
private:
+ mutable std::mutex mLock;
SimpleLog mSimpleLog; // internally locked
std::map<int /* atom */, size_t /* count */> mCountMap GUARDED_BY(mLock); // sorted
- mutable std::mutex mLock;
};
} // namespace android::mediametrics
diff --git a/services/mediametrics/include/mediametricsservice/StringUtils.h b/services/mediametrics/include/mediametricsservice/StringUtils.h
index 78c25ff..ed2cf2e 100644
--- a/services/mediametrics/include/mediametricsservice/StringUtils.h
+++ b/services/mediametrics/include/mediametricsservice/StringUtils.h
@@ -72,6 +72,12 @@
std::vector<std::string> split(const std::string& flags, const char *delim);
/**
+ * Parses a vector of integers using ',' '{' and '}' as delimiters. Leaves
+ * vector unmodified if the parsing fails.
+ */
+bool parseVector(const std::string &str, std::vector<int32_t> *vector);
+
+/**
* Parse the devices string and return a vector of device address pairs.
*
* A failure to parse returns early with the contents that were able to be parsed.
diff --git a/services/mediametrics/include/mediametricsservice/iface_statsd.h b/services/mediametrics/include/mediametricsservice/iface_statsd.h
index 5bc293b..34d71ba 100644
--- a/services/mediametrics/include/mediametricsservice/iface_statsd.h
+++ b/services/mediametrics/include/mediametricsservice/iface_statsd.h
@@ -15,7 +15,9 @@
*/
#include <memory>
+
#include <stats_event.h>
+#include <StatsdLog.h>
namespace android {
namespace mediametrics {
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 158914a..ad4cfce 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -23,6 +23,7 @@
#include <pthread.h>
#include <pwd.h>
#include <stdint.h>
+#include <string>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
@@ -32,14 +33,149 @@
#include <stats_media_metrics.h>
#include <stats_event.h>
-#include "cleaner.h"
-#include "MediaMetricsService.h"
-#include "ValidateId.h"
-#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
-#include "iface_statsd.h"
+#include <frameworks/proto_logging/stats/message/mediametrics_message.pb.h>
+#include <mediametricsservice/cleaner.h>
+#include <mediametricsservice/iface_statsd.h>
+#include <mediametricsservice/MediaMetricsService.h>
+#include <mediametricsservice/StringUtils.h>
+#include <mediametricsservice/ValidateId.h>
namespace android {
+using stats::media_metrics::stats_write;
+using stats::media_metrics::MEDIA_CODEC_RENDERED;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_UNKNOWN;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_INVALID;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_ZERO;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_UNKNOWN;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_UNDETERMINED;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_24_3_2_PULLDOWN;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_NONE;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HLG;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HDR10;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HDR10_PLUS;
+using stats::media_metrics::MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_DOLBY_VISION;
+
+static const int BITRATE_UNKNOWN =
+ stats::media_metrics::MEDIA_CODEC_RENDERED__BITRATE__BITRATE_UNKNOWN;
+
+static const std::pair<char const *, int> CODEC_LOOKUP[] = {
+ { "avc", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_AVC },
+ { "h264", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_AVC },
+ { "hevc", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_HEVC },
+ { "h265", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_HEVC },
+ { "vp8", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_VP8 },
+ { "vp9", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_VP9 },
+ { "av1", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_AV1 },
+ { "av01", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_AV1 },
+ { "dolby-vision", stats::media_metrics::MEDIA_CODEC_RENDERED__CODEC__CODEC_HEVC },
+};
+
+static const int32_t RESOLUTION_LOOKUP[] = {
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_MAX_SIZE,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_32K,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_16K,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_8K_UHD,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_8K_UHD_ALMOST,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_4K_UHD_ALMOST,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_1440X2560,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_1080X2400,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_1080X2340,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_1080P_FHD,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_1080P_FHD_ALMOST,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_720P_HD,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_720P_HD_ALMOST,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_576X1024,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_540X960,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_480X854,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_480X640,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_360X640,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_352X640,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_VERY_LOW,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_SMALLEST,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_ZERO,
+};
+
+static const int32_t FRAMERATE_LOOKUP[] = {
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_24,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_25,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_30,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_50,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_60,
+ stats::media_metrics::MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_120,
+};
+
+static int32_t getMetricsCodecEnum(const std::string &mime, const std::string &componentName) {
+ for (const auto & codecStrAndEnum : CODEC_LOOKUP) {
+ if (strcasestr(mime.c_str(), codecStrAndEnum.first) != nullptr ||
+ strcasestr(componentName.c_str(), codecStrAndEnum.first) != nullptr) {
+ return codecStrAndEnum.second;
+ }
+ }
+ return MEDIA_CODEC_RENDERED__CODEC__CODEC_UNKNOWN;
+}
+
+static int32_t getMetricsResolutionEnum(int32_t width, int32_t height) {
+ if (width == 0 || height == 0) {
+ return MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_ZERO;
+ }
+ int64_t pixels = int64_t(width) * height / 1000;
+ if (width < 0 || height < 0 || pixels > RESOLUTION_LOOKUP[0]) {
+ return MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_INVALID;
+ }
+ for (int32_t resolutionEnum : RESOLUTION_LOOKUP) {
+ if (pixels > resolutionEnum) {
+ return resolutionEnum;
+ }
+ }
+ return MEDIA_CODEC_RENDERED__RESOLUTION__RESOLUTION_ZERO;
+}
+
+static int32_t getMetricsFramerateEnum(float inFramerate) {
+ if (inFramerate == -1.0f) {
+ return MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_UNDETERMINED;
+ }
+ if (inFramerate == -2.0f) {
+ return MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_24_3_2_PULLDOWN;
+ }
+ int framerate = int(inFramerate * 100); // Table is in hundredths of frames per second
+ static const int framerateTolerance = 40; // Tolerance is 0.4 frames per second - table is 100s
+ for (int32_t framerateEnum : FRAMERATE_LOOKUP) {
+ if (abs(framerate - framerateEnum) < framerateTolerance) {
+ return framerateEnum;
+ }
+ }
+ return MEDIA_CODEC_RENDERED__CONTENT_FRAMERATE__FRAMERATE_UNKNOWN;
+}
+
+static int32_t getMetricsHdrFormatEnum(std::string &mime, std::string &componentName,
+ int32_t configColorTransfer, int32_t parsedColorTransfer,
+ int32_t hdr10StaticInfo, int32_t hdr10PlusInfo) {
+ if (hdr10PlusInfo) {
+ return MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HDR10_PLUS;
+ }
+ if (hdr10StaticInfo) {
+ return MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HDR10;
+ }
+ // 7 = COLOR_TRANSFER_HLG in MediaCodecConstants.h
+ if (configColorTransfer == 7 || parsedColorTransfer == 7) {
+ return MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_HLG;
+ }
+ if (strcasestr(mime.c_str(), "dolby-vision") != nullptr ||
+ strcasestr(componentName.c_str(), "dvhe") != nullptr ||
+ strcasestr(componentName.c_str(), "dvav") != nullptr ||
+ strcasestr(componentName.c_str(), "dav1") != nullptr) {
+ return MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_DOLBY_VISION;
+ }
+ return MEDIA_CODEC_RENDERED__HDR_FORMAT__HDR_FORMAT_NONE;
+}
+
+static void parseVector(const std::string &str, std::vector<int32_t> *vector) {
+ if (!mediametrics::stringutils::parseVector(str, vector)) {
+ ALOGE("failed to parse integer vector from '%s'", str.c_str());
+ }
+}
+
bool statsd_codec(const std::shared_ptr<const mediametrics::Item>& item,
const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
@@ -48,17 +184,17 @@
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, stats::media_metrics::MEDIA_CODEC_REPORTED);
- const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
- AStatsEvent_writeInt64(event, timestamp_nanos);
+ const nsecs_t timestampNanos = MediaMetricsService::roundTime(item->getTimestamp());
+ AStatsEvent_writeInt64(event, timestampNanos);
- std::string package_name = item->getPkgName();
- AStatsEvent_writeString(event, package_name.c_str());
+ std::string packageName = item->getPkgName();
+ AStatsEvent_writeString(event, packageName.c_str());
- int64_t package_version_code = item->getPkgVersionCode();
- AStatsEvent_writeInt64(event, package_version_code);
+ int64_t packageVersionCode = item->getPkgVersionCode();
+ AStatsEvent_writeInt64(event, packageVersionCode);
- int64_t media_apex_version = 0;
- AStatsEvent_writeInt64(event, media_apex_version);
+ int64_t mediaApexVersion = 0;
+ AStatsEvent_writeInt64(event, mediaApexVersion);
// the rest into our own proto
//
@@ -84,17 +220,25 @@
}
AStatsEvent_writeString(event, mode.c_str());
- int32_t encoder = -1;
- if (item->getInt32("android.media.mediacodec.encoder", &encoder)) {
- metrics_proto.set_encoder(encoder);
+ int32_t isEncoder = -1;
+ if (item->getInt32("android.media.mediacodec.encoder", &isEncoder)) {
+ metrics_proto.set_encoder(isEncoder);
}
- AStatsEvent_writeInt32(event, encoder);
+ AStatsEvent_writeInt32(event, isEncoder);
- int32_t secure = -1;
- if (item->getInt32("android.media.mediacodec.secure", &secure)) {
- metrics_proto.set_secure(secure);
+ int32_t isSecure = -1;
+ if (item->getInt32("android.media.mediacodec.secure", &isSecure)) {
+ metrics_proto.set_secure(isSecure);
}
- AStatsEvent_writeInt32(event, secure);
+ AStatsEvent_writeInt32(event, isSecure);
+
+ int32_t isHardware = -1;
+ item->getInt32("android.media.mediacodec.hardware", &isHardware);
+ // not logged to MediaCodecReported or MediametricsCodecReported
+
+ int32_t isTunneled = -1;
+ item->getInt32("android.media.mediacodec.tunneled", &isTunneled);
+ // not logged to MediaCodecReported or MediametricsCodecReported
int32_t width = -1;
if (item->getInt32("android.media.mediacodec.width", &width)) {
@@ -133,79 +277,78 @@
AStatsEvent_writeInt32(event, level);
- int32_t max_width = -1;
- if ( item->getInt32("android.media.mediacodec.maxwidth", &max_width)) {
- metrics_proto.set_max_width(max_width);
+ int32_t maxWidth = -1;
+ if ( item->getInt32("android.media.mediacodec.maxwidth", &maxWidth)) {
+ metrics_proto.set_max_width(maxWidth);
}
- AStatsEvent_writeInt32(event, max_width);
+ AStatsEvent_writeInt32(event, maxWidth);
- int32_t max_height = -1;
- if ( item->getInt32("android.media.mediacodec.maxheight", &max_height)) {
- metrics_proto.set_max_height(max_height);
+ int32_t maxHeight = -1;
+ if ( item->getInt32("android.media.mediacodec.maxheight", &maxHeight)) {
+ metrics_proto.set_max_height(maxHeight);
}
- AStatsEvent_writeInt32(event, max_height);
+ AStatsEvent_writeInt32(event, maxHeight);
- int32_t error_code = -1;
- if ( item->getInt32("android.media.mediacodec.errcode", &error_code)) {
- metrics_proto.set_error_code(error_code);
+ int32_t errorCode = -1;
+ if ( item->getInt32("android.media.mediacodec.errcode", &errorCode)) {
+ metrics_proto.set_error_code(errorCode);
}
- AStatsEvent_writeInt32(event, error_code);
+ AStatsEvent_writeInt32(event, errorCode);
- std::string error_state;
- if ( item->getString("android.media.mediacodec.errstate", &error_state)) {
- metrics_proto.set_error_state(error_state);
+ std::string errorState;
+ if ( item->getString("android.media.mediacodec.errstate", &errorState)) {
+ metrics_proto.set_error_state(errorState);
}
- AStatsEvent_writeString(event, error_state.c_str());
+ AStatsEvent_writeString(event, errorState.c_str());
- int64_t latency_max = -1;
- if (item->getInt64("android.media.mediacodec.latency.max", &latency_max)) {
- metrics_proto.set_latency_max(latency_max);
+ int64_t latencyMax = -1;
+ if (item->getInt64("android.media.mediacodec.latency.max", &latencyMax)) {
+ metrics_proto.set_latency_max(latencyMax);
}
- AStatsEvent_writeInt64(event, latency_max);
+ AStatsEvent_writeInt64(event, latencyMax);
- int64_t latency_min = -1;
- if (item->getInt64("android.media.mediacodec.latency.min", &latency_min)) {
- metrics_proto.set_latency_min(latency_min);
+ int64_t latencyMin = -1;
+ if (item->getInt64("android.media.mediacodec.latency.min", &latencyMin)) {
+ metrics_proto.set_latency_min(latencyMin);
}
- AStatsEvent_writeInt64(event, latency_min);
+ AStatsEvent_writeInt64(event, latencyMin);
- int64_t latency_avg = -1;
- if (item->getInt64("android.media.mediacodec.latency.avg", &latency_avg)) {
- metrics_proto.set_latency_avg(latency_avg);
+ int64_t latencyAvg = -1;
+ if (item->getInt64("android.media.mediacodec.latency.avg", &latencyAvg)) {
+ metrics_proto.set_latency_avg(latencyAvg);
}
- AStatsEvent_writeInt64(event, latency_avg);
+ AStatsEvent_writeInt64(event, latencyAvg);
- int64_t latency_count = -1;
- if (item->getInt64("android.media.mediacodec.latency.n", &latency_count)) {
- metrics_proto.set_latency_count(latency_count);
+ int64_t latencyCount = -1;
+ if (item->getInt64("android.media.mediacodec.latency.n", &latencyCount)) {
+ metrics_proto.set_latency_count(latencyCount);
}
- AStatsEvent_writeInt64(event, latency_count);
+ AStatsEvent_writeInt64(event, latencyCount);
- int64_t latency_unknown = -1;
- if (item->getInt64("android.media.mediacodec.latency.unknown", &latency_unknown)) {
- metrics_proto.set_latency_unknown(latency_unknown);
+ int64_t latencyUnknown = -1;
+ if (item->getInt64("android.media.mediacodec.latency.unknown", &latencyUnknown)) {
+ metrics_proto.set_latency_unknown(latencyUnknown);
}
- AStatsEvent_writeInt64(event, latency_unknown);
+ AStatsEvent_writeInt64(event, latencyUnknown);
- int32_t queue_secure_input_buffer_error = -1;
+ int32_t queueSecureInputBufferError = -1;
if (item->getInt32("android.media.mediacodec.queueSecureInputBufferError",
- &queue_secure_input_buffer_error)) {
- metrics_proto.set_queue_secure_input_buffer_error(queue_secure_input_buffer_error);
+ &queueSecureInputBufferError)) {
+ metrics_proto.set_queue_secure_input_buffer_error(queueSecureInputBufferError);
}
- AStatsEvent_writeInt32(event, queue_secure_input_buffer_error);
+ AStatsEvent_writeInt32(event, queueSecureInputBufferError);
- int32_t queue_input_buffer_error = -1;
- if (item->getInt32("android.media.mediacodec.queueInputBufferError",
- &queue_input_buffer_error)) {
- metrics_proto.set_queue_input_buffer_error(queue_input_buffer_error);
+ int32_t queueInputBufferError = -1;
+ if (item->getInt32("android.media.mediacodec.queueInputBufferError", &queueInputBufferError)) {
+ metrics_proto.set_queue_input_buffer_error(queueInputBufferError);
}
- AStatsEvent_writeInt32(event, queue_input_buffer_error);
+ AStatsEvent_writeInt32(event, queueInputBufferError);
- std::string bitrate_mode;
- if (item->getString("android.media.mediacodec.bitrate_mode", &bitrate_mode)) {
- metrics_proto.set_bitrate_mode(bitrate_mode);
+ std::string bitrateMode;
+ if (item->getString("android.media.mediacodec.bitrate_mode", &bitrateMode)) {
+ metrics_proto.set_bitrate_mode(bitrateMode);
}
- AStatsEvent_writeString(event, bitrate_mode.c_str());
+ AStatsEvent_writeString(event, bitrateMode.c_str());
int32_t bitrate = -1;
if (item->getInt32("android.media.mediacodec.bitrate", &bitrate)) {
@@ -213,18 +356,18 @@
}
AStatsEvent_writeInt32(event, bitrate);
- int64_t lifetime_millis = -1;
- if (item->getInt64("android.media.mediacodec.lifetimeMs", &lifetime_millis)) {
- lifetime_millis = mediametrics::bucket_time_minutes(lifetime_millis);
- metrics_proto.set_lifetime_millis(lifetime_millis);
+ int64_t lifetimeMillis = -1;
+ if (item->getInt64("android.media.mediacodec.lifetimeMs", &lifetimeMillis)) {
+ lifetimeMillis = mediametrics::bucket_time_minutes(lifetimeMillis);
+ metrics_proto.set_lifetime_millis(lifetimeMillis);
}
- AStatsEvent_writeInt64(event, lifetime_millis);
+ AStatsEvent_writeInt64(event, lifetimeMillis);
- int64_t playback_duration_sec = -1;
- item->getInt64("android.media.mediacodec.playback-duration-sec", &playback_duration_sec);
+ int64_t playbackDurationSec = -1;
+ item->getInt64("android.media.mediacodec.playback-duration-sec", &playbackDurationSec);
// DO NOT record playback-duration in the metrics_proto - it should only
// exist in the flattened atom
- AStatsEvent_writeInt64(event, playback_duration_sec);
+ AStatsEvent_writeInt64(event, playbackDurationSec);
std::string sessionId;
if (item->getString("android.media.mediacodec.log-session-id", &sessionId)) {
@@ -505,61 +648,182 @@
}
AStatsEvent_writeInt32(event, resolutionChangeCount);
+ int64_t firstRenderTimeUs = -1;
+ item->getInt64("android.media.mediacodec.first-render-time-us", &firstRenderTimeUs);
+ int64_t framesReleased = -1;
+ item->getInt64("android.media.mediacodec.frames-released", &framesReleased);
+ int64_t framesRendered = -1;
+ item->getInt64("android.media.mediacodec.frames-rendered", &framesRendered);
+ int64_t framesDropped = -1;
+ item->getInt64("android.media.mediacodec.frames-dropped", &framesDropped);
+ int64_t framesSkipped = -1;
+ item->getInt64("android.media.mediacodec.frames-skipped", &framesSkipped);
+ double framerateContent = -1;
+ item->getDouble("android.media.mediacodec.framerate-content", &framerateContent);
+ double framerateActual = -1;
+ item->getDouble("android.media.mediacodec.framerate-actual", &framerateActual);
+ int64_t freezeScore = -1;
+ item->getInt64("android.media.mediacodec.freeze-score", &freezeScore);
+ double freezeRate = -1;
+ item->getDouble("android.media.mediacodec.freeze-rate", &freezeRate);
+ std::string freezeScoreHistogramStr;
+ item->getString("android.media.mediacodec.freeze-score-histogram", &freezeScoreHistogramStr);
+ std::string freezeScoreHistogramBucketsStr;
+ item->getString("android.media.mediacodec.freeze-score-histogram-buckets",
+ &freezeScoreHistogramBucketsStr);
+ std::string freezeDurationMsHistogramStr;
+ item->getString("android.media.mediacodec.freeze-duration-ms-histogram",
+ &freezeDurationMsHistogramStr);
+ std::string freezeDurationMsHistogramBucketsStr;
+ item->getString("android.media.mediacodec.freeze-duration-ms-histogram-buckets",
+ &freezeDurationMsHistogramBucketsStr);
+ std::string freezeDistanceMsHistogramStr;
+ item->getString("android.media.mediacodec.freeze-distance-ms-histogram",
+ &freezeDistanceMsHistogramStr);
+ std::string freezeDistanceMsHistogramBucketsStr;
+ item->getString("android.media.mediacodec.freeze-distance-ms-histogram-buckets",
+ &freezeDistanceMsHistogramBucketsStr);
+ int64_t judderScore = -1;
+ item->getInt64("android.media.mediacodec.judder-score", &judderScore);
+ double judderRate = -1;
+ item->getDouble("android.media.mediacodec.judder-rate", &judderRate);
+ std::string judderScoreHistogramStr;
+ item->getString("android.media.mediacodec.judder-score-histogram", &judderScoreHistogramStr);
+ std::string judderScoreHistogramBucketsStr;
+ item->getString("android.media.mediacodec.judder-score-histogram-buckets",
+ &judderScoreHistogramBucketsStr);
+
int err = AStatsEvent_write(event);
if (err < 0) {
ALOGE("Failed to write codec metrics to statsd (%d)", err);
}
AStatsEvent_release(event);
+ if (framesRendered > 0) {
+ int32_t statsUid = item->getUid();
+ int64_t statsCodecId = codecId;
+ char const *statsLogSessionId = sessionId.c_str();
+ int32_t statsIsHardware = isHardware;
+ int32_t statsIsSecure = isSecure;
+ int32_t statsIsTunneled = isTunneled;
+ int32_t statsCodec = getMetricsCodecEnum(mime, codec);
+ int32_t statsResolution = getMetricsResolutionEnum(width, height);
+ int32_t statsBitrate = BITRATE_UNKNOWN;
+ int32_t statsContentFramerate = getMetricsFramerateEnum(framerateContent);
+ int32_t statsActualFramerate = getMetricsFramerateEnum(framerateActual);
+ int32_t statsHdrFormat = getMetricsHdrFormatEnum(mime, codec, configColorTransfer,
+ parsedColorTransfer, hdrStaticInfo,
+ hdr10PlusInfo);
+ int64_t statsFirstRenderTimeUs = firstRenderTimeUs;
+ int64_t statsPlaybackDurationSeconds = playbackDurationSec;
+ int64_t statsFramesTotal = framesReleased + framesSkipped;
+ int64_t statsFramesReleased = framesReleased;
+ int64_t statsFramesRendered = framesRendered;
+ int64_t statsFramesDropped = framesDropped;
+ int64_t statsFramesSkipped = framesSkipped;
+ float statsFrameDropRate = float(double(framesDropped) / statsFramesTotal);
+ float statsFrameSkipRate = float(double(framesSkipped) / statsFramesTotal);
+ float statsFrameSkipDropRate = float(double(framesSkipped + framesDropped) /
+ statsFramesTotal);
+ int64_t statsFreezeScore = freezeScore;
+ float statsFreezeRate = freezeRate;
+ std::vector<int32_t> statsFreezeDurationMsHistogram;
+ parseVector(freezeDurationMsHistogramStr, &statsFreezeDurationMsHistogram);
+ std::vector<int32_t> statsFreezeDurationMsHistogramBuckets;
+ parseVector(freezeDurationMsHistogramBucketsStr, &statsFreezeDurationMsHistogramBuckets);
+ std::vector<int32_t> statsFreezeDistanceMsHistogram;
+ parseVector(freezeDistanceMsHistogramStr, &statsFreezeDistanceMsHistogram);
+ std::vector<int32_t> statsFreezeDistanceMsHistogramBuckets;
+ parseVector(freezeDistanceMsHistogramBucketsStr, &statsFreezeDistanceMsHistogramBuckets);
+ int64_t statsJudderScore = judderScore;
+ float statsJudderRate = judderRate;
+ std::vector<int32_t> statsJudderScoreHistogram;
+ parseVector(judderScoreHistogramStr, &statsJudderScoreHistogram);
+ std::vector<int32_t> statsJudderScoreHistogramBuckets;
+ parseVector(judderScoreHistogramBucketsStr, &statsJudderScoreHistogramBuckets);
+ int result = stats_write(
+ MEDIA_CODEC_RENDERED,
+ statsUid,
+ statsCodecId,
+ statsLogSessionId,
+ statsIsHardware,
+ statsIsSecure,
+ statsIsTunneled,
+ statsCodec,
+ statsResolution,
+ statsBitrate,
+ statsContentFramerate,
+ statsActualFramerate,
+ statsHdrFormat,
+ statsFirstRenderTimeUs,
+ statsPlaybackDurationSeconds,
+ statsFramesTotal,
+ statsFramesReleased,
+ statsFramesRendered,
+ statsFramesDropped,
+ statsFramesSkipped,
+ statsFrameDropRate,
+ statsFrameSkipRate,
+ statsFrameSkipDropRate,
+ statsFreezeScore,
+ statsFreezeRate,
+ statsFreezeDurationMsHistogram,
+ statsFreezeDurationMsHistogramBuckets,
+ statsFreezeDistanceMsHistogram,
+ statsFreezeDistanceMsHistogramBuckets,
+ statsJudderScore,
+ statsJudderRate,
+ statsJudderScoreHistogram,
+ statsJudderScoreHistogramBuckets);
+ ALOGE_IF(result < 0, "Failed to record MEDIA_CODEC_RENDERED atom (%d)", result);
+ }
+
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize codec metrics");
return false;
}
- const stats::media_metrics::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ const stats::media_metrics::BytesField bf_serialized(serialized.c_str(), serialized.size());
const int result = stats::media_metrics::stats_write(stats::media_metrics::MEDIAMETRICS_CODEC_REPORTED,
- timestamp_nanos, package_name.c_str(), package_version_code,
- media_apex_version,
+ timestampNanos, packageName.c_str(), packageVersionCode,
+ mediaApexVersion,
bf_serialized);
std::stringstream log;
log << "result:" << result << " {"
<< " mediametrics_codec_reported:"
<< stats::media_metrics::MEDIAMETRICS_CODEC_REPORTED
- << " timestamp_nanos:" << timestamp_nanos
- << " package_name:" << package_name
- << " package_version_code:" << package_version_code
- << " media_apex_version:" << media_apex_version
-
+ << " timestamp_nanos:" << timestampNanos
+ << " package_name:" << packageName
+ << " package_version_code:" << packageVersionCode
+ << " media_apex_version:" << mediaApexVersion
<< " codec:" << codec
<< " mime:" << mime
<< " mode:" << mode
- << " encoder:" << encoder
- << " secure:" << secure
+ << " encoder:" << isEncoder
+ << " secure:" << isSecure
<< " width:" << width
<< " height:" << height
<< " rotation:" << rotation
<< " crypto:" << crypto
<< " profile:" << profile
-
<< " level:" << level
- << " max_width:" << max_width
- << " max_height:" << max_height
- << " error_code:" << error_code
- << " error_state:" << error_state
- << " latency_max:" << latency_max
- << " latency_min:" << latency_min
- << " latency_avg:" << latency_avg
- << " latency_count:" << latency_count
- << " latency_unknown:" << latency_unknown
-
- << " queue_input_buffer_error:" << queue_input_buffer_error
- << " queue_secure_input_buffer_error:" << queue_secure_input_buffer_error
- << " bitrate_mode:" << bitrate_mode
+ << " max_width:" << maxWidth
+ << " max_height:" << maxHeight
+ << " error_code:" << errorCode
+ << " error_state:" << errorState
+ << " latency_max:" << latencyMax
+ << " latency_min:" << latencyMin
+ << " latency_avg:" << latencyAvg
+ << " latency_count:" << latencyCount
+ << " latency_unknown:" << latencyUnknown
+ << " queue_input_buffer_error:" << queueInputBufferError
+ << " queue_secure_input_buffer_error:" << queueSecureInputBufferError
+ << " bitrate_mode:" << bitrateMode
<< " bitrate:" << bitrate
<< " original_bitrate:" << originalBitrate
- << " lifetime_millis:" << lifetime_millis
- << " playback_duration_seconds:" << playback_duration_sec
+ << " lifetime_millis:" << lifetimeMillis
+ << " playback_duration_seconds:" << playbackDurationSec
<< " log_session_id:" << sessionId
<< " channel_count:" << channelCount
<< " sample_rate:" << sampleRate
@@ -572,7 +836,6 @@
<< " operating_rate:" << operatingRate
<< " priority:" << priority
<< " shaping_enhanced:" << shapingEnhanced
-
<< " qp_i_min:" << qpIMin
<< " qp_i_max:" << qpIMax
<< " qp_p_min:" << qpPMin
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index bc7b47b..4a6aee4 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -17,9 +17,10 @@
#define LOG_TAG "mediametrics_tests"
#include <utils/Log.h>
-
#include <stdio.h>
+#include <string>
#include <unordered_set>
+#include <vector>
#include <gtest/gtest.h>
#include <media/MediaMetricsItem.h>
@@ -30,6 +31,7 @@
#include <system/audio.h>
using namespace android;
+using android::mediametrics::stringutils::parseVector;
static size_t countNewlines(const char *s) {
size_t count = 0;
@@ -57,6 +59,35 @@
ASSERT_EQ(false, android::mediametrics::startsWith(s, std::string("est")));
}
+TEST(mediametrics_tests, parseVector) {
+ {
+ std::vector<int32_t> values;
+ EXPECT_EQ(true, parseVector("0{4,300,0,-112343,350}9", &values));
+ EXPECT_EQ(values, std::vector<int32_t>({0, 4, 300, 0, -112343, 350, 9}));
+ }
+ {
+ std::vector<int32_t> values;
+ EXPECT_EQ(true, parseVector("53", &values));
+ EXPECT_EQ(values, std::vector<int32_t>({53}));
+ }
+ {
+ std::vector<int32_t> values;
+ EXPECT_EQ(false, parseVector("5{3,6*3}3", &values));
+ EXPECT_EQ(values, std::vector<int32_t>({}));
+ }
+ {
+ std::vector<int32_t> values = {1}; // should still be this when parsing fails
+ std::vector<int32_t> expected = {1};
+ EXPECT_EQ(false, parseVector("51342abcd,1232", &values));
+ EXPECT_EQ(values, std::vector<int32_t>({1}));
+ }
+ {
+ std::vector<int32_t> values = {2}; // should still be this when parsing fails
+ EXPECT_EQ(false, parseVector("12345678901234,12345678901234", &values));
+ EXPECT_EQ(values, std::vector<int32_t>({2}));
+ }
+}
+
TEST(mediametrics_tests, defer) {
bool check = false;
{
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index e5bcf1f..9a1e8bb 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -100,8 +100,11 @@
if (fallBackToOpenDemux) {
auto status = mTuner->openDemux(&ids, &demux);
- return ::ndk::ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(Result::UNAVAILABLE));
+ if (status.isOk()) {
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerDemux>(demux, ids[0],
+ this->ref<TunerService>());
+ }
+ return status;
} else {
int id = TunerHelper::getResourceIdFromHandle(in_demuxHandle, DEMUX);
auto status = mTuner->openDemuxById(id, &demux);
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index 1789028..617622d 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -368,7 +368,7 @@
}
hidl_handle handle;
- handle.setTo(makeFromAidl(in_handle), true);
+ handle.setTo(makeFromAidl(in_handle));
HidlResult res = mFilter->releaseAvHandle(handle, in_avDataId);
if (res != HidlResult::SUCCESS) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));