Merge "Remove Hermitian tone-mapping curve for HLG" into tm-d1-dev
diff --git a/apex/manifest.json b/apex/manifest.json
index f908f97..4b75b04 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media",
- "version": 330090000,
+ "version": 339990000,
"requireNativeLibs": [
"libandroid.so",
"libbinder_ndk.so",
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index 01a85ae..fbcbb69 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media.swcodec",
- "version": 330090000,
+ "version": 339990000,
"requireNativeLibs": [
":sphal"
]
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 913854c..b6f8552 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3486,10 +3486,11 @@
* </ul></p>
*
* <p>This is a subset of ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS which contains a list
- * of keys that can be overridden using <a href="https://developer.android.com/reference/CaptureRequest/Builder.html#setPhysicalCameraKey">Builder#setPhysicalCameraKey</a>.
+ * of keys that can be overridden using
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#setPhysicalCameraKey">Builder#setPhysicalCameraKey</a>.
* The respective value of such request key can be obtained by calling
- * <a href="https://developer.android.com/reference/CaptureRequest/Builder.html#getPhysicalCameraKey">Builder#getPhysicalCameraKey</a>. Capture requests that contain
- * individual physical device requests must be built via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.Builder.html#getPhysicalCameraKey">Builder#getPhysicalCameraKey</a>.
+ * Capture requests that contain individual physical device requests must be built via
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureRequest(int,">Set)</a>.</p>
*
* @see ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS
@@ -3713,7 +3714,7 @@
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
* media performance class 12 or higher by setting
- * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+ * <a href="https://developer.android.com/reference/android/os/Build.VERSION.html#MEDIA_PERFORMANCE_CLASS">VERSION#MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
* the primary camera devices (first rear/front camera in the camera ID list) will not
* support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
* smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3732,7 +3733,7 @@
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
* to be media performance class 12 or better by setting
- * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+ * <a href="https://developer.android.com/reference/android/os/Build.VERSION.html#MEDIA_PERFORMANCE_CLASS">VERSION#MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
* or if the camera device isn't a primary rear/front camera, the minimum required output
* stream configurations are the same as for applications targeting SDK version older than
* 31.</p>
@@ -10225,7 +10226,7 @@
* fire the flash for flash power metering during precapture, and then fire the flash
* for the final capture, if a flash is available on the device and the AE mode is set to
* enable the flash.</p>
- * <p>Devices that initially shipped with Android version <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES.html#Q">Q</a> or newer will not include any LEGACY-level devices.</p>
+ * <p>Devices that initially shipped with Android version <a href="https://developer.android.com/reference/android/os/Build.VERSION_CODES.html#Q">Q</a> or newer will not include any LEGACY-level devices.</p>
*
* @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
* @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 41fe080..a4c03ba 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -1,24 +1,7 @@
// for frameworks/av/media
{
- "presubmit-large": [
- // runs whenever we change something in this tree
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.EncodeDecodeTest"
- }
- ]
- },
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
- }
- ]
- }
- ],
+ // TODO (b/229286407) Add EncodeDecodeTest and DecodeEditEncodeTest to
+ // presubmit-large once issues in cuttlefish are fixed
"presubmit": [
{
"name": "GtsMediaTestCases",
@@ -40,26 +23,8 @@
{
"path": "frameworks/av/drm/mediadrm/plugins"
}
- ],
-
- "platinum-postsubmit": [
- // runs regularly, independent of changes in this tree.
- // signals if changes elsewhere break media functionality
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.EncodeDecodeTest"
- }
- ]
- },
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
- }
- ]
- }
]
+
+ // TODO (b/229286407) Add EncodeDecodeTest and DecodeEditEncodeTest to
+ // platinum-postsubmit once issues in cuttlefish are fixed
}
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index d65488e..96b81d7 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -536,9 +536,10 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
if (img->fmt == AOM_IMG_FMT_I42016) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+ defaultColorAspects = mIntf->getDefaultColorAspects_l();
if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -587,7 +588,8 @@
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
convertYUV420Planar16ToY410OrRGBA1010102(
(uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(defaultColorAspects));
} else {
convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index 4ffcd59..8b46d3f 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -1514,7 +1514,8 @@
vPlane = uPlane + yPlaneSize / 4;
yStride = width;
uStride = vStride = yStride / 2;
- ConvertRGBToPlanarYUV(yPlane, yStride, height, conversionBuffer.size(), *input);
+ ConvertRGBToPlanarYUV(yPlane, yStride, height, conversionBuffer.size(), *input,
+ mColorAspects->matrix, mColorAspects->range);
break;
}
case C2PlanarLayout::TYPE_YUV: {
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index 8c7f8db..664647a 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -37,6 +37,11 @@
"libsfplugin_ccodec_utils",
],
+ header_libs: [
+ "libarect_headers",
+ "libnativewindow_headers",
+ ],
+
shared_libs: [
"libcutils", // for properties
"liblog", // for ALOG
@@ -77,6 +82,11 @@
"libsfplugin_ccodec_utils",
],
+ header_libs: [
+ "libarect_headers",
+ "libnativewindow_headers",
+ ],
+
shared_libs: [
"libcodec2_soft_common",
"libcutils", // for properties
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 678c269..9d4f049 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "SimpleC2Component"
#include <log/log.h>
+#include <android/hardware_buffer.h>
#include <cutils/properties.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -26,19 +27,14 @@
#include <C2Config.h>
#include <C2Debug.h>
#include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
#include <SimpleC2Component.h>
namespace android {
constexpr uint8_t kNeutralUVBitDepth8 = 128;
constexpr uint16_t kNeutralUVBitDepth10 = 512;
-bool isAtLeastT() {
- char deviceCodeName[PROP_VALUE_MAX];
- __system_property_get("ro.build.version.codename", deviceCodeName);
- return android_get_device_api_level() >= __ANDROID_API_T__ ||
- !strcmp(deviceCodeName, "Tiramisu");
-}
-
void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
@@ -137,11 +133,126 @@
dst += dstStride * 2;
}
}
+
+namespace {
+
+static C2ColorAspectsStruct FillMissingColorAspects(
+ std::shared_ptr<const C2ColorAspectsStruct> aspects,
+ int32_t width, int32_t height) {
+ C2ColorAspectsStruct _aspects;
+ if (aspects) {
+ _aspects = *aspects;
+ }
+
+ // use matrix for conversion
+ if (_aspects.matrix == C2Color::MATRIX_UNSPECIFIED) {
+ // if not specified, deduce matrix from primaries
+ if (_aspects.primaries == C2Color::PRIMARIES_UNSPECIFIED) {
+ // if those are also not specified, deduce primaries first from transfer, then from
+ // width and height
+ if (_aspects.transfer == C2Color::TRANSFER_ST2084
+ || _aspects.transfer == C2Color::TRANSFER_HLG) {
+ _aspects.primaries = C2Color::PRIMARIES_BT2020;
+ } else if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
+ // TODO: stagefright defaults to BT.2020 for UHD, but perhaps we should default to
+ // BT.709 for non-HDR 10-bit UHD content
+ // (see media/libstagefright/foundation/ColorUtils.cpp)
+ _aspects.primaries = C2Color::PRIMARIES_BT2020;
+ } else if ((width <= 720 && height <= 576)
+ || (height <= 720 && width <= 576)) {
+ // note: it does not actually matter whether to use 525 or 625 here as the
+ // conversion is the same
+ _aspects.primaries = C2Color::PRIMARIES_BT601_625;
+ } else {
+ _aspects.primaries = C2Color::PRIMARIES_BT709;
+ }
+ }
+
+ switch (_aspects.primaries) {
+ case C2Color::PRIMARIES_BT601_525:
+ case C2Color::PRIMARIES_BT601_625:
+ _aspects.matrix = C2Color::MATRIX_BT601;
+ break;
+
+ case C2Color::PRIMARIES_BT709:
+ _aspects.matrix = C2Color::MATRIX_BT709;
+ break;
+
+ case C2Color::PRIMARIES_BT2020:
+ default:
+ _aspects.matrix = C2Color::MATRIX_BT2020;
+ }
+ }
+
+ return _aspects;
+}
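Reviewer note: the fallback chain above (transfer, then frame size, then matrix from primaries) can be spot-checked in this translation unit. A minimal sketch, assuming <cassert> is available (the function is file-local, so these would live in this file or a friend test):

    // Expected deductions when every aspect is UNSPECIFIED (illustrative):
    assert(FillMissingColorAspects(nullptr, 3840, 2160).matrix == C2Color::MATRIX_BT2020);
    assert(FillMissingColorAspects(nullptr, 1920, 1080).matrix == C2Color::MATRIX_BT709);
    assert(FillMissingColorAspects(nullptr,  720,  576).matrix == C2Color::MATRIX_BT601);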
+
+// matrix conversion coefficients
+// (see media/libstagefright/colorconverter/ColorConverter.cpp for more details)
+struct Coeffs {
+ int32_t _y, _r_v, _g_u, _g_v, _b_u, _c16;
+};
+
+static const struct Coeffs GetCoeffsForAspects(const C2ColorAspectsStruct &aspects) {
+ bool isFullRange = aspects.range == C2Color::RANGE_FULL;
+
+ switch (aspects.matrix) {
+ case C2Color::MATRIX_BT601:
+ /**
+ * BT.601: K_R = 0.299; K_B = 0.114
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1436, 352, 731, 1815, 0 };
+ } else {
+ return Coeffs { 1196, 1639, 402, 835, 2072, 64 };
+ }
+ break;
+
+ case C2Color::MATRIX_BT709:
+ /**
+ * BT.709: K_R = 0.2126; K_B = 0.0722
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1613, 192, 479, 1900, 0 };
+ } else {
+ return Coeffs { 1196, 1841, 219, 547, 2169, 64 };
+ }
+ break;
+
+ case C2Color::MATRIX_BT2020:
+ default:
+ /**
+ * BT.2020: K_R = 0.2627; K_B = 0.0593
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1510, 169, 585, 1927, 0 };
+ } else {
+ return Coeffs { 1196, 1724, 192, 668, 2200, 64 };
+ }
+ }
+}
+
+}  // namespace
+
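Reviewer note: the fixed-point rows are the standard matrix coefficients scaled by 1024 (plus the 10-bit limited-range stretch where applicable). A self-contained sketch reproducing the BT.709 limited-range row; the scale factors 1023/876 (Y) and 1023/896 (chroma) are assumptions derived from the 10-bit nominal ranges [64, 940] and [64, 960]:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double Kr = 0.2126, Kb = 0.0722, Kg = 1 - Kr - Kb;  // BT.709
        const double yScale = 1023.0 / 876.0;  // 10-bit limited-range Y stretch
        const double cScale = 1023.0 / 896.0;  // 10-bit limited-range chroma stretch
        printf("_y   = %ld\n", std::lround(yScale * 1024));                          // 1196
        printf("_r_v = %ld\n", std::lround(2 * (1 - Kr) * cScale * 1024));           // 1841
        printf("_g_u = %ld\n", std::lround(2 * Kb * (1 - Kb) / Kg * cScale * 1024)); //  219
        printf("_g_v = %ld\n", std::lround(2 * Kr * (1 - Kr) / Kg * cScale * 1024)); //  547
        printf("_b_u = %ld\n", std::lround(2 * (1 - Kb) * cScale * 1024));           // 2169
        return 0;
    }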
#define CLIP3(min, v, max) (((v) < (min)) ? (min) : (((max) > (v)) ? (v) : (max)))
-void convertYUV420Planar16ToRGBA1010102(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
- const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height) {
+void convertYUV420Planar16ToRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width,
+ size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects) {
+
+ C2ColorAspectsStruct _aspects = FillMissingColorAspects(aspects, width, height);
+
+ struct Coeffs coeffs = GetCoeffsForAspects(_aspects);
+
+ int32_t _y = coeffs._y;
+ int32_t _b_u = coeffs._b_u;
+ int32_t _neg_g_u = -coeffs._g_u;
+ int32_t _neg_g_v = -coeffs._g_v;
+ int32_t _r_v = coeffs._r_v;
+ int32_t _c16 = coeffs._c16;
+
// Converting two lines at a time, slightly faster
for (size_t y = 0; y < height; y += 2) {
uint32_t *dstTop = (uint32_t *)dst;
@@ -151,25 +262,6 @@
uint16_t *uSrc = (uint16_t *)srcU;
uint16_t *vSrc = (uint16_t *)srcV;
- // BT.2020 Limited Range conversion
-
- // B = 1.168 *(Y - 64) + 2.148 *(U - 512)
- // G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
- // R = 1.168 *(Y - 64) + 1.683 *(V - 512)
-
- // B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
- // G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
- // R = .................... + 1723/1024 *(V - 512)
-
- // min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
- // min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
- // min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
-
- // max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
- // max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
- // max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
-
- int32_t mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
for (size_t x = 0; x < width; x += 2) {
int32_t u, v, y00, y01, y10, y11;
u = *uSrc - 512;
@@ -177,22 +269,22 @@
v = *vSrc - 512;
vSrc += 1;
- y00 = *ySrcTop - 64;
+ y00 = *ySrcTop - _c16;
ySrcTop += 1;
- y01 = *ySrcTop - 64;
+ y01 = *ySrcTop - _c16;
ySrcTop += 1;
- y10 = *ySrcBot - 64;
+ y10 = *ySrcBot - _c16;
ySrcBot += 1;
- y11 = *ySrcBot - 64;
+ y11 = *ySrcBot - _c16;
ySrcBot += 1;
- int32_t u_b = u * mU_B;
- int32_t u_g = u * mU_G;
- int32_t v_g = v * mV_G;
- int32_t v_r = v * mV_R;
+ int32_t u_b = u * _b_u;
+ int32_t u_g = u * _neg_g_u;
+ int32_t v_g = v * _neg_g_v;
+ int32_t v_r = v * _r_v;
int32_t yMult, b, g, r;
- yMult = y00 * mY;
+ yMult = y00 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -201,7 +293,7 @@
r = CLIP3(0, r, 1023);
*dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y01 * mY;
+ yMult = y01 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -210,7 +302,7 @@
r = CLIP3(0, r, 1023);
*dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y10 * mY;
+ yMult = y10 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -219,7 +311,7 @@
r = CLIP3(0, r, 1023);
*dstBot++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y11 * mY;
+ yMult = y11 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -236,19 +328,21 @@
}
}
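Reviewer note: for readers of the packing expression above, the destination word layout is as follows (a small illustrative helper, not part of the patch):

    // HAL_PIXEL_FORMAT_RGBA_1010102, one pixel per little-endian uint32_t:
    // bits 0-9 R, bits 10-19 G, bits 20-29 B, bits 30-31 alpha (3 == opaque)
    static inline uint32_t packRGBA1010102(uint32_t r, uint32_t g, uint32_t b) {
        return (3u << 30) | (b << 20) | (g << 10) | r;
    }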
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height) {
+void convertYUV420Planar16ToY410OrRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects) {
if (isAtLeastT()) {
convertYUV420Planar16ToRGBA1010102(dst, srcY, srcU, srcV, srcYStride, srcUStride,
- srcVStride, dstStride, width, height);
+ srcVStride, dstStride, width, height, aspects);
} else {
convertYUV420Planar16ToY410(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
dstStride, width, height);
}
}
+
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
@@ -885,25 +979,14 @@
// Save supported hal pixel formats for bit depth of 10, the first time this is called
if (!mBitDepth10HalPixelFormats.size()) {
std::vector<int> halPixelFormats;
- if (isAtLeastT()) {
- halPixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
- }
+ halPixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
+
// since allowRGBA1010102 can change in each call, but mBitDepth10HalPixelFormats
// is populated only once, allowRGBA1010102 is not considered at this stage.
halPixelFormats.push_back(HAL_PIXEL_FORMAT_RGBA_1010102);
for (int halPixelFormat : halPixelFormats) {
- std::shared_ptr<C2GraphicBlock> block;
-
- uint32_t gpuConsumerFlags = halPixelFormat == HAL_PIXEL_FORMAT_RGBA_1010102
- ? C2AndroidMemoryUsage::HW_TEXTURE_READ
- : 0;
- C2MemoryUsage usage = {C2MemoryUsage::CPU_READ | gpuConsumerFlags,
- C2MemoryUsage::CPU_WRITE};
- // TODO(b/214411172) Use AHardwareBuffer_isSupported once it supports P010
- c2_status_t status =
- mOutputBlockPool->fetchGraphicBlock(320, 240, halPixelFormat, usage, &block);
- if (status == C2_OK) {
+ if (isHalPixelFormatSupported((AHardwareBuffer_Format)halPixelFormat)) {
mBitDepth10HalPixelFormats.push_back(halPixelFormat);
}
}
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index 3172f29..7600c5b 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -26,28 +26,35 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/Mutexed.h>
+struct C2ColorAspectsStruct;
+
namespace android {
-bool isAtLeastT();
+
void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, uint32_t width, uint32_t height,
bool isMonochrome = false);
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height);
+
+void convertYUV420Planar16ToY410OrRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects = nullptr);
+
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, size_t width, size_t height,
bool isMonochrome = false);
+
void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, size_t width, size_t height,
bool isMonochrome = false);
+
class SimpleC2Component
: public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
public:
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index a22c750..4dec57f 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -21,6 +21,7 @@
#include <C2Debug.h>
#include <C2PlatformSupport.h>
#include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
#include <Codec2Mapper.h>
#include <SimpleC2Interface.h>
#include <log/log.h>
@@ -191,9 +192,14 @@
.build());
std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_YCBCR_420_888};
- if (isAtLeastT()) {
+ if (isHalPixelFormatSupported((AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
}
+ // If the surface color format isn't added to the supported formats, there is
+ // no way to know when the color format is configured to surface. This is
+ // needed to choose a 10-bit format when decoding 10-bit clips in surface mode.
+ pixelFormats.push_back(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+
// TODO: support more formats?
addParameter(
DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
@@ -322,6 +328,9 @@
return C2R::Ok();
}
+ // unsafe getters
+ std::shared_ptr<C2StreamPixelFormatInfo::output> getPixelFormat_l() const { return mPixelFormat; }
+
private:
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -410,6 +419,10 @@
mSignalledError = false;
mSignalledOutputEos = false;
mHalPixelFormat = HAL_PIXEL_FORMAT_YV12;
+ {
+ IntfImpl::Lock lock = mIntf->lock();
+ mPixelFormatInfo = mIntf->getPixelFormat_l();
+ }
mCodecCtx.reset(new libgav1::Decoder());
if (mCodecCtx == nullptr) {
@@ -633,10 +646,10 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
- if (buffer->bitdepth == 10) {
+ std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects;
+ if (buffer->bitdepth == 10 && mPixelFormatInfo->value != HAL_PIXEL_FORMAT_YCBCR_420_888) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects =
- mIntf->getColorAspects_l();
+ codedColorAspects = mIntf->getColorAspects_l();
bool allowRGBA1010102 = false;
if (codedColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
codedColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -714,9 +727,11 @@
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410OrRGBA1010102((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2,
- dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ convertYUV420Planar16ToY410OrRGBA1010102(
+ (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ dstYStride / sizeof(uint32_t), mWidth, mHeight,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(codedColorAspects));
} else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index 4b13fef..3d4db55 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -51,6 +51,10 @@
std::shared_ptr<IntfImpl> mIntf;
std::unique_ptr<libgav1::Decoder> mCodecCtx;
+ // configuration used by the component while processing
+ // (TODO: keep this in intf but make them internal only)
+ std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormatInfo;
+
uint32_t mHalPixelFormat;
uint32_t mWidth;
uint32_t mHeight;
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 947e387..60d5875 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -901,7 +901,8 @@
yStride = width;
uStride = vStride = yStride / 2;
ConvertRGBToPlanarYUV(yPlane, yStride, height,
- conversionBuffer.size(), *input);
+ conversionBuffer.size(), *input,
+ mColorAspects->matrix, mColorAspects->range);
break;
}
case C2PlanarLayout::TYPE_YUV: {
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index c2ccfa0..8087396 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -25,6 +25,7 @@
#include <C2Debug.h>
#include <C2PlatformSupport.h>
#include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
#include <SimpleC2Interface.h>
#include "C2SoftVpxDec.h"
@@ -219,9 +220,13 @@
// TODO: support more formats?
std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_YCBCR_420_888};
#ifdef VP9
- if (isAtLeastT()) {
+ if (isHalPixelFormatSupported((AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
}
+ // If the surface color format isn't added to the supported formats, there is
+ // no way to know when the color format is configured to surface. This is
+ // needed to choose a 10-bit format when decoding 10-bit clips in surface mode.
+ pixelFormats.push_back(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
#endif
addParameter(
DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
@@ -306,6 +311,11 @@
return C2R::Ok();
}
+ // unsafe getters
+ std::shared_ptr<C2StreamPixelFormatInfo::output> getPixelFormat_l() const {
+ return mPixelFormat;
+ }
+
private:
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -433,6 +443,11 @@
mMode = MODE_VP8;
#endif
mHalPixelFormat = HAL_PIXEL_FORMAT_YV12;
+ {
+ IntfImpl::Lock lock = mIntf->lock();
+ mPixelFormatInfo = mIntf->getPixelFormat_l();
+ }
+
mWidth = 320;
mHeight = 240;
mFrameParallelMode = false;
@@ -687,9 +702,11 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
- if (img->fmt == VPX_IMG_FMT_I42016) {
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
+ if (img->fmt == VPX_IMG_FMT_I42016 &&
+ mPixelFormatInfo->value != HAL_PIXEL_FORMAT_YCBCR_420_888) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+ defaultColorAspects = mIntf->getDefaultColorAspects_l();
bool allowRGBA1010102 = false;
if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -759,11 +776,14 @@
queue->entries.push_back(
[dstY, srcY, srcU, srcV,
srcYStride, srcUStride, srcVStride, dstYStride,
- width = mWidth, height = std::min(mHeight - i, kHeight)] {
+ width = mWidth, height = std::min(mHeight - i, kHeight),
+ defaultColorAspects] {
convertYUV420Planar16ToY410OrRGBA1010102(
(uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
- width, height);
+ width, height,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(
+ defaultColorAspects));
});
srcY += srcYStride / 2 * kHeight;
srcU += srcUStride / 2 * (kHeight / 2);
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.h b/media/codec2/components/vpx/C2SoftVpxDec.h
index 5564766..e9d6dc9 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.h
+++ b/media/codec2/components/vpx/C2SoftVpxDec.h
@@ -63,6 +63,10 @@
std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
};
+ // configuration used by the component while processing
+ // (TODO: keep this in intf but make them internal only)
+ std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormatInfo;
+
std::shared_ptr<IntfImpl> mIntf;
vpx_codec_ctx_t *mCodecCtx;
bool mFrameParallelMode; // Frame parallel is only supported by VP9 decoder.
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 617769b..f99ee24 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -733,8 +733,14 @@
switch (layout.type) {
case C2PlanarLayout::TYPE_RGB:
case C2PlanarLayout::TYPE_RGBA: {
+ std::shared_ptr<C2StreamColorAspectsInfo::output> colorAspects;
+ {
+ IntfImpl::Lock lock = mIntf->lock();
+ colorAspects = mIntf->getCodedColorAspects_l();
+ }
ConvertRGBToPlanarYUV(mConversionBuffer.data(), stride, vstride,
- mConversionBuffer.size(), *rView.get());
+ mConversionBuffer.size(), *rView.get(),
+ colorAspects->matrix, colorAspects->range);
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, width, height,
mStrideAlign, mConversionBuffer.data());
break;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index e296c8f..714fadb 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -265,6 +265,9 @@
std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const {
return mLayering;
}
+ std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() const {
+ return mCodedColorAspects;
+ }
uint32_t getSyncFramePeriod() const;
static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me);
static C2R CodedColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::output> &me,
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 31840a2..6ff3dbc 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -79,6 +79,7 @@
struct C2PlatformConfig {
enum encoding_quality_level_t : uint32_t; ///< encoding quality level
+ enum tunnel_peek_mode_t: uint32_t; ///< tunnel peek mode
};
namespace {
@@ -280,6 +281,9 @@
// channel mask for decoded audio
kParamIndexAndroidChannelMask, // uint32
+
+ // allow tunnel peek behavior to be unspecified for app compatibility
+ kParamIndexTunnelPeekMode, // tunnel mode, enum
};
}
@@ -2482,6 +2486,28 @@
C2StreamTunnelStartRender;
constexpr char C2_PARAMKEY_TUNNEL_START_RENDER[] = "output.tunnel-start-render";
+/** Tunnel Peek Mode. */
+C2ENUM(C2PlatformConfig::tunnel_peek_mode_t, uint32_t,
+ UNSPECIFIED_PEEK = 0,
+ SPECIFIED_PEEK = 1
+);
+
+/**
+ * Tunnel Peek Mode Tuning parameter.
+ *
+ * If set to UNSPECIFIED_PEEK, the decoder is free to ignore the
+ * C2StreamTunnelHoldRender and C2StreamTunnelStartRender flags and associated
+ * features. Additionally, it is then up to the decoder whether to display any
+ * frame before receiving synchronization information.
+ *
+ * Note: This parameter allows a decoder to ignore the video peek machinery and
+ * to revert to its preferred behavior.
+ */
+typedef C2StreamParam<C2Tuning, C2EasyEnum<C2PlatformConfig::tunnel_peek_mode_t>,
+ kParamIndexTunnelPeekMode> C2StreamTunnelPeekModeTuning;
+constexpr char C2_PARAMKEY_TUNNEL_PEEK_MODE[] =
+ "output.tunnel-peek-mode";
+
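Reviewer note: a sketch of how a client could push a decoder into legacy peek behavior through this tuning; `intf` (a std::shared_ptr<C2ComponentInterface>) is an assumed handle, and config_vb is the standard Codec2 configuration entry point:

    C2StreamTunnelPeekModeTuning::output peekMode(
            0u /* stream */, C2PlatformConfig::UNSPECIFIED_PEEK);
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    c2_status_t err = intf->config_vb({ &peekMode }, C2_MAY_BLOCK, &failures);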
/**
* Encoding quality level signaling.
*
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index c36ae94..5a652a3 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -43,6 +43,10 @@
"media_ndk_headers",
],
+ static_libs: [
+ "SurfaceFlingerProperties",
+ ],
+
shared_libs: [
"android.hardware.cas.native@1.0",
"android.hardware.drm@1.0",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 529ee36..296d7ed 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1492,8 +1492,12 @@
// with more enc stat kinds
// Future extended encoding statistics for the level 2 should be added here
case VIDEO_ENCODING_STATISTICS_LEVEL_1:
- config->subscribeToConfigUpdate(comp,
- {kParamIndexAverageBlockQuantization, kParamIndexPictureType});
+ config->subscribeToConfigUpdate(
+ comp,
+ {
+ C2AndroidStreamAverageBlockQuantizationInfo::output::PARAM_TYPE,
+ C2StreamPictureTypeInfo::output::PARAM_TYPE,
+ });
break;
case VIDEO_ENCODING_STATISTICS_LEVEL_NONE:
break;
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index f27cc21..dc9f848 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1016,6 +1016,10 @@
array->clear();
Mutexed<Input>::Locked input(mInput);
+ if (!input->buffers) {
+ ALOGE("getInputBufferArray: No Input Buffers allocated");
+ return;
+ }
if (!input->buffers->isArrayMode()) {
input->buffers = input->buffers->toArrayMode(input->numSlots);
}
@@ -1026,7 +1030,10 @@
void CCodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
array->clear();
Mutexed<Output>::Locked output(mOutput);
-
+ if (!output->buffers) {
+ ALOGE("getOutputBufferArray: No Output Buffers allocated");
+ return;
+ }
if (!output->buffers->isArrayMode()) {
output->buffers = output->buffers->toArrayMode(output->numSlots);
}
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 2643290..5208be6 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -16,6 +16,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "CCodecConfig"
+
+#include <initializer_list>
+
#include <cutils/properties.h>
#include <log/log.h>
#include <utils/NativeHandle.h>
@@ -615,10 +618,30 @@
add(ConfigMapper("csd-0", C2_PARAMKEY_INIT_DATA, "value")
.limitTo(D::OUTPUT & D::READ));
- add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
+ deprecated(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
.limitTo(D::VIDEO & D::PARAM & D::INPUT));
- add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+ deprecated(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+ .limitTo(D::VIDEO & D::OUTPUT));
+
+ add(ConfigMapper(
+ std::string(C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO) + ".type",
+ C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO, "type")
+ .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+ add(ConfigMapper(
+ std::string(C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO) + ".data",
+ C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO, "data")
+ .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+ add(ConfigMapper(
+ std::string(C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO) + ".type",
+ C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO, "type")
+ .limitTo(D::VIDEO & D::OUTPUT));
+
+ add(ConfigMapper(
+ std::string(C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO) + ".data",
+ C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO, "data")
.limitTo(D::VIDEO & D::OUTPUT));
add(ConfigMapper(C2_PARAMKEY_TEMPORAL_LAYERING, C2_PARAMKEY_TEMPORAL_LAYERING, "")
@@ -965,6 +988,16 @@
return value == 0 ? C2_FALSE : C2_TRUE;
}));
+ add(ConfigMapper("android._tunnel-peek-set-legacy", C2_PARAMKEY_TUNNEL_PEEK_MODE, "value")
+ .limitTo(D::PARAM & D::VIDEO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value = 0;
+ (void)v.get(&value);
+ return value == 0
+ ? C2Value(C2PlatformConfig::SPECIFIED_PEEK)
+ : C2Value(C2PlatformConfig::UNSPECIFIED_PEEK);
+ }));
+
add(ConfigMapper(KEY_VIDEO_QP_AVERAGE, C2_PARAMKEY_AVERAGE_QP, "value")
.limitTo(D::ENCODER & D::VIDEO & D::READ));
@@ -1098,7 +1131,7 @@
if (domain.value == C2Component::DOMAIN_VIDEO) {
addLocalParam(new C2AndroidStreamAverageBlockQuantizationInfo::output(0u, 0),
C2_PARAMKEY_AVERAGE_QP);
- addLocalParam(new C2StreamPictureTypeMaskInfo::output(0u, 0),
+ addLocalParam(new C2StreamPictureTypeInfo::output(0u, 0),
C2_PARAMKEY_PICTURE_TYPE);
}
}
@@ -1135,6 +1168,17 @@
}
}
+ // Parameters that are not subscribed initially, but can be subscribed
+ // upon explicit request.
+ static const std::initializer_list<C2Param::Index> kOptionalParams = {
+ C2AndroidStreamAverageBlockQuantizationInfo::output::PARAM_TYPE,
+ C2StreamPictureTypeInfo::output::PARAM_TYPE,
+ };
+ for (const C2Param::Index &index : kOptionalParams) {
+ mSubscribedIndices.erase(index);
+ }
+ subscribeToConfigUpdate(configurable, {}, C2_MAY_BLOCK);
+
return OK;
}
@@ -1166,6 +1210,20 @@
ALOGV("Subscribed to %zu params", mSubscribedIndices.size());
mSubscribedIndicesSize = mSubscribedIndices.size();
}
+#if defined(LOG_NDEBUG) && !LOG_NDEBUG
+ ALOGV("subscribed to %zu params:", mSubscribedIndices.size());
+ std::stringstream ss;
+ for (const C2Param::Index &index : mSubscribedIndices) {
+ ss << index << " ";
+ if (ss.str().length() > 70) {
+ ALOGV("%s", ss.str().c_str());
+ std::stringstream().swap(ss);
+ }
+ }
+ if (!ss.str().empty()) {
+ ALOGV("%s", ss.str().c_str());
+ }
+#endif
return OK;
}
@@ -1190,6 +1248,12 @@
bool changed = false;
for (std::unique_ptr<C2Param> &p : configUpdate) {
if (p && *p) {
+ // Allow unsubscribed vendor parameters to go through, as they may be
+ // handled later by the format shaper.
+ if (!p->isVendor() && mSubscribedIndices.count(p->index()) == 0) {
+ ALOGV("updateConfiguration: skipped unsubscribed param %08x", p->index());
+ continue;
+ }
auto insertion = mCurrentConfig.emplace(p->index(), nullptr);
if (insertion.second || *insertion.first->second != *p) {
if (mSupportedIndices.count(p->index()) || mLocalParams.count(p->index())) {
@@ -1560,6 +1624,22 @@
msg->removeEntryAt(msg->findEntryByName("cta861.max-cll"));
msg->removeEntryAt(msg->findEntryByName("cta861.max-fall"));
}
+
+ // HDR dynamic info
+ std::string keyPrefix = input ? C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO
+ : C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO;
+ std::string typeKey = keyPrefix + ".type";
+ std::string dataKey = keyPrefix + ".data";
+ int32_t type;
+ sp<ABuffer> data;
+ if (msg->findInt32(typeKey.c_str(), &type)
+ && msg->findBuffer(dataKey.c_str(), &data)) {
+ if (type == HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40) {
+ msg->setBuffer(KEY_HDR10_PLUS_INFO, data);
+ msg->removeEntryAt(msg->findEntryByName(typeKey.c_str()));
+ msg->removeEntryAt(msg->findEntryByName(dataKey.c_str()));
+ }
+ }
}
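Reviewer note: the net effect of the block above, when the component emits SMPTE 2094-40 dynamic metadata, is:

    // C2 side:  (C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO ".type") == HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40
    //           (C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO ".data") -> metadata payload
    // SDK side: KEY_HDR10_PLUS_INFO -> same payload; the ".type"/".data" entries are removed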
ALOGV("converted to SDK values as %s", msg->debugString().c_str());
@@ -1761,6 +1841,16 @@
params->setFloat("cta861.max-fall", meta->sType1.mMaxFrameAverageLightLevel);
}
}
+
+ sp<ABuffer> hdrDynamicInfo;
+ if (params->findBuffer(KEY_HDR10_PLUS_INFO, &hdrDynamicInfo)) {
+ for (const std::string &prefix : { C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO,
+ C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO }) {
+ params->setInt32((prefix + ".type").c_str(),
+ HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+ params->setBuffer((prefix + ".data").c_str(), hdrDynamicInfo);
+ }
+ }
}
// this is to verify that we set proper signedness for standard parameters
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 6084ee3..cde4c72 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -268,6 +268,39 @@
mInitCheck = BAD_VALUE;
return;
}
+ std::optional<int> clientBitDepth = {};
+ switch (mClientColorFormat) {
+ case COLOR_FormatYUVP010:
+ clientBitDepth = 10;
+ break;
+ case COLOR_FormatYUV411PackedPlanar:
+ case COLOR_FormatYUV411Planar:
+ case COLOR_FormatYUV420Flexible:
+ case COLOR_FormatYUV420PackedPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV422Flexible:
+ case COLOR_FormatYUV422PackedPlanar:
+ case COLOR_FormatYUV422PackedSemiPlanar:
+ case COLOR_FormatYUV422Planar:
+ case COLOR_FormatYUV422SemiPlanar:
+ case COLOR_FormatYUV444Flexible:
+ case COLOR_FormatYUV444Interleaved:
+ clientBitDepth = 8;
+ break;
+ default:
+ // no-op; used with optional
+ break;
+
+ }
+ // conversion fails if the client bit depth and the component bit depth differ
+ if ((clientBitDepth) && (bitDepth != clientBitDepth.value())) {
+ ALOGD("Bit depth of client: %d and component: %d differs",
+ *clientBitDepth, bitDepth);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
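Reviewer note: concretely, this rejects mismatched conversions up front (illustrative case):

    // client requests COLOR_FormatYUVP010 (10-bit) while the component's graphic
    // view carries 8-bit planes -> mInitCheck = BAD_VALUE instead of a silent,
    // lossy conversion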
C2PlaneInfo yPlane = layout.planes[C2PlanarLayout::PLANE_Y];
C2PlaneInfo uPlane = layout.planes[C2PlanarLayout::PLANE_U];
C2PlaneInfo vPlane = layout.planes[C2PlanarLayout::PLANE_V];
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 58e1c7f..1c362ae 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -33,6 +33,7 @@
#include <OMX_Video.h>
#include <OMX_VideoExt.h>
#include <OMX_AsString.h>
+#include <SurfaceFlingerProperties.sysprop.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
@@ -159,6 +160,12 @@
// TODO: directly check this from the component interface
supports10Bit = (supportsHdr || supportsHdr10Plus);
+ // If the device doesn't support HDR display, then no codec on the device
+ // can advertise support for HDR profiles.
+ // Default to true to maintain backward compatibility.
+ auto ret = sysprop::SurfaceFlingerProperties::has_HDR_display();
+ bool hasHDRDisplay = ret.has_value() ? *ret : true;
+
bool added = false;
for (C2Value::Primitive profile : profileQuery[0].values.values) {
@@ -184,8 +191,8 @@
if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
&& mapper->mapLevel(pl.level, &sdkLevel)) {
caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
- // also list HDR profiles if component supports HDR
- if (supportsHdr) {
+ // also list HDR profiles if component supports HDR and device has HDR display
+ if (supportsHdr && hasHDRDisplay) {
auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(trait.mediaType);
if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
@@ -302,8 +309,13 @@
if (trait.name.find("android") != std::string::npos) {
addDefaultColorFormat(COLOR_FormatSurface);
}
- for (int32_t colorFormat : supportedColorFormats) {
- caps->addColorFormat(colorFormat);
+
+ static const int kVendorSdkVersion = ::android::base::GetIntProperty(
+ "ro.vendor.build.version.sdk", android_get_device_api_level());
+ if (kVendorSdkVersion >= __ANDROID_API_T__) {
+ for (int32_t colorFormat : supportedColorFormats) {
+ caps->addColorFormat(colorFormat);
+ }
}
}
}
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index fe63651..54a6fb1 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -28,6 +28,7 @@
srcs: [
"Codec2BufferUtils.cpp",
+ "Codec2CommonUtils.cpp",
"Codec2Mapper.cpp",
],
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 7fc4c27..807841e 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -121,7 +121,10 @@
} // namespace
status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
- if (view.crop().width != img->mWidth || view.crop().height != img->mHeight) {
+ if (img == nullptr
+ || imgBase == nullptr
+ || view.crop().width != img->mWidth
+ || view.crop().height != img->mHeight) {
return BAD_VALUE;
}
const uint8_t* src_y = view.data()[0];
@@ -203,7 +206,10 @@
}
status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img) {
- if (view.crop().width != img->mWidth || view.crop().height != img->mHeight) {
+ if (img == nullptr
+ || imgBase == nullptr
+ || view.crop().width != img->mWidth
+ || view.crop().height != img->mHeight) {
return BAD_VALUE;
}
const uint8_t* src_y = imgBase + img->mPlane[0].mOffset;
@@ -532,12 +538,14 @@
// Matrix coefficient to convert RGB to Planar YUV data.
// Each sub-array represents the 3X3 coeff used with R, G and B
static const int16_t bt601Matrix[2][3][3] = {
- { { 76, 150, 29 }, { -43, -85, 128 }, { 128, -107, -21 } }, /* RANGE_FULL */
+ { { 77, 150, 29 }, { -43, -85, 128 }, { 128, -107, -21 } }, /* RANGE_FULL */
{ { 66, 129, 25 }, { -38, -74, 112 }, { 112, -94, -18 } }, /* RANGE_LIMITED */
};
static const int16_t bt709Matrix[2][3][3] = {
- { { 54, 183, 18 }, { -29, -99, 128 }, { 128, -116, -12 } }, /* RANGE_FULL */
+ // TRICKY: 18 is adjusted to 19 so that sum of row 1 is 256
+ { { 54, 183, 19 }, { -29, -99, 128 }, { 128, -116, -12 } }, /* RANGE_FULL */
+ // TRICKY: -87 is adjusted to -86 so that sum of row 2 is 0
{ { 47, 157, 16 }, { -26, -86, 112 }, { 112, -102, -10 } }, /* RANGE_LIMITED */
};
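Reviewer note: the "TRICKY" nudges keep the >>8 fixed-point math exact at the extremes; a quick check using the full-range BT.601 rows from the table above:

    #include <cassert>

    int main() {
        const int y[3] = { 77, 150, 29 };   // Y row: must sum to 256
        assert(y[0] + y[1] + y[2] == 256);
        // so white (255,255,255) maps exactly to Y = 255
        assert((((y[0] + y[1] + y[2]) * 255) >> 8) == 255);
        const int u[3] = { -43, -85, 128 }; // chroma rows must sum to 0
        assert(u[0] + u[1] + u[2] == 0);    // so gray lands on the 128 midpoint
        return 0;
    }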
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
new file mode 100644
index 0000000..ef5800d
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2BufferUtils"
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Log.h>
+
+#include <android/hardware_buffer.h>
+#include <android-base/properties.h>
+#include <cutils/properties.h>
+#include <media/hardware/HardwareAPI.h>
+#include <system/graphics.h>
+
+#include <C2Debug.h>
+
+#include "Codec2CommonUtils.h"
+
+namespace android {
+
+bool isAtLeastT() {
+ char deviceCodeName[PROP_VALUE_MAX];
+ __system_property_get("ro.build.version.codename", deviceCodeName);
+ return android_get_device_api_level() >= __ANDROID_API_T__ ||
+ !strcmp(deviceCodeName, "Tiramisu");
+}
+
+bool isVendorApiOrFirstApiAtLeastT() {
+ // The first SDK the device shipped with.
+ static const int32_t kProductFirstApiLevel =
+ base::GetIntProperty<int32_t>("ro.product.first_api_level", 0);
+
+ // GRF devices (introduced in Android 11) list the first and possibly the current api levels
+ // to signal which VSR requirements they conform to even if the first device SDK was higher.
+ static const int32_t kBoardFirstApiLevel =
+ base::GetIntProperty<int32_t>("ro.board.first_api_level", 0);
+ static const int32_t kBoardApiLevel =
+ base::GetIntProperty<int32_t>("ro.board.api_level", 0);
+
+ // For non-GRF devices, use the first SDK version shipped by the product.
+ static const int32_t kFirstApiLevel =
+ kBoardApiLevel != 0 ? kBoardApiLevel :
+ kBoardFirstApiLevel != 0 ? kBoardFirstApiLevel :
+ kProductFirstApiLevel;
+
+ return kFirstApiLevel >= __ANDROID_API_T__;
+}
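Reviewer note: precedence among the three properties, with assumed example values (not from the patch):

    // ro.board.api_level=33 (others anything)                   -> 33: treated as T
    // board api_level unset, ro.board.first_api_level=31        -> 31: GRF vendor, not T
    // neither board property set, ro.product.first_api_level=33 -> 33: launched on T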
+
+bool isHalPixelFormatSupported(AHardwareBuffer_Format format) {
+ // HAL_PIXEL_FORMAT_YCBCR_P010 requirement was added in T VSR, although it could have been
+ // supported prior to this.
+ //
+ // Unfortunately, we cannot detect if P010 is properly supported using AHardwareBuffer
+ // API alone. For now, limit P010 to devices that launched with Android T or are known
+ // to conform to the Android T VSR (as opposed to simply limiting to a T vendor image).
+ if (format == (AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010 &&
+ !isVendorApiOrFirstApiAtLeastT()) {
+ return false;
+ }
+
+ const AHardwareBuffer_Desc desc = {
+ .width = 320,
+ .height = 240,
+ .format = format,
+ .layers = 1,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+ AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY,
+ .stride = 0,
+ .rfu0 = 0,
+ .rfu1 = 0,
+ };
+
+ return AHardwareBuffer_isSupported(&desc);
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.h b/media/codec2/sfplugin/utils/Codec2CommonUtils.h
new file mode 100644
index 0000000..98dd65b
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_COMMON_UTILS_H_
+#define CODEC2_COMMON_UTILS_H_
+
+#include <android/hardware_buffer.h>
+
+namespace android {
+
+bool isAtLeastT();
+
+bool isVendorApiOrFirstApiAtLeastT();
+
+/**
+ * Check if a given pixel format is supported.
+ * enums listed in android_pixel_format_t, android_pixel_format_v1_1_t
+ * and so on can be passed as these enums have an equivalent definition in
+ * AHardwareBuffer_Format as well.
+ */
+bool isHalPixelFormatSupported(AHardwareBuffer_Format format);
+
+} // namespace android
+
+#endif // CODEC2_COMMON_UTILS_H_
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 7917395..52c4c0f 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -77,6 +77,7 @@
namespace {
constexpr char TUNNEL_PEEK_KEY[] = "android._trigger-tunnel-peek";
+constexpr char TUNNEL_PEEK_SET_LEGACY_KEY[] = "android._tunnel-peek-set-legacy";
}
@@ -2483,17 +2484,39 @@
return BAD_VALUE;
}
- OMX_CONFIG_BOOLEANTYPE config;
- InitOMXParams(&config);
- config.bEnabled = (OMX_BOOL)(tunnelPeek != 0);
+ OMX_CONFIG_BOOLEANTYPE tunnelPeekConfig;
+ InitOMXParams(&tunnelPeekConfig);
+ tunnelPeekConfig.bEnabled = (OMX_BOOL)(tunnelPeek != 0);
status_t err = mOMXNode->setConfig(
(OMX_INDEXTYPE)OMX_IndexConfigAndroidTunnelPeek,
- &config, sizeof(config));
+ &tunnelPeekConfig, sizeof(tunnelPeekConfig));
if (err != OK) {
ALOGE("decoder cannot set %s to %d (err %d)",
- TUNNEL_PEEK_KEY, tunnelPeek, err);
+ TUNNEL_PEEK_KEY, tunnelPeek, err);
+ }
+ return err;
+}
+
+status_t ACodec::setTunnelPeekLegacy(int32_t isLegacy) {
+ if (mIsEncoder) {
+ ALOGE("encoder does not support %s", TUNNEL_PEEK_SET_LEGACY_KEY);
+ return BAD_VALUE;
+ }
+ if (!mTunneled) {
+ ALOGE("%s is only supported in tunnel mode", TUNNEL_PEEK_SET_LEGACY_KEY);
+ return BAD_VALUE;
}
+ OMX_CONFIG_BOOLEANTYPE tunnelPeekLegacyModeConfig;
+ InitOMXParams(&tunnelPeekLegacyModeConfig);
+ tunnelPeekLegacyModeConfig.bEnabled = (OMX_BOOL)(isLegacy != 0);
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidTunnelPeekLegacyMode,
+ &tunnelPeekLegacyModeConfig, sizeof(tunnelPeekLegacyModeConfig));
+ if (err != OK) {
+ ALOGE("decoder cannot set video peek legacy mode to %d (err %d)",
+ isLegacy, err);
+ }
return err;
}
@@ -7934,11 +7957,22 @@
}
}
- int32_t tunnelPeek = 0;
- if (params->findInt32(TUNNEL_PEEK_KEY, &tunnelPeek)) {
- status_t err = setTunnelPeek(tunnelPeek);
- if (err != OK) {
- return err;
+ {
+ int32_t tunnelPeek = 0;
+ if (params->findInt32(TUNNEL_PEEK_KEY, &tunnelPeek)) {
+ status_t err = setTunnelPeek(tunnelPeek);
+ if (err != OK) {
+ return err;
+ }
+ }
+ }
+ {
+ int32_t tunnelPeekSetLegacy = 0;
+ if (params->findInt32(TUNNEL_PEEK_SET_LEGACY_KEY, &tunnelPeekSetLegacy)) {
+ status_t err = setTunnelPeekLegacy(tunnelPeekSetLegacy);
+ if (err != OK) {
+ return err;
+ }
}
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 37489ea..e50880a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -817,7 +817,7 @@
mTunneledInputWidth(0),
mTunneledInputHeight(0),
mTunneled(false),
- mTunnelPeekState(TunnelPeekState::kEnabledNoBuffer),
+ mTunnelPeekState(TunnelPeekState::kLegacyMode),
mHaveInputSurface(false),
mHavePendingInputBuffers(false),
mCpuBoostRequested(false),
@@ -1087,6 +1087,8 @@
constexpr const char *MediaCodec::asString(TunnelPeekState state, const char *default_string){
switch(state) {
+ case TunnelPeekState::kLegacyMode:
+ return "LegacyMode";
case TunnelPeekState::kEnabledNoBuffer:
return "EnabledNoBuffer";
case TunnelPeekState::kDisabledNoBuffer:
@@ -1113,6 +1115,9 @@
TunnelPeekState previousState = mTunnelPeekState;
if(tunnelPeek == 0){
switch (mTunnelPeekState) {
+ case TunnelPeekState::kLegacyMode:
+ msg->setInt32("android._tunnel-peek-set-legacy", 0);
+ [[fallthrough]];
case TunnelPeekState::kEnabledNoBuffer:
mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
break;
@@ -1125,6 +1130,9 @@
}
} else {
switch (mTunnelPeekState) {
+ case TunnelPeekState::kLegacyMode:
+ msg->setInt32("android._tunnel-peek-set-legacy", 0);
+ [[fallthrough]];
case TunnelPeekState::kDisabledNoBuffer:
mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
break;
@@ -2614,7 +2622,9 @@
msg->setObject("c2buffer", obj);
msg->setInt64("timeUs", presentationTimeUs);
msg->setInt32("flags", flags);
- msg->setMessage("tunings", tunings);
+ if (tunings && tunings->countEntries() > 0) {
+ msg->setMessage("tunings", tunings);
+ }
msg->setPointer("errorDetailMsg", errorDetailMsg);
sp<AMessage> response;
@@ -2656,7 +2666,9 @@
msg->setInt32("skipBlocks", pattern.mSkipBlocks);
msg->setInt64("timeUs", presentationTimeUs);
msg->setInt32("flags", flags);
- msg->setMessage("tunings", tunings);
+ if (tunings && tunings->countEntries() > 0) {
+ msg->setMessage("tunings", tunings);
+ }
msg->setPointer("errorDetailMsg", errorDetailMsg);
sp<AMessage> response;
@@ -3539,10 +3551,12 @@
break;
}
TunnelPeekState previousState = mTunnelPeekState;
- mTunnelPeekState = TunnelPeekState::kBufferRendered;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(previousState),
- asString(TunnelPeekState::kBufferRendered));
+ if (mTunnelPeekState != TunnelPeekState::kLegacyMode) {
+ mTunnelPeekState = TunnelPeekState::kBufferRendered;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(TunnelPeekState::kBufferRendered));
+ }
updatePlaybackDuration(msg);
// check that we have a notification set
if (mOnFrameRenderedNotification != NULL) {
@@ -3959,6 +3973,14 @@
mTunneled = false;
}
+ // If mTunnelPeekState is still in kLegacyMode at this point,
+ // configure the codec in legacy mode
+ if (mTunneled && (mTunnelPeekState == TunnelPeekState::kLegacyMode)) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("android._tunnel-peek-set-legacy", 1);
+ onSetParameters(params);
+ }
+
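Reviewer note: the resulting tunnel-peek state machine, as implemented in setTunnelPeek above and in this block:

    // configure(tunneled)          -> kLegacyMode; "android._tunnel-peek-set-legacy" = 1
    // setParameters(tunnel-peek=0) -> kDisabledNoBuffer; set-legacy = 0 sent first
    // setParameters(tunnel-peek=1) -> kEnabledNoBuffer;  set-legacy = 0 sent first
    // onFrameRendered while in kLegacyMode no longer advances to kBufferRendered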
int32_t background = 0;
if (format->findInt32("android._background-mode", &background) && background) {
androidSetThreadPriority(gettid(), ANDROID_PRIORITY_BACKGROUND);
@@ -4077,10 +4099,12 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
TunnelPeekState previousState = mTunnelPeekState;
- mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(previousState),
- asString(TunnelPeekState::kEnabledNoBuffer));
+ if (previousState != TunnelPeekState::kLegacyMode) {
+ mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(TunnelPeekState::kEnabledNoBuffer));
+ }
mReplyID = replyID;
setState(STARTING);
@@ -4521,10 +4545,12 @@
mCodec->signalFlush();
returnBuffersToCodec();
TunnelPeekState previousState = mTunnelPeekState;
- mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(previousState),
- asString(TunnelPeekState::kEnabledNoBuffer));
+ if (previousState != TunnelPeekState::kLegacyMode) {
+ mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(TunnelPeekState::kEnabledNoBuffer));
+ }
break;
}
@@ -4842,12 +4868,10 @@
sp<WrapperObject<std::shared_ptr<C2Buffer>>> obj{
new WrapperObject<std::shared_ptr<C2Buffer>>{c2Buffer}};
msg->setObject("c2buffer", obj);
- msg->setMessage("tunings", new AMessage);
} else if (memory) {
sp<WrapperObject<sp<hardware::HidlMemory>>> obj{
new WrapperObject<sp<hardware::HidlMemory>>{memory}};
msg->setObject("memory", obj);
- msg->setMessage("tunings", new AMessage);
}
return onQueueInputBuffer(msg);
@@ -5027,9 +5051,10 @@
sp<MediaCodecBuffer> buffer = info->mData;
if (c2Buffer || memory) {
- sp<AMessage> tunings;
- CHECK(msg->findMessage("tunings", &tunings));
- onSetParameters(tunings);
+ sp<AMessage> tunings = NULL;
+ if (msg->findMessage("tunings", &tunings) && tunings != NULL) {
+ onSetParameters(tunings);
+ }
status_t err = OK;
if (c2Buffer) {
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4b6470a..a443ed9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -126,14 +126,10 @@
}
}
-static bool isHdr(const sp<AMessage> &format) {
- // if CSD specifies HDR transfer(s), we assume HDR. Otherwise, if it specifies non-HDR
- // transfers, we must assume non-HDR. This is because CSD trumps any color-transfer key
- // in the format.
- int32_t isHdr;
- if (format->findInt32("android._is-hdr", &isHdr)) {
- return isHdr;
- }
+/**
+ * Returns true if, and only if, the given format corresponds to HDR10 or HDR10+.
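+ * (An HLG transfer alone no longer qualifies; only an ST 2084 transfer, or
+ * user/container-supplied HDR10/HDR10+ static info, does.)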
+ */
+static bool isHdr10or10Plus(const sp<AMessage> &format) {
// if user/container supplied HDR static info without transfer set, assume true
if ((format->contains("hdr-static-info") || format->contains("hdr10-plus-info"))
@@ -143,8 +139,7 @@
// otherwise, verify that an HDR transfer function is set
int32_t transfer;
if (format->findInt32("color-transfer", &transfer)) {
- return transfer == ColorUtils::kColorTransferST2084
- || transfer == ColorUtils::kColorTransferHLG;
+ return transfer == ColorUtils::kColorTransferST2084;
}
return false;
}
@@ -419,8 +414,12 @@
}
// bump to HDR profile
- if (isHdr(format) && codecProfile == HEVCProfileMain10) {
- codecProfile = HEVCProfileMain10HDR10;
+ if (isHdr10or10Plus(format) && codecProfile == HEVCProfileMain10) {
+ if (format->contains("hdr10-plus-info")) {
+ codecProfile = HEVCProfileMain10HDR10Plus;
+ } else {
+ codecProfile = HEVCProfileMain10HDR10;
+ }
}
format->setInt32("profile", codecProfile);
@@ -615,16 +614,25 @@
{ 3, VP9Profile3 },
};
- const static ALookup<int32_t, int32_t> toHdr {
+ const static ALookup<int32_t, int32_t> toHdr10 {
{ VP9Profile2, VP9Profile2HDR },
{ VP9Profile3, VP9Profile3HDR },
};
+ const static ALookup<int32_t, int32_t> toHdr10Plus {
+ { VP9Profile2, VP9Profile2HDR10Plus },
+ { VP9Profile3, VP9Profile3HDR10Plus },
+ };
+
int32_t profile;
if (profiles.map(data[0], &profile)) {
// convert to HDR profile
- if (isHdr(format)) {
- toHdr.lookup(profile, &profile);
+ if (isHdr10or10Plus(format)) {
+ if (format->contains("hdr10-plus-info")) {
+ toHdr10Plus.lookup(profile, &profile);
+ } else {
+ toHdr10.lookup(profile, &profile);
+ }
}
format->setInt32("profile", profile);
@@ -684,7 +692,7 @@
int32_t profile;
if (profiles.map(std::make_pair(highBitDepth, profileData), &profile)) {
// bump to HDR profile
- if (isHdr(format) && profile == AV1ProfileMain10) {
+ if (isHdr10or10Plus(format) && profile == AV1ProfileMain10) {
if (format->contains("hdr10-plus-info")) {
profile = AV1ProfileMain10HDR10Plus;
} else {
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 6004cf8..4b4f65f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -56,20 +56,161 @@
|| colorFormat == COLOR_Format32bitABGR2101010;
}
-bool ColorConverter::ColorSpace::isBt709() {
- return (mStandard == ColorUtils::kColorStandardBT709);
-}
-
-bool ColorConverter::ColorSpace::isBt2020() {
+bool ColorConverter::ColorSpace::isBt2020() const {
return (mStandard == ColorUtils::kColorStandardBT2020);
}
-bool ColorConverter::ColorSpace::isJpeg() {
+bool ColorConverter::ColorSpace::isH420() const {
+ return (mStandard == ColorUtils::kColorStandardBT709)
+ && (mRange == ColorUtils::kColorRangeLimited);
+}
+
+// the matrix coefficients are the same for both 601.625 and 601.525 standards
+bool ColorConverter::ColorSpace::isI420() const {
+ return ((mStandard == ColorUtils::kColorStandardBT601_625)
+ || (mStandard == ColorUtils::kColorStandardBT601_525))
+ && (mRange == ColorUtils::kColorRangeLimited);
+}
+
+bool ColorConverter::ColorSpace::isJ420() const {
return ((mStandard == ColorUtils::kColorStandardBT601_625)
|| (mStandard == ColorUtils::kColorStandardBT601_525))
&& (mRange == ColorUtils::kColorRangeFull);
}
+/**
+ * This class approximates the standard YUV to RGB conversions by expressing the matrix
+ * coefficients in units of 1/256 (dividing by 256 is a simple right shift). The chosen value
+ * of 256 is somewhat arbitrary and not dependent on the bit depth, but it does limit the
+ * precision of the matrix coefficients (KR & KB).
+ *
+ * The maximum color error after clipping from using 256 is a distance of:
+ * 0.4 (8-bit) / 1.4 (10-bit) for greens in BT.601
+ * 0.5 (8-bit) / 1.9 (10-bit) for cyans in BT.709, and
+ * 0.3 (8-bit) / 1.3 (10-bit) for violets in BT.2020 (it is 0.4 for 10-bit BT.2020 limited)
+ *
+ * Note for reference: libyuv is using a divisor of 64 instead of 256 to ensure no overflow in
+ * 16-bit math. The maximum color error for libyuv is 3.5 / 14.
+ *
+ * The clamping is done using a lookup vector where negative indices are mapped to 0
+ * and indices > 255 are mapped to 255. (For 10-bit these are clamped to 0 to 1023)
+ *
+ * The matrices are assumed to be of the following format (note the sign on the 2nd row):
+ *
+ *   [ R ]     [ _y    0    _r_v ]   [ Y - C16  ]
+ *   [ G ]  =  [ _y  -_g_u -_g_v ] * [ U - C128 ]
+ *   [ B ]     [ _y   _b_u   0   ]   [ V - C128 ]
+ *
+ * C16 is 1 << (bitdepth - 4) for limited range, and 0 for full range
+ * C128 is 1 << (bitdepth - 1)
+ * C255 is (1 << bitdepth) - 1
+ *
+ * The min and max values from these equations determine the clip range needed for clamping:
+ *
+ * min = - (_y * C16 + max((_g_u + _g_v) * (C255-C128), max(_r_v, _b_u) * C128)) / 256
+ * max = (_y * (C255 - C16) + max((_g_u + _g_v) * C128, max(_r_v, _b_u) * (C255-C128)) + 128) / 256
+ */
+
+struct ColorConverter::Coeffs {
+ int32_t _y;
+ int32_t _r_v;
+ int32_t _g_u;
+ int32_t _g_v;
+ int32_t _b_u;
+};
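+// Quick sanity check of the matrix form documented above, using the
+// BT601_LIMITED row defined below (_y = 298, limited range so C16 = 16): for a
+// gray pixel (U = V = C128), full-scale white Y = 235 gives
+// (298 * (235 - 16) + 128) / 256 = 255 on all three channels, and Y = 16 gives
+// 0, the expected limited-to-full range expansion.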
+
+/*
+
+Color conversion rules are dictated by ISO (e.g. ISO/IEC 23008-2)
+
+Limited range means Y is in [16, 235], U and V are in [16, 240] corresponding to [-0.5 to 0.5].
+
+Full range means Y is in [0, 255], U and V are in [0.5, 255.5] corresponding to [-0.5 to 0.5].
+
+RGB is always in full range ([0, 255])
+
+The color standard determines the KR and KB values:
+
+
+For full range (assuming 8-bits) ISO defines:
+
+(   Y   )   (  KR      1-KR-KB        KB     )
+(       )   (                                )   (R)
+(       )   ( -KR/2   -(1-KR-KB)/2           )   ( )
+(U - 128) = ( ------   ------------    0.5   ) * (G)
+(       )   ( (1-KB)      (1-KB)             )   ( )
+(       )   (                                )   (B)
+(       )   (          -(1-KR-KB)/2   -KB/2  )
+(V - 128)   (  0.5      ------------  ------ )
+            (              (1-KR)     (1-KR) )
+
+(the math is rounded, 128 is (1 << (bitdepth - 1)) )
+
+From this
+
+(R)   ( 1    0              2*(1-KR)    )   (   Y   )
+( )   (                                 )   (       )
+( )   (      2*KB*(KB-1)    2*KR*(KR-1) )   (       )
+(G) = ( 1    -----------    ----------- ) * (U - 128)
+( )   (        1-KR-KB        1-KR-KB   )   (       )
+( )   (                                 )   (       )
+(B)   ( 1    2*(1-KB)       0           )   (V - 128)
+
+For limited range, this becomes
+
+(R)   ( 1    0              2*(1-KR)    )   (255/219    0          0      )   (Y - 16 )
+( )   (                                 )   (                             )   (       )
+( )   (      2*KB*(KB-1)    2*KR*(KR-1) )   (                             )   (       )
+(G) = ( 1    -----------    ----------- ) * (0          255/224    0      ) * (U - 128)
+( )   (        1-KR-KB        1-KR-KB   )   (                             )   (       )
+( )   (                                 )   (                             )   (       )
+(B)   ( 1    2*(1-KB)       0           )   (0          0          255/224)   (V - 128)
+
+( For non-8-bit, 16 is (1 << (bitdepth - 4)), 128 is (1 << (bitdepth - 1)),
+ 255 is ((1 << bitdepth) - 1), 219 is (219 << (bitdepth - 8)) and
+ 224 is (224 << (bitdepth - 8)), so the matrix coefficients slightly change. )
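+
+( Numerically, for 8 bits: 255/219 ≈ 1.164 and 255/224 ≈ 1.138; scaling the
+ matrix entries above by these factors and by 256 gives the integer
+ coefficients defined below, e.g. 256 * 255/219 ≈ 298 for the limited-range _y. )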
+
+*/
+
+namespace {
+
+/**
+ * BT.601: K_R = 0.299; K_B = 0.114
+ *
+ * clip range 8-bit: [-277, 535], 10-bit: [-1111, 2155]
+ */
+const struct ColorConverter::Coeffs BT601_FULL = { 256, 359, 88, 183, 454 };
+const struct ColorConverter::Coeffs BT601_LIMITED = { 298, 409, 100, 208, 516 };
+const struct ColorConverter::Coeffs BT601_LTD_10BIT = { 299, 410, 101, 209, 518 };
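+// For example, the BT601_LIMITED row follows from the matrices above
+// (KR = 0.299, KB = 0.114; magnitudes shown, the G-row signs are applied in
+// the conversion code):
+//   _y   = 256 * 255/219                            ≈ 298
+//   _r_v = 256 * 255/224 * 2*(1-KR)                 ≈ 409
+//   _g_u = 256 * 255/224 * 2*KB*(1-KB)/(1-KR-KB)    ≈ 100
+//   _g_v = 256 * 255/224 * 2*KR*(1-KR)/(1-KR-KB)    ≈ 208
+//   _b_u = 256 * 255/224 * 2*(1-KB)                 ≈ 516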
+
+/**
+ * BT.709: K_R = 0.2126; K_B = 0.0722
+ *
+ * clip range 8-bit: [-289, 547], 10-bit: [-1159, 2202]
+ */
+const struct ColorConverter::Coeffs BT709_FULL = { 256, 403, 48, 120, 475 };
+const struct ColorConverter::Coeffs BT709_LIMITED = { 298, 459, 55, 136, 541 };
+const struct ColorConverter::Coeffs BT709_LTD_10BIT = { 299, 460, 55, 137, 542 };
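+// (For 10-bit limited range the luma scale is 256 * 1023/876 ≈ 299 rather
+// than 256 * 255/219 ≈ 298 for 8-bit, hence the slightly larger *_LTD_10BIT
+// values.)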
+
+/**
+ * BT.2020: K_R = 0.2627; K_B = 0.0593
+ *
+ * clip range 8-bit: [-294, 552], 10-bit: [-1175, 2218]
+ *
+ * This is the largest clip range.
+ */
+const struct ColorConverter::Coeffs BT2020_FULL = { 256, 377, 42, 146, 482 };
+const struct ColorConverter::Coeffs BT2020_LIMITED = { 298, 430, 48, 167, 548 };
+const struct ColorConverter::Coeffs BT2020_LTD_10BIT = { 299, 431, 48, 167, 550 };
+
+constexpr int CLIP_RANGE_MIN_8BIT = -294;
+constexpr int CLIP_RANGE_MAX_8BIT = 552;
+
+constexpr int CLIP_RANGE_MIN_10BIT = -1175;
+constexpr int CLIP_RANGE_MAX_10BIT = 2218;
+
+}
+
ColorConverter::ColorConverter(
OMX_COLOR_FORMATTYPE from, OMX_COLOR_FORMATTYPE to)
: mSrcFormat(from),
@@ -106,7 +247,8 @@
case OMX_COLOR_FormatYUV420SemiPlanar:
#ifdef USE_LIBYUV
return mDstFormat == OMX_COLOR_Format16bitRGB565
- || mDstFormat == OMX_COLOR_Format32BitRGBA8888;
+ || mDstFormat == OMX_COLOR_Format32BitRGBA8888
+ || mDstFormat == OMX_COLOR_Format32bitBGRA8888;
#else
return mDstFormat == OMX_COLOR_Format16bitRGB565;
#endif
@@ -290,10 +432,53 @@
return err;
}
+const struct ColorConverter::Coeffs *ColorConverter::getMatrix() const {
+ const bool isFullRange = mSrcColorSpace.mRange == ColorUtils::kColorRangeFull;
+ const bool is10Bit = (mSrcFormat == COLOR_FormatYUVP010
+ || mSrcFormat == OMX_COLOR_FormatYUV420Planar16);
+
+ switch (mSrcColorSpace.mStandard) {
+ case ColorUtils::kColorStandardBT601_525:
+ case ColorUtils::kColorStandardBT601_625:
+ return (isFullRange ? &BT601_FULL :
+ is10Bit ? &BT601_LTD_10BIT : &BT601_LIMITED);
+
+ case ColorUtils::kColorStandardBT709:
+ return (isFullRange ? &BT709_FULL :
+ is10Bit ? &BT709_LTD_10BIT : &BT709_LIMITED);
+
+ case ColorUtils::kColorStandardBT2020:
+ return (isFullRange ? &BT2020_FULL :
+ is10Bit ? &BT2020_LTD_10BIT : &BT2020_LIMITED);
+
+ default:
+ // for now use the default matrices for unhandled color spaces
+ // TODO: fail?
+ // return nullptr;
+ [[fallthrough]];
+
+ case ColorUtils::kColorStandardUnspecified:
+ return is10Bit ? &BT2020_LTD_10BIT : &BT601_LIMITED;
+
+ }
+}
+
status_t ColorConverter::convertCbYCrY(
const BitmapParams &src, const BitmapParams &dst) {
// XXX Untested
+ const struct Coeffs *matrix = getMatrix();
+ if (!matrix) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ signed _b_u = matrix->_b_u;
+ signed _neg_g_u = -matrix->_g_u;
+ signed _neg_g_v = -matrix->_g_v;
+ signed _r_v = matrix->_r_v;
+ signed _y = matrix->_y;
+ signed _c16 = mSrcColorSpace.mRange == ColorUtils::kColorRangeLimited ? 16 : 0;
+
uint8_t *kAdjustedClip = initClip();
uint16_t *dst_ptr = (uint16_t *)dst.mBits
@@ -304,22 +489,22 @@
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
- signed y1 = (signed)src_ptr[2 * x + 1] - 16;
- signed y2 = (signed)src_ptr[2 * x + 3] - 16;
+ signed y1 = (signed)src_ptr[2 * x + 1] - _c16;
+ signed y2 = (signed)src_ptr[2 * x + 3] - _c16;
signed u = (signed)src_ptr[2 * x] - 128;
signed v = (signed)src_ptr[2 * x + 2] - 128;
- signed u_b = u * 517;
- signed u_g = -u * 100;
- signed v_g = -v * 208;
- signed v_r = v * 409;
+ signed u_b = u * _b_u;
+ signed u_g = u * _neg_g_u;
+ signed v_g = v * _neg_g_v;
+ signed v_r = v * _r_v;
- signed tmp1 = y1 * 298;
+ signed tmp1 = y1 * _y + 128;
signed b1 = (tmp1 + u_b) / 256;
signed g1 = (tmp1 + v_g + u_g) / 256;
signed r1 = (tmp1 + v_r) / 256;
- signed tmp2 = y2 * 298;
+ signed tmp2 = y2 * _y + 128;
signed b2 = (tmp2 + u_b) / 256;
signed g2 = (tmp2 + v_g + u_g) / 256;
signed r2 = (tmp2 + v_r) / 256;
@@ -348,15 +533,32 @@
return OK;
}
+/*
+ libyuv supports the following color spaces:
+
+ I420: BT.601 limited range
+ J420: BT.601 full range (jpeg)
+ H420: BT.709 limited range
+
+*/
+
#define DECLARE_YUV2RGBFUNC(func, rgb) int (*func)( \
- const uint8_t*, int, const uint8_t*, int, \
- const uint8_t*, int, uint8_t*, int, int, int) \
- = mSrcColorSpace.isBt709() ? libyuv::H420To##rgb \
- : mSrcColorSpace.isJpeg() ? libyuv::J420To##rgb \
+ const uint8_t*, int, const uint8_t*, int, \
+ const uint8_t*, int, uint8_t*, int, int, int) \
+ = mSrcColorSpace.isH420() ? libyuv::H420To##rgb \
+ : mSrcColorSpace.isJ420() ? libyuv::J420To##rgb \
: libyuv::I420To##rgb
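+// e.g. DECLARE_YUV2RGBFUNC(cvt, RGB565) (an illustrative instantiation)
+// declares `cvt` as a pointer to libyuv::H420ToRGB565, J420ToRGB565 or
+// I420ToRGB565, picked to match the source color space and defaulting to
+// I420 (BT.601 limited range).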
status_t ColorConverter::convertYUV420PlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst) {
+ // Fall back to our conversion if libyuv does not support the color space.
+ // I420 (BT.601 limited) is default, so don't fall back if we end up using it anyway.
+ if (!mSrcColorSpace.isH420() && !mSrcColorSpace.isJ420()
+ // && !mSrcColorSpace.isI420() /* same as line below */
+ && getMatrix() != &BT601_LIMITED) {
+ return convertYUV420Planar(src, dst);
+ }
+
uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -404,6 +606,13 @@
status_t ColorConverter::convertYUV420SemiPlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst) {
+ // Fall back to our conversion if libyuv does not support the color space.
+ // libyuv only supports BT.601 limited range NV12. Don't fall back if we end up using it anyway.
+ if (// !mSrcColorSpace.isI420() && /* same as below */
+ getMatrix() != &BT601_LIMITED) {
+ return convertYUV420SemiPlanar(src, dst);
+ }
+
uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -444,16 +653,16 @@
case OMX_COLOR_FormatYUV420Planar:
return [](void *src_y, void *src_u, void *src_v, size_t x,
signed *y1, signed *y2, signed *u, signed *v) {
- *y1 = ((uint8_t*)src_y)[x] - 16;
- *y2 = ((uint8_t*)src_y)[x + 1] - 16;
+ *y1 = ((uint8_t*)src_y)[x];
+ *y2 = ((uint8_t*)src_y)[x + 1];
*u = ((uint8_t*)src_u)[x / 2] - 128;
*v = ((uint8_t*)src_v)[x / 2] - 128;
};
case OMX_COLOR_FormatYUV420Planar16:
return [](void *src_y, void *src_u, void *src_v, size_t x,
signed *y1, signed *y2, signed *u, signed *v) {
- *y1 = (signed)(((uint16_t*)src_y)[x] >> 2) - 16;
- *y2 = (signed)(((uint16_t*)src_y)[x + 1] >> 2) - 16;
+ *y1 = (signed)(((uint16_t*)src_y)[x] >> 2);
+ *y2 = (signed)(((uint16_t*)src_y)[x + 1] >> 2);
*u = (signed)(((uint16_t*)src_u)[x / 2] >> 2) - 128;
*v = (signed)(((uint16_t*)src_v)[x / 2] >> 2) - 128;
};
@@ -463,6 +672,8 @@
return nullptr;
}
+// TRICKY: this method only supports RGBA_1010102 output for 10-bit sources, and all other
+// outputs for 8-bit sources, as the type of kAdjustedClip is hardcoded based on the output
+// format, not the input.
std::function<void (void *, bool, signed, signed, signed, signed, signed, signed)>
getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, void *kAdjustedClip) {
switch ((int)dstFormat) {
@@ -557,6 +768,18 @@
status_t ColorConverter::convertYUV420Planar(
const BitmapParams &src, const BitmapParams &dst) {
+ const struct Coeffs *matrix = getMatrix();
+ if (!matrix) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ signed _b_u = matrix->_b_u;
+ signed _neg_g_u = -matrix->_g_u;
+ signed _neg_g_v = -matrix->_g_v;
+ signed _r_v = matrix->_r_v;
+ signed _y = matrix->_y;
+ signed _c16 = mSrcColorSpace.mRange == ColorUtils::kColorRangeLimited ? 16 : 0;
+
uint8_t *kAdjustedClip = initClip();
auto readFromSrc = getReadFromSrc(mSrcFormat);
@@ -575,38 +798,20 @@
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
- // B = 1.164 * (Y - 16) + 2.018 * (U - 128)
- // G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
- // R = 1.164 * (Y - 16) + 1.596 * (V - 128)
-
- // B = 298/256 * (Y - 16) + 517/256 * (U - 128)
- // G = .................. - 208/256 * (V - 128) - 100/256 * (U - 128)
- // R = .................. + 409/256 * (V - 128)
-
- // min_B = (298 * (- 16) + 517 * (- 128)) / 256 = -277
- // min_G = (298 * (- 16) - 208 * (255 - 128) - 100 * (255 - 128)) / 256 = -172
- // min_R = (298 * (- 16) + 409 * (- 128)) / 256 = -223
-
- // max_B = (298 * (255 - 16) + 517 * (255 - 128)) / 256 = 534
- // max_G = (298 * (255 - 16) - 208 * (- 128) - 100 * (- 128)) / 256 = 432
- // max_R = (298 * (255 - 16) + 409 * (255 - 128)) / 256 = 481
-
- // clip range -278 .. 535
-
signed y1, y2, u, v;
readFromSrc(src_y, src_u, src_v, x, &y1, &y2, &u, &v);
- signed u_b = u * 517;
- signed u_g = -u * 100;
- signed v_g = -v * 208;
- signed v_r = v * 409;
+ signed u_b = u * _b_u;
+ signed u_g = u * _neg_g_u;
+ signed v_g = v * _neg_g_v;
+ signed v_r = v * _r_v;
- signed tmp1 = y1 * 298;
+ signed tmp1 = (y1 - _c16) * _y + 128;
signed b1 = (tmp1 + u_b) / 256;
signed g1 = (tmp1 + v_g + u_g) / 256;
signed r1 = (tmp1 + v_r) / 256;
- signed tmp2 = y2 * 298;
+ signed tmp2 = (y2 - _c16) * _y + 128;
signed b2 = (tmp2 + u_b) / 256;
signed g2 = (tmp2 + v_g + u_g) / 256;
signed r2 = (tmp2 + v_r) / 256;
@@ -648,6 +853,18 @@
status_t ColorConverter::convertYUVP010ToRGBA1010102(
const BitmapParams &src, const BitmapParams &dst) {
+ const struct Coeffs *matrix = getMatrix();
+ if (!matrix) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ signed _b_u = matrix->_b_u;
+ signed _neg_g_u = -matrix->_g_u;
+ signed _neg_g_v = -matrix->_g_v;
+ signed _r_v = matrix->_r_v;
+ signed _y = matrix->_y;
+ signed _c16 = mSrcColorSpace.mRange == ColorUtils::kColorRangeLimited ? 64 : 0;
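+ // (10-bit offsets: C16 = 1 << (10 - 4) = 64 and C128 = 1 << (10 - 1) = 512,
+ // matching the 512 subtracted from the chroma samples below.)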
+
uint16_t *kAdjustedClip10bit = initClip10Bit();
// auto readFromSrc = getReadFromSrc(mSrcFormat);
@@ -663,72 +880,28 @@
+ src.mStride * src.mHeight
+ (src.mCropTop / 2) * src.mStride + src.mCropLeft * src.mBpp);
- // BT.2020 Limited Range conversion
-
- // B = 1.168 *(Y - 64) + 2.148 *(U - 512)
- // G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
- // R = 1.168 *(Y - 64) + 1.683 *(V - 512)
-
- // B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
- // G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
- // R = .................... + 1723/1024 *(V - 512)
-
- // min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
- // min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
- // min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
-
- // max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
- // max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
- // max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
-
- // clip range -1175 .. 2218
-
- // BT.709 Limited Range conversion
-
- // B = 1.164 * (Y - 64) + 2.018 * (U - 512)
- // G = 1.164 * (Y - 64) - 0.813 * (V - 512) - 0.391 * (U - 512)
- // R = 1.164 * (Y - 64) + 1.596 * (V - 512)
-
- // B = 1192/1024 * (Y - 64) + 2068/1024 * (U - 512)
- // G = .................... - 832/1024 * (V - 512) - 400/1024 * (U - 512)
- // R = .................... + 1636/1024 * (V - 512)
-
- // min_B = (1192 * (- 64) + 2068 * (- 512)) / 1024 = -1108
-
- // max_B = (1192 * (1023 - 64) + 517 * (1023 - 512)) / 1024 = 2148
-
- // clip range -1108 .. 2148
-
- signed mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
- if (!mSrcColorSpace.isBt2020()) {
- mY = 1192;
- mU_B = 2068;
- mV_G = -832;
- mV_R = 1636;
- mU_G = -400;
- }
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
signed y1, y2, u, v;
- y1 = (src_y[x] >> 6) - 64;
- y2 = (src_y[x + 1] >> 6) - 64;
+ y1 = (src_y[x] >> 6) - _c16;
+ y2 = (src_y[x + 1] >> 6) - _c16;
u = int(src_uv[x] >> 6) - 512;
v = int(src_uv[x + 1] >> 6) - 512;
- signed u_b = u * mU_B;
- signed u_g = u * mU_G;
- signed v_g = v * mV_G;
- signed v_r = v * mV_R;
+ signed u_b = u * _b_u;
+ signed u_g = u * _neg_g_u;
+ signed v_g = v * _neg_g_v;
+ signed v_r = v * _r_v;
- signed tmp1 = y1 * mY;
- signed b1 = (tmp1 + u_b) / 1024;
- signed g1 = (tmp1 + v_g + u_g) / 1024;
- signed r1 = (tmp1 + v_r) / 1024;
+ signed tmp1 = y1 * _y + 128;
+ signed b1 = (tmp1 + u_b) / 256;
+ signed g1 = (tmp1 + v_g + u_g) / 256;
+ signed r1 = (tmp1 + v_r) / 256;
- signed tmp2 = y2 * mY;
- signed b2 = (tmp2 + u_b) / 1024;
- signed g2 = (tmp2 + v_g + u_g) / 1024;
- signed r2 = (tmp2 + v_r) / 1024;
+ signed tmp2 = y2 * _y + 128;
+ signed b2 = (tmp2 + u_b) / 256;
+ signed g2 = (tmp2 + v_g + u_g) / 256;
+ signed r2 = (tmp2 + v_r) / 256;
bool uncropped = x + 1 < src.cropWidth();
@@ -949,11 +1122,6 @@
status_t ColorConverter::convertQCOMYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst) {
- uint8_t *kAdjustedClip = initClip();
-
- uint16_t *dst_ptr = (uint16_t *)dst.mBits
- + dst.mCropTop * dst.mWidth + dst.mCropLeft;
-
const uint8_t *src_y =
(const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
@@ -961,67 +1129,25 @@
(const uint8_t *)src_y + src.mWidth * src.mHeight
+ src.mCropTop * src.mWidth + src.mCropLeft;
- for (size_t y = 0; y < src.cropHeight(); ++y) {
- for (size_t x = 0; x < src.cropWidth(); x += 2) {
- signed y1 = (signed)src_y[x] - 16;
- signed y2 = (signed)src_y[x + 1] - 16;
+ /* QCOMYUV420SemiPlanar is NV21, while MediaCodec uses NV12 */
+ return convertYUV420SemiPlanarBase(
+ src, dst, src_y, src_u, src.mWidth /* row_inc */, true /* isNV21 */);
+}
- signed u = (signed)src_u[x & ~1] - 128;
- signed v = (signed)src_u[(x & ~1) + 1] - 128;
+status_t ColorConverter::convertTIYUV420PackedSemiPlanar(
+ const BitmapParams &src, const BitmapParams &dst) {
+ const uint8_t *src_y =
+ (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
- signed u_b = u * 517;
- signed u_g = -u * 100;
- signed v_g = -v * 208;
- signed v_r = v * 409;
+ const uint8_t *src_u =
+ (const uint8_t *)src_y + src.mWidth * (src.mHeight - src.mCropTop / 2);
- signed tmp1 = y1 * 298;
- signed b1 = (tmp1 + u_b) / 256;
- signed g1 = (tmp1 + v_g + u_g) / 256;
- signed r1 = (tmp1 + v_r) / 256;
-
- signed tmp2 = y2 * 298;
- signed b2 = (tmp2 + u_b) / 256;
- signed g2 = (tmp2 + v_g + u_g) / 256;
- signed r2 = (tmp2 + v_r) / 256;
-
- uint32_t rgb1 =
- ((kAdjustedClip[b1] >> 3) << 11)
- | ((kAdjustedClip[g1] >> 2) << 5)
- | (kAdjustedClip[r1] >> 3);
-
- uint32_t rgb2 =
- ((kAdjustedClip[b2] >> 3) << 11)
- | ((kAdjustedClip[g2] >> 2) << 5)
- | (kAdjustedClip[r2] >> 3);
-
- if (x + 1 < src.cropWidth()) {
- *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
- } else {
- dst_ptr[x] = rgb1;
- }
- }
-
- src_y += src.mWidth;
-
- if (y & 1) {
- src_u += src.mWidth;
- }
-
- dst_ptr += dst.mWidth;
- }
-
- return OK;
+ return convertYUV420SemiPlanarBase(
+ src, dst, src_y, src_u, src.mWidth /* row_inc */);
}
status_t ColorConverter::convertYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst) {
- // XXX Untested
-
- uint8_t *kAdjustedClip = initClip();
-
- uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
- dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
-
const uint8_t *src_y =
(const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
@@ -1029,90 +1155,49 @@
(const uint8_t *)src.mBits + src.mHeight * src.mStride +
(src.mCropTop / 2) * src.mStride + src.mCropLeft;
- for (size_t y = 0; y < src.cropHeight(); ++y) {
- for (size_t x = 0; x < src.cropWidth(); x += 2) {
- signed y1 = (signed)src_y[x] - 16;
- signed y2 = (signed)src_y[x + 1] - 16;
-
- signed v = (signed)src_u[x & ~1] - 128;
- signed u = (signed)src_u[(x & ~1) + 1] - 128;
-
- signed u_b = u * 517;
- signed u_g = -u * 100;
- signed v_g = -v * 208;
- signed v_r = v * 409;
-
- signed tmp1 = y1 * 298;
- signed b1 = (tmp1 + u_b) / 256;
- signed g1 = (tmp1 + v_g + u_g) / 256;
- signed r1 = (tmp1 + v_r) / 256;
-
- signed tmp2 = y2 * 298;
- signed b2 = (tmp2 + u_b) / 256;
- signed g2 = (tmp2 + v_g + u_g) / 256;
- signed r2 = (tmp2 + v_r) / 256;
-
- uint32_t rgb1 =
- ((kAdjustedClip[b1] >> 3) << 11)
- | ((kAdjustedClip[g1] >> 2) << 5)
- | (kAdjustedClip[r1] >> 3);
-
- uint32_t rgb2 =
- ((kAdjustedClip[b2] >> 3) << 11)
- | ((kAdjustedClip[g2] >> 2) << 5)
- | (kAdjustedClip[r2] >> 3);
-
- if (x + 1 < src.cropWidth()) {
- *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
- } else {
- dst_ptr[x] = rgb1;
- }
- }
-
- src_y += src.mStride;
-
- if (y & 1) {
- src_u += src.mStride;
- }
-
- dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
- }
-
- return OK;
+ return convertYUV420SemiPlanarBase(
+ src, dst, src_y, src_u, src.mStride /* row_inc */);
}
-status_t ColorConverter::convertTIYUV420PackedSemiPlanar(
- const BitmapParams &src, const BitmapParams &dst) {
+status_t ColorConverter::convertYUV420SemiPlanarBase(
+ const BitmapParams &src, const BitmapParams &dst,
+ const uint8_t *src_y, const uint8_t *src_u, size_t row_inc, bool isNV21) {
+ const struct Coeffs *matrix = getMatrix();
+ if (!matrix) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ signed _b_u = matrix->_b_u;
+ signed _neg_g_u = -matrix->_g_u;
+ signed _neg_g_v = -matrix->_g_v;
+ signed _r_v = matrix->_r_v;
+ signed _y = matrix->_y;
+ signed _c16 = mSrcColorSpace.mRange == ColorUtils::kColorRangeLimited ? 16 : 0;
+
uint8_t *kAdjustedClip = initClip();
- uint16_t *dst_ptr = (uint16_t *)dst.mBits
- + dst.mCropTop * dst.mWidth + dst.mCropLeft;
-
- const uint8_t *src_y =
- (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
-
- const uint8_t *src_u =
- (const uint8_t *)src_y + src.mWidth * (src.mHeight - src.mCropTop / 2);
+ uint16_t *dst_ptr = (uint16_t *)((uint8_t *)
+ dst.mBits + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp);
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
- signed y1 = (signed)src_y[x] - 16;
- signed y2 = (signed)src_y[x + 1] - 16;
+ signed y1 = (signed)src_y[x] - _c16;
+ signed y2 = (signed)src_y[x + 1] - _c16;
- signed u = (signed)src_u[x & ~1] - 128;
- signed v = (signed)src_u[(x & ~1) + 1] - 128;
+ signed u = (signed)src_u[(x & ~1) + isNV21] - 128;
+ signed v = (signed)src_u[(x & ~1) + !isNV21] - 128;
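+ // (NV12 interleaves chroma as U,V pairs while NV21 stores V,U; the isNV21
+ // offset above swaps which byte of each pair is read as U.)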
- signed u_b = u * 517;
- signed u_g = -u * 100;
- signed v_g = -v * 208;
- signed v_r = v * 409;
+ signed u_b = u * _b_u;
+ signed u_g = u * _neg_g_u;
+ signed v_g = v * _neg_g_v;
+ signed v_r = v * _r_v;
- signed tmp1 = y1 * 298;
+ signed tmp1 = y1 * _y + 128;
signed b1 = (tmp1 + u_b) / 256;
signed g1 = (tmp1 + v_g + u_g) / 256;
signed r1 = (tmp1 + v_r) / 256;
- signed tmp2 = y2 * 298;
+ signed tmp2 = y2 * _y + 128;
signed b2 = (tmp2 + u_b) / 256;
signed g2 = (tmp2 + v_g + u_g) / 256;
signed r2 = (tmp2 + v_r) / 256;
@@ -1134,46 +1219,40 @@
}
}
- src_y += src.mWidth;
+ src_y += row_inc;
if (y & 1) {
- src_u += src.mWidth;
+ src_u += row_inc;
}
- dst_ptr += dst.mWidth;
+ dst_ptr = (uint16_t*)((uint8_t*)dst_ptr + dst.mStride);
}
return OK;
}
uint8_t *ColorConverter::initClip() {
- static const signed kClipMin = -278;
- static const signed kClipMax = 535;
-
if (mClip == NULL) {
- mClip = new uint8_t[kClipMax - kClipMin + 1];
+ mClip = new uint8_t[CLIP_RANGE_MAX_8BIT - CLIP_RANGE_MIN_8BIT + 1];
- for (signed i = kClipMin; i <= kClipMax; ++i) {
- mClip[i - kClipMin] = (i < 0) ? 0 : (i > 255) ? 255 : (uint8_t)i;
+ for (signed i = CLIP_RANGE_MIN_8BIT; i <= CLIP_RANGE_MAX_8BIT; ++i) {
+ mClip[i - CLIP_RANGE_MIN_8BIT] = (i < 0) ? 0 : (i > 255) ? 255 : (uint8_t)i;
}
}
- return &mClip[-kClipMin];
+ return &mClip[-CLIP_RANGE_MIN_8BIT];
}
uint16_t *ColorConverter::initClip10Bit() {
- static const signed kClipMin = -1176;
- static const signed kClipMax = 2219;
-
if (mClip10Bit == NULL) {
- mClip10Bit = new uint16_t[kClipMax - kClipMin + 1];
+ mClip10Bit = new uint16_t[CLIP_RANGE_MAX_10BIT - CLIP_RANGE_MIN_10BIT + 1];
- for (signed i = kClipMin; i <= kClipMax; ++i) {
- mClip10Bit[i - kClipMin] = (i < 0) ? 0 : (i > 1023) ? 1023 : (uint16_t)i;
+ for (signed i = CLIP_RANGE_MIN_10BIT; i <= CLIP_RANGE_MAX_10BIT; ++i) {
+ mClip10Bit[i - CLIP_RANGE_MIN_10BIT] = (i < 0) ? 0 : (i > 1023) ? 1023 : (uint16_t)i;
}
}
- return &mClip10Bit[-kClipMin];
+ return &mClip10Bit[-CLIP_RANGE_MIN_10BIT];
}
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 632b32c..5a21755 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -522,6 +522,7 @@
status_t setLatency(uint32_t latency);
status_t getLatency(uint32_t *latency);
status_t setTunnelPeek(int32_t tunnelPeek);
+ status_t setTunnelPeekLegacy(int32_t isLegacy);
status_t setAudioPresentation(int32_t presentationId, int32_t programId);
status_t setOperatingRate(float rateFloat, bool isVideo);
status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 1d86a22..7a05f00 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -47,15 +47,20 @@
size_t dstCropLeft, size_t dstCropTop,
size_t dstCropRight, size_t dstCropBottom);
+ struct Coeffs; // matrix coefficients
+
private:
struct ColorSpace {
uint32_t mStandard;
uint32_t mRange;
uint32_t mTransfer;
- bool isBt709();
- bool isBt2020();
- bool isJpeg();
+ bool isBt2020() const;
+
+ // libyuv helper methods
+ bool isH420() const;
+ bool isI420() const;
+ bool isJ420() const;
};
struct BitmapParams {
@@ -84,6 +89,9 @@
uint8_t *initClip();
uint16_t *initClip10Bit();
+ // returns the YUV2RGB matrix coefficients according to the color aspects and bit depth
+ const struct Coeffs *getMatrix() const;
+
status_t convertCbYCrY(
const BitmapParams &src, const BitmapParams &dst);
@@ -111,6 +119,10 @@
status_t convertYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
+ status_t convertYUV420SemiPlanarBase(
+ const BitmapParams &src, const BitmapParams &dst,
+ const uint8_t *src_y, const uint8_t *src_u, size_t row_inc, bool isNV21 = false);
+
status_t convertTIYUV420PackedSemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index a00a3e6..1d2d711 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -398,6 +398,7 @@
// <all states> -> EnabledNoBuffer when flush
// <all states> -> EnabledNoBuffer when stop then configure then start
enum struct TunnelPeekState {
+ kLegacyMode,
kDisabledNoBuffer,
kEnabledNoBuffer,
kDisabledQueued,
diff --git a/media/libstagefright/omx/OMXStore.cpp b/media/libstagefright/omx/OMXStore.cpp
index 7e33f09..4827d9e 100644
--- a/media/libstagefright/omx/OMXStore.cpp
+++ b/media/libstagefright/omx/OMXStore.cpp
@@ -28,6 +28,8 @@
#include <dlfcn.h>
#include <fcntl.h>
+#include <sstream>
+
namespace android {
OMXStore::OMXStore() {
@@ -108,9 +110,26 @@
return android::base::GetIntProperty("ro.product.first_api_level", __ANDROID_API_T__);
}
+static bool isTV() {
+ static const bool kIsTv = []() {
+ std::string characteristics = android::base::GetProperty("ro.build.characteristics", "");
+ std::stringstream ss(characteristics);
+ for (std::string item; std::getline(ss, item, ','); ) {
+ if (item == "tv") {
+ return true;
+ }
+ }
+ return false;
+ }();
+ return kIsTv;
+}
+
void OMXStore::addPlugin(OMXPluginBase *plugin) {
Mutex::Autolock autoLock(mLock);
+ bool typeTV = isTV();
+ int firstApiLevel = getFirstApiLevel();
+
OMX_U32 index = 0;
char name[128];
@@ -125,13 +144,16 @@
bool skip = false;
for (String8 role : roles) {
if (role.find("video_decoder") != -1 || role.find("video_encoder") != -1) {
- if (getFirstApiLevel() >= __ANDROID_API_S__) {
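+ // OMX video codecs are hidden for all devices that launched with T or
+ // later, and for non-TV devices that launched with S; S-launch TVs keep
+ // them.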
+ if (firstApiLevel >= __ANDROID_API_T__) {
+ skip = true;
+ break;
+ } else if (!typeTV && firstApiLevel >= __ANDROID_API_S__) {
skip = true;
break;
}
}
if (role.find("audio_decoder") != -1 || role.find("audio_encoder") != -1) {
- if (getFirstApiLevel() >= __ANDROID_API_T__) {
+ if (firstApiLevel >= __ANDROID_API_T__) {
skip = true;
break;
}
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 1ab5bc1..07f4529 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -181,10 +181,14 @@
bool captureAudioOutputAllowed(const AttributionSourceState& attributionSource) {
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
- bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
+ // Use PermissionChecker, which includes some logic for allowing the isolated
+ // HotwordDetectionService to hold certain permissions.
+ permission::PermissionChecker permissionChecker;
+ bool ok = (permissionChecker.checkPermissionForPreflight(
+ sCaptureAudioOutput, attributionSource, String16(),
+ AppOpsManager::OP_NONE) != permission::PermissionChecker::PERMISSION_HARD_DENIED);
if (!ok) ALOGV("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
return ok;
}
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.cpp b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
index fcd6ebe..a169667 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.cpp
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
@@ -41,8 +41,10 @@
tidToCycleCounterMap[currentThreadId]++;
if (tidToCycleCounterMap[currentThreadId] >= mMaxCycles) {
- ALOGW("CameraServiceWatchdog triggering kill for pid: %d", getpid());
- kill(getpid(), SIGKILL);
+ ALOGW("CameraServiceWatchdog triggering abort for pid: %d", getpid());
+ // We use abort here so we can get a tombstone for better
+ // debugging.
+ abort();
}
}
}
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index f4955e2..29ddab1 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -26,7 +26,7 @@
* and single call monitoring differently. See function documentation for
* more details.
*/
-
+#pragma once
#include <chrono>
#include <thread>
#include <time.h>
@@ -61,7 +61,7 @@
/** Used to wrap monitored calls in start and stop functions using custom timer values */
template<typename T>
auto watchThread(T func, uint32_t tid, uint32_t cycles, uint32_t cycleLength) {
- auto res = NULL;
+ decltype(func()) res;
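+ // decltype(func()) declares res with the monitored call's actual return
+ // type; the previous `auto res = NULL` only compiled for return types
+ // assignable from NULL.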
if (cycles != mMaxCycles || cycleLength != mCycleLengthMs) {
// Create another instance of the watchdog to prevent disruption
@@ -84,10 +84,9 @@
/** Used to wrap monitored calls in start and stop functions using class timer values */
template<typename T>
auto watchThread(T func, uint32_t tid) {
- auto res = NULL;
start(tid);
- res = func();
+ auto res = func();
stop(tid);
return res;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index dc5002b..a3d24ff 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -40,6 +40,9 @@
#include "utils/CameraServiceProxyWrapper.h"
namespace android {
+
+const static size_t kDisconnectTimeoutMs = 2500;
+
using namespace camera2;
// Interface used by CameraService
@@ -144,6 +147,10 @@
wp<NotificationListener> weakThis(this);
res = mDevice->setNotifyCallback(weakThis);
+ /** Start watchdog thread */
+ mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog->run("Camera2ClientBaseWatchdog");
+
return OK;
}
@@ -155,6 +162,11 @@
disconnect();
+ if (mCameraServiceWatchdog != NULL) {
+ mCameraServiceWatchdog->requestExit();
+ mCameraServiceWatchdog.clear();
+ }
+
ALOGI("Closed Camera %s. Client was: %s (PID %d, UID %u)",
TClientBase::mCameraIdStr.string(),
String8(TClientBase::mClientPackageName).string(),
@@ -238,9 +250,18 @@
// ICameraClient2BaseUser interface
-
template <typename TClientBase>
binder::Status Camera2ClientBase<TClientBase>::disconnect() {
+ if (mCameraServiceWatchdog != nullptr) {
+ // Initialization from the HAL succeeded; time the disconnect call.
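+ // (kDisconnectTimeoutMs / kCycleLengthMs converts the 2500 ms budget into
+ // the number of watchdog cycles to allow before the watchdog fires.)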
+ return mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(disconnectImpl(),
+ kDisconnectTimeoutMs / kCycleLengthMs, kCycleLengthMs);
+ }
+ return disconnectImpl();
+}
+
+template <typename TClientBase>
+binder::Status Camera2ClientBase<TClientBase>::disconnectImpl() {
ATRACE_CALL();
ALOGD("Camera %s: start to disconnect", TClientBase::mCameraIdStr.string());
Mutex::Autolock icl(mBinderSerializationLock);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index b0d1c3f..3af781b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -19,6 +19,7 @@
#include "common/CameraDeviceBase.h"
#include "camera/CaptureResult.h"
+#include "CameraServiceWatchdog.h"
namespace android {
@@ -173,6 +174,12 @@
private:
template<typename TProviderPtr>
status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
+
+ binder::Status disconnectImpl();
+
+ // Watchdog thread
+ sp<CameraServiceWatchdog> mCameraServiceWatchdog;
+
};
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index fd00284..8e4ff13 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -379,9 +379,11 @@
}
// Fill in JPEG header
- CameraBlob *aidlBlobHeader = reinterpret_cast<CameraBlob *>(aidlHeaderStart);
- aidlBlobHeader->blobId = blobId;
- aidlBlobHeader->blobSizeBytes = blobSizeBytes;
+ CameraBlob aidlHeader = {
+ .blobId = blobId,
+ .blobSizeBytes = static_cast<int32_t>(blobSizeBytes)
+ };
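+ // Building the header locally and copying it in avoids writing through a
+ // CameraBlob* that may be misaligned within the mapped buffer.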
+ memcpy(aidlHeaderStart, &aidlHeader, sizeof(CameraBlob));
graphicBuffer->unlock();
return OK;
}
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 7b61290..7dde268 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -87,7 +87,7 @@
int32_t PERF_CLASS_LEVEL =
property_get_int32("ro.odm.build.media_performance_class", 0);
-bool IS_PERF_CLASS = (PERF_CLASS_LEVEL == SDK_VERSION_S);
+bool IS_PERF_CLASS = (PERF_CLASS_LEVEL >= SDK_VERSION_S);
camera3::Size getMaxJpegResolution(const CameraMetadata &metadata,
bool ultraHighResolution) {