Merge "AudioRecord: Refine metric status information"
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 2b96055..f23684a 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -37,6 +37,17 @@
]
},
{
+ "name": "CtsMediaDecoderTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaEncoderTestCases",
"options": [
{
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index c08cd59..c7985ca 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -505,124 +505,6 @@
}
}
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width, size_t height) {
-
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *) dst;
- uint32_t *dstBot = (uint32_t *) (dst + dstStride);
- uint16_t *ySrcTop = (uint16_t*) srcY;
- uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
- uint16_t *uSrc = (uint16_t*) srcU;
- uint16_t *vSrc = (uint16_t*) srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
-
- u01 = *((uint32_t*)uSrc); uSrc += 2;
- v01 = *((uint32_t*)vSrc); vSrc += 2;
-
- y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
- y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t*)ySrcTop);
- y45 = *((uint32_t*)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-
- return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
- return;
-}
bool C2SoftAomDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
@@ -711,21 +593,16 @@
dstYStride / sizeof(uint32_t),
mWidth, mHeight);
} else {
- convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride / 2, srcUStride / 2, srcVStride / 2,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ mWidth, mHeight);
}
} else {
const uint8_t *srcY = (const uint8_t *)img->planes[AOM_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)img->planes[AOM_PLANE_U];
const uint8_t *srcV = (const uint8_t *)img->planes[AOM_PLANE_V];
- copyOutputBufferToYuvPlanarFrame(
- dstY, dstU, dstV, srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
}
finishWork(*(int64_t*)img->user_priv, work, std::move(block));
block = nullptr;
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 6c4b7d9..99ff450 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -29,7 +29,179 @@
#include <SimpleC2Component.h>
namespace android {
+constexpr uint8_t kNeutralUVBitDepth8 = 128;
+constexpr uint16_t kNeutralUVBitDepth10 = 512;
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+ const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, uint32_t width, uint32_t height,
+ bool isMonochrome) {
+ for (size_t i = 0; i < height; ++i) {
+ memcpy(dstY, srcY, width);
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t i = 0; i < height / 2; ++i) {
+ memset(dstV, kNeutralUVBitDepth8, width / 2);
+ memset(dstU, kNeutralUVBitDepth8, width / 2);
+ dstV += dstUVStride;
+ dstU += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dstV, srcV, width / 2);
+ srcV += srcVStride;
+ dstV += dstUVStride;
+ }
+
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dstU, srcU, width / 2);
+ srcU += srcUStride;
+ dstU += dstUVStride;
+ }
+}
+
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < height; y += 2) {
+ uint32_t *dstTop = (uint32_t *)dst;
+ uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+ uint16_t *ySrcTop = (uint16_t *)srcY;
+ uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+ uint16_t *uSrc = (uint16_t *)srcU;
+ uint16_t *vSrc = (uint16_t *)srcV;
+
+ uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+ size_t x = 0;
+ for (; x < width - 3; x += 4) {
+ u01 = *((uint32_t *)uSrc);
+ uSrc += 2;
+ v01 = *((uint32_t *)vSrc);
+ vSrc += 2;
+
+ y01 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y23 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y45 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+ y67 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+ *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+ *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+ *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+ *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+ }
+
+        // There should be at most 2 more pixels to process. Note that we don't
+        // need to consider the odd case, as the buffer is always aligned to an even width.
+ if (x < width) {
+ u01 = *uSrc;
+ v01 = *vSrc;
+ y01 = *((uint32_t *)ySrcTop);
+ y45 = *((uint32_t *)ySrcBot);
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = ((y01 >> 16) << 10) | uv0;
+ *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = ((y45 >> 16) << 10) | uv0;
+ }
+
+ srcY += srcYStride * 2;
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dst += dstStride * 2;
+ }
+}
+
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome) {
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = (uint8_t)(srcY[x] >> 2);
+ }
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ memset(dstV, kNeutralUVBitDepth8, (width + 1) / 2);
+ memset(dstU, kNeutralUVBitDepth8, (width + 1) / 2);
+ dstV += dstUVStride;
+ dstU += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstU[x] = (uint8_t)(srcU[x] >> 2);
+ dstV[x] = (uint8_t)(srcV[x] >> 2);
+ }
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstU += dstUVStride;
+ dstV += dstUVStride;
+ }
+}
+
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome) {
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = srcY[x] << 6;
+ }
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstUV[2 * x] = kNeutralUVBitDepth10 << 6;
+ dstUV[2 * x + 1] = kNeutralUVBitDepth10 << 6;
+ }
+ dstUV += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstUV[2 * x] = srcU[x] << 6;
+ dstUV[2 * x + 1] = srcV[x] << 6;
+ }
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstUV += dstUVStride;
+ }
+}
std::unique_ptr<C2Work> SimpleC2Component::WorkQueue::pop_front() {
std::unique_ptr<C2Work> work = std::move(mQueue.front().work);
mQueue.pop_front();
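
For reference, the per-pixel word layout written by the shared convertYUV420Planar16ToY410 above can be restated as a tiny standalone C++ sketch (the helper name and test values are mine; only the bit arithmetic comes from the loop in the patch): 10-bit U sits in bits 0-9, Y in bits 10-19, V in bits 20-29, and a constant alpha of 3 fills bits 30-31.

#include <cstdint>
#include <cstdio>

// Illustrative restatement of the per-pixel packing done by
// convertYUV420Planar16ToY410: U in bits 0-9, Y in bits 10-19,
// V in bits 20-29, alpha = 3 in bits 30-31.
constexpr uint32_t packY410(uint16_t y, uint16_t u, uint16_t v) {
    return (3u << 30) | ((uint32_t)(v & 0x3FF) << 20) |
           ((uint32_t)(y & 0x3FF) << 10) | (uint32_t)(u & 0x3FF);
}

static_assert(packY410(0, 0, 0) == 0xC0000000, "alpha bits only");
static_assert(packY410(0x3FF, 0x3FF, 0x3FF) == 0xFFFFFFFF, "all channels saturated");

int main() {
    std::printf("0x%08X\n", packY410(512, 512, 512));  // mid-grey: 0xE0080200
    return 0;
}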
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index e5e16d8..3b4e212 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -28,6 +28,24 @@
namespace android {
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+ const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, uint32_t width, uint32_t height,
+ bool isMonochrome = false);
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height);
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome = false);
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome = false);
class SimpleC2Component
: public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
public:
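
These declarations also expose the isMonochrome parameter used by the gav1 decoder below: when the stream carries no chroma, the helpers fill U/V with the mid-scale value for the target bit depth (kNeutralUVBitDepth8 = 128, kNeutralUVBitDepth10 = 512), and the P010 writer additionally shifts the 10-bit value into the top of each 16-bit word. A minimal sketch of that arithmetic, using only values visible in the patch:

#include <cstdint>

// Neutral (zero-chroma) values used by the monochrome paths above.
constexpr uint8_t kNeutralUV8 = 128;    // mid-scale for 8-bit chroma
constexpr uint16_t kNeutralUV10 = 512;  // mid-scale for 10-bit chroma

// P010 stores each 10-bit sample in the upper bits of a 16-bit word,
// hence the << 6 when the neutral value is written to dstUV.
static_assert((uint16_t)(kNeutralUV10 << 6) == 0x8000, "P010 neutral chroma");
static_assert(kNeutralUV8 == (1 << 7) && kNeutralUV10 == (1 << 9), "half of full scale");

int main() { return 0; }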
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 475d863..ff6080d 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -28,11 +28,6 @@
#include <media/stagefright/foundation/MediaDefs.h>
namespace android {
-namespace {
-
-constexpr uint8_t NEUTRAL_UV_VALUE = 128;
-
-} // namespace
// codecname set and passed in as a compile flag from Android.bp
constexpr char COMPONENT_NAME[] = CODECNAME;
@@ -545,150 +540,6 @@
}
}
-static void copyOutputBufferToYV12Frame(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height,
- bool isMonochrome) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- if (isMonochrome) {
- // Fill with neutral U/V values.
- for (size_t i = 0; i < height / 2; ++i) {
- memset(dstV, NEUTRAL_UV_VALUE, width / 2);
- memset(dstU, NEUTRAL_UV_VALUE, width / 2);
- dstV += dstUVStride;
- dstU += dstUVStride;
- }
- return;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU,
- const uint16_t *srcV, size_t srcYStride,
- size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width,
- size_t height) {
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *)dst;
- uint32_t *dstBot = (uint32_t *)(dst + dstStride);
- uint16_t *ySrcTop = (uint16_t *)srcY;
- uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
- uint16_t *uSrc = (uint16_t *)srcU;
- uint16_t *vSrc = (uint16_t *)srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
- u01 = *((uint32_t *)uSrc);
- uSrc += 2;
- v01 = *((uint32_t *)vSrc);
- vSrc += 2;
-
- y01 = *((uint32_t *)ySrcTop);
- ySrcTop += 2;
- y23 = *((uint32_t *)ySrcTop);
- ySrcTop += 2;
- y45 = *((uint32_t *)ySrcBot);
- ySrcBot += 2;
- y67 = *((uint32_t *)ySrcBot);
- ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t *)ySrcTop);
- y45 = *((uint32_t *)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height, bool isMonochrome) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- if (isMonochrome) {
- // Fill with neutral U/V values.
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- memset(dstV, NEUTRAL_UV_VALUE, (width + 1) / 2);
- memset(dstU, NEUTRAL_UV_VALUE, (width + 1) / 2);
- dstV += dstUVStride;
- dstU += dstUVStride;
- }
- return;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
-}
-
void C2SoftGav1Dec::getVuiParams(const libgav1::DecoderBuffer *buffer) {
VuiColorAspects vuiColorAspects;
vuiColorAspects.primaries = buffer->color_primary;
@@ -841,22 +692,24 @@
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410(
- (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ convertYUV420Planar16ToY410((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
+ mWidth, mHeight);
+ } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+ convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+ srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
+ dstUVStride / 2, mWidth, mHeight, isMonochrome);
} else {
- convertYUV420Planar16ToYUV420Planar(
- dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride, dstUVStride, mWidth, mHeight,
- isMonochrome);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride, mWidth,
+ mHeight, isMonochrome);
}
} else {
const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
- copyOutputBufferToYV12Frame(
- dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
}
finishWork(buffer->user_private_data, work, std::move(block));
block = nullptr;
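
The new HAL_PIXEL_FORMAT_YCBCR_P010 branch calls the shared convertYUV420Planar16ToP010 helper, whose per-sample rule is that each 10-bit value occupies the upper ten bits of a 16-bit word and U/V are interleaved into one semi-planar chroma plane. A short standalone sketch of that rule (helper names here are illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>

// Illustrative restatement of the P010 per-sample rule used by
// convertYUV420Planar16ToP010: a 10-bit sample occupies the top 10 bits
// of a 16-bit word, with the low 6 bits zero.
constexpr uint16_t toP010(uint16_t tenBitSample) {
    return (uint16_t)(tenBitSample << 6);
}

// Chroma is semi-planar: U and V for the same 2x2 block are interleaved.
inline void writeP010Chroma(uint16_t *dstUV, size_t x, uint16_t u10, uint16_t v10) {
    dstUV[2 * x] = toP010(u10);
    dstUV[2 * x + 1] = toP010(v10);
}

static_assert(toP010(0x3FF) == 0xFFC0, "full-scale 10-bit sample");
static_assert(toP010(0) == 0, "zero sample stays zero");

int main() { return 0; }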
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 81f4679..54a1d0e 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -419,40 +419,6 @@
return resChanged;
}
-/* TODO: can remove temporary copy after library supports writing to display
- * buffer Y, U and V plane pointers using stride info. */
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, uint8_t *src,
- size_t dstYStride, size_t dstUVStride,
- size_t srcYStride, uint32_t width,
- uint32_t height) {
- size_t srcUVStride = srcYStride / 2;
- uint8_t *srcStart = src;
-
- size_t vStride = align(height, 16);
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, src, width);
- src += srcYStride;
- dstY += dstYStride;
- }
-
- /* U buffer */
- src = srcStart + vStride * srcYStride;
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, src, width / 2);
- src += srcUVStride;
- dstU += dstUVStride;
- }
-
- /* V buffer */
- src = srcStart + vStride * srcYStride * 5 / 4;
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, src, width / 2);
- src += srcUVStride;
- dstV += dstUVStride;
- }
-}
-
void C2SoftMpeg4Dec::process(
const std::unique_ptr<C2Work> &work,
const std::shared_ptr<C2BlockPool> &pool) {
@@ -636,11 +602,17 @@
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
- (void)copyOutputBufferToYuvPlanarFrame(
- outputBufferY, outputBufferU, outputBufferV,
- mOutputBuffer[mNumSamplesOutput & 1],
- dstYStride, dstUVStride,
- align(mWidth, 16), mWidth, mHeight);
+ size_t srcYStride = align(mWidth, 16);
+ size_t srcUStride = srcYStride / 2;
+ size_t srcVStride = srcYStride / 2;
+ size_t vStride = align(mHeight, 16);
+ const uint8_t *srcY = (const uint8_t *)mOutputBuffer[mNumSamplesOutput & 1];
+ const uint8_t *srcU = (const uint8_t *)srcY + vStride * srcYStride;
+ const uint8_t *srcV = (const uint8_t *)srcY + vStride * srcYStride * 5 / 4;
+
+ convertYUV420Planar8ToYV12(outputBufferY, outputBufferU, outputBufferV, srcY, srcU, srcV,
+ srcYStride, srcUStride, srcVStride, dstYStride, dstUVStride,
+ mWidth, mHeight);
inPos += inSize - (size_t)tmpInSize;
finishWork(workIndex, work);
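
The replacement code above derives the chroma pointers from the layout of the decoder's internal frame buffer: a Y plane of alignedWidth x alignedHeight bytes, followed by the quarter-size U plane, with V starting at 5/4 of the Y-plane size. A small worked example for a hypothetical 176x144 (QCIF) stream, using the same align-to-16 rule:

#include <cstddef>

// Hypothetical QCIF example of the plane offsets computed in the new code.
constexpr size_t alignTo16(size_t v) { return (v + 15) & ~size_t{15}; }

constexpr size_t width = 176, height = 144;               // already multiples of 16
constexpr size_t srcYStride = alignTo16(width);           // 176
constexpr size_t vStride = alignTo16(height);             // 144
constexpr size_t uOffset = vStride * srcYStride;          // 25344: end of Y plane
constexpr size_t vOffset = vStride * srcYStride * 5 / 4;  // 31680: Y plane + U plane

static_assert(uOffset == 25344 && vOffset == 31680, "YUV420 plane offsets");
static_assert(vOffset - uOffset == uOffset / 4, "U plane is a quarter of the Y plane");

int main() { return 0; }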
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 2da9d5b..0a27821 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -640,125 +640,6 @@
}
}
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width, size_t height) {
-
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *) dst;
- uint32_t *dstBot = (uint32_t *) (dst + dstStride);
- uint16_t *ySrcTop = (uint16_t*) srcY;
- uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
- uint16_t *uSrc = (uint16_t*) srcU;
- uint16_t *vSrc = (uint16_t*) srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
-
- u01 = *((uint32_t*)uSrc); uSrc += 2;
- v01 = *((uint32_t*)vSrc); vSrc += 2;
-
- y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
- y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t*)ySrcTop);
- y45 = *((uint32_t*)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-
- return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
- return;
-}
status_t C2SoftVpxDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
@@ -876,24 +757,22 @@
queue->cond.signal();
queue.waitForCondition(queue->cond);
}
+ } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+ convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+ srcYStride / 2, srcUStride / 2, srcVStride / 2,
+ dstYStride / 2, dstUVStride / 2, mWidth, mHeight);
} else {
- convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride / 2, srcUStride / 2, srcVStride / 2,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ mWidth, mHeight);
}
} else {
const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
- copyOutputBufferToYuvPlanarFrame(
- dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
}
finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
return OK;
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index fd51622..834f4ad 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -19,7 +19,7 @@
cc_library {
name: "libflacextractor",
- defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+ defaults: ["extractor-defaults"],
srcs: ["FLACExtractor.cpp"],
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index b7e2af3..cdf587c 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -18,7 +18,7 @@
cc_library {
name: "libwavextractor",
- defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+ defaults: ["extractor-defaults"],
srcs: ["WAVExtractor.cpp"],
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 73432af..06f05b0 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -602,6 +602,7 @@
void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+ std::lock_guard<std::mutex> lock(mStreamLock);
mDuckAndMuteVolume = duckAndMuteVolume;
doSetVolume(); // apply this change
}
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 47241ce..ae39512 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -43,7 +43,7 @@
return {to, false};
}
// Always rate limit if t is 0 (required to avoid division by 0).
- if (t == 0) {
+ if (t == 0 || maxTranslationalVelocity == 0 || maxRotationalVelocity == 0) {
return {from, true};
}
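
The widened guard makes the limiter return the previous pose not only when no time has elapsed but also when either velocity limit is zero; with a zero velocity budget the permitted step is zero anyway, so short-circuiting avoids degenerate math. A scalar sketch of a rate limiter of this general shape (an illustration of the idea, not the libheadtracking implementation):

#include <algorithm>
#include <cstdio>

// Scalar illustration of step-size rate limiting: the output may move toward
// the target by at most maxVelocity * dt per update.
float rateLimit(float from, float to, float dt, float maxVelocity) {
    // Mirrors the guard added above: with dt == 0 or a zero velocity limit
    // the allowed step is zero (or undefined), so hold the previous value.
    if (dt == 0 || maxVelocity == 0) {
        return from;
    }
    const float maxStep = maxVelocity * dt;
    const float step = std::clamp(to - from, -maxStep, maxStep);
    return from + step;
}

int main() {
    std::printf("%.3f\n", rateLimit(0.f, 1.f, 0.01f, 2.f));  // 0.020: limited step
    std::printf("%.3f\n", rateLimit(0.f, 1.f, 0.01f, 0.f));  // 0.000: holds `from`
    return 0;
}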
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index 4d6a483..6a17972 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -336,7 +336,7 @@
*durationUs = 0ll;
int64_t audioDurationUs;
- if (mAudioTrack != NULL
+ if (mAudioTrack != NULL && mAudioTrack->getFormat() != NULL
&& mAudioTrack->getFormat()->findInt64(
kKeyDuration, &audioDurationUs)
&& audioDurationUs > *durationUs) {
@@ -344,7 +344,7 @@
}
int64_t videoDurationUs;
- if (mVideoTrack != NULL
+ if (mVideoTrack != NULL && mVideoTrack->getFormat() != NULL
&& mVideoTrack->getFormat()->findInt64(
kKeyDuration, &videoDurationUs)
&& videoDurationUs > *durationUs) {
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 53181cc..807b525 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -42,6 +42,17 @@
]
},
{
+ "name": "CtsMediaDecoderTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaEncoderTestCases",
"options": [
{
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 2fe2451..cb428ec 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -61,6 +61,7 @@
// Used for calls that should come from system server or internal.
// Note: system server is multiprocess for multiple users. audioserver is not.
+// Note: if this method is modified, also update the same method in SensorService.h.
static inline bool isAudioServerOrSystemServerUid(uid_t uid) {
return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 607ea49..fc6e05e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -3690,7 +3690,8 @@
const audio_attributes_t& attributes) {
audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
audio_flags_to_audio_output_flags(attributes.flags, &output_flags);
- sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
+ DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(attributes);
+ sp<IOProfile> profile = getProfileForOutput(outputDevices,
config.sample_rate,
config.format,
config.channel_mask,
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index cb017f0..440a7ff 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -35,10 +35,10 @@
namespace {
// This is how fast, in m/s, we allow position to shift during rate-limiting.
-constexpr auto kMaxTranslationalVelocity = 2;
+constexpr float kMaxTranslationalVelocity = 2;
// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
-constexpr auto kMaxRotationalVelocity = 8;
+constexpr float kMaxRotationalVelocity = 8;
// This should be set to the typical time scale that the translation sensors used drift in. This
// means, loosely, for how long we can trust the reading to be "accurate enough". This would