Merge "Revert "C2Soft Enc: Use correct matrix and range in RGB to YUV conversion""
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 41fe080..3e79897 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -1,24 +1,7 @@
// for frameworks/av/media
{
- "presubmit-large": [
- // runs whenever we change something in this tree
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.EncodeDecodeTest"
- }
- ]
- },
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
- }
- ]
- }
- ],
+ // TODO (b/229286407) Add EncodeDecodeTest and DecodeEditEncodeTest to
+ // presubmit-large once issues in cuttlefish are fixed
"presubmit": [
{
"name": "GtsMediaTestCases",
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 39bbe1c..971b009 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -539,9 +539,10 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
if (img->fmt == AOM_IMG_FMT_I42016) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+ defaultColorAspects = mIntf->getDefaultColorAspects_l();
if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -590,7 +591,8 @@
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
convertYUV420Planar16ToY410OrRGBA1010102(
(uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(defaultColorAspects));
} else {
convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 01f5ffb..e81b70a 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -133,11 +133,126 @@
dst += dstStride * 2;
}
}
+
+namespace {
+
+static C2ColorAspectsStruct FillMissingColorAspects(
+ std::shared_ptr<const C2ColorAspectsStruct> aspects,
+ int32_t width, int32_t height) {
+ C2ColorAspectsStruct _aspects;
+ if (aspects) {
+ _aspects = *aspects;
+ }
+
+ // use matrix for conversion
+ if (_aspects.matrix == C2Color::MATRIX_UNSPECIFIED) {
+ // if not specified, deduce matrix from primaries
+ if (_aspects.primaries == C2Color::PRIMARIES_UNSPECIFIED) {
+ // if those are also not specified, deduce primaries first from transfer, then from
+ // width and height
+ if (_aspects.transfer == C2Color::TRANSFER_ST2084
+ || _aspects.transfer == C2Color::TRANSFER_HLG) {
+ _aspects.primaries = C2Color::PRIMARIES_BT2020;
+ } else if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
+ // TODO: stagefright defaults to BT.2020 for UHD, but perhaps we should default to
+ // BT.709 for non-HDR 10-bit UHD content
+ // (see media/libstagefright/foundation/ColorUtils.cpp)
+ _aspects.primaries = C2Color::PRIMARIES_BT2020;
+ } else if ((width <= 720 && height <= 576)
+ || (height <= 720 && width <= 576)) {
+ // note: it does not actually matter whether to use 525 or 625 here as the
+ // conversion is the same
+ _aspects.primaries = C2Color::PRIMARIES_BT601_625;
+ } else {
+ _aspects.primaries = C2Color::PRIMARIES_BT709;
+ }
+ }
+
+ switch (_aspects.primaries) {
+ case C2Color::PRIMARIES_BT601_525:
+ case C2Color::PRIMARIES_BT601_625:
+ _aspects.matrix = C2Color::MATRIX_BT601;
+ break;
+
+ case C2Color::PRIMARIES_BT709:
+ _aspects.matrix = C2Color::MATRIX_BT709;
+ break;
+
+ case C2Color::PRIMARIES_BT2020:
+ default:
+ _aspects.matrix = C2Color::MATRIX_BT2020;
+ }
+ }
+
+ return _aspects;
+}
+
+// matrix conversion coefficients
+// (see media/libstagefright/colorconverter/ColorConverter.cpp for more details)
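+//
+// Roughly, these values come from the standard YCbCr-to-RGB relations
+// (with K_G = 1 - K_R - K_B):
+//   R = Y' + 2*(1 - K_R) * V'
+//   G = Y' - (2*K_B*(1 - K_B)/K_G) * U' - (2*K_R*(1 - K_R)/K_G) * V'
+//   B = Y' + 2*(1 - K_B) * U'
+// where Y' = Y - _c16 and U', V' are the chroma samples minus 512, with every
+// factor scaled by 1024 for fixed-point math. For limited range, luma is
+// additionally scaled by 1023/876 and chroma by 1023/896 (10-bit), and
+// _c16 = 64 is the luma offset. E.g. BT.709 limited range:
+//   _r_v ~= 2 * (1 - 0.2126) * (1023/896) * 1024 ~= 1841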
+struct Coeffs {
+ int32_t _y, _r_v, _g_u, _g_v, _b_u, _c16;
+};
+
+static const struct Coeffs GetCoeffsForAspects(const C2ColorAspectsStruct &aspects) {
+ bool isFullRange = aspects.range == C2Color::RANGE_FULL;
+
+ switch (aspects.matrix) {
+ case C2Color::MATRIX_BT601:
+ /**
+ * BT.601: K_R = 0.299; K_B = 0.114
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1436, 352, 731, 1815, 0 };
+ } else {
+ return Coeffs { 1196, 1639, 402, 835, 2072, 64 };
+ }
+ break;
+
+ case C2Color::MATRIX_BT709:
+ /**
+ * BT.709: K_R = 0.2126; K_B = 0.0722
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1613, 192, 479, 1900, 0 };
+ } else {
+ return Coeffs { 1196, 1841, 219, 547, 2169, 64 };
+ }
+ break;
+
+ case C2Color::MATRIX_BT2020:
+ default:
+ /**
+ * BT.2020: K_R = 0.2627; K_B = 0.0593
+ */
+ if (isFullRange) {
+ return Coeffs { 1024, 1510, 169, 585, 1927, 0 };
+ } else {
+ return Coeffs { 1196, 1724, 192, 668, 2200, 64 };
+ }
+ }
+}
+
+}  // namespace
+
#define CLIP3(min, v, max) (((v) < (min)) ? (min) : (((max) > (v)) ? (v) : (max)))
-void convertYUV420Planar16ToRGBA1010102(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
- const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height) {
+void convertYUV420Planar16ToRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width,
+ size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects) {
+
+ C2ColorAspectsStruct _aspects = FillMissingColorAspects(aspects, width, height);
+
+ struct Coeffs coeffs = GetCoeffsForAspects(_aspects);
+
+ int32_t _y = coeffs._y;
+ int32_t _b_u = coeffs._b_u;
+ int32_t _neg_g_u = -coeffs._g_u;
+ int32_t _neg_g_v = -coeffs._g_v;
+ int32_t _r_v = coeffs._r_v;
+ int32_t _c16 = coeffs._c16;
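+ // All of the products below are in units of 1/1024; the "+ 512" added to
+ // yMult makes the subsequent integer divisions by 1024 round to nearest
+ // instead of truncating toward zero.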
+
// Converting two lines at a time, slightly faster
for (size_t y = 0; y < height; y += 2) {
uint32_t *dstTop = (uint32_t *)dst;
@@ -147,25 +262,6 @@
uint16_t *uSrc = (uint16_t *)srcU;
uint16_t *vSrc = (uint16_t *)srcV;
- // BT.2020 Limited Range conversion
-
- // B = 1.168 *(Y - 64) + 2.148 *(U - 512)
- // G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
- // R = 1.168 *(Y - 64) + 1.683 *(V - 512)
-
- // B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
- // G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
- // R = .................... + 1723/1024 *(V - 512)
-
- // min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
- // min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
- // min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
-
- // max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
- // max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
- // max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
-
- int32_t mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
for (size_t x = 0; x < width; x += 2) {
int32_t u, v, y00, y01, y10, y11;
u = *uSrc - 512;
@@ -173,22 +269,22 @@
v = *vSrc - 512;
vSrc += 1;
- y00 = *ySrcTop - 64;
+ y00 = *ySrcTop - _c16;
ySrcTop += 1;
- y01 = *ySrcTop - 64;
+ y01 = *ySrcTop - _c16;
ySrcTop += 1;
- y10 = *ySrcBot - 64;
+ y10 = *ySrcBot - _c16;
ySrcBot += 1;
- y11 = *ySrcBot - 64;
+ y11 = *ySrcBot - _c16;
ySrcBot += 1;
- int32_t u_b = u * mU_B;
- int32_t u_g = u * mU_G;
- int32_t v_g = v * mV_G;
- int32_t v_r = v * mV_R;
+ int32_t u_b = u * _b_u;
+ int32_t u_g = u * _neg_g_u;
+ int32_t v_g = v * _neg_g_v;
+ int32_t v_r = v * _r_v;
int32_t yMult, b, g, r;
- yMult = y00 * mY;
+ yMult = y00 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -197,7 +293,7 @@
r = CLIP3(0, r, 1023);
*dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y01 * mY;
+ yMult = y01 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -206,7 +302,7 @@
r = CLIP3(0, r, 1023);
*dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y10 * mY;
+ yMult = y10 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -215,7 +311,7 @@
r = CLIP3(0, r, 1023);
*dstBot++ = 3 << 30 | (b << 20) | (g << 10) | r;
- yMult = y11 * mY;
+ yMult = y11 * _y + 512;
b = (yMult + u_b) / 1024;
g = (yMult + v_g + u_g) / 1024;
r = (yMult + v_r) / 1024;
@@ -232,19 +328,21 @@
}
}
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height) {
+void convertYUV420Planar16ToY410OrRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects) {
if (isAtLeastT()) {
convertYUV420Planar16ToRGBA1010102(dst, srcY, srcU, srcV, srcYStride, srcUStride,
- srcVStride, dstStride, width, height);
+ srcVStride, dstStride, width, height, aspects);
} else {
convertYUV420Planar16ToY410(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
dstStride, width, height);
}
}
+
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index cc23b60..7600c5b 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -26,27 +26,35 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/Mutexed.h>
+struct C2ColorAspectsStruct;
+
namespace android {
+
void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, uint32_t width, uint32_t height,
bool isMonochrome = false);
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width,
- size_t height);
+
+void convertYUV420Planar16ToY410OrRGBA1010102(
+ uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height,
+ std::shared_ptr<const C2ColorAspectsStruct> aspects = nullptr);
+
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, size_t width, size_t height,
bool isMonochrome = false);
+
void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, size_t width, size_t height,
bool isMonochrome = false);
+
class SimpleC2Component
: public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
public:
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 386723d..dc467c8 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -636,10 +636,10 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects;
if (buffer->bitdepth == 10) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects =
- mIntf->getColorAspects_l();
+ codedColorAspects = mIntf->getColorAspects_l();
bool allowRGBA1010102 = false;
if (codedColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
codedColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -713,9 +713,11 @@
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410OrRGBA1010102((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2,
- dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ convertYUV420Planar16ToY410OrRGBA1010102(
+ (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ dstYStride / sizeof(uint32_t), mWidth, mHeight,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(codedColorAspects));
} else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index d969f2e..b50fe7d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -688,9 +688,10 @@
std::shared_ptr<C2GraphicBlock> block;
uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
if (img->fmt == VPX_IMG_FMT_I42016) {
IntfImpl::Lock lock = mIntf->lock();
- std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+ defaultColorAspects = mIntf->getDefaultColorAspects_l();
bool allowRGBA1010102 = false;
if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -760,11 +761,14 @@
queue->entries.push_back(
[dstY, srcY, srcU, srcV,
srcYStride, srcUStride, srcVStride, dstYStride,
- width = mWidth, height = std::min(mHeight - i, kHeight)] {
+ width = mWidth, height = std::min(mHeight - i, kHeight),
+ defaultColorAspects] {
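+ // defaultColorAspects is captured by value (a shared_ptr copy) because
+ // the queued conversion entry may run later, after this scope returns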
convertYUV420Planar16ToY410OrRGBA1010102(
(uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
- width, height);
+ width, height,
+ std::static_pointer_cast<const C2ColorAspectsStruct>(
+ defaultColorAspects));
});
srcY += srcYStride / 2 * kHeight;
srcU += srcUStride / 2 * (kHeight / 2);
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 9a71198..8f61129 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -204,7 +204,7 @@
bool OutputBuffers::convert(
const std::shared_ptr<C2Buffer> &src, sp<Codec2Buffer> *dst) {
- if (!src || src->data().type() != C2BufferData::LINEAR) {
+ if (src && src->data().type() != C2BufferData::LINEAR) {
return false;
}
int32_t configEncoding = kAudioEncodingPcm16bit;
@@ -233,7 +233,12 @@
if (!mDataConverter) {
return false;
}
- sp<MediaCodecBuffer> srcBuffer = ConstLinearBlockBuffer::Allocate(mFormat, src);
+ sp<MediaCodecBuffer> srcBuffer;
+ if (src) {
+ srcBuffer = ConstLinearBlockBuffer::Allocate(mFormat, src);
+ } else {
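+ // a null src (e.g. an empty end-of-stream buffer) is converted from an
+ // empty payload so the client buffer still carries the converted format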
+ srcBuffer = new MediaCodecBuffer(mFormat, new ABuffer(0));
+ }
if (!srcBuffer) {
return false;
}
@@ -1253,8 +1258,8 @@
if (newBuffer == nullptr) {
return NO_MEMORY;
}
+ newBuffer->setFormat(mFormat);
}
- newBuffer->setFormat(mFormat);
*index = mImpl.assignSlot(newBuffer);
handleImageData(newBuffer);
*clientBuffer = newBuffer;
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index e9adac4..3f9a40d 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -151,9 +151,10 @@
}
}
- // For VP9/AV1, the static info is always propagated by framework.
+ // VP9 does not support HDR metadata in the bitstream and static metadata
+ // can always be carried by the framework. (The framework does not propagate
+ // dynamic metadata as that needs to be frame accurate.)
supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
- supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
// HDR support implies 10-bit support.
// TODO: directly check this from the component interface
diff --git a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
index 41e4fff..a471291 100644
--- a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
@@ -861,4 +861,57 @@
+ std::to_string(std::get<3>(info.param));
});
+TEST(LinearOutputBuffersTest, PcmConvertFormat) {
+ // Prepare LinearOutputBuffers
+ std::shared_ptr<LinearOutputBuffers> buffers =
+ std::make_shared<LinearOutputBuffers>("test");
+ sp<AMessage> format{new AMessage};
+ format->setInt32(KEY_CHANNEL_COUNT, 1);
+ format->setInt32(KEY_SAMPLE_RATE, 8000);
+ format->setInt32(KEY_PCM_ENCODING, kAudioEncodingPcmFloat);
+ format->setInt32("android._config-pcm-encoding", kAudioEncodingPcm16bit);
+ format->setInt32("android._codec-pcm-encoding", kAudioEncodingPcmFloat);
+ buffers->setFormat(format);
+
+ // Prepare a linear C2Buffer
+ std::shared_ptr<C2BlockPool> pool;
+ ASSERT_EQ(OK, GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, nullptr, &pool));
+
+ std::shared_ptr<C2LinearBlock> block;
+ ASSERT_EQ(OK, pool->fetchLinearBlock(
+ 1024, C2MemoryUsage{C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block));
+ std::shared_ptr<C2Buffer> c2Buffer =
+ C2Buffer::CreateLinearBuffer(block->share(0, 1024, C2Fence()));
+
+ // Test regular buffer convert
+ size_t index;
+ sp<MediaCodecBuffer> clientBuffer;
+ ASSERT_EQ(OK, buffers->registerBuffer(c2Buffer, &index, &clientBuffer));
+ int32_t pcmEncoding = 0;
+ ASSERT_TRUE(clientBuffer->format()->findInt32(KEY_PCM_ENCODING, &pcmEncoding));
+ EXPECT_EQ(kAudioEncodingPcm16bit, pcmEncoding);
+ ASSERT_TRUE(buffers->releaseBuffer(clientBuffer, &c2Buffer));
+
+ // Test null buffer convert
+ ASSERT_EQ(OK, buffers->registerBuffer(nullptr, &index, &clientBuffer));
+ ASSERT_TRUE(clientBuffer->format()->findInt32(KEY_PCM_ENCODING, &pcmEncoding));
+ EXPECT_EQ(kAudioEncodingPcm16bit, pcmEncoding);
+ ASSERT_TRUE(buffers->releaseBuffer(clientBuffer, &c2Buffer));
+
+ // Do the same test in the array mode
+ std::shared_ptr<OutputBuffersArray> array = buffers->toArrayMode(8);
+
+ // Test regular buffer convert
+ ASSERT_EQ(OK, buffers->registerBuffer(c2Buffer, &index, &clientBuffer));
+ ASSERT_TRUE(clientBuffer->format()->findInt32(KEY_PCM_ENCODING, &pcmEncoding));
+ EXPECT_EQ(kAudioEncodingPcm16bit, pcmEncoding);
+ ASSERT_TRUE(buffers->releaseBuffer(clientBuffer, &c2Buffer));
+
+ // Test null buffer convert
+ ASSERT_EQ(OK, buffers->registerBuffer(nullptr, &index, &clientBuffer));
+ ASSERT_TRUE(clientBuffer->format()->findInt32(KEY_PCM_ENCODING, &pcmEncoding));
+ EXPECT_EQ(kAudioEncodingPcm16bit, pcmEncoding);
+ ASSERT_TRUE(buffers->releaseBuffer(clientBuffer, &c2Buffer));
+}
+
} // namespace android