Merge "Camera: HeicCompositeStream: Fix IDLE state transition" into main
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
index ee7ace6..6388518 100644
--- a/cmds/screenrecord/FrameOutput.cpp
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -15,11 +15,14 @@
*/
#define LOG_TAG "ScreenRecord"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
+//#define LOG_NDEBUG 0
+
+#include <com_android_graphics_libgui_flags.h>
+#include <gui/Surface.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
+#include <utils/Log.h>
#include "FrameOutput.h"
@@ -67,11 +70,17 @@
return UNKNOWN_ERROR;
}
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mGlConsumer = new GLConsumer(mExtTextureName, GL_TEXTURE_EXTERNAL_OES, /*useFenceSync=*/true,
+ /*isControlledByApp=*/false);
+ auto producer = mGlConsumer->getSurface()->getIGraphicBufferProducer();
+#else
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
mGlConsumer = new GLConsumer(consumer, mExtTextureName,
GL_TEXTURE_EXTERNAL_OES, true, false);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
mGlConsumer->setName(String8("virtual display"));
mGlConsumer->setDefaultBufferSize(width, height);
producer->setMaxDequeuedBufferCount(4);
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index a19ef8e..727f16a 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -172,10 +172,16 @@
return UNKNOWN_ERROR;
}
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mGlConsumer = new GLConsumer(mExtTextureName, GL_TEXTURE_EXTERNAL_OES, /*useFenceSync=*/true,
+ /*isControlledByApp=*/false);
+ mProducer = mGlConsumer->getSurface()->getIGraphicBufferProducer();
+#else
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&mProducer, &consumer);
mGlConsumer = new GLConsumer(consumer, mExtTextureName,
GL_TEXTURE_EXTERNAL_OES, true, false);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
mGlConsumer->setName(String8("virtual display"));
mGlConsumer->setDefaultBufferSize(width, height);
mProducer->setMaxDequeuedBufferCount(4);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index f26e3a8..1a6e5e8 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -60,6 +60,7 @@
#include <private/media/VideoFrame.h>
+#include <com_android_graphics_libgui_flags.h>
#include <gui/GLConsumer.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
@@ -1133,7 +1134,12 @@
CHECK(gSurface != NULL);
} else {
CHECK(useSurfaceTexAlloc);
-
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ sp<GLConsumer> texture =
+ new GLConsumer(0 /* tex */, GLConsumer::TEXTURE_EXTERNAL,
+ true /* useFenceSync */, false /* isControlledByApp */);
+ gSurface = texture->getSurface();
+#else
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
@@ -1141,6 +1147,7 @@
GLConsumer::TEXTURE_EXTERNAL, true /* useFenceSync */,
false /* isControlledByApp */);
gSurface = new Surface(producer);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
}
}
diff --git a/drm/libmediadrmrkp/Android.bp b/drm/libmediadrmrkp/Android.bp
index b1a01e4..88f0571 100644
--- a/drm/libmediadrmrkp/Android.bp
+++ b/drm/libmediadrmrkp/Android.bp
@@ -14,6 +14,7 @@
],
static_libs: [
"android.hardware.common-V2-ndk",
+ "android.hardware.drm.common-V1-ndk",
"android.hardware.drm-V1-ndk",
"android.hardware.security.rkp-V3-ndk",
"libbase",
@@ -39,6 +40,7 @@
],
static_libs: [
"android.hardware.common-V2-ndk",
+ "android.hardware.drm.common-V1-ndk",
"android.hardware.drm-V1-ndk",
"android.hardware.security.rkp-V3-ndk",
"libbase",
diff --git a/drm/mediadrm/plugins/clearkey/aidl/Android.bp b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
index 8e8e57d..1bb3da6 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
@@ -40,6 +40,7 @@
static_libs: [
"android.hardware.common-V2-ndk",
+ "android.hardware.drm.common-V1-ndk",
"android.hardware.drm-V1-ndk",
"libbase",
"libclearkeybase",
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index 8d9e76e..efac892 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -824,6 +824,10 @@
std::shared_ptr<BufferCache> cache;
int slotId;
sp<Fence> rFence;
+ if (mStopped.load() == true) {
+ ALOGE("cannot deallocate due to being stopped");
+ return C2_BAD_STATE;
+ }
c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
&cache, &slotId, &rFence);
if (res != C2_OK) {
@@ -900,7 +904,10 @@
cache->unblockSlot(buffer->mSlot);
if (oldBuffer) {
// migrated, register the new buffer to the cache.
- cache->mBuffers.emplace(buffer->mSlot, buffer);
+ auto ret = cache->mBuffers.emplace(buffer->mSlot, buffer);
+ if (!ret.second) {
+ ret.first->second = buffer;
+ }
}
}
mDeallocating.erase(origBid);
diff --git a/media/codec2/hal/client/client.cpp b/media/codec2/hal/client/client.cpp
index a137dbb..80735cb 100644
--- a/media/codec2/hal/client/client.cpp
+++ b/media/codec2/hal/client/client.cpp
@@ -2391,7 +2391,12 @@
"GraphicBufferAllocator was not created.";
return C2_CORRUPTED;
}
+ // Note: Consumer usage is set ahead of the HAL allocator (gba) being set.
+ // This is the same as HIDL.
+ uint64_t consumerUsage = configConsumerUsage(surface);
bool ret = gba->configure(surface, generation, maxDequeueCount);
+ ALOGD("setOutputSurface -- generation=%u consumer usage=%#llx",
+ generation, (long long)consumerUsage);
return ret ? C2_OK : C2_CORRUPTED;
}
uint64_t bqId = 0;
@@ -2419,41 +2424,9 @@
mHidlBase1_2 ? &syncObj : nullptr);
}
- // set consumer bits
- // TODO: should this get incorporated into setOutputSurface method so that consumer bits
- // can be set atomically?
- uint64_t consumerUsage = kDefaultConsumerUsage;
- {
- if (surface) {
- uint64_t usage = 0;
- status_t err = surface->getConsumerUsage(&usage);
- if (err != NO_ERROR) {
- ALOGD("setOutputSurface -- failed to get consumer usage bits (%d/%s). ignoring",
- err, asString(err));
- } else {
- // Note: we are adding the default usage because components must support
- // producing output frames that can be displayed an all output surfaces.
-
- // TODO: do not set usage for tunneled scenario. It is unclear if consumer usage
- // is meaningful in a tunneled scenario; on one hand output buffers exist, but
- // they do not exist inside of C2 scope. Any buffer usage shall be communicated
- // through the sideband channel.
-
- consumerUsage = usage | kDefaultConsumerUsage;
- }
- }
-
- C2StreamUsageTuning::output outputUsage{
- 0u, C2AndroidMemoryUsage::FromGrallocUsage(consumerUsage).expected};
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- c2_status_t err = config({&outputUsage}, C2_MAY_BLOCK, &failures);
- if (err != C2_OK) {
- ALOGD("setOutputSurface -- failed to set consumer usage (%d/%s)",
- err, asString(err));
- }
- }
+ uint64_t consumerUsage = configConsumerUsage(surface);
ALOGD("setOutputSurface -- generation=%u consumer usage=%#llx%s",
- generation, (long long)consumerUsage, syncObj ? " sync" : "");
+ generation, (long long)consumerUsage, syncObj ? " sync" : "");
Return<c2_hidl::Status> transStatus = syncObj ?
mHidlBase1_2->setOutputSurfaceWithSyncObj(
@@ -2495,6 +2468,44 @@
return mOutputBufferQueue->outputBuffer(block, input, output);
}
+uint64_t Codec2Client::Component::configConsumerUsage(
+ const sp<IGraphicBufferProducer>& surface) {
+ // set consumer bits
+ // TODO: should this get incorporated into setOutputSurface method so that consumer bits
+ // can be set atomically?
+ uint64_t consumerUsage = kDefaultConsumerUsage;
+ {
+ if (surface) {
+ uint64_t usage = 0;
+ status_t err = surface->getConsumerUsage(&usage);
+ if (err != NO_ERROR) {
+ ALOGD("setOutputSurface -- failed to get consumer usage bits (%d/%s). ignoring",
+ err, asString(err));
+ } else {
+ // Note: we are adding the default usage because components must support
+ // producing output frames that can be displayed on all output surfaces.
+
+ // TODO: do not set usage for tunneled scenario. It is unclear if consumer usage
+ // is meaningful in a tunneled scenario; on one hand output buffers exist, but
+ // they do not exist inside of C2 scope. Any buffer usage shall be communicated
+ // through the sideband channel.
+
+ consumerUsage = usage | kDefaultConsumerUsage;
+ }
+ }
+
+ C2StreamUsageTuning::output outputUsage{
+ 0u, C2AndroidMemoryUsage::FromGrallocUsage(consumerUsage).expected};
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err = config({&outputUsage}, C2_MAY_BLOCK, &failures);
+ if (err != C2_OK) {
+ ALOGD("setOutputSurface -- failed to set consumer usage (%d/%s)",
+ err, asString(err));
+ }
+ }
+ return consumerUsage;
+}
+
void Codec2Client::Component::pollForRenderedFrames(FrameEventHistoryDelta* delta) {
if (mAidlBase) {
// TODO b/311348680
diff --git a/media/codec2/hal/client/include/codec2/hidl/client.h b/media/codec2/hal/client/include/codec2/hidl/client.h
index 413e92e..7923f04 100644
--- a/media/codec2/hal/client/include/codec2/hidl/client.h
+++ b/media/codec2/hal/client/include/codec2/hidl/client.h
@@ -467,6 +467,9 @@
const QueueBufferInput& input,
QueueBufferOutput* output);
+ // configure consumer usage.
+ uint64_t configConsumerUsage(const sp<IGraphicBufferProducer>& surface);
+
// Retrieve frame event history from the output surface.
void pollForRenderedFrames(FrameEventHistoryDelta* delta);
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 4a956f5..90d1874 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -31,6 +31,7 @@
#include <C2Debug.h>
#include <codec2/common/HalSelection.h>
#include <codec2/hidl/client.h>
+#include <com_android_graphics_libgui_flags.h>
#include <gui/BufferQueue.h>
#include <gui/IConsumerListener.h>
#include <gui/IProducerListener.h>
@@ -423,7 +424,6 @@
surfaceMode_t surfMode) {
using namespace android;
sp<IGraphicBufferProducer> producer = nullptr;
- sp<IGraphicBufferConsumer> consumer = nullptr;
sp<GLConsumer> texture = nullptr;
sp<ANativeWindow> surface = nullptr;
static std::atomic_uint32_t surfaceGeneration{0};
@@ -442,6 +442,16 @@
}
if (surfMode == SURFACE) {
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ texture = new GLConsumer(0 /* tex */, GLConsumer::TEXTURE_EXTERNAL, true /* useFenceSync */,
+ false /* isControlledByApp */);
+ sp<Surface> s = texture->getSurface();
+ surface = s;
+ ASSERT_NE(surface, nullptr) << "failed to create Surface object";
+
+ producer = s->getIGraphicBufferProducer();
+#else
+ sp<IGraphicBufferConsumer> consumer = nullptr;
BufferQueue::createBufferQueue(&producer, &consumer);
ASSERT_NE(producer, nullptr) << "createBufferQueue returned invalid producer";
ASSERT_NE(consumer, nullptr) << "createBufferQueue returned invalid consumer";
@@ -452,6 +462,7 @@
surface = new Surface(producer);
ASSERT_NE(surface, nullptr) << "failed to create Surface object";
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
producer->setGenerationNumber(generation);
}
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 2550dcf..164a1e0 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -43,6 +43,7 @@
#include <C2Debug.h>
#include "Codec2Buffer.h"
+#include "Codec2BufferUtils.h"
namespace android {
@@ -215,482 +216,6 @@
mBufferRef.reset();
}
-// GraphicView2MediaImageConverter
-
-namespace {
-
-class GraphicView2MediaImageConverter {
-public:
- /**
- * Creates a C2GraphicView <=> MediaImage converter
- *
- * \param view C2GraphicView object
- * \param format buffer format
- * \param copy whether the converter is used for copy or not
- */
- GraphicView2MediaImageConverter(
- const C2GraphicView &view, const sp<AMessage> &format, bool copy)
- : mInitCheck(NO_INIT),
- mView(view),
- mWidth(view.width()),
- mHeight(view.height()),
- mAllocatedDepth(0),
- mBackBufferSize(0),
- mMediaImage(new ABuffer(sizeof(MediaImage2))) {
- ATRACE_CALL();
- if (!format->findInt32(KEY_COLOR_FORMAT, &mClientColorFormat)) {
- mClientColorFormat = COLOR_FormatYUV420Flexible;
- }
- if (!format->findInt32("android._color-format", &mComponentColorFormat)) {
- mComponentColorFormat = COLOR_FormatYUV420Flexible;
- }
- if (view.error() != C2_OK) {
- ALOGD("Converter: view.error() = %d", view.error());
- mInitCheck = BAD_VALUE;
- return;
- }
- MediaImage2 *mediaImage = (MediaImage2 *)mMediaImage->base();
- const C2PlanarLayout &layout = view.layout();
- if (layout.numPlanes == 0) {
- ALOGD("Converter: 0 planes");
- mInitCheck = BAD_VALUE;
- return;
- }
- memset(mediaImage, 0, sizeof(*mediaImage));
- mAllocatedDepth = layout.planes[0].allocatedDepth;
- uint32_t bitDepth = layout.planes[0].bitDepth;
-
- // align width and height to support subsampling cleanly
- uint32_t stride = align(view.crop().width, 2) * divUp(layout.planes[0].allocatedDepth, 8u);
- uint32_t vStride = align(view.crop().height, 2);
-
- bool tryWrapping = !copy;
-
- switch (layout.type) {
- case C2PlanarLayout::TYPE_YUV: {
- mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
- if (layout.numPlanes != 3) {
- ALOGD("Converter: %d planes for YUV layout", layout.numPlanes);
- mInitCheck = BAD_VALUE;
- return;
- }
- std::optional<int> clientBitDepth = {};
- switch (mClientColorFormat) {
- case COLOR_FormatYUVP010:
- clientBitDepth = 10;
- break;
- case COLOR_FormatYUV411PackedPlanar:
- case COLOR_FormatYUV411Planar:
- case COLOR_FormatYUV420Flexible:
- case COLOR_FormatYUV420PackedPlanar:
- case COLOR_FormatYUV420PackedSemiPlanar:
- case COLOR_FormatYUV420Planar:
- case COLOR_FormatYUV420SemiPlanar:
- case COLOR_FormatYUV422Flexible:
- case COLOR_FormatYUV422PackedPlanar:
- case COLOR_FormatYUV422PackedSemiPlanar:
- case COLOR_FormatYUV422Planar:
- case COLOR_FormatYUV422SemiPlanar:
- case COLOR_FormatYUV444Flexible:
- case COLOR_FormatYUV444Interleaved:
- clientBitDepth = 8;
- break;
- default:
- // no-op; used with optional
- break;
-
- }
- // conversion fails if client bit-depth and the component bit-depth differs
- if ((clientBitDepth) && (bitDepth != clientBitDepth.value())) {
- ALOGD("Bit depth of client: %d and component: %d differs",
- *clientBitDepth, bitDepth);
- mInitCheck = BAD_VALUE;
- return;
- }
- C2PlaneInfo yPlane = layout.planes[C2PlanarLayout::PLANE_Y];
- C2PlaneInfo uPlane = layout.planes[C2PlanarLayout::PLANE_U];
- C2PlaneInfo vPlane = layout.planes[C2PlanarLayout::PLANE_V];
- if (yPlane.channel != C2PlaneInfo::CHANNEL_Y
- || uPlane.channel != C2PlaneInfo::CHANNEL_CB
- || vPlane.channel != C2PlaneInfo::CHANNEL_CR) {
- ALOGD("Converter: not YUV layout");
- mInitCheck = BAD_VALUE;
- return;
- }
- bool yuv420888 = yPlane.rowSampling == 1 && yPlane.colSampling == 1
- && uPlane.rowSampling == 2 && uPlane.colSampling == 2
- && vPlane.rowSampling == 2 && vPlane.colSampling == 2;
- if (yuv420888) {
- for (uint32_t i = 0; i < 3; ++i) {
- const C2PlaneInfo &plane = layout.planes[i];
- if (plane.allocatedDepth != 8 || plane.bitDepth != 8) {
- yuv420888 = false;
- break;
- }
- }
- yuv420888 = yuv420888 && yPlane.colInc == 1 && uPlane.rowInc == vPlane.rowInc;
- }
- int32_t copyFormat = mClientColorFormat;
- if (yuv420888 && mClientColorFormat == COLOR_FormatYUV420Flexible) {
- if (uPlane.colInc == 2 && vPlane.colInc == 2
- && yPlane.rowInc == uPlane.rowInc) {
- copyFormat = COLOR_FormatYUV420PackedSemiPlanar;
- } else if (uPlane.colInc == 1 && vPlane.colInc == 1
- && yPlane.rowInc == uPlane.rowInc * 2) {
- copyFormat = COLOR_FormatYUV420PackedPlanar;
- }
- }
- ALOGV("client_fmt=0x%x y:{colInc=%d rowInc=%d} u:{colInc=%d rowInc=%d} "
- "v:{colInc=%d rowInc=%d}",
- mClientColorFormat,
- yPlane.colInc, yPlane.rowInc,
- uPlane.colInc, uPlane.rowInc,
- vPlane.colInc, vPlane.rowInc);
- switch (copyFormat) {
- case COLOR_FormatYUV420Flexible:
- case COLOR_FormatYUV420Planar:
- case COLOR_FormatYUV420PackedPlanar:
- mediaImage->mPlane[mediaImage->Y].mOffset = 0;
- mediaImage->mPlane[mediaImage->Y].mColInc = 1;
- mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
- mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
- mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
-
- mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
- mediaImage->mPlane[mediaImage->U].mColInc = 1;
- mediaImage->mPlane[mediaImage->U].mRowInc = stride / 2;
- mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
-
- mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 5 / 4;
- mediaImage->mPlane[mediaImage->V].mColInc = 1;
- mediaImage->mPlane[mediaImage->V].mRowInc = stride / 2;
- mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
-
- if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
- tryWrapping = yuv420888 && uPlane.colInc == 1 && vPlane.colInc == 1
- && yPlane.rowInc == uPlane.rowInc * 2
- && view.data()[0] < view.data()[1]
- && view.data()[1] < view.data()[2];
- }
- break;
-
- case COLOR_FormatYUV420SemiPlanar:
- case COLOR_FormatYUV420PackedSemiPlanar:
- mediaImage->mPlane[mediaImage->Y].mOffset = 0;
- mediaImage->mPlane[mediaImage->Y].mColInc = 1;
- mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
- mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
- mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
-
- mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
- mediaImage->mPlane[mediaImage->U].mColInc = 2;
- mediaImage->mPlane[mediaImage->U].mRowInc = stride;
- mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
-
- mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 1;
- mediaImage->mPlane[mediaImage->V].mColInc = 2;
- mediaImage->mPlane[mediaImage->V].mRowInc = stride;
- mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
-
- if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
- tryWrapping = yuv420888 && uPlane.colInc == 2 && vPlane.colInc == 2
- && yPlane.rowInc == uPlane.rowInc
- && view.data()[0] < view.data()[1]
- && view.data()[1] < view.data()[2];
- }
- break;
-
- case COLOR_FormatYUVP010:
- // stride is in bytes
- mediaImage->mPlane[mediaImage->Y].mOffset = 0;
- mediaImage->mPlane[mediaImage->Y].mColInc = 2;
- mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
- mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
- mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
-
- mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
- mediaImage->mPlane[mediaImage->U].mColInc = 4;
- mediaImage->mPlane[mediaImage->U].mRowInc = stride;
- mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
-
- mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
- mediaImage->mPlane[mediaImage->V].mColInc = 4;
- mediaImage->mPlane[mediaImage->V].mRowInc = stride;
- mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
- mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
- if (tryWrapping) {
- tryWrapping = yPlane.allocatedDepth == 16
- && uPlane.allocatedDepth == 16
- && vPlane.allocatedDepth == 16
- && yPlane.bitDepth == 10
- && uPlane.bitDepth == 10
- && vPlane.bitDepth == 10
- && yPlane.rightShift == 6
- && uPlane.rightShift == 6
- && vPlane.rightShift == 6
- && yPlane.rowSampling == 1 && yPlane.colSampling == 1
- && uPlane.rowSampling == 2 && uPlane.colSampling == 2
- && vPlane.rowSampling == 2 && vPlane.colSampling == 2
- && yPlane.colInc == 2
- && uPlane.colInc == 4
- && vPlane.colInc == 4
- && yPlane.rowInc == uPlane.rowInc
- && yPlane.rowInc == vPlane.rowInc;
- }
- break;
-
- default: {
- // default to fully planar format --- this will be overridden if wrapping
- // TODO: keep interleaved format
- int32_t colInc = divUp(mAllocatedDepth, 8u);
- int32_t rowInc = stride * colInc / yPlane.colSampling;
- mediaImage->mPlane[mediaImage->Y].mOffset = 0;
- mediaImage->mPlane[mediaImage->Y].mColInc = colInc;
- mediaImage->mPlane[mediaImage->Y].mRowInc = rowInc;
- mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = yPlane.colSampling;
- mediaImage->mPlane[mediaImage->Y].mVertSubsampling = yPlane.rowSampling;
- int32_t offset = rowInc * vStride / yPlane.rowSampling;
-
- rowInc = stride * colInc / uPlane.colSampling;
- mediaImage->mPlane[mediaImage->U].mOffset = offset;
- mediaImage->mPlane[mediaImage->U].mColInc = colInc;
- mediaImage->mPlane[mediaImage->U].mRowInc = rowInc;
- mediaImage->mPlane[mediaImage->U].mHorizSubsampling = uPlane.colSampling;
- mediaImage->mPlane[mediaImage->U].mVertSubsampling = uPlane.rowSampling;
- offset += rowInc * vStride / uPlane.rowSampling;
-
- rowInc = stride * colInc / vPlane.colSampling;
- mediaImage->mPlane[mediaImage->V].mOffset = offset;
- mediaImage->mPlane[mediaImage->V].mColInc = colInc;
- mediaImage->mPlane[mediaImage->V].mRowInc = rowInc;
- mediaImage->mPlane[mediaImage->V].mHorizSubsampling = vPlane.colSampling;
- mediaImage->mPlane[mediaImage->V].mVertSubsampling = vPlane.rowSampling;
- break;
- }
- }
- break;
- }
-
- case C2PlanarLayout::TYPE_YUVA:
- ALOGD("Converter: unrecognized color format "
- "(client %d component %d) for YUVA layout",
- mClientColorFormat, mComponentColorFormat);
- mInitCheck = NO_INIT;
- return;
- case C2PlanarLayout::TYPE_RGB:
- mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGB;
- // TODO: support MediaImage layout
- switch (mClientColorFormat) {
- case COLOR_FormatSurface:
- case COLOR_FormatRGBFlexible:
- case COLOR_Format24bitBGR888:
- case COLOR_Format24bitRGB888:
- ALOGD("Converter: accept color format "
- "(client %d component %d) for RGB layout",
- mClientColorFormat, mComponentColorFormat);
- break;
- default:
- ALOGD("Converter: unrecognized color format "
- "(client %d component %d) for RGB layout",
- mClientColorFormat, mComponentColorFormat);
- mInitCheck = BAD_VALUE;
- return;
- }
- if (layout.numPlanes != 3) {
- ALOGD("Converter: %d planes for RGB layout", layout.numPlanes);
- mInitCheck = BAD_VALUE;
- return;
- }
- break;
- case C2PlanarLayout::TYPE_RGBA:
- mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGBA;
- // TODO: support MediaImage layout
- switch (mClientColorFormat) {
- case COLOR_FormatSurface:
- case COLOR_FormatRGBAFlexible:
- case COLOR_Format32bitABGR8888:
- case COLOR_Format32bitARGB8888:
- case COLOR_Format32bitBGRA8888:
- ALOGD("Converter: accept color format "
- "(client %d component %d) for RGBA layout",
- mClientColorFormat, mComponentColorFormat);
- break;
- default:
- ALOGD("Converter: unrecognized color format "
- "(client %d component %d) for RGBA layout",
- mClientColorFormat, mComponentColorFormat);
- mInitCheck = BAD_VALUE;
- return;
- }
- if (layout.numPlanes != 4) {
- ALOGD("Converter: %d planes for RGBA layout", layout.numPlanes);
- mInitCheck = BAD_VALUE;
- return;
- }
- break;
- default:
- mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
- if (layout.numPlanes == 1) {
- const C2PlaneInfo &plane = layout.planes[0];
- if (plane.colInc < 0 || plane.rowInc < 0) {
- // Copy-only if we have negative colInc/rowInc
- tryWrapping = false;
- }
- mediaImage->mPlane[0].mOffset = 0;
- mediaImage->mPlane[0].mColInc = std::abs(plane.colInc);
- mediaImage->mPlane[0].mRowInc = std::abs(plane.rowInc);
- mediaImage->mPlane[0].mHorizSubsampling = plane.colSampling;
- mediaImage->mPlane[0].mVertSubsampling = plane.rowSampling;
- } else {
- ALOGD("Converter: unrecognized layout: color format (client %d component %d)",
- mClientColorFormat, mComponentColorFormat);
- mInitCheck = NO_INIT;
- return;
- }
- break;
- }
- if (tryWrapping) {
- // try to map directly. check if the planes are near one another
- const uint8_t *minPtr = mView.data()[0];
- const uint8_t *maxPtr = mView.data()[0];
- int32_t planeSize = 0;
- for (uint32_t i = 0; i < layout.numPlanes; ++i) {
- const C2PlaneInfo &plane = layout.planes[i];
- int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
- ssize_t minOffset = plane.minOffset(
- mWidth / plane.colSampling, mHeight / plane.rowSampling);
- ssize_t maxOffset = plane.maxOffset(
- mWidth / plane.colSampling, mHeight / plane.rowSampling);
- if (minPtr > mView.data()[i] + minOffset) {
- minPtr = mView.data()[i] + minOffset;
- }
- if (maxPtr < mView.data()[i] + maxOffset) {
- maxPtr = mView.data()[i] + maxOffset;
- }
- planeSize += planeStride * divUp(mAllocatedDepth, 8u)
- * align(mHeight, 64) / plane.rowSampling;
- }
-
- if (minPtr == mView.data()[0] && (maxPtr - minPtr) <= planeSize) {
- // FIXME: this is risky as reading/writing data out of bound results
- // in an undefined behavior, but gralloc does assume a
- // contiguous mapping
- for (uint32_t i = 0; i < layout.numPlanes; ++i) {
- const C2PlaneInfo &plane = layout.planes[i];
- mediaImage->mPlane[i].mOffset = mView.data()[i] - minPtr;
- mediaImage->mPlane[i].mColInc = plane.colInc;
- mediaImage->mPlane[i].mRowInc = plane.rowInc;
- mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
- mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
- }
- mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr), maxPtr - minPtr);
- ALOGV("Converter: wrapped (capacity=%zu)", mWrapped->capacity());
- }
- }
- mediaImage->mNumPlanes = layout.numPlanes;
- mediaImage->mWidth = view.crop().width;
- mediaImage->mHeight = view.crop().height;
- mediaImage->mBitDepth = bitDepth;
- mediaImage->mBitDepthAllocated = mAllocatedDepth;
-
- uint32_t bufferSize = 0;
- for (uint32_t i = 0; i < layout.numPlanes; ++i) {
- const C2PlaneInfo &plane = layout.planes[i];
- if (plane.allocatedDepth < plane.bitDepth
- || plane.rightShift != plane.allocatedDepth - plane.bitDepth) {
- ALOGD("rightShift value of %u unsupported", plane.rightShift);
- mInitCheck = BAD_VALUE;
- return;
- }
- if (plane.allocatedDepth > 8 && plane.endianness != C2PlaneInfo::NATIVE) {
- ALOGD("endianness value of %u unsupported", plane.endianness);
- mInitCheck = BAD_VALUE;
- return;
- }
- if (plane.allocatedDepth != mAllocatedDepth || plane.bitDepth != bitDepth) {
- ALOGD("different allocatedDepth/bitDepth per plane unsupported");
- mInitCheck = BAD_VALUE;
- return;
- }
- // stride is in bytes
- bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
- }
-
- mBackBufferSize = bufferSize;
- mInitCheck = OK;
- }
-
- status_t initCheck() const { return mInitCheck; }
-
- uint32_t backBufferSize() const { return mBackBufferSize; }
-
- /**
- * Wrap C2GraphicView using a MediaImage2. Note that if not wrapped, the content is not mapped
- * in this function --- the caller should use CopyGraphicView2MediaImage() function to copy the
- * data into a backing buffer explicitly.
- *
- * \return media buffer. This is null if wrapping failed.
- */
- sp<ABuffer> wrap() const {
- if (mBackBuffer == nullptr) {
- return mWrapped;
- }
- return nullptr;
- }
-
- bool setBackBuffer(const sp<ABuffer> &backBuffer) {
- if (backBuffer == nullptr) {
- return false;
- }
- if (backBuffer->capacity() < mBackBufferSize) {
- return false;
- }
- backBuffer->setRange(0, mBackBufferSize);
- mBackBuffer = backBuffer;
- return true;
- }
-
- /**
- * Copy C2GraphicView to MediaImage2.
- */
- status_t copyToMediaImage() {
- ATRACE_CALL();
- if (mInitCheck != OK) {
- return mInitCheck;
- }
- return ImageCopy(mBackBuffer->base(), getMediaImage(), mView);
- }
-
- const sp<ABuffer> &imageData() const { return mMediaImage; }
-
-private:
- status_t mInitCheck;
-
- const C2GraphicView mView;
- uint32_t mWidth;
- uint32_t mHeight;
- int32_t mClientColorFormat; ///< SDK color format for MediaImage
- int32_t mComponentColorFormat; ///< SDK color format from component
- sp<ABuffer> mWrapped; ///< wrapped buffer (if we can map C2Buffer to an ABuffer)
- uint32_t mAllocatedDepth;
- uint32_t mBackBufferSize;
- sp<ABuffer> mMediaImage;
- std::function<sp<ABuffer>(size_t)> mAlloc;
-
- sp<ABuffer> mBackBuffer; ///< backing buffer if we have to copy C2Buffer <=> ABuffer
-
- MediaImage2 *getMediaImage() {
- return (MediaImage2 *)mMediaImage->base();
- }
-};
-
-} // namespace
-
// GraphicBlockBuffer
// static
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index 5e96921..8c5e909 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -44,28 +44,6 @@
} // namespace drm
} // namespace hardware
-/**
- * Copies a graphic view into a media image.
- *
- * \param imgBase base of MediaImage
- * \param img MediaImage data
- * \param view graphic view
- *
- * \return OK on success
- */
-status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view);
-
-/**
- * Copies a media image into a graphic view.
- *
- * \param view graphic view
- * \param imgBase base of MediaImage
- * \param img MediaImage data
- *
- * \return OK on success
- */
-status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img);
-
class Codec2Buffer : public MediaCodecBuffer {
public:
using MediaCodecBuffer::MediaCodecBuffer;
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 75e9bbc..574f1b9 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -27,7 +27,10 @@
#include <android/hardware_buffer.h>
#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <C2Debug.h>
@@ -787,4 +790,438 @@
return MemoryBlockPool().fetch(size);
}
+GraphicView2MediaImageConverter::GraphicView2MediaImageConverter(
+ const C2GraphicView &view, const sp<AMessage> &format, bool copy)
+ : mInitCheck(NO_INIT),
+ mView(view),
+ mWidth(view.width()),
+ mHeight(view.height()),
+ mAllocatedDepth(0),
+ mBackBufferSize(0),
+ mMediaImage(new ABuffer(sizeof(MediaImage2))) {
+ ATRACE_CALL();
+ if (!format->findInt32(KEY_COLOR_FORMAT, &mClientColorFormat)) {
+ mClientColorFormat = COLOR_FormatYUV420Flexible;
+ }
+ if (!format->findInt32("android._color-format", &mComponentColorFormat)) {
+ mComponentColorFormat = COLOR_FormatYUV420Flexible;
+ }
+ if (view.error() != C2_OK) {
+ ALOGD("Converter: view.error() = %d", view.error());
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ MediaImage2 *mediaImage = (MediaImage2 *)mMediaImage->base();
+ const C2PlanarLayout &layout = view.layout();
+ if (layout.numPlanes == 0) {
+ ALOGD("Converter: 0 planes");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ memset(mediaImage, 0, sizeof(*mediaImage));
+ mAllocatedDepth = layout.planes[0].allocatedDepth;
+ uint32_t bitDepth = layout.planes[0].bitDepth;
+
+ // align width and height to support subsampling cleanly
+ uint32_t stride = align(view.crop().width, 2) * divUp(layout.planes[0].allocatedDepth, 8u);
+ uint32_t vStride = align(view.crop().height, 2);
+
+ bool tryWrapping = !copy;
+
+ switch (layout.type) {
+ case C2PlanarLayout::TYPE_YUV: {
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
+ if (layout.numPlanes != 3) {
+ ALOGD("Converter: %d planes for YUV layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ std::optional<int> clientBitDepth = {};
+ switch (mClientColorFormat) {
+ case COLOR_FormatYUVP010:
+ clientBitDepth = 10;
+ break;
+ case COLOR_FormatYUV411PackedPlanar:
+ case COLOR_FormatYUV411Planar:
+ case COLOR_FormatYUV420Flexible:
+ case COLOR_FormatYUV420PackedPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV422Flexible:
+ case COLOR_FormatYUV422PackedPlanar:
+ case COLOR_FormatYUV422PackedSemiPlanar:
+ case COLOR_FormatYUV422Planar:
+ case COLOR_FormatYUV422SemiPlanar:
+ case COLOR_FormatYUV444Flexible:
+ case COLOR_FormatYUV444Interleaved:
+ clientBitDepth = 8;
+ break;
+ default:
+ // no-op; used with optional
+ break;
+
+ }
+ // conversion fails if client bit-depth and the component bit-depth differs
+ if ((clientBitDepth) && (bitDepth != clientBitDepth.value())) {
+ ALOGD("Bit depth of client: %d and component: %d differs",
+ *clientBitDepth, bitDepth);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ C2PlaneInfo yPlane = layout.planes[C2PlanarLayout::PLANE_Y];
+ C2PlaneInfo uPlane = layout.planes[C2PlanarLayout::PLANE_U];
+ C2PlaneInfo vPlane = layout.planes[C2PlanarLayout::PLANE_V];
+ if (yPlane.channel != C2PlaneInfo::CHANNEL_Y
+ || uPlane.channel != C2PlaneInfo::CHANNEL_CB
+ || vPlane.channel != C2PlaneInfo::CHANNEL_CR) {
+ ALOGD("Converter: not YUV layout");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ bool yuv420888 = yPlane.rowSampling == 1 && yPlane.colSampling == 1
+ && uPlane.rowSampling == 2 && uPlane.colSampling == 2
+ && vPlane.rowSampling == 2 && vPlane.colSampling == 2;
+ if (yuv420888) {
+ for (uint32_t i = 0; i < 3; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ if (plane.allocatedDepth != 8 || plane.bitDepth != 8) {
+ yuv420888 = false;
+ break;
+ }
+ }
+ yuv420888 = yuv420888 && yPlane.colInc == 1 && uPlane.rowInc == vPlane.rowInc;
+ }
+ int32_t copyFormat = mClientColorFormat;
+ if (yuv420888 && mClientColorFormat == COLOR_FormatYUV420Flexible) {
+ if (uPlane.colInc == 2 && vPlane.colInc == 2
+ && yPlane.rowInc == uPlane.rowInc) {
+ copyFormat = COLOR_FormatYUV420PackedSemiPlanar;
+ } else if (uPlane.colInc == 1 && vPlane.colInc == 1
+ && yPlane.rowInc == uPlane.rowInc * 2) {
+ copyFormat = COLOR_FormatYUV420PackedPlanar;
+ }
+ }
+ ALOGV("client_fmt=0x%x y:{colInc=%d rowInc=%d} u:{colInc=%d rowInc=%d} "
+ "v:{colInc=%d rowInc=%d}",
+ mClientColorFormat,
+ yPlane.colInc, yPlane.rowInc,
+ uPlane.colInc, uPlane.rowInc,
+ vPlane.colInc, vPlane.rowInc);
+ switch (copyFormat) {
+ case COLOR_FormatYUV420Flexible:
+ case COLOR_FormatYUV420Planar:
+ case COLOR_FormatYUV420PackedPlanar:
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = 1;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
+
+ mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
+ mediaImage->mPlane[mediaImage->U].mColInc = 1;
+ mediaImage->mPlane[mediaImage->U].mRowInc = stride / 2;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
+
+ mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 5 / 4;
+ mediaImage->mPlane[mediaImage->V].mColInc = 1;
+ mediaImage->mPlane[mediaImage->V].mRowInc = stride / 2;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
+
+ if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
+ tryWrapping = yuv420888 && uPlane.colInc == 1 && vPlane.colInc == 1
+ && yPlane.rowInc == uPlane.rowInc * 2
+ && view.data()[0] < view.data()[1]
+ && view.data()[1] < view.data()[2];
+ }
+ break;
+
+ case COLOR_FormatYUV420SemiPlanar:
+ case COLOR_FormatYUV420PackedSemiPlanar:
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = 1;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
+
+ mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
+ mediaImage->mPlane[mediaImage->U].mColInc = 2;
+ mediaImage->mPlane[mediaImage->U].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
+
+ mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 1;
+ mediaImage->mPlane[mediaImage->V].mColInc = 2;
+ mediaImage->mPlane[mediaImage->V].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
+
+ if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
+ tryWrapping = yuv420888 && uPlane.colInc == 2 && vPlane.colInc == 2
+ && yPlane.rowInc == uPlane.rowInc
+ && view.data()[0] < view.data()[1]
+ && view.data()[1] < view.data()[2];
+ }
+ break;
+
+ case COLOR_FormatYUVP010:
+ // stride is in bytes
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = 2;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
+
+ mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
+ mediaImage->mPlane[mediaImage->U].mColInc = 4;
+ mediaImage->mPlane[mediaImage->U].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
+
+ mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
+ mediaImage->mPlane[mediaImage->V].mColInc = 4;
+ mediaImage->mPlane[mediaImage->V].mRowInc = stride;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
+ if (tryWrapping) {
+ tryWrapping = yPlane.allocatedDepth == 16
+ && uPlane.allocatedDepth == 16
+ && vPlane.allocatedDepth == 16
+ && yPlane.bitDepth == 10
+ && uPlane.bitDepth == 10
+ && vPlane.bitDepth == 10
+ && yPlane.rightShift == 6
+ && uPlane.rightShift == 6
+ && vPlane.rightShift == 6
+ && yPlane.rowSampling == 1 && yPlane.colSampling == 1
+ && uPlane.rowSampling == 2 && uPlane.colSampling == 2
+ && vPlane.rowSampling == 2 && vPlane.colSampling == 2
+ && yPlane.colInc == 2
+ && uPlane.colInc == 4
+ && vPlane.colInc == 4
+ && yPlane.rowInc == uPlane.rowInc
+ && yPlane.rowInc == vPlane.rowInc;
+ }
+ break;
+
+ default: {
+ // default to fully planar format --- this will be overridden if wrapping
+ // TODO: keep interleaved format
+ int32_t colInc = divUp(mAllocatedDepth, 8u);
+ int32_t rowInc = stride * colInc / yPlane.colSampling;
+ mediaImage->mPlane[mediaImage->Y].mOffset = 0;
+ mediaImage->mPlane[mediaImage->Y].mColInc = colInc;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = rowInc;
+ mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = yPlane.colSampling;
+ mediaImage->mPlane[mediaImage->Y].mVertSubsampling = yPlane.rowSampling;
+ int32_t offset = rowInc * vStride / yPlane.rowSampling;
+
+ rowInc = stride * colInc / uPlane.colSampling;
+ mediaImage->mPlane[mediaImage->U].mOffset = offset;
+ mediaImage->mPlane[mediaImage->U].mColInc = colInc;
+ mediaImage->mPlane[mediaImage->U].mRowInc = rowInc;
+ mediaImage->mPlane[mediaImage->U].mHorizSubsampling = uPlane.colSampling;
+ mediaImage->mPlane[mediaImage->U].mVertSubsampling = uPlane.rowSampling;
+ offset += rowInc * vStride / uPlane.rowSampling;
+
+ rowInc = stride * colInc / vPlane.colSampling;
+ mediaImage->mPlane[mediaImage->V].mOffset = offset;
+ mediaImage->mPlane[mediaImage->V].mColInc = colInc;
+ mediaImage->mPlane[mediaImage->V].mRowInc = rowInc;
+ mediaImage->mPlane[mediaImage->V].mHorizSubsampling = vPlane.colSampling;
+ mediaImage->mPlane[mediaImage->V].mVertSubsampling = vPlane.rowSampling;
+ break;
+ }
+ }
+ break;
+ }
+
+ case C2PlanarLayout::TYPE_YUVA:
+ ALOGD("Converter: unrecognized color format "
+ "(client %d component %d) for YUVA layout",
+ mClientColorFormat, mComponentColorFormat);
+ mInitCheck = NO_INIT;
+ return;
+ case C2PlanarLayout::TYPE_RGB:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGB;
+ // TODO: support MediaImage layout
+ switch (mClientColorFormat) {
+ case COLOR_FormatSurface:
+ case COLOR_FormatRGBFlexible:
+ case COLOR_Format24bitBGR888:
+ case COLOR_Format24bitRGB888:
+ ALOGD("Converter: accept color format "
+ "(client %d component %d) for RGB layout",
+ mClientColorFormat, mComponentColorFormat);
+ break;
+ default:
+ ALOGD("Converter: unrecognized color format "
+ "(client %d component %d) for RGB layout",
+ mClientColorFormat, mComponentColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (layout.numPlanes != 3) {
+ ALOGD("Converter: %d planes for RGB layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ break;
+ case C2PlanarLayout::TYPE_RGBA:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_RGBA;
+ // TODO: support MediaImage layout
+ switch (mClientColorFormat) {
+ case COLOR_FormatSurface:
+ case COLOR_FormatRGBAFlexible:
+ case COLOR_Format32bitABGR8888:
+ case COLOR_Format32bitARGB8888:
+ case COLOR_Format32bitBGRA8888:
+ ALOGD("Converter: accept color format "
+ "(client %d component %d) for RGBA layout",
+ mClientColorFormat, mComponentColorFormat);
+ break;
+ default:
+ ALOGD("Converter: unrecognized color format "
+ "(client %d component %d) for RGBA layout",
+ mClientColorFormat, mComponentColorFormat);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (layout.numPlanes != 4) {
+ ALOGD("Converter: %d planes for RGBA layout", layout.numPlanes);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ break;
+ default:
+ mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ if (layout.numPlanes == 1) {
+ const C2PlaneInfo &plane = layout.planes[0];
+ if (plane.colInc < 0 || plane.rowInc < 0) {
+ // Copy-only if we have negative colInc/rowInc
+ tryWrapping = false;
+ }
+ mediaImage->mPlane[0].mOffset = 0;
+ mediaImage->mPlane[0].mColInc = std::abs(plane.colInc);
+ mediaImage->mPlane[0].mRowInc = std::abs(plane.rowInc);
+ mediaImage->mPlane[0].mHorizSubsampling = plane.colSampling;
+ mediaImage->mPlane[0].mVertSubsampling = plane.rowSampling;
+ } else {
+ ALOGD("Converter: unrecognized layout: color format (client %d component %d)",
+ mClientColorFormat, mComponentColorFormat);
+ mInitCheck = NO_INIT;
+ return;
+ }
+ break;
+ }
+ if (tryWrapping) {
+ // try to map directly. check if the planes are near one another
+ const uint8_t *minPtr = mView.data()[0];
+ const uint8_t *maxPtr = mView.data()[0];
+ int32_t planeSize = 0;
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
+ ssize_t minOffset = plane.minOffset(
+ mWidth / plane.colSampling, mHeight / plane.rowSampling);
+ ssize_t maxOffset = plane.maxOffset(
+ mWidth / plane.colSampling, mHeight / plane.rowSampling);
+ if (minPtr > mView.data()[i] + minOffset) {
+ minPtr = mView.data()[i] + minOffset;
+ }
+ if (maxPtr < mView.data()[i] + maxOffset) {
+ maxPtr = mView.data()[i] + maxOffset;
+ }
+ planeSize += planeStride * divUp(mAllocatedDepth, 8u)
+ * align(mHeight, 64) / plane.rowSampling;
+ }
+
+ if (minPtr == mView.data()[0] && (maxPtr - minPtr) <= planeSize) {
+ // FIXME: this is risky as reading/writing data out of bound results
+ // in an undefined behavior, but gralloc does assume a
+ // contiguous mapping
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ mediaImage->mPlane[i].mOffset = mView.data()[i] - minPtr;
+ mediaImage->mPlane[i].mColInc = plane.colInc;
+ mediaImage->mPlane[i].mRowInc = plane.rowInc;
+ mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
+ mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
+ }
+ mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr), maxPtr - minPtr);
+ ALOGV("Converter: wrapped (capacity=%zu)", mWrapped->capacity());
+ }
+ }
+ mediaImage->mNumPlanes = layout.numPlanes;
+ mediaImage->mWidth = view.crop().width;
+ mediaImage->mHeight = view.crop().height;
+ mediaImage->mBitDepth = bitDepth;
+ mediaImage->mBitDepthAllocated = mAllocatedDepth;
+
+ uint32_t bufferSize = 0;
+ for (uint32_t i = 0; i < layout.numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout.planes[i];
+ if (plane.allocatedDepth < plane.bitDepth
+ || plane.rightShift != plane.allocatedDepth - plane.bitDepth) {
+ ALOGD("rightShift value of %u unsupported", plane.rightShift);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (plane.allocatedDepth > 8 && plane.endianness != C2PlaneInfo::NATIVE) {
+ ALOGD("endianness value of %u unsupported", plane.endianness);
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ if (plane.allocatedDepth != mAllocatedDepth || plane.bitDepth != bitDepth) {
+ ALOGD("different allocatedDepth/bitDepth per plane unsupported");
+ mInitCheck = BAD_VALUE;
+ return;
+ }
+ // stride is in bytes
+ bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
+ }
+
+ mBackBufferSize = bufferSize;
+ mInitCheck = OK;
+}
+
+status_t GraphicView2MediaImageConverter::initCheck() const { return mInitCheck; }
+
+uint32_t GraphicView2MediaImageConverter::backBufferSize() const { return mBackBufferSize; }
+
+sp<ABuffer> GraphicView2MediaImageConverter::wrap() const {
+ if (mBackBuffer == nullptr) {
+ return mWrapped;
+ }
+ return nullptr;
+}
+
+bool GraphicView2MediaImageConverter::setBackBuffer(const sp<ABuffer> &backBuffer) {
+ if (backBuffer == nullptr) {
+ return false;
+ }
+ if (backBuffer->capacity() < mBackBufferSize) {
+ return false;
+ }
+ backBuffer->setRange(0, mBackBufferSize);
+ mBackBuffer = backBuffer;
+ return true;
+}
+
+status_t GraphicView2MediaImageConverter::copyToMediaImage() {
+ ATRACE_CALL();
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+ return ImageCopy(mBackBuffer->base(), getMediaImage(), mView);
+}
+
+const sp<ABuffer> &GraphicView2MediaImageConverter::imageData() const { return mMediaImage; }
+
+MediaImage2 *GraphicView2MediaImageConverter::getMediaImage() {
+ return (MediaImage2 *)mMediaImage->base();
+}
+
} // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index 6b0ba7f..8daf3d8 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -22,6 +22,7 @@
#include <C2ParamDef.h>
#include <media/hardware/VideoAPI.h>
+#include <utils/StrongPointer.h>
#include <utils/Errors.h>
namespace android {
@@ -194,6 +195,61 @@
std::shared_ptr<Impl> mImpl;
};
+struct ABuffer;
+struct AMessage;
+
+class GraphicView2MediaImageConverter {
+public:
+ /**
+ * Creates a C2GraphicView <=> MediaImage converter
+ *
+ * \param view C2GraphicView object
+ * \param format buffer format
+ * \param copy whether the converter is used for copy or not
+ */
+ GraphicView2MediaImageConverter(
+ const C2GraphicView &view, const sp<AMessage> &format, bool copy);
+
+ status_t initCheck() const;
+
+ uint32_t backBufferSize() const;
+
+ /**
+ * Wrap C2GraphicView using a MediaImage2. Note that if not wrapped, the content is not mapped
+ * in this function --- the caller should use CopyGraphicView2MediaImage() function to copy the
+ * data into a backing buffer explicitly.
+ *
+ * \return media buffer. This is null if wrapping failed.
+ */
+ sp<ABuffer> wrap() const;
+
+ bool setBackBuffer(const sp<ABuffer> &backBuffer);
+
+ /**
+ * Copy C2GraphicView to MediaImage2.
+ */
+ status_t copyToMediaImage();
+
+ const sp<ABuffer> &imageData() const;
+
+private:
+ status_t mInitCheck;
+
+ const C2GraphicView mView;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ int32_t mClientColorFormat; ///< SDK color format for MediaImage
+ int32_t mComponentColorFormat; ///< SDK color format from component
+ sp<ABuffer> mWrapped; ///< wrapped buffer (if we can map C2Buffer to an ABuffer)
+ uint32_t mAllocatedDepth;
+ uint32_t mBackBufferSize;
+ sp<ABuffer> mMediaImage;
+
+ sp<ABuffer> mBackBuffer; ///< backing buffer if we have to copy C2Buffer <=> ABuffer
+
+ MediaImage2 *getMediaImage();
+};
+
} // namespace android
#endif // CODEC2_BUFFER_UTILS_H_
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index f729e1b..0f43de5 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -680,6 +680,17 @@
AutoMutex lock(mLock);
ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
__func__, mPortId, deviceId, mSelectedDeviceId);
+ const int64_t beginNs = systemTime();
+ mediametrics::Defer defer([&] {
+ mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_CALLERNAME,
+ mCallerName.empty()
+ ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
+ : mCallerName.c_str())
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPREFERREDDEVICE)
+ .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
+ .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)deviceId)
+ .record(); });
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 161c4d5..336af36 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1676,6 +1676,18 @@
AutoMutex lock(mLock);
ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
__func__, mPortId, deviceId, mSelectedDeviceId);
+ const int64_t beginNs = systemTime();
+ mediametrics::Defer defer([&] {
+ mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_CALLERNAME,
+ mCallerName.empty()
+ ? AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN
+ : mCallerName.c_str())
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETPREFERREDDEVICE)
+ .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
+ .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)deviceId)
+ .record(); });
+
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
diff --git a/media/libaudiohal/impl/AidlUtils.h b/media/libaudiohal/impl/AidlUtils.h
index 97a5bba..fcd7a01 100644
--- a/media/libaudiohal/impl/AidlUtils.h
+++ b/media/libaudiohal/impl/AidlUtils.h
@@ -25,6 +25,32 @@
namespace android {
+/*
+ * Helper macro to add instance name, function name in logs
+ * classes should provide getInstanceName and getClassName API to use these macros.
+ * print function names along with instance name.
+ *
+ * Usage:
+ * AUGMENT_LOG(I, "hello!");
+ * AUGMENT_LOG(W, "value: %d", value);
+ *
+ * AUGMENT_LOG_IF(D, value < 0, "negative");
+ * AUGMENT_LOG_IF(E, value < 0, "bad value: %d", value);
+ */
+
+#define AUGMENT_LOG(level, ...) \
+ ALOG##level("[%s] %s: " __android_second(0, __VA_ARGS__, ""), getInstanceName().c_str(), \
+ __func__ __android_rest(__VA_ARGS__))
+
+#define AUGMENT_LOG_IF(level, cond, ...) \
+ ALOG##level##_IF(cond, "[%s] %s: " __android_second(0, __VA_ARGS__, ""), \
+ getInstanceName().c_str(), __func__ __android_rest(__VA_ARGS__))
+// Used to register an entry into the function
+#define LOG_ENTRY() ALOGD("[%s] %s", getInstanceName().c_str(), __func__)
+
+// entry logging as verbose level
+#define LOG_ENTRY_V() ALOGV("[%s] %s", getInstanceName().c_str(), __func__)
+
class HalDeathHandler {
public:
static HalDeathHandler& getInstance();
diff --git a/media/libaudiohal/impl/ConversionHelperAidl.h b/media/libaudiohal/impl/ConversionHelperAidl.h
index 0fadd9c..2d19bee 100644
--- a/media/libaudiohal/impl/ConversionHelperAidl.h
+++ b/media/libaudiohal/impl/ConversionHelperAidl.h
@@ -49,13 +49,15 @@
class ConversionHelperAidl {
protected:
- ConversionHelperAidl(std::string_view className) : mClassName(className) {}
+ ConversionHelperAidl(std::string_view className, std::string_view instanceName)
+ : mClassName(className), mInstanceName(instanceName) {}
- const std::string& getClassName() const {
- return mClassName;
- }
+ const std::string& getClassName() const { return mClassName; }
+
+ const std::string& getInstanceName() const { return mInstanceName; }
const std::string mClassName;
+ const std::string mInstanceName;
};
// 'action' must accept a value of type 'T' and return 'status_t'.
diff --git a/media/libaudiohal/impl/DeviceHalAidl.cpp b/media/libaudiohal/impl/DeviceHalAidl.cpp
index 4d780f5..86e2ca5 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalAidl.cpp
@@ -32,6 +32,7 @@
#include <Utils.h>
#include <utils/Log.h>
+#include "AidlUtils.h"
#include "DeviceHalAidl.h"
#include "EffectHalAidl.h"
#include "StreamHalAidl.h"
@@ -74,6 +75,18 @@
using aidl::android::hardware::audio::core::ModuleDebug;
using aidl::android::hardware::audio::core::VendorParameter;
+#define RETURN_IF_MODULE_NOT_INIT(retVal) \
+ if (mModule == nullptr) { \
+ AUGMENT_LOG(E, "module not initialized"); \
+ return retVal; \
+ }
+
+#define RETURN_IF_TELEPHONY_NOT_INIT(retVal) \
+ if (mTelephony == nullptr) { \
+ AUGMENT_LOG(E, "telephony not initialized"); \
+ return retVal; \
+ }
+
namespace android {
namespace {
@@ -106,15 +119,16 @@
DeviceHalAidl::DeviceHalAidl(const std::string& instance, const std::shared_ptr<IModule>& module,
const std::shared_ptr<IHalAdapterVendorExtension>& vext)
- : ConversionHelperAidl("DeviceHalAidl"),
- mInstance(instance), mModule(module), mVendorExt(vext),
- mTelephony(retrieveSubInterface<ITelephony>(module, &IModule::getTelephony)),
- mBluetooth(retrieveSubInterface<IBluetooth>(module, &IModule::getBluetooth)),
- mBluetoothA2dp(retrieveSubInterface<IBluetoothA2dp>(module, &IModule::getBluetoothA2dp)),
- mBluetoothLe(retrieveSubInterface<IBluetoothLe>(module, &IModule::getBluetoothLe)),
- mSoundDose(retrieveSubInterface<ISoundDose>(module, &IModule::getSoundDose)),
- mMapper(instance, module), mMapperAccessor(mMapper, mLock) {
-}
+ : ConversionHelperAidl("DeviceHalAidl", instance),
+ mModule(module),
+ mVendorExt(vext),
+ mTelephony(retrieveSubInterface<ITelephony>(module, &IModule::getTelephony)),
+ mBluetooth(retrieveSubInterface<IBluetooth>(module, &IModule::getBluetooth)),
+ mBluetoothA2dp(retrieveSubInterface<IBluetoothA2dp>(module, &IModule::getBluetoothA2dp)),
+ mBluetoothLe(retrieveSubInterface<IBluetoothLe>(module, &IModule::getBluetoothLe)),
+ mSoundDose(retrieveSubInterface<ISoundDose>(module, &IModule::getSoundDose)),
+ mMapper(instance, module),
+ mMapperAccessor(mMapper, mLock) {}
status_t DeviceHalAidl::getAudioPorts(std::vector<media::audio::common::AudioPort> *ports) {
std::lock_guard l(mLock);
@@ -127,11 +141,13 @@
}
status_t DeviceHalAidl::getSupportedModes(std::vector<media::audio::common::AudioMode> *modes) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
- if (mTelephony == nullptr) return INVALID_OPERATION;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+ RETURN_IF_TELEPHONY_NOT_INIT(INVALID_OPERATION);
+
if (modes == nullptr) {
+ AUGMENT_LOG(E, "uninitialized modes");
return BAD_VALUE;
}
std::vector<AudioMode> aidlModes;
@@ -149,48 +165,53 @@
}
status_t DeviceHalAidl::initCheck() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
std::lock_guard l(mLock);
return mMapper.initialize();
}
status_t DeviceHalAidl::setVoiceVolume(float volume) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "volume %f", volume);
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
- if (mTelephony == nullptr) return INVALID_OPERATION;
- ITelephony::TelecomConfig inConfig{ .voiceVolume = Float{volume} }, outConfig;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+ RETURN_IF_TELEPHONY_NOT_INIT(INVALID_OPERATION);
+
+ ITelephony::TelecomConfig inConfig{.voiceVolume = Float{volume}}, outConfig;
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(mTelephony->setTelecomConfig(inConfig, &outConfig)));
- ALOGW_IF(outConfig.voiceVolume.has_value() && volume != outConfig.voiceVolume.value().value,
- "%s: the resulting voice volume %f is not the same as requested %f",
- __func__, outConfig.voiceVolume.value().value, volume);
+ AUGMENT_LOG_IF(
+ W, outConfig.voiceVolume.has_value() && volume != outConfig.voiceVolume.value().value,
+ "the resulting voice volume %f is not the same as requested %f",
+ outConfig.voiceVolume.value().value, volume);
return OK;
}
status_t DeviceHalAidl::setMasterVolume(float volume) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "volume %f", volume);
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
return statusTFromBinderStatus(mModule->setMasterVolume(volume));
}
status_t DeviceHalAidl::getMasterVolume(float *volume) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (volume == nullptr) {
+ AUGMENT_LOG(E, "uninitialized volumes");
return BAD_VALUE;
}
return statusTFromBinderStatus(mModule->getMasterVolume(volume));
}
status_t DeviceHalAidl::setMode(audio_mode_t mode) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "mode %d", mode);
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
AudioMode audioMode = VALUE_OR_FATAL(::aidl::android::legacy2aidl_audio_mode_t_AudioMode(mode));
if (mTelephony != nullptr) {
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mTelephony->switchAudioMode(audioMode)));
@@ -199,90 +220,99 @@
}
status_t DeviceHalAidl::setMicMute(bool state) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "mute %d", state);
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
return statusTFromBinderStatus(mModule->setMicMute(state));
}
status_t DeviceHalAidl::getMicMute(bool *state) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (state == nullptr) {
+ AUGMENT_LOG(E, "uninitialized mute state");
return BAD_VALUE;
}
return statusTFromBinderStatus(mModule->getMicMute(state));
}
status_t DeviceHalAidl::setMasterMute(bool state) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "mute %d", state);
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
return statusTFromBinderStatus(mModule->setMasterMute(state));
}
status_t DeviceHalAidl::getMasterMute(bool *state) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (state == nullptr) {
+ AUGMENT_LOG(E, "uninitialized mute state");
return BAD_VALUE;
}
return statusTFromBinderStatus(mModule->getMasterMute(state));
}
status_t DeviceHalAidl::setParameters(const String8& kvPairs) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
AudioParameter parameters(kvPairs);
- ALOGD("%s: parameters: \"%s\"", __func__, parameters.toString().c_str());
+ AUGMENT_LOG(D, "parameters: \"%s\"", parameters.toString().c_str());
if (status_t status = filterAndUpdateBtA2dpParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating BT A2DP parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateBtA2dpParameters failed: %d", status);
}
if (status_t status = filterAndUpdateBtHfpParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating BT HFP parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateBtHfpParameters failed: %d", status);
}
if (status_t status = filterAndUpdateBtLeParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating BT LE parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateBtLeParameters failed: %d", status);
}
if (status_t status = filterAndUpdateBtScoParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating BT SCO parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateBtScoParameters failed: %d", status);
}
if (status_t status = filterAndUpdateScreenParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating screen parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateScreenParameters failed: %d", status);
}
if (status_t status = filterAndUpdateTelephonyParameters(parameters); status != OK) {
- ALOGW("%s: filtering or updating telephony parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndUpdateTelephonyParameters failed: %d", status);
}
return parseAndSetVendorParameters(mVendorExt, mModule, parameters);
}
status_t DeviceHalAidl::getParameters(const String8& keys, String8 *values) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "keys: \"%s\"", keys.c_str());
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (values == nullptr) {
+ AUGMENT_LOG(E, "invalid values");
return BAD_VALUE;
}
AudioParameter parameterKeys(keys), result;
if (status_t status = filterAndRetrieveBtA2dpParameters(parameterKeys, &result); status != OK) {
- ALOGW("%s: filtering or retrieving BT A2DP parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndRetrieveBtA2dpParameters failed: %d", status);
}
if (status_t status = filterAndRetrieveBtLeParameters(parameterKeys, &result); status != OK) {
- ALOGW("%s: filtering or retrieving BT LE parameters failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filterAndRetrieveBtLeParameters failed: %d", status);
}
*values = result.toString();
return parseAndGetVendorParameters(mVendorExt, mModule, parameterKeys, values);
}
status_t DeviceHalAidl::getInputBufferSize(struct audio_config* config, size_t* size) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (config == nullptr || size == nullptr) {
+ AUGMENT_LOG(E, "invalid config or size");
return BAD_VALUE;
}
constexpr bool isInput = true;
@@ -432,10 +462,11 @@
const char* address,
sp<StreamOutHalInterface>* outStream,
const std::vector<playback_track_metadata_v7_t>& sourceMetadata) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "handle: %d devices %0x flags %0x", handle, devices, flags);
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (outStream == nullptr || config == nullptr) {
+ AUGMENT_LOG(E, "invalid outStream or config");
return BAD_VALUE;
}
constexpr bool isInput = false;
@@ -485,10 +516,10 @@
args.sourceMetadata = aidlMetadata;
::aidl::android::hardware::audio::core::IModule::OpenOutputStreamReturn ret;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openOutputStream(args, &ret)));
- StreamContextAidl context(ret.desc, isOffload);
+ StreamContextAidl context(ret.desc, isOffload, aidlHandle);
if (!context.isValid()) {
- ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
- __func__, ret.desc.toString().c_str());
+ AUGMENT_LOG(E, "Failed to created a valid stream context from the descriptor: %s",
+ ret.desc.toString().c_str());
return NO_INIT;
}
auto stream = sp<StreamOutHalAidl>::make(*config, std::move(context), aidlPatch.latenciesMs[0],
@@ -520,10 +551,11 @@
const char* address, audio_source_t source,
audio_devices_t outputDevice, const char* outputDeviceAddress,
sp<StreamInHalInterface>* inStream) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "handle: %d devices %0x flags %0x", handle, devices, flags);
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (inStream == nullptr || config == nullptr) {
+ AUGMENT_LOG(E, "invalid inStream or config");
return BAD_VALUE;
}
constexpr bool isInput = true;
@@ -563,10 +595,10 @@
args.bufferSizeFrames = aidlConfig.frameCount;
::aidl::android::hardware::audio::core::IModule::OpenInputStreamReturn ret;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openInputStream(args, &ret)));
- StreamContextAidl context(ret.desc, false /*isAsynchronous*/);
+ StreamContextAidl context(ret.desc, false /*isAsynchronous*/, aidlHandle);
if (!context.isValid()) {
- ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
- __func__, ret.desc.toString().c_str());
+ AUGMENT_LOG(E, "Failed to created a valid stream context from the descriptor: %s",
+ ret.desc.toString().c_str());
return NO_INIT;
}
*inStream = sp<StreamInHalAidl>::make(*config, std::move(context), aidlPatch.latenciesMs[0],
@@ -580,7 +612,10 @@
}
status_t DeviceHalAidl::supportsAudioPatches(bool* supportsPatches) {
+ LOG_ENTRY_V();
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (supportsPatches == nullptr) {
+ AUGMENT_LOG(E, "uninitialized supportsPatches");
return BAD_VALUE;
}
*supportsPatches = true;
@@ -592,13 +627,20 @@
unsigned int num_sinks,
const struct audio_port_config* sinks,
audio_patch_handle_t* patch) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "sources: %d sinks %d", num_sources, num_sinks);
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
- if (num_sinks > AUDIO_PATCH_PORTS_MAX || num_sources > AUDIO_PATCH_PORTS_MAX ||
- sources == nullptr || sinks == nullptr || patch == nullptr) {
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+ if (num_sinks > AUDIO_PATCH_PORTS_MAX || num_sources > AUDIO_PATCH_PORTS_MAX) {
+ AUGMENT_LOG(E, "invalid sources %d or sinks %d ", num_sources, num_sinks);
return BAD_VALUE;
}
+
+ if (sources == nullptr || sinks == nullptr || patch == nullptr) {
+ AUGMENT_LOG(E, "uninitialized sources %d or sinks %d or patches %d", (sources == nullptr),
+ (sinks == nullptr), (patch == nullptr));
+ return BAD_VALUE;
+ }
+
// When the patch handle (*patch) is AUDIO_PATCH_HANDLE_NONE, it means
// the framework wants to create a new patch. The handle has to be generated
// by the HAL. Since handles generated this way can only be unique within
@@ -660,9 +702,10 @@
}
status_t DeviceHalAidl::releaseAudioPatch(audio_patch_handle_t patch) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ AUGMENT_LOG(D, "patch: %d", patch);
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
static_assert(AUDIO_PATCH_HANDLE_NONE == 0);
if (patch == AUDIO_PATCH_HANDLE_NONE) {
return BAD_VALUE;
@@ -685,7 +728,10 @@
}
status_t DeviceHalAidl::getAudioPort(struct audio_port* port) {
+ LOG_ENTRY_V();
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (port == nullptr) {
+ AUGMENT_LOG(E, "port not initialized");
return BAD_VALUE;
}
audio_port_v7 portV7;
@@ -695,10 +741,12 @@
}
status_t DeviceHalAidl::getAudioPort(struct audio_port_v7 *port) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (port == nullptr) {
+ AUGMENT_LOG(E, "port not initialized");
return BAD_VALUE;
}
bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(port->role, port->type)) ==
@@ -706,8 +754,7 @@
auto aidlPort = VALUE_OR_RETURN_STATUS(
::aidl::android::legacy2aidl_audio_port_v7_AudioPort(*port, isInput));
if (aidlPort.ext.getTag() != AudioPortExt::device) {
- ALOGE("%s: provided port is not a device port (module %s): %s",
- __func__, mInstance.c_str(), aidlPort.toString().c_str());
+ AUGMENT_LOG(E, "provided port is not a device port %s", aidlPort.toString().c_str());
return BAD_VALUE;
}
const auto& matchDevice = aidlPort.ext.get<AudioPortExt::device>().device;
@@ -726,11 +773,13 @@
status_t DeviceHalAidl::getAudioMixPort(const struct audio_port_v7 *devicePort,
struct audio_port_v7 *mixPort) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
- if (devicePort == nullptr || mixPort == nullptr ||
- devicePort->type != AUDIO_PORT_TYPE_DEVICE || mixPort->type != AUDIO_PORT_TYPE_MIX) {
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+
+ if (devicePort == nullptr || mixPort == nullptr || devicePort->type != AUDIO_PORT_TYPE_DEVICE ||
+ mixPort->type != AUDIO_PORT_TYPE_MIX) {
+ AUGMENT_LOG(E, "invalid device or mix port");
return BAD_VALUE;
}
const int32_t aidlHandle = VALUE_OR_RETURN_STATUS(
@@ -748,10 +797,12 @@
}
status_t DeviceHalAidl::setAudioPortConfig(const struct audio_port_config* config) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (config == nullptr) {
+ AUGMENT_LOG(E, "config not initialized");
return BAD_VALUE;
}
bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(
@@ -765,9 +816,10 @@
}
MicrophoneInfoProvider::Info const* DeviceHalAidl::getMicrophoneInfo() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (!mModule) return {};
+ RETURN_IF_MODULE_NOT_INIT({});
std::lock_guard l(mLock);
if (mMicrophones.status == Microphones::Status::UNKNOWN) {
TIME_CHECK();
@@ -779,7 +831,7 @@
} else if (status == INVALID_OPERATION) {
mMicrophones.status = Microphones::Status::NOT_SUPPORTED;
} else {
- ALOGE("%s: Unexpected status from 'IModule.getMicrophones': %d", __func__, status);
+ AUGMENT_LOG(E, "Unexpected status from HAL: %d", status);
return {};
}
}
@@ -791,10 +843,12 @@
status_t DeviceHalAidl::getMicrophones(
std::vector<audio_microphone_characteristic_t>* microphones) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (microphones == nullptr) {
+ AUGMENT_LOG(E, "microphones not initialized");
return BAD_VALUE;
}
auto staticInfo = getMicrophoneInfo();
@@ -813,10 +867,12 @@
status_t DeviceHalAidl::addDeviceEffect(
const struct audio_port_config *device, sp<EffectHalInterface> effect) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (device == nullptr || effect == nullptr) {
+ AUGMENT_LOG(E, "device or effect not initialized");
return BAD_VALUE;
}
bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(
@@ -825,8 +881,8 @@
::aidl::android::legacy2aidl_audio_port_config_AudioPortConfig(
*device, isInput, 0));
if (requestedPortConfig.ext.getTag() != AudioPortExt::Tag::device) {
- ALOGE("%s: provided port config is not a device port config: %s",
- __func__, requestedPortConfig.toString().c_str());
+ AUGMENT_LOG(E, "provided port config is not a device port config: %s",
+ requestedPortConfig.toString().c_str());
return BAD_VALUE;
}
AudioPortConfig devicePortConfig;
@@ -844,10 +900,11 @@
}
status_t DeviceHalAidl::removeDeviceEffect(
const struct audio_port_config *device, sp<EffectHalInterface> effect) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (device == nullptr || effect == nullptr) {
+ AUGMENT_LOG(E, "device or effect not initialized");
return BAD_VALUE;
}
bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(
@@ -856,8 +913,8 @@
::aidl::android::legacy2aidl_audio_port_config_AudioPortConfig(
*device, isInput, 0));
if (requestedPortConfig.ext.getTag() != AudioPortExt::Tag::device) {
- ALOGE("%s: provided port config is not a device port config: %s",
- __func__, requestedPortConfig.toString().c_str());
+ AUGMENT_LOG(E, "provided port config is not a device port config: %s",
+ requestedPortConfig.toString().c_str());
return BAD_VALUE;
}
AudioPortConfig devicePortConfig;
@@ -875,11 +932,13 @@
status_t DeviceHalAidl::getMmapPolicyInfos(
media::audio::common::AudioMMapPolicyType policyType,
std::vector<media::audio::common::AudioMMapPolicyInfo>* policyInfos) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
- AudioMMapPolicyType mmapPolicyType = VALUE_OR_RETURN_STATUS(
- cpp2ndk_AudioMMapPolicyType(policyType));
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+
+ AudioMMapPolicyType mmapPolicyType =
+ VALUE_OR_RETURN_STATUS(cpp2ndk_AudioMMapPolicyType(policyType));
std::vector<AudioMMapPolicyInfo> mmapPolicyInfos;
@@ -895,9 +954,10 @@
}
int32_t DeviceHalAidl::getAAudioMixerBurstCount() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
int32_t mixerBurstCount = 0;
if (mModule->getAAudioMixerBurstCount(&mixerBurstCount).isOk()) {
return mixerBurstCount;
@@ -906,9 +966,10 @@
}
int32_t DeviceHalAidl::getAAudioHardwareBurstMinUsec() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
int32_t hardwareBurstMinUsec = 0;
if (mModule->getAAudioHardwareBurstMinUsec(&hardwareBurstMinUsec).isOk()) {
return hardwareBurstMinUsec;
@@ -917,9 +978,10 @@
}
error::Result<audio_hw_sync_t> DeviceHalAidl::getHwAvSync() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
int32_t aidlHwAvSync;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->generateHwAvSyncId(&aidlHwAvSync)));
return VALUE_OR_RETURN_STATUS(
@@ -935,55 +997,59 @@
}
status_t DeviceHalAidl::supportsBluetoothVariableLatency(bool* supports) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
+
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (supports == nullptr) {
return BAD_VALUE;
}
return statusTFromBinderStatus(mModule->supportsVariableLatency(supports));
}
-status_t DeviceHalAidl::getSoundDoseInterface(const std::string& module,
- ::ndk::SpAIBinder* soundDoseBinder) {
+status_t DeviceHalAidl::getSoundDoseInterface([[maybe_unused]] const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) {
+ LOG_ENTRY_V();
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
+
if (soundDoseBinder == nullptr) {
return BAD_VALUE;
}
if (mSoundDose == nullptr) {
- ALOGE("%s failed to retrieve the sound dose interface for module %s",
- __func__, module.c_str());
+ AUGMENT_LOG(E, "failed to retrieve the sound dose interface");
return BAD_VALUE;
}
if (mSoundDose == nullptr) {
- ALOGE("%s failed to return the sound dose interface for module %s: not implemented",
- __func__,
- module.c_str());
+ AUGMENT_LOG(E, "failed to return the sound dose interface not implemented");
return NO_INIT;
}
*soundDoseBinder = mSoundDose->asBinder();
- ALOGI("%s using audio AIDL HAL sound dose interface", __func__);
+ AUGMENT_LOG(I, "using audio AIDL HAL sound dose interface");
return OK;
}
status_t DeviceHalAidl::prepareToDisconnectExternalDevice(const struct audio_port_v7* port) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (port == nullptr) {
+ AUGMENT_LOG(E, "port not initialized");
return BAD_VALUE;
}
- const bool isInput = VALUE_OR_RETURN_STATUS(
- ::aidl::android::portDirection(port->role, port->type)) ==
- ::aidl::android::AudioPortDirection::INPUT;
+ const bool isInput =
+ VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(port->role, port->type)) ==
+ ::aidl::android::AudioPortDirection::INPUT;
AudioPort aidlPort = VALUE_OR_RETURN_STATUS(
::aidl::android::legacy2aidl_audio_port_v7_AudioPort(*port, isInput));
if (aidlPort.ext.getTag() != AudioPortExt::device) {
- ALOGE("%s: provided port is not a device port (module %s): %s",
- __func__, mInstance.c_str(), aidlPort.toString().c_str());
+ AUGMENT_LOG(E, "provided port is not a device port: %s", aidlPort.toString().c_str());
return BAD_VALUE;
}
+
+ AUGMENT_LOG(D, "device %s", aidlPort.toString().c_str());
+
status_t status = NO_ERROR;
{
std::lock_guard l(mLock);
@@ -1004,10 +1070,11 @@
}
status_t DeviceHalAidl::setConnectedState(const struct audio_port_v7 *port, bool connected) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
if (port == nullptr) {
+ AUGMENT_LOG(E, "port not initialized");
return BAD_VALUE;
}
if (!connected) {
@@ -1026,17 +1093,18 @@
AudioPort aidlPort = VALUE_OR_RETURN_STATUS(
::aidl::android::legacy2aidl_audio_port_v7_AudioPort(*port, isInput));
if (aidlPort.ext.getTag() != AudioPortExt::device) {
- ALOGE("%s: provided port is not a device port (module %s): %s",
- __func__, mInstance.c_str(), aidlPort.toString().c_str());
+ AUGMENT_LOG(E, "provided port is not a device port: %s", aidlPort.toString().c_str());
return BAD_VALUE;
}
+ AUGMENT_LOG(D, "connected %d port: %s", connected, aidlPort.toString().c_str());
std::lock_guard l(mLock);
return mMapper.setDevicePortConnectedState(aidlPort, connected);
}
status_t DeviceHalAidl::setSimulateDeviceConnections(bool enabled) {
+ LOG_ENTRY_V();
TIME_CHECK();
- if (mModule == nullptr) return NO_INIT;
+ RETURN_IF_MODULE_NOT_INIT(NO_INIT);
{
std::lock_guard l(mLock);
mMapper.resetUnusedPatchesAndPortConfigs();
@@ -1045,9 +1113,9 @@
status_t status = statusTFromBinderStatus(mModule->setModuleDebug(debug));
// This is important to log as it affects HAL behavior.
if (status == OK) {
- ALOGI("%s: set enabled: %d", __func__, enabled);
+ AUGMENT_LOG(I, "set enabled: %d", enabled);
} else {
- ALOGW("%s: set enabled to %d failed: %d", __func__, enabled, status);
+ AUGMENT_LOG(W, "set enabled to %d failed: %d", enabled, status);
}
return status;
}
@@ -1062,7 +1130,7 @@
mBluetoothA2dp->supportsOffloadReconfiguration(&supports)));
result->addInt(key, supports ? 1 : 0);
} else {
- ALOGI("%s: no IBluetoothA2dp on %s", __func__, mInstance.c_str());
+ AUGMENT_LOG(I, "no IBluetoothA2dp");
result->addInt(key, 0);
}
}
@@ -1079,7 +1147,7 @@
mBluetoothLe->supportsOffloadReconfiguration(&supports)));
result->addInt(key, supports ? 1 : 0);
} else {
- ALOGI("%s: no mBluetoothLe on %s", __func__, mInstance.c_str());
+ AUGMENT_LOG(I, "no mBluetoothLe");
result->addInt(key, 0);
}
}
@@ -1090,29 +1158,29 @@
std::optional<bool> a2dpEnabled;
std::optional<std::vector<VendorParameter>> reconfigureOffload;
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtA2dpSuspended),
- [&a2dpEnabled](const String8& trueOrFalse) {
- if (trueOrFalse == AudioParameter::valueTrue) {
- a2dpEnabled = false; // 'suspended' == true
- return OK;
- } else if (trueOrFalse == AudioParameter::valueFalse) {
- a2dpEnabled = true; // 'suspended' == false
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtA2dpSuspended, trueOrFalse.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtA2dpSuspended),
+ [&a2dpEnabled, this](const String8& trueOrFalse) {
+ if (trueOrFalse == AudioParameter::valueTrue) {
+ a2dpEnabled = false; // 'suspended' == true
+ return OK;
+ } else if (trueOrFalse == AudioParameter::valueFalse) {
+ a2dpEnabled = true; // 'suspended' == false
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtA2dpSuspended, trueOrFalse.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyReconfigA2dp),
- [&](const String8& value) -> status_t {
- std::vector<VendorParameter> result;
- RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
- mVendorExt->parseBluetoothA2dpReconfigureOffload(
- std::string(value.c_str()), &result)));
- reconfigureOffload = std::move(result);
- return OK;
- }));
+ parameters, String8(AudioParameter::keyReconfigA2dp),
+ [&](const String8& value) -> status_t {
+ std::vector<VendorParameter> result;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(mVendorExt->parseBluetoothA2dpReconfigureOffload(
+ std::string(value.c_str()), &result)));
+ reconfigureOffload = std::move(result);
+ return OK;
+ }));
if (mBluetoothA2dp != nullptr && a2dpEnabled.has_value()) {
return statusTFromBinderStatus(mBluetoothA2dp->setEnabled(a2dpEnabled.value()));
}
@@ -1126,34 +1194,33 @@
status_t DeviceHalAidl::filterAndUpdateBtHfpParameters(AudioParameter ¶meters) {
IBluetooth::HfpConfig hfpConfig;
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtHfpEnable),
- [&hfpConfig](const String8& trueOrFalse) {
- if (trueOrFalse == AudioParameter::valueTrue) {
- hfpConfig.isEnabled = Boolean{ .value = true };
- return OK;
- } else if (trueOrFalse == AudioParameter::valueFalse) {
- hfpConfig.isEnabled = Boolean{ .value = false };
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtHfpEnable, trueOrFalse.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtHfpEnable),
+ [&hfpConfig, this](const String8& trueOrFalse) {
+ if (trueOrFalse == AudioParameter::valueTrue) {
+ hfpConfig.isEnabled = Boolean{.value = true};
+ return OK;
+ } else if (trueOrFalse == AudioParameter::valueFalse) {
+ hfpConfig.isEnabled = Boolean{.value = false};
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtHfpEnable, trueOrFalse.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
- parameters, String8(AudioParameter::keyBtHfpSamplingRate),
- [&hfpConfig](int sampleRate) {
- return sampleRate > 0 ?
- hfpConfig.sampleRate = Int{ .value = sampleRate }, OK : BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtHfpSamplingRate),
+ [&hfpConfig](int sampleRate) {
+ return sampleRate > 0 ? hfpConfig.sampleRate = Int{.value = sampleRate},
+ OK : BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
- parameters, String8(AudioParameter::keyBtHfpVolume),
- [&hfpConfig](int volume0to15) {
- if (volume0to15 >= 0 && volume0to15 <= 15) {
- hfpConfig.volume = Float{ .value = volume0to15 / 15.0f };
- return OK;
- }
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtHfpVolume), [&hfpConfig](int volume0to15) {
+ if (volume0to15 >= 0 && volume0to15 <= 15) {
+ hfpConfig.volume = Float{.value = volume0to15 / 15.0f};
+ return OK;
+ }
+ return BAD_VALUE;
+ }));
if (mBluetooth != nullptr && hfpConfig != IBluetooth::HfpConfig{}) {
IBluetooth::HfpConfig newHfpConfig;
return statusTFromBinderStatus(mBluetooth->setHfpConfig(hfpConfig, &newHfpConfig));
@@ -1165,39 +1232,39 @@
std::optional<bool> leEnabled;
std::optional<std::vector<VendorParameter>> reconfigureOffload;
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtLeSuspended),
- [&leEnabled](const String8& trueOrFalse) {
- if (trueOrFalse == AudioParameter::valueTrue) {
- leEnabled = false; // 'suspended' == true
- return OK;
- } else if (trueOrFalse == AudioParameter::valueFalse) {
- leEnabled = true; // 'suspended' == false
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtLeSuspended, trueOrFalse.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtLeSuspended),
+ [&leEnabled, this](const String8& trueOrFalse) {
+ if (trueOrFalse == AudioParameter::valueTrue) {
+ leEnabled = false; // 'suspended' == true
+ return OK;
+ } else if (trueOrFalse == AudioParameter::valueFalse) {
+ leEnabled = true; // 'suspended' == false
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtLeSuspended, trueOrFalse.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyReconfigLe),
- [&](const String8& value) -> status_t {
- if (mVendorExt != nullptr) {
- std::vector<VendorParameter> result;
- RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
- mVendorExt->parseBluetoothLeReconfigureOffload(
- std::string(value.c_str()), &result)));
- reconfigureOffload = std::move(result);
- } else {
- reconfigureOffload = std::vector<VendorParameter>();
- }
- return OK;
- }));
+ parameters, String8(AudioParameter::keyReconfigLe),
+ [&](const String8& value) -> status_t {
+ if (mVendorExt != nullptr) {
+ std::vector<VendorParameter> result;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(mVendorExt->parseBluetoothLeReconfigureOffload(
+ std::string(value.c_str()), &result)));
+ reconfigureOffload = std::move(result);
+ } else {
+ reconfigureOffload = std::vector<VendorParameter>();
+ }
+ return OK;
+ }));
if (mBluetoothLe != nullptr && leEnabled.has_value()) {
return statusTFromBinderStatus(mBluetoothLe->setEnabled(leEnabled.value()));
}
if (mBluetoothLe != nullptr && reconfigureOffload.has_value()) {
- return statusTFromBinderStatus(mBluetoothLe->reconfigureOffload(
- reconfigureOffload.value()));
+ return statusTFromBinderStatus(
+ mBluetoothLe->reconfigureOffload(reconfigureOffload.value()));
}
return OK;
}
@@ -1205,53 +1272,53 @@
status_t DeviceHalAidl::filterAndUpdateBtScoParameters(AudioParameter ¶meters) {
IBluetooth::ScoConfig scoConfig;
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtSco),
- [&scoConfig](const String8& onOrOff) {
- if (onOrOff == AudioParameter::valueOn) {
- scoConfig.isEnabled = Boolean{ .value = true };
- return OK;
- } else if (onOrOff == AudioParameter::valueOff) {
- scoConfig.isEnabled = Boolean{ .value = false };
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtSco, onOrOff.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtSco),
+ [&scoConfig, this](const String8& onOrOff) {
+ if (onOrOff == AudioParameter::valueOn) {
+ scoConfig.isEnabled = Boolean{.value = true};
+ return OK;
+ } else if (onOrOff == AudioParameter::valueOff) {
+ scoConfig.isEnabled = Boolean{.value = false};
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtSco, onOrOff.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtScoHeadsetName),
- [&scoConfig](const String8& name) {
- scoConfig.debugName = name;
- return OK;
- }));
+ parameters, String8(AudioParameter::keyBtScoHeadsetName),
+ [&scoConfig](const String8& name) {
+ scoConfig.debugName = name;
+ return OK;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtNrec),
- [&scoConfig](const String8& onOrOff) {
- if (onOrOff == AudioParameter::valueOn) {
- scoConfig.isNrecEnabled = Boolean{ .value = true };
- return OK;
- } else if (onOrOff == AudioParameter::valueOff) {
- scoConfig.isNrecEnabled = Boolean{ .value = false };
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtNrec, onOrOff.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtNrec),
+ [&scoConfig, this](const String8& onOrOff) {
+ if (onOrOff == AudioParameter::valueOn) {
+ scoConfig.isNrecEnabled = Boolean{.value = true};
+ return OK;
+ } else if (onOrOff == AudioParameter::valueOff) {
+ scoConfig.isNrecEnabled = Boolean{.value = false};
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtNrec, onOrOff.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyBtScoWb),
- [&scoConfig](const String8& onOrOff) {
- if (onOrOff == AudioParameter::valueOn) {
- scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO_WB;
- return OK;
- } else if (onOrOff == AudioParameter::valueOff) {
- scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO;
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyBtScoWb, onOrOff.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyBtScoWb),
+ [&scoConfig, this](const String8& onOrOff) {
+ if (onOrOff == AudioParameter::valueOn) {
+ scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO_WB;
+ return OK;
+ } else if (onOrOff == AudioParameter::valueOff) {
+ scoConfig.mode = IBluetooth::ScoConfig::Mode::SCO;
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyBtScoWb, onOrOff.c_str());
+ return BAD_VALUE;
+ }));
if (mBluetooth != nullptr && scoConfig != IBluetooth::ScoConfig{}) {
IBluetooth::ScoConfig newScoConfig;
return statusTFromBinderStatus(mBluetooth->setScoConfig(scoConfig, &newScoConfig));
@@ -1261,34 +1328,41 @@
status_t DeviceHalAidl::filterAndUpdateScreenParameters(AudioParameter ¶meters) {
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyScreenState),
- [&](const String8& onOrOff) -> status_t {
- std::optional<bool> isTurnedOn;
- if (onOrOff == AudioParameter::valueOn) {
- isTurnedOn = true;
- } else if (onOrOff == AudioParameter::valueOff) {
- isTurnedOn = false;
- }
- if (!isTurnedOn.has_value()) {
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyScreenState, onOrOff.c_str());
- return BAD_VALUE;
- }
- return statusTFromBinderStatus(
- mModule->updateScreenState(isTurnedOn.value()));
- }));
+ parameters, String8(AudioParameter::keyScreenState),
+ [&, this](const String8& onOrOff) -> status_t {
+ std::optional<bool> isTurnedOn;
+ if (onOrOff == AudioParameter::valueOn) {
+ isTurnedOn = true;
+ } else if (onOrOff == AudioParameter::valueOff) {
+ isTurnedOn = false;
+ }
+ if (!isTurnedOn.has_value()) {
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyScreenState, onOrOff.c_str());
+ return BAD_VALUE;
+ }
+ return statusTFromBinderStatus(mModule->updateScreenState(isTurnedOn.value()));
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
- parameters, String8(AudioParameter::keyScreenRotation),
- [&](int rotationDegrees) -> status_t {
+ parameters, String8(AudioParameter::keyScreenRotation),
+ [&, this](int rotationDegrees) -> status_t {
IModule::ScreenRotation rotation;
switch (rotationDegrees) {
- case 0: rotation = IModule::ScreenRotation::DEG_0; break;
- case 90: rotation = IModule::ScreenRotation::DEG_90; break;
- case 180: rotation = IModule::ScreenRotation::DEG_180; break;
- case 270: rotation = IModule::ScreenRotation::DEG_270; break;
+ case 0:
+ rotation = IModule::ScreenRotation::DEG_0;
+ break;
+ case 90:
+ rotation = IModule::ScreenRotation::DEG_90;
+ break;
+ case 180:
+ rotation = IModule::ScreenRotation::DEG_180;
+ break;
+ case 270:
+ rotation = IModule::ScreenRotation::DEG_270;
+ break;
default:
- ALOGE("setParameters: parameter key \"%s\" has invalid value %d",
- AudioParameter::keyScreenRotation, rotationDegrees);
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value %d",
+ AudioParameter::keyScreenRotation, rotationDegrees);
return BAD_VALUE;
}
return statusTFromBinderStatus(mModule->updateScreenRotation(rotation));
@@ -1300,43 +1374,42 @@
using TtyMode = ITelephony::TelecomConfig::TtyMode;
ITelephony::TelecomConfig telConfig;
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyTtyMode),
- [&telConfig](const String8& mode) {
- if (mode == AudioParameter::valueTtyModeOff) {
- telConfig.ttyMode = TtyMode::OFF;
- return OK;
- } else if (mode == AudioParameter::valueTtyModeFull) {
- telConfig.ttyMode = TtyMode::FULL;
- return OK;
- } else if (mode == AudioParameter::valueTtyModeHco) {
- telConfig.ttyMode = TtyMode::HCO;
- return OK;
- } else if (mode == AudioParameter::valueTtyModeVco) {
- telConfig.ttyMode = TtyMode::VCO;
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyTtyMode, mode.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyTtyMode),
+ [&telConfig, this](const String8& mode) {
+ if (mode == AudioParameter::valueTtyModeOff) {
+ telConfig.ttyMode = TtyMode::OFF;
+ return OK;
+ } else if (mode == AudioParameter::valueTtyModeFull) {
+ telConfig.ttyMode = TtyMode::FULL;
+ return OK;
+ } else if (mode == AudioParameter::valueTtyModeHco) {
+ telConfig.ttyMode = TtyMode::HCO;
+ return OK;
+ } else if (mode == AudioParameter::valueTtyModeVco) {
+ telConfig.ttyMode = TtyMode::VCO;
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyTtyMode, mode.c_str());
+ return BAD_VALUE;
+ }));
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<String8>(
- parameters, String8(AudioParameter::keyHacSetting),
- [&telConfig](const String8& onOrOff) {
- if (onOrOff == AudioParameter::valueHacOn) {
- telConfig.isHacEnabled = Boolean{ .value = true };
- return OK;
- } else if (onOrOff == AudioParameter::valueHacOff) {
- telConfig.isHacEnabled = Boolean{ .value = false };
- return OK;
- }
- ALOGE("setParameters: parameter key \"%s\" has invalid value \"%s\"",
- AudioParameter::keyHacSetting, onOrOff.c_str());
- return BAD_VALUE;
- }));
+ parameters, String8(AudioParameter::keyHacSetting),
+ [&telConfig, this](const String8& onOrOff) {
+ if (onOrOff == AudioParameter::valueHacOn) {
+ telConfig.isHacEnabled = Boolean{.value = true};
+ return OK;
+ } else if (onOrOff == AudioParameter::valueHacOff) {
+ telConfig.isHacEnabled = Boolean{.value = false};
+ return OK;
+ }
+ AUGMENT_LOG(E, "setParameters: parameter key \"%s\" has invalid value \"%s\"",
+ AudioParameter::keyHacSetting, onOrOff.c_str());
+ return BAD_VALUE;
+ }));
if (mTelephony != nullptr && telConfig != ITelephony::TelecomConfig{}) {
ITelephony::TelecomConfig newTelConfig;
- return statusTFromBinderStatus(
- mTelephony->setTelecomConfig(telConfig, &newTelConfig));
+ return statusTFromBinderStatus(mTelephony->setTelecomConfig(telConfig, &newTelConfig));
}
return OK;
}
diff --git a/media/libaudiohal/impl/DeviceHalAidl.h b/media/libaudiohal/impl/DeviceHalAidl.h
index 6e823df..6ae6402 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.h
+++ b/media/libaudiohal/impl/DeviceHalAidl.h
@@ -235,7 +235,6 @@
// MicrophoneInfoProvider implementation
MicrophoneInfoProvider::Info const* getMicrophoneInfo() override;
- const std::string mInstance;
const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mModule;
const std::shared_ptr<::aidl::android::media::audio::IHalAdapterVendorExtension> mVendorExt;
const std::shared_ptr<::aidl::android::hardware::audio::core::ITelephony> mTelephony;
diff --git a/media/libaudiohal/impl/Hal2AidlMapper.cpp b/media/libaudiohal/impl/Hal2AidlMapper.cpp
index a01ac4b..f352849 100644
--- a/media/libaudiohal/impl/Hal2AidlMapper.cpp
+++ b/media/libaudiohal/impl/Hal2AidlMapper.cpp
@@ -25,6 +25,7 @@
#include <Utils.h>
#include <utils/Log.h>
+#include "AidlUtils.h"
#include "Hal2AidlMapper.h"
using aidl::android::aidl_utils::statusTFromBinderStatus;
@@ -99,8 +100,7 @@
} // namespace
Hal2AidlMapper::Hal2AidlMapper(const std::string& instance, const std::shared_ptr<IModule>& module)
- : mInstance(instance), mModule(module) {
-}
+ : ConversionHelperAidl("Hal2AidlMapper", instance), mModule(module) {}
void Hal2AidlMapper::addStream(
const sp<StreamHalInterface>& stream, int32_t mixPortConfigId, int32_t patchId) {
@@ -137,9 +137,9 @@
// 'sinks' will not be updated because 'setAudioPatch' only needs IDs. Here we log
// the source arguments, where only the audio configuration and device specifications
// are relevant.
- ALOGD("%s: patch ID: %d, [disregard IDs] sources: %s, sinks: %s",
- __func__, *patchId, ::android::internal::ToString(sources).c_str(),
- ::android::internal::ToString(sinks).c_str());
+ AUGMENT_LOG(D, "patch ID: %d, [disregard IDs] sources: %s, sinks: %s", *patchId,
+ ::android::internal::ToString(sources).c_str(),
+ ::android::internal::ToString(sinks).c_str());
auto fillPortConfigs = [&](
const std::vector<AudioPortConfig>& configs,
const std::set<int32_t>& destinationPortIds,
@@ -152,18 +152,20 @@
// See b/315528763. Despite that the framework knows the actual format of
// the mix port, it still uses the original format. Luckily, there is
// the I/O handle which can be used to find the mix port.
- ALOGI("fillPortConfigs: retrying to find a mix port config with default "
- "configuration");
+ AUGMENT_LOG(I,
+ "fillPortConfigs: retrying to find a mix port config with"
+ " default configuration");
if (auto it = findPortConfig(std::nullopt, s.flags,
s.ext.get<AudioPortExt::mix>().handle);
it != mPortConfigs.end()) {
portConfig = it->second;
} else {
- const std::string flags = s.flags.has_value() ?
- s.flags->toString() : "<unspecified>";
- ALOGE("fillPortConfigs: existing port config for flags %s, handle %d "
- "not found in module %s", flags.c_str(),
- s.ext.get<AudioPortExt::mix>().handle, mInstance.c_str());
+ const std::string flags =
+ s.flags.has_value() ? s.flags->toString() : "<unspecified>";
+ AUGMENT_LOG(E,
+ "fillPortConfigs: existing port config for flags %s, "
+ " handle %d not found",
+ flags.c_str(), s.ext.get<AudioPortExt::mix>().handle);
return BAD_VALUE;
}
} else {
@@ -171,8 +173,8 @@
}
}
LOG_ALWAYS_FATAL_IF(portConfig.id == 0,
- "fillPortConfigs: initial config: %s, port config: %s",
- s.toString().c_str(), portConfig.toString().c_str());
+ "fillPortConfigs: initial config: %s, port config: %s",
+ s.toString().c_str(), portConfig.toString().c_str());
ids->push_back(portConfig.id);
if (portIds != nullptr) {
portIds->insert(portConfig.portId);
@@ -218,8 +220,8 @@
if (!created) {
requestedPatch.id = patch.id;
if (patch != requestedPatch) {
- ALOGI("%s: Updating transient patch. Current: %s, new: %s",
- __func__, patch.toString().c_str(), requestedPatch.toString().c_str());
+ AUGMENT_LOG(I, "Updating transient patch. Current: %s, new: %s",
+ patch.toString().c_str(), requestedPatch.toString().c_str());
// Since matching may be done by mix port only, update the patch if the device port
// config has changed.
patch = requestedPatch;
@@ -252,7 +254,7 @@
int32_t id = result->id;
if (requestedPortConfig.id != 0 && requestedPortConfig.id != id) {
LOG_ALWAYS_FATAL("%s: requested port config id %d changed to %d", __func__,
- requestedPortConfig.id, id);
+ requestedPortConfig.id, id);
}
auto [_, inserted] = mPortConfigs.insert_or_assign(id, *result);
@@ -272,8 +274,8 @@
RETURN_STATUS_IF_ERROR(createOrUpdatePortConfig(suggestedOrAppliedPortConfig,
&appliedPortConfig, created));
if (appliedPortConfig.id == 0) {
- ALOGE("%s: module %s did not apply suggested config %s", __func__,
- mInstance.c_str(), suggestedOrAppliedPortConfig.toString().c_str());
+ AUGMENT_LOG(E, "did not apply suggested config %s",
+ suggestedOrAppliedPortConfig.toString().c_str());
return NO_INIT;
}
*result = appliedPortConfig;
@@ -289,7 +291,7 @@
if (mDisconnectedPortReplacement.first == portId) {
const auto& port = mDisconnectedPortReplacement.second;
mPorts.insert(std::make_pair(port.id, port));
- ALOGD("%s: disconnected port replacement: %s", __func__, port.toString().c_str());
+ AUGMENT_LOG(D, "disconnected port replacement: %s", port.toString().c_str());
mDisconnectedPortReplacement = std::pair<int32_t, AudioPort>();
}
updateDynamicMixPorts();
@@ -331,8 +333,7 @@
if (auto portConfigIt = findPortConfig(device); portConfigIt == mPortConfigs.end()) {
auto portsIt = findPort(device);
if (portsIt == mPorts.end()) {
- ALOGE("%s: device port for device %s is not found in the module %s",
- __func__, device.toString().c_str(), mInstance.c_str());
+ AUGMENT_LOG(E, "device port for device %s is not found", device.toString().c_str());
return BAD_VALUE;
}
AudioPortConfig requestedPortConfig;
@@ -385,15 +386,15 @@
matchFlags.set<AudioIoFlags::Tag::input>(matchFlags.get<AudioIoFlags::Tag::input>() &
~makeBitPositionFlagMask(*optionalInputFlagsIt++));
portsIt = findPort(config, matchFlags, destinationPortIds);
- ALOGI("%s: mix port for config %s, flags %s was not found in the module %s, "
- "retried with flags %s", __func__, config.toString().c_str(),
- flags.value().toString().c_str(), mInstance.c_str(),
- matchFlags.toString().c_str());
+ AUGMENT_LOG(I,
+ "mix port for config %s, flags %s was not found"
+ "retried with flags %s",
+ config.toString().c_str(), flags.value().toString().c_str(),
+ matchFlags.toString().c_str());
}
if (portsIt == mPorts.end()) {
- ALOGE("%s: mix port for config %s, flags %s is not found in the module %s",
- __func__, config.toString().c_str(), matchFlags.toString().c_str(),
- mInstance.c_str());
+ AUGMENT_LOG(E, "mix port for config %s, flags %s is not found",
+ config.toString().c_str(), matchFlags.toString().c_str());
return BAD_VALUE;
}
AudioPortConfig requestedPortConfig;
@@ -408,9 +409,10 @@
}
return createOrUpdatePortConfig(requestedPortConfig, portConfig, created);
} else if (portConfigIt == mPortConfigs.end() && !flags.has_value()) {
- ALOGW("%s: mix port config for %s, handle %d not found in the module %s, "
- "and was not created as flags are not specified",
- __func__, config.toString().c_str(), ioHandle, mInstance.c_str());
+ AUGMENT_LOG(W,
+ "mix port config for %s, handle %d not found "
+ "and was not created as flags are not specified",
+ config.toString().c_str(), ioHandle);
return BAD_VALUE;
} else {
AudioPortConfig requestedPortConfig = portConfigIt->second;
@@ -440,8 +442,8 @@
if (const auto& p = requestedPortConfig;
!p.sampleRate.has_value() || !p.channelMask.has_value() ||
!p.format.has_value()) {
- ALOGW("%s: provided mix port config is not fully specified: %s",
- __func__, p.toString().c_str());
+ AUGMENT_LOG(W, "provided mix port config is not fully specified: %s",
+ p.toString().c_str());
return BAD_VALUE;
}
AudioConfig config;
@@ -470,14 +472,13 @@
requestedPortConfig.ext.get<Tag::device>().device, configPtr, gainConfigPtr,
portConfig, created);
} else {
- ALOGD("%s: device port config does not have audio or gain config specified", __func__);
+ AUGMENT_LOG(D, "device port config does not have audio or gain config specified");
return findOrCreateDevicePortConfig(
requestedPortConfig.ext.get<Tag::device>().device, nullptr /*config*/,
nullptr /*gainConfig*/, portConfig, created);
}
}
- ALOGW("%s: unsupported audio port config: %s",
- __func__, requestedPortConfig.toString().c_str());
+ AUGMENT_LOG(W, "unsupported audio port config: %s", requestedPortConfig.toString().c_str());
return BAD_VALUE;
}
@@ -486,8 +487,7 @@
*portConfig = it->second;
return OK;
}
- ALOGE("%s: could not find a device port config for device %s",
- __func__, device.toString().c_str());
+ AUGMENT_LOG(E, "could not find a device port config for device %s", device.toString().c_str());
return BAD_VALUE;
}
@@ -593,9 +593,10 @@
}
optionalFlags |= makeBitPositionFlagMask(*optionalOutputFlagsIt++);
result = std::find_if(mPorts.begin(), mPorts.end(), matcher);
- ALOGI("%s: port for config %s, flags %s was not found in the module %s, "
- "retried with excluding optional flags %#x", __func__, config.toString().c_str(),
- flags.toString().c_str(), mInstance.c_str(), optionalFlags);
+ AUGMENT_LOG(I,
+ "port for config %s, flags %s was not found "
+ "retried with excluding optional flags %#x",
+ config.toString().c_str(), flags.toString().c_str(), optionalFlags);
}
}
return result;
@@ -629,7 +630,7 @@
status_t Hal2AidlMapper::getAudioMixPort(int32_t ioHandle, AudioPort* port) {
auto it = findPortConfig(std::nullopt /*config*/, std::nullopt /*flags*/, ioHandle);
if (it == mPortConfigs.end()) {
- ALOGE("%s, cannot find mix port config for handle %u", __func__, ioHandle);
+ AUGMENT_LOG(E, "cannot find mix port config for handle %u", ioHandle);
return BAD_VALUE;
}
return updateAudioPort(it->second.portId, port);
@@ -638,21 +639,18 @@
status_t Hal2AidlMapper::getAudioPortCached(
const ::aidl::android::media::audio::common::AudioDevice& device,
::aidl::android::media::audio::common::AudioPort* port) {
-
if (auto portsIt = findPort(device); portsIt != mPorts.end()) {
*port = portsIt->second;
return OK;
}
- ALOGE("%s: device port for device %s is not found in the module %s",
- __func__, device.toString().c_str(), mInstance.c_str());
+ AUGMENT_LOG(E, "device port for device %s is not found", device.toString().c_str());
return BAD_VALUE;
}
status_t Hal2AidlMapper::initialize() {
std::vector<AudioPort> ports;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->getAudioPorts(&ports)));
- ALOGW_IF(ports.empty(), "%s: module %s returned an empty list of audio ports",
- __func__, mInstance.c_str());
+ AUGMENT_LOG_IF(W, ports.empty(), "returned an empty list of audio ports");
mDefaultInputPortId = mDefaultOutputPortId = -1;
const int defaultDeviceFlag = 1 << AudioPortDeviceExt::FLAG_INDEX_DEFAULT_DEVICE;
for (auto it = ports.begin(); it != ports.end(); ) {
@@ -685,8 +683,9 @@
}
}
if (mRemoteSubmixIn.has_value() != mRemoteSubmixOut.has_value()) {
- ALOGE("%s: The configuration only has input or output remote submix device, must have both",
- __func__);
+ AUGMENT_LOG(E,
+ "The configuration only has input or output remote submix device, "
+ "must have both");
mRemoteSubmixIn.reset();
mRemoteSubmixOut.reset();
}
@@ -694,7 +693,7 @@
AudioPort connectedRSubmixIn = *mRemoteSubmixIn;
connectedRSubmixIn.ext.get<AudioPortExt::Tag::device>().device.address =
AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS;
- ALOGD("%s: connecting remote submix input", __func__);
+ AUGMENT_LOG(D, "connecting remote submix input");
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->connectExternalDevice(
connectedRSubmixIn, &connectedRSubmixIn)));
// The template port for the remote submix input couldn't be "default" because it is not
@@ -711,7 +710,7 @@
AudioPort tempConnectedRSubmixOut = *mRemoteSubmixOut;
tempConnectedRSubmixOut.ext.get<AudioPortExt::Tag::device>().device.address =
AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS;
- ALOGD("%s: temporarily connecting and disconnecting remote submix output", __func__);
+ AUGMENT_LOG(D, "temporarily connecting and disconnecting remote submix output");
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->connectExternalDevice(
tempConnectedRSubmixOut, &tempConnectedRSubmixOut)));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->disconnectExternalDevice(
@@ -720,8 +719,8 @@
ports.push_back(std::move(tempConnectedRSubmixOut));
}
- ALOGI("%s: module %s default port ids: input %d, output %d",
- __func__, mInstance.c_str(), mDefaultInputPortId, mDefaultOutputPortId);
+ AUGMENT_LOG(I, "default port ids: input %d, output %d", mDefaultInputPortId,
+ mDefaultOutputPortId);
std::transform(ports.begin(), ports.end(), std::inserter(mPorts, mPorts.end()),
[](const auto& p) { return std::make_pair(p.id, p); });
RETURN_STATUS_IF_ERROR(updateRoutes());
@@ -774,10 +773,10 @@
int32_t ioHandle, const AudioDevice& device, const AudioIoFlags& flags,
AudioSource source, Cleanups* cleanups, AudioConfig* config,
AudioPortConfig* mixPortConfig, AudioPatch* patch) {
- ALOGD("%p %s: handle %d, device %s, flags %s, source %s, config %s, mix port config %s",
- this, __func__, ioHandle, device.toString().c_str(),
- flags.toString().c_str(), toString(source).c_str(),
- config->toString().c_str(), mixPortConfig->toString().c_str());
+ AUGMENT_LOG(D, "handle %d, device %s, flags %s, source %s, config %s, mixport config %s",
+ ioHandle, device.toString().c_str(), flags.toString().c_str(),
+ toString(source).c_str(), config->toString().c_str(),
+ mixPortConfig->toString().c_str());
resetUnusedPatchesAndPortConfigs();
const AudioConfig initialConfig = *config;
// Find / create AudioPortConfigs for the device port and the mix port,
@@ -800,8 +799,8 @@
// module can't perform audio stream conversions.
AudioConfig deviceConfig = initialConfig;
if (setConfigFromPortConfig(&deviceConfig, devicePortConfig)->base != initialConfig.base) {
- ALOGD("%s: retrying with device port config: %s", __func__,
- devicePortConfig.toString().c_str());
+ AUGMENT_LOG(D, "retrying with device port config: %s",
+ devicePortConfig.toString().c_str());
status = prepareToOpenStreamHelper(ioHandle, devicePortConfig.portId,
devicePortConfig.id, flags, source, initialConfig, cleanups,
&deviceConfig, mixPortConfig, patch);
@@ -845,8 +844,8 @@
retryWithSuggestedConfig = true;
}
if (mixPortConfig->id == 0 && retryWithSuggestedConfig) {
- ALOGD("%s: retrying to find/create a mix port config using config %s", __func__,
- config->toString().c_str());
+ AUGMENT_LOG(D, "retrying to find/create a mix port config using config %s",
+ config->toString().c_str());
RETURN_STATUS_IF_ERROR(findOrCreateMixPortConfig(*config, flags, ioHandle, source,
std::set<int32_t>{devicePortId}, mixPortConfig, &created));
if (created) {
@@ -855,8 +854,8 @@
setConfigFromPortConfig(config, *mixPortConfig);
}
if (mixPortConfig->id == 0) {
- ALOGD("%p %s: returning suggested config for the stream: %s", this, __func__,
- config->toString().c_str());
+ AUGMENT_LOG(D, "returning suggested config for the stream: %s",
+ config->toString().c_str());
return OK;
}
if (isInput) {
@@ -894,9 +893,10 @@
// Note: does not reset port configs.
status_t Hal2AidlMapper::releaseAudioPatch(Patches::iterator it) {
const int32_t patchId = it->first;
+ AUGMENT_LOG(D, "patchId %d", patchId);
if (ndk::ScopedAStatus status = mModule->resetAudioPatch(patchId); !status.isOk()) {
- ALOGE("%s: error while resetting patch %d: %s",
- __func__, patchId, status.getDescription().c_str());
+ AUGMENT_LOG(E, "error while resetting patch %d: %s", patchId,
+ status.getDescription().c_str());
return statusTFromBinderStatus(status);
}
mPatches.erase(it);
@@ -915,7 +915,7 @@
if (auto it = mPatches.find(patchId); it != mPatches.end()) {
releaseAudioPatch(it);
} else {
- ALOGE("%s: patch id %d not found", __func__, patchId);
+ AUGMENT_LOG(E, "patch id %d not found", patchId);
result = BAD_VALUE;
}
}
@@ -925,16 +925,17 @@
void Hal2AidlMapper::resetPortConfig(int32_t portConfigId) {
if (auto it = mPortConfigs.find(portConfigId); it != mPortConfigs.end()) {
+ AUGMENT_LOG(D, "%s", it->second.toString().c_str());
if (ndk::ScopedAStatus status = mModule->resetAudioPortConfig(portConfigId);
!status.isOk()) {
- ALOGE("%s: error while resetting port config %d: %s",
- __func__, portConfigId, status.getDescription().c_str());
+ AUGMENT_LOG(E, "error while resetting port config %d: %s", portConfigId,
+ status.getDescription().c_str());
return;
}
mPortConfigs.erase(it);
return;
}
- ALOGE("%s: port config id %d not found", __func__, portConfigId);
+ AUGMENT_LOG(E, "port config id %d not found", portConfigId);
}
void Hal2AidlMapper::resetUnusedPatchesAndPortConfigs() {
@@ -979,6 +980,8 @@
}
status_t Hal2AidlMapper::setDevicePortConnectedState(const AudioPort& devicePort, bool connected) {
+ AUGMENT_LOG(D, "state %s, device %s", (connected ? "connected" : "disconnected"),
+ devicePort.toString().c_str());
resetUnusedPatchesAndPortConfigs();
if (connected) {
AudioDevice matchDevice = devicePort.ext.get<AudioPortExt::device>().device;
@@ -1009,8 +1012,7 @@
// port not found in every one of them.
return BAD_VALUE;
} else {
- ALOGD("%s: device port for device %s found in the module %s",
- __func__, matchDevice.toString().c_str(), mInstance.c_str());
+ AUGMENT_LOG(D, "device port for device %s found", matchDevice.toString().c_str());
}
templatePort = portsIt->second;
}
@@ -1021,10 +1023,9 @@
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->connectExternalDevice(
connectedPort, &connectedPort)));
const auto [it, inserted] = mPorts.insert(std::make_pair(connectedPort.id, connectedPort));
- LOG_ALWAYS_FATAL_IF(!inserted,
- "%s: module %s, duplicate port ID received from HAL: %s, existing port: %s",
- __func__, mInstance.c_str(), connectedPort.toString().c_str(),
- it->second.toString().c_str());
+ LOG_ALWAYS_FATAL_IF(
+ !inserted, "%s duplicate port ID received from HAL: %s, existing port: %s",
+ __func__, connectedPort.toString().c_str(), it->second.toString().c_str());
mConnectedPorts.insert(connectedPort.id);
if (erasePortAfterConnectionIt != mPorts.end()) {
mPorts.erase(erasePortAfterConnectionIt);
@@ -1037,8 +1038,7 @@
// port not found in every one of them.
return BAD_VALUE;
} else {
- ALOGD("%s: device port for device %s found in the module %s",
- __func__, matchDevice.toString().c_str(), mInstance.c_str());
+ AUGMENT_LOG(D, "device port for device %s found", matchDevice.toString().c_str());
}
// Disconnection of remote submix out with address "0" is a special case. We need to replace
@@ -1094,8 +1094,8 @@
}
portIt->second = *port;
} else {
- ALOGW("%s, port(%d) returned successfully from the HAL but not it is not cached",
- __func__, portId);
+ AUGMENT_LOG(W, "port(%d) returned successfully from the HAL but not it is not cached",
+ portId);
}
}
return status;
@@ -1104,8 +1104,7 @@
status_t Hal2AidlMapper::updateRoutes() {
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(mModule->getAudioRoutes(&mRoutes)));
- ALOGW_IF(mRoutes.empty(), "%s: module %s returned an empty list of audio routes",
- __func__, mInstance.c_str());
+ AUGMENT_LOG_IF(W, mRoutes.empty(), "returned an empty list of audio routes");
if (mRemoteSubmixIn.has_value()) {
// Remove mentions of the template remote submix input from routes.
int32_t rSubmixInId = mRemoteSubmixIn->id;
@@ -1146,7 +1145,7 @@
updateAudioPort(portId, &it->second);
} else {
// This must not happen
- ALOGE("%s, cannot find port for id=%d", __func__, portId);
+ AUGMENT_LOG(E, "cannot find port for id=%d", portId);
}
}
}
diff --git a/media/libaudiohal/impl/Hal2AidlMapper.h b/media/libaudiohal/impl/Hal2AidlMapper.h
index 710b43e..2548752 100644
--- a/media/libaudiohal/impl/Hal2AidlMapper.h
+++ b/media/libaudiohal/impl/Hal2AidlMapper.h
@@ -26,6 +26,7 @@
#include <media/AidlConversionUtil.h>
#include "Cleanups.h"
+#include "ConversionHelperAidl.h"
namespace android {
@@ -41,7 +42,7 @@
// but still consider some of the outputs to be valid (for example, in 'open{Input|Output}Stream'),
// 'Hal2AidlMapper' follows the Binder convention. It means that if a method returns an error,
// the outputs may not be initialized at all and should not be considered by the caller.
-class Hal2AidlMapper {
+class Hal2AidlMapper : public ConversionHelperAidl {
public:
using Cleanups = Cleanups<Hal2AidlMapper>;
@@ -135,7 +136,6 @@
enum PatchMatch { MATCH_SOURCES, MATCH_SINKS, MATCH_BOTH };
- const std::string mInstance;
const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mModule;
bool audioDeviceMatches(const ::aidl::android::media::audio::common::AudioDevice& device,
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
index 918f886..99e2c66 100644
--- a/media/libaudiohal/impl/StreamHalAidl.cpp
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -32,6 +32,7 @@
#include <Utils.h>
#include <utils/Log.h>
+#include "AidlUtils.h"
#include "DeviceHalAidl.h"
#include "EffectHalAidl.h"
#include "StreamHalAidl.h"
@@ -74,12 +75,12 @@
return streamCommon;
}
-StreamHalAidl::StreamHalAidl(
- std::string_view className, bool isInput, const audio_config& config,
- int32_t nominalLatency, StreamContextAidl&& context,
- const std::shared_ptr<IStreamCommon>& stream,
- const std::shared_ptr<IHalAdapterVendorExtension>& vext)
- : ConversionHelperAidl(className),
+StreamHalAidl::StreamHalAidl(std::string_view className, bool isInput, const audio_config& config,
+ int32_t nominalLatency, StreamContextAidl&& context,
+ const std::shared_ptr<IStreamCommon>& stream,
+ const std::shared_ptr<IHalAdapterVendorExtension>& vext)
+ : ConversionHelperAidl(className, std::string(isInput ? "in" : "out") + "|ioHandle:" +
+ std::to_string(context.getIoHandle())),
mIsInput(isInput),
mConfig(configToBase(config)),
mContext(std::move(context)),
@@ -90,7 +91,7 @@
mContext.getBufferDurationMs(mConfig.sample_rate))
* NANOS_PER_MILLISECOND)
{
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
{
std::lock_guard l(mLock);
mLastReply.latencyMs = nominalLatency;
@@ -105,15 +106,15 @@
}
StreamHalAidl::~StreamHalAidl() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
if (mStream != nullptr) {
ndk::ScopedAStatus status = mStream->close();
- ALOGE_IF(!status.isOk(), "%s: status %s", __func__, status.getDescription().c_str());
+ AUGMENT_LOG_IF(E, !status.isOk(), "status %s", status.getDescription().c_str());
}
}
status_t StreamHalAidl::getBufferSize(size_t *size) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
if (size == nullptr) {
return BAD_VALUE;
}
@@ -122,11 +123,12 @@
return NO_INIT;
}
*size = mContext.getBufferSizeBytes();
+ AUGMENT_LOG(I, "size: %zu", *size);
return OK;
}
status_t StreamHalAidl::getAudioProperties(audio_config_base_t *configBase) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
if (configBase == nullptr) {
return BAD_VALUE;
}
@@ -136,10 +138,11 @@
}
status_t StreamHalAidl::setParameters(const String8& kvPairs) {
+ LOG_ENTRY_V();
TIME_CHECK();
if (!mStream) return NO_INIT;
AudioParameter parameters(kvPairs);
- ALOGD("%s: parameters: %s", __func__, parameters.toString().c_str());
+ AUGMENT_LOG(D, "parameters: %s", parameters.toString().c_str());
(void)VALUE_OR_RETURN_STATUS(filterOutAndProcessParameter<int>(
parameters, String8(AudioParameter::keyStreamHwAvSync),
@@ -150,6 +153,7 @@
}
status_t StreamHalAidl::getParameters(const String8& keys __unused, String8 *values) {
+ LOG_ENTRY_V();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (values == nullptr) {
@@ -161,7 +165,7 @@
}
status_t StreamHalAidl::getFrameSize(size_t *size) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
if (size == nullptr) {
return BAD_VALUE;
}
@@ -173,7 +177,7 @@
}
status_t StreamHalAidl::addEffect(sp<EffectHalInterface> effect) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (effect == nullptr) {
@@ -184,7 +188,7 @@
}
status_t StreamHalAidl::removeEffect(sp<EffectHalInterface> effect) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (effect == nullptr) {
@@ -195,7 +199,7 @@
}
status_t StreamHalAidl::standby() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
const auto state = getState();
@@ -208,8 +212,8 @@
if (reply.state != StreamDescriptor::State::PAUSED &&
reply.state != StreamDescriptor::State::DRAIN_PAUSED &&
reply.state != StreamDescriptor::State::TRANSFER_PAUSED) {
- ALOGE("%s: unexpected stream state: %s (expected PAUSED)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected PAUSED)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
FALLTHROUGH_INTENDED;
@@ -219,8 +223,8 @@
if (mIsInput) return flush();
RETURN_STATUS_IF_ERROR(flush(&reply));
if (reply.state != StreamDescriptor::State::IDLE) {
- ALOGE("%s: unexpected stream state: %s (expected IDLE)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected IDLE)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
FALLTHROUGH_INTENDED;
@@ -228,22 +232,22 @@
RETURN_STATUS_IF_ERROR(sendCommand(makeHalCommand<HalCommand::Tag::standby>(),
&reply, true /*safeFromNonWorkerThread*/));
if (reply.state != StreamDescriptor::State::STANDBY) {
- ALOGE("%s: unexpected stream state: %s (expected STANDBY)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected STANDBY)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
FALLTHROUGH_INTENDED;
case StreamDescriptor::State::STANDBY:
return OK;
default:
- ALOGE("%s: not supported from %s stream state %s",
- __func__, mIsInput ? "input" : "output", toString(state).c_str());
+ AUGMENT_LOG(E, "not supported from %s stream state %s", mIsInput ? "input" : "output",
+ toString(state).c_str());
return INVALID_OPERATION;
}
}
status_t StreamHalAidl::dump(int fd, const Vector<String16>& args) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
Vector<String16> newArgs = args;
@@ -254,7 +258,7 @@
}
status_t StreamHalAidl::start() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (!mContext.isMmapped()) {
@@ -267,8 +271,8 @@
RETURN_STATUS_IF_ERROR(
sendCommand(makeHalCommand<HalCommand::Tag::start>(), &reply, true));
if (reply.state != StreamDescriptor::State::IDLE) {
- ALOGE("%s: unexpected stream state: %s (expected IDLE)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected IDLE)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
FALLTHROUGH_INTENDED;
@@ -276,8 +280,8 @@
RETURN_STATUS_IF_ERROR(
sendCommand(makeHalCommand<HalCommand::Tag::burst>(0), &reply, true));
if (reply.state != StreamDescriptor::State::ACTIVE) {
- ALOGE("%s: unexpected stream state: %s (expected ACTIVE)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected ACTIVE)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
FALLTHROUGH_INTENDED;
@@ -287,20 +291,20 @@
RETURN_STATUS_IF_ERROR(
sendCommand(makeHalCommand<HalCommand::Tag::start>(), &reply, true));
if (reply.state != StreamDescriptor::State::ACTIVE) {
- ALOGE("%s: unexpected stream state: %s (expected ACTIVE)",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected ACTIVE)",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
return OK;
default:
- ALOGE("%s: not supported from %s stream state %s",
- __func__, mIsInput ? "input" : "output", toString(reply.state).c_str());
+ AUGMENT_LOG(E, "not supported from %s stream state %s", mIsInput ? "input" : "output",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
}
status_t StreamHalAidl::stop() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (!mContext.isMmapped()) {
@@ -317,28 +321,28 @@
return flush();
} else if (state != StreamDescriptor::State::IDLE &&
state != StreamDescriptor::State::STANDBY) {
- ALOGE("%s: not supported from %s stream state %s",
- __func__, mIsInput ? "input" : "output", toString(state).c_str());
+ AUGMENT_LOG(E, "not supported from %s stream state %s", mIsInput ? "input" : "output",
+ toString(state).c_str());
return INVALID_OPERATION;
}
return OK;
}
status_t StreamHalAidl::getLatency(uint32_t *latency) {
- ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
if (!mStream) return NO_INIT;
StreamDescriptor::Reply reply;
RETURN_STATUS_IF_ERROR(updateCountersIfNeeded(&reply));
*latency = std::clamp(std::max<int32_t>(0, reply.latencyMs), 1, 3000);
- ALOGW_IF(reply.latencyMs != static_cast<int32_t>(*latency),
- "Suspicious latency value reported by HAL: %d, clamped to %u", reply.latencyMs,
- *latency);
+ AUGMENT_LOG_IF(W, reply.latencyMs != static_cast<int32_t>(*latency),
+ "Suspicious latency value reported by HAL: %d, clamped to %u", reply.latencyMs,
+ *latency);
return OK;
}
status_t StreamHalAidl::getObservablePosition(int64_t* frames, int64_t* timestamp,
StatePositions* statePositions) {
- ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
if (!mStream) return NO_INIT;
StreamDescriptor::Reply reply;
RETURN_STATUS_IF_ERROR(updateCountersIfNeeded(&reply, statePositions));
@@ -348,7 +352,7 @@
}
status_t StreamHalAidl::getHardwarePosition(int64_t *frames, int64_t *timestamp) {
- ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
if (!mStream) return NO_INIT;
StreamDescriptor::Reply reply;
RETURN_STATUS_IF_ERROR(updateCountersIfNeeded(&reply));
@@ -358,7 +362,7 @@
}
status_t StreamHalAidl::getXruns(int32_t *frames) {
- ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
if (!mStream) return NO_INIT;
StreamDescriptor::Reply reply;
RETURN_STATUS_IF_ERROR(updateCountersIfNeeded(&reply));
@@ -367,7 +371,7 @@
}
status_t StreamHalAidl::transfer(void *buffer, size_t bytes, size_t *transferred) {
- ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY_V();
// TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (!mStream || mContext.getDataMQ() == nullptr) return NO_INIT;
mWorkerTid.store(gettid(), std::memory_order_release);
@@ -379,8 +383,8 @@
StreamDescriptor::Reply reply;
RETURN_STATUS_IF_ERROR(sendCommand(makeHalCommand<HalCommand::Tag::start>(), &reply));
if (reply.state != StreamDescriptor::State::IDLE) {
- ALOGE("%s: failed to get the stream out of standby, actual state: %s",
- __func__, toString(reply.state).c_str());
+ AUGMENT_LOG(E, "failed to get the stream out of standby, actual state: %s",
+ toString(reply.state).c_str());
return INVALID_OPERATION;
}
}
@@ -394,7 +398,7 @@
StreamDescriptor::Command::make<StreamDescriptor::Command::Tag::burst>(bytes);
if (!mIsInput) {
if (!mContext.getDataMQ()->write(static_cast<const int8_t*>(buffer), bytes)) {
- ALOGE("%s: failed to write %zu bytes to data MQ", __func__, bytes);
+ AUGMENT_LOG(E, "failed to write %zu bytes to data MQ", bytes);
return NOT_ENOUGH_DATA;
}
}
@@ -407,7 +411,7 @@
__func__, *transferred, bytes);
if (auto toRead = mContext.getDataMQ()->availableToRead(&fmqError, &fmqErrorMsg);
toRead != 0 && !mContext.getDataMQ()->read(static_cast<int8_t*>(buffer), toRead)) {
- ALOGE("%s: failed to read %zu bytes to data MQ", __func__, toRead);
+ AUGMENT_LOG(E, "failed to read %zu bytes to data MQ", toRead);
return NOT_ENOUGH_DATA;
}
}
@@ -418,7 +422,7 @@
}
status_t StreamHalAidl::pause(StreamDescriptor::Reply* reply) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
@@ -427,14 +431,14 @@
makeHalCommand<HalCommand::Tag::pause>(), reply,
true /*safeFromNonWorkerThread*/); // The workers stops its I/O activity first.
} else {
- ALOGD("%s: already stream in one of the PAUSED kind of states, current state: %s", __func__,
- toString(state).c_str());
+ AUGMENT_LOG(D, "already stream in one of the PAUSED kind of states, current state: %s",
+ toString(state).c_str());
return OK;
}
}
status_t StreamHalAidl::resume(StreamDescriptor::Reply* reply) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (mIsInput) {
@@ -448,8 +452,8 @@
RETURN_STATUS_IF_ERROR(
sendCommand(makeHalCommand<HalCommand::Tag::burst>(0), innerReply));
if (innerReply->state != StreamDescriptor::State::ACTIVE) {
- ALOGE("%s: unexpected stream state: %s (expected ACTIVE)",
- __func__, toString(innerReply->state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected ACTIVE)",
+ toString(innerReply->state).c_str());
return INVALID_OPERATION;
}
return OK;
@@ -460,18 +464,18 @@
} else if (state == StreamDescriptor::State::ACTIVE ||
state == StreamDescriptor::State::TRANSFERRING ||
state == StreamDescriptor::State::DRAINING) {
- ALOGD("%s: already in stream state: %s", __func__, toString(state).c_str());
+ AUGMENT_LOG(D, "already in stream state: %s", toString(state).c_str());
return OK;
} else {
- ALOGE("%s: unexpected stream state: %s (expected IDLE or one of *PAUSED states)",
- __func__, toString(state).c_str());
+ AUGMENT_LOG(E, "unexpected stream state: %s (expected IDLE or one of *PAUSED states)",
+ toString(state).c_str());
return INVALID_OPERATION;
}
}
}
status_t StreamHalAidl::drain(bool earlyNotify, StreamDescriptor::Reply* reply) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
return sendCommand(makeHalCommand<HalCommand::Tag::drain>(
@@ -482,7 +486,7 @@
}
status_t StreamHalAidl::flush(StreamDescriptor::Reply* reply) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
@@ -491,17 +495,17 @@
makeHalCommand<HalCommand::Tag::flush>(), reply,
true /*safeFromNonWorkerThread*/); // The workers stops its I/O activity first.
} else if (isInPlayOrRecordState(state)) {
- ALOGE("%s: found stream in non-flushable state: %s", __func__, toString(state).c_str());
+ AUGMENT_LOG(E, "found stream in non-flushable state: %s", toString(state).c_str());
return INVALID_OPERATION;
} else {
- ALOGD("%s: already stream in one of the flushable state: current state: %s", __func__,
- toString(state).c_str());
+ AUGMENT_LOG(D, "already stream in one of the flushable state: current state: %s",
+ toString(state).c_str());
return OK;
}
}
status_t StreamHalAidl::exit() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
return statusTFromBinderStatus(mStream->prepareToClose());
@@ -514,7 +518,7 @@
sendCommand(makeHalCommand<HalCommand::Tag::getStatus>(),
nullptr, true /*safeFromNonWorkerThread */);
} else {
- ALOGW("%s: unexpected onTransferReady in the state %s", __func__, toString(state).c_str());
+ AUGMENT_LOG(W, "unexpected onTransferReady in the state %s", toString(state).c_str());
}
}
@@ -529,19 +533,19 @@
std::lock_guard l(mLock);
mStatePositions.framesAtFlushOrDrain = mLastReply.observable.frames;
} else {
- ALOGW("%s: unexpected onDrainReady in the state %s", __func__, toString(state).c_str());
+ AUGMENT_LOG(W, "unexpected onDrainReady in the state %s", toString(state).c_str());
}
}
void StreamHalAidl::onAsyncError() {
std::lock_guard l(mLock);
- ALOGW("%s: received in the state %s", __func__, toString(mLastReply.state).c_str());
+ AUGMENT_LOG(W, "received in the state %s", toString(mLastReply.state).c_str());
mLastReply.state = StreamDescriptor::State::ERROR;
}
status_t StreamHalAidl::createMmapBuffer(int32_t minSizeFrames __unused,
struct audio_mmap_buffer_info *info) {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (!mContext.isMmapped()) {
@@ -601,15 +605,14 @@
{
std::lock_guard l(mCommandReplyLock);
if (!mContext.getCommandMQ()->writeBlocking(&command, 1)) {
- ALOGE("%s: failed to write command %s to MQ", __func__, command.toString().c_str());
+ AUGMENT_LOG(E, "failed to write command %s to MQ", command.toString().c_str());
return NOT_ENOUGH_DATA;
}
if (reply == nullptr) {
reply = &localReply;
}
if (!mContext.getReplyMQ()->readBlocking(reply, 1)) {
- ALOGE("%s: failed to read from reply MQ, command %s",
- __func__, command.toString().c_str());
+ AUGMENT_LOG(E, "failed to read from reply MQ, command %s", command.toString().c_str());
return NOT_ENOUGH_DATA;
}
{
@@ -646,8 +649,8 @@
case STATUS_INVALID_OPERATION: return INVALID_OPERATION;
case STATUS_NOT_ENOUGH_DATA: return NOT_ENOUGH_DATA;
default:
- ALOGE("%s: unexpected status %d returned for command %s",
- __func__, reply->status, command.toString().c_str());
+ AUGMENT_LOG(E, "unexpected status %d returned for command %s", reply->status,
+ command.toString().c_str());
return INVALID_OPERATION;
}
}
@@ -712,10 +715,10 @@
if (!mStream) return NO_INIT;
AudioParameter parameters(kvPairs);
- ALOGD("%s: parameters: \"%s\"", __func__, parameters.toString().c_str());
+ AUGMENT_LOG(D, "parameters: \"%s\"", parameters.toString().c_str());
if (status_t status = filterAndUpdateOffloadMetadata(parameters); status != OK) {
- ALOGW("%s: filtering or updating offload metadata failed: %d", __func__, status);
+ AUGMENT_LOG(W, "filtering or updating offload metadata failed: %d", status);
}
return StreamHalAidl::setParameters(parameters.toString());
@@ -726,6 +729,7 @@
}
status_t StreamOutHalAidl::setVolume(float left, float right) {
+ AUGMENT_LOG(V, "left %f right %f", left, right);
TIME_CHECK();
if (!mStream) return NO_INIT;
size_t channelCount = audio_channel_count_from_out_mask(mConfig.channel_mask);
@@ -779,11 +783,11 @@
}
status_t StreamOutHalAidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- ALOGD("%p %s", this, __func__);
+ LOG_ENTRY();
TIME_CHECK();
if (!mStream) return NO_INIT;
if (!mContext.isAsynchronous()) {
- ALOGE("%s: the callback is intended for asynchronous streams only", __func__);
+ AUGMENT_LOG(E, "the callback is intended for asynchronous streams only");
return INVALID_OPERATION;
}
mClientCallback = callback;
@@ -822,7 +826,7 @@
if (!mStream) return NO_INIT;
if (const auto state = getState(); isInDrainedState(state)) {
- ALOGD("%p %s stream already in %s", this, __func__, toString(state).c_str());
+ AUGMENT_LOG(D, "stream already in %s state", toString(state).c_str());
if (mContext.isAsynchronous()) onDrainReady();
return OK;
}
@@ -855,7 +859,7 @@
}
status_t StreamOutHalAidl::presentationComplete() {
- ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ LOG_ENTRY();
return OK;
}
@@ -1046,10 +1050,10 @@
updateMetadata = true;
}
if (updateMetadata) {
- ALOGD("%s set offload metadata %s", __func__, mOffloadMetadata.toString().c_str());
+ AUGMENT_LOG(D, "set offload metadata %s", mOffloadMetadata.toString().c_str());
if (status_t status = statusTFromBinderStatus(
mStream->updateOffloadMetadata(mOffloadMetadata)); status != OK) {
- ALOGE("%s: updateOffloadMetadata failed %d", __func__, status);
+ AUGMENT_LOG(E, "updateOffloadMetadata failed %d", status);
return status;
}
}
@@ -1136,7 +1140,7 @@
// Note: info.portId is not filled because it's a bit of framework info.
result.push_back(std::move(info));
} else {
- ALOGE("%s: no static info for active microphone with id '%s'", __func__, d.id.c_str());
+ AUGMENT_LOG(E, "no static info for active microphone with id '%s'", d.id.c_str());
}
}
*microphones = std::move(result);
diff --git a/media/libaudiohal/impl/StreamHalAidl.h b/media/libaudiohal/impl/StreamHalAidl.h
index baf4ac0..a1cdac4 100644
--- a/media/libaudiohal/impl/StreamHalAidl.h
+++ b/media/libaudiohal/impl/StreamHalAidl.h
@@ -53,7 +53,7 @@
StreamContextAidl(
::aidl::android::hardware::audio::core::StreamDescriptor& descriptor,
- bool isAsynchronous)
+ bool isAsynchronous, int ioHandle)
: mFrameSizeBytes(descriptor.frameSizeBytes),
mCommandMQ(new CommandMQ(descriptor.command)),
mReplyMQ(new ReplyMQ(descriptor.reply)),
@@ -61,7 +61,8 @@
mDataMQ(maybeCreateDataMQ(descriptor)),
mIsAsynchronous(isAsynchronous),
mIsMmapped(isMmapped(descriptor)),
- mMmapBufferDescriptor(maybeGetMmapBuffer(descriptor)) {}
+ mMmapBufferDescriptor(maybeGetMmapBuffer(descriptor)),
+ mIoHandle(ioHandle) {}
StreamContextAidl(StreamContextAidl&& other) :
mFrameSizeBytes(other.mFrameSizeBytes),
mCommandMQ(std::move(other.mCommandMQ)),
@@ -70,7 +71,8 @@
mDataMQ(std::move(other.mDataMQ)),
mIsAsynchronous(other.mIsAsynchronous),
mIsMmapped(other.mIsMmapped),
- mMmapBufferDescriptor(std::move(other.mMmapBufferDescriptor)) {}
+ mMmapBufferDescriptor(std::move(other.mMmapBufferDescriptor)),
+ mIoHandle(other.mIoHandle) {}
StreamContextAidl& operator=(StreamContextAidl&& other) {
mFrameSizeBytes = other.mFrameSizeBytes;
mCommandMQ = std::move(other.mCommandMQ);
@@ -80,6 +82,7 @@
mIsAsynchronous = other.mIsAsynchronous;
mIsMmapped = other.mIsMmapped;
mMmapBufferDescriptor = std::move(other.mMmapBufferDescriptor);
+ mIoHandle = other.mIoHandle;
return *this;
}
bool isValid() const {
@@ -105,7 +108,9 @@
bool isAsynchronous() const { return mIsAsynchronous; }
bool isMmapped() const { return mIsMmapped; }
const MmapBufferDescriptor& getMmapBufferDescriptor() const { return mMmapBufferDescriptor; }
- size_t getMmapBurstSize() const { return mMmapBufferDescriptor.burstSizeFrames;}
+ size_t getMmapBurstSize() const { return mMmapBufferDescriptor.burstSizeFrames; }
+ int getIoHandle() const { return mIoHandle; }
+
private:
static std::unique_ptr<DataMQ> maybeCreateDataMQ(
const ::aidl::android::hardware::audio::core::StreamDescriptor& descriptor) {
@@ -137,6 +142,7 @@
bool mIsAsynchronous;
bool mIsMmapped;
MmapBufferDescriptor mMmapBufferDescriptor;
+ int mIoHandle;
};
class StreamHalAidl : public virtual StreamHalInterface, public ConversionHelperAidl {
diff --git a/media/libaudiohal/tests/CoreAudioHalAidl_test.cpp b/media/libaudiohal/tests/CoreAudioHalAidl_test.cpp
index dbd1f60..0f5334f 100644
--- a/media/libaudiohal/tests/CoreAudioHalAidl_test.cpp
+++ b/media/libaudiohal/tests/CoreAudioHalAidl_test.cpp
@@ -988,9 +988,9 @@
mVendorExt = ndk::SharedRefBase::make<TestHalAdapterVendorExtension>();
struct audio_config config = AUDIO_CONFIG_INITIALIZER;
::aidl::android::hardware::audio::core::StreamDescriptor descriptor;
+ StreamContextAidl context(descriptor, false /*isAsynchronous*/, 0);
mStream = sp<StreamHalAidl>::make("test", false /*isInput*/, config, 0 /*nominalLatency*/,
- StreamContextAidl(descriptor, false /*isAsynchronous*/),
- mStreamCommon, mVendorExt);
+ std::move(context), mStreamCommon, mVendorExt);
}
void TearDown() override {
mStream.clear();
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index f367a3e..98c3382 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -267,6 +267,7 @@
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETLOGSESSIONID "setLogSessionId" // AudioTrack, Record
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYBACKPARAM "setPlaybackParam" // AudioTrack
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYERIID "setPlayerIId" // AudioTrack
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETPREFERREDDEVICE "setPreferredDevice" // AudioTrack, Record
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETSAMPLERATE "setSampleRate" // AudioTrack
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETSTARTTHRESHOLD "setStartThreshold" // AudioTrack
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME "setVoiceVolume" // AudioFlinger
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 26b8d0c..e26f189 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -33,6 +33,7 @@
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
#include <camera/StringUtils.h>
+#include <com_android_graphics_libgui_flags.h>
#include <gui/Surface.h>
#include <utils/String8.h>
#include <cutils/properties.h>
@@ -471,11 +472,13 @@
ALOGE("%s: Buffer queue already exists", __FUNCTION__);
return ALREADY_EXISTS;
}
-
+#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
// Create a buffer queue.
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
+#endif // !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+
uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN;
if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -484,9 +487,15 @@
bufferCount += kConsumerBufferCount;
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mVideoBufferConsumer = new BufferItemConsumer(usage, bufferCount);
+ mVideoBufferConsumer->setName(String8::format("StageFright-CameraSource"));
+ mVideoBufferProducer = mVideoBufferConsumer->getSurface()->getIGraphicBufferProducer();
+#else
mVideoBufferConsumer = new BufferItemConsumer(consumer, usage, bufferCount);
mVideoBufferConsumer->setName(String8::format("StageFright-CameraSource"));
mVideoBufferProducer = producer;
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
status_t res = mVideoBufferConsumer->setDefaultBufferSize(width, height);
if (res != OK) {
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 7b19ac0..995c674 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -22,13 +22,15 @@
#include "NdkImagePriv.h"
#include "NdkImageReaderPriv.h"
-#include <cutils/atomic.h>
-#include <utils/Log.h>
#include <android_media_Utils.h>
-#include <ui/PublicFormat.h>
-#include <private/android/AHardwareBufferHelpers.h>
+#include <com_android_graphics_libgui_flags.h>
#include <grallocusage/GrallocUsageConversion.h>
#include <gui/bufferqueue/1.0/WGraphicBufferProducer.h>
+#include <private/android/AHardwareBufferHelpers.h>
+#include <ui/PublicFormat.h>
+#include <utils/Log.h>
+
+#include <cutils/atomic.h>
using namespace android;
@@ -291,22 +293,30 @@
AImageReader::init() {
mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
+#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
+#endif // !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
String8 consumerName = String8::format("ImageReader-%dx%df%xu%" PRIu64 "m%d-%d-%d",
mWidth, mHeight, mFormat, mUsage, mMaxImages, getpid(),
createProcessUniqueId());
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mBufferItemConsumer = new BufferItemConsumer(mHalUsage, mMaxImages, /*controlledByApp*/ true);
+#else
mBufferItemConsumer =
new BufferItemConsumer(gbConsumer, mHalUsage, mMaxImages, /*controlledByApp*/ true);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
if (mBufferItemConsumer == nullptr) {
ALOGE("Failed to allocate BufferItemConsumer");
return AMEDIA_ERROR_UNKNOWN;
}
+#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
mProducer = gbProducer;
+#endif // !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
mBufferItemConsumer->setName(consumerName);
mBufferItemConsumer->setFrameAvailableListener(mFrameListener);
mBufferItemConsumer->setBufferFreedListener(mBufferRemovedListener);
@@ -328,10 +338,18 @@
return AMEDIA_ERROR_UNKNOWN;
}
if (mUsage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mBufferItemConsumer->setConsumerIsProtected(true);
+#else
gbConsumer->setConsumerIsProtected(true);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
}
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ mSurface = mBufferItemConsumer->getSurface();
+#else
mSurface = new Surface(mProducer, /*controlledByApp*/true);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
if (mSurface == nullptr) {
ALOGE("Failed to create surface");
return AMEDIA_ERROR_UNKNOWN;
@@ -578,8 +596,13 @@
*handle = mWindowHandle;
return AMEDIA_OK;
}
+#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
+ sp<HGraphicBufferProducer> hgbp = new TWGraphicBufferProducer<HGraphicBufferProducer>(
+ mSurface->getIGraphicBufferProducer());
+#else
sp<HGraphicBufferProducer> hgbp =
new TWGraphicBufferProducer<HGraphicBufferProducer>(mProducer);
+#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
HalToken halToken;
if (!createHalToken(hgbp, &halToken)) {
return AMEDIA_ERROR_UNKNOWN;
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index 0199616..985f42b 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -25,6 +25,7 @@
#include <utils/Mutex.h>
#include <utils/StrongPointer.h>
+#include <com_android_graphics_libgui_flags.h>
#include <gui/BufferItem.h>
#include <gui/BufferItemConsumer.h>
#include <gui/Surface.h>
@@ -161,7 +162,9 @@
uint64_t mHalUsage;
+#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
sp<IGraphicBufferProducer> mProducer;
+#endif // !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
sp<Surface> mSurface;
sp<BufferItemConsumer> mBufferItemConsumer;
sp<ANativeWindow> mWindow;
diff --git a/media/utils/include/mediautils/jthread.h b/media/utils/include/mediautils/jthread.h
new file mode 100644
index 0000000..17532a4
--- /dev/null
+++ b/media/utils/include/mediautils/jthread.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <thread>
+#include <utility>
+
+namespace android::mediautils {
+
+namespace impl {
+class stop_source;
+/**
+ * Const view on stop source, which the running thread uses and an interface
+ * for cancellation.
+ */
+class stop_token {
+ public:
+ stop_token(const stop_source& source) : stop_source_(source) {}
+ bool stop_requested() const;
+
+ private:
+ const stop_source& stop_source_;
+};
+
+class stop_source {
+ public:
+ stop_token get_token() { return stop_token{*this}; }
+ bool stop_requested() const { return cancellation_signal_.load(); }
+ bool request_stop() {
+ auto f = false;
+ return cancellation_signal_.compare_exchange_strong(f, true);
+ }
+
+ private:
+ std::atomic_bool cancellation_signal_ = false;
+};
+
+inline bool stop_token::stop_requested() const {
+ return stop_source_.stop_requested();
+}
+} // namespace impl
+
+using stop_token = impl::stop_token;
+/**
+ * Just a jthread, since std::jthread is still experimental in our toolchain.
+ * Implements a subset of essential functionality (co-op cancellation and join on dtor).
+ * If jthread gets picked up, usage can be cut over.
+ */
+class jthread {
+ public:
+ /**
+ * Construct/launch a thread with a callable which consumes a stop_token.
+ * The callable must be cooperatively cancellable via stop_token::stop_requested(), and will be
+ * automatically stopped then joined on destruction.
+ * Example:
+ * jthread([](stop_token stok) {
+ *     while (!stok.stop_requested()) {
+ *         // do work
+ *     }
+ * });
+ */
+ template <typename F>
+ jthread(F&& f) : stop_source_{}, thread_{std::forward<F>(f), stop_source_.get_token()} {}
+
+ ~jthread() {
+ stop_source_.request_stop();
+ thread_.join();
+ }
+
+ bool request_stop() { return stop_source_.request_stop(); }
+
+ private:
+ // order matters: stop_source_ must be constructed before thread_, since the
+ // launched thread captures a token referencing it; dtor stops then joins.
+ impl::stop_source stop_source_;
+ std::thread thread_;
+};
+} // namespace android::mediautils
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index a68569a..ff11b42 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -237,3 +237,11 @@
"shared_memory_allocator_tests.cpp",
],
}
+
+cc_test {
+ name: "jthread_tests",
+ defaults: ["libmediautils_tests_defaults"],
+ srcs: [
+ "jthread_tests.cpp",
+ ],
+}
diff --git a/media/utils/tests/jthread_tests.cpp b/media/utils/tests/jthread_tests.cpp
new file mode 100644
index 0000000..ed77c27
--- /dev/null
+++ b/media/utils/tests/jthread_tests.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "jthread_tests"
+
+#include <mediautils/jthread.h>
+
+#include <gtest/gtest.h>
+
+#include <atomic>
+
+using namespace android::mediautils;
+
+namespace {
+TEST(jthread_tests, dtor) {
+ std::atomic_int x = 0;
+ std::atomic_bool is_stopped = false;
+ {
+ auto jt = jthread([&](stop_token stok) {
+ while (!stok.stop_requested()) {
+ if (x.load() < std::numeric_limits<int>::max())
+ x++;
+ }
+ is_stopped = true;
+ });
+ while (x.load() < 1000)
+ ;
+ }
+ // Check we triggered a stop on dtor
+ ASSERT_TRUE(is_stopped.load());
+ // Check we actually ran
+ ASSERT_GE(x.load(), 1000);
+}
+TEST(jthread_tests, request_stop) {
+ std::atomic_int x = 0;
+ std::atomic_bool is_stopped = false;
+ auto jt = jthread([&](stop_token stok) {
+ while (!stok.stop_requested()) {
+ if (x.load() < std::numeric_limits<int>::max())
+ x++;
+ }
+ is_stopped = true;
+ });
+ while (x.load() < 1000)
+ ;
+ // request stop manually
+ ASSERT_TRUE(jt.request_stop());
+ // busy loop till thread acks
+ while (!is_stopped.load())
+ ;
+ // Check the manual request_stop() was observed by the thread
+ ASSERT_TRUE(is_stopped.load());
+ // Check we actually ran
+ ASSERT_GE(x.load(), 1000);
+}
+
+} // namespace
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 77b342a..3bf1d72 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -748,9 +748,11 @@
result.append("Notification Clients:\n");
result.append(" pid uid name\n");
- for (const auto& [ _, client ] : mNotificationClients) {
- result.appendFormat("%6d %6u %s\n",
- client->getPid(), client->getUid(), client->getPackageName().c_str());
+ for (size_t i = 0; i < mNotificationClients.size(); ++i) {
+ const pid_t pid = mNotificationClients[i]->getPid();
+ const uid_t uid = mNotificationClients[i]->getUid();
+ const mediautils::UidInfo::Info info = mUidInfo.getInfo(uid);
+ result.appendFormat("%6d %6u %s\n", pid, uid, info.package.c_str());
}
result.append("Global session refs:\n");
@@ -2129,23 +2131,24 @@
void AudioFlinger::registerClient(const sp<media::IAudioFlingerClient>& client)
{
+ audio_utils::lock_guard _l(mutex());
if (client == 0) {
return;
}
- const pid_t pid = IPCThreadState::self()->getCallingPid();
+ pid_t pid = IPCThreadState::self()->getCallingPid();
const uid_t uid = IPCThreadState::self()->getCallingUid();
-
- audio_utils::lock_guard _l(mutex());
{
audio_utils::lock_guard _cl(clientMutex());
- if (mNotificationClients.count(pid) == 0) {
- const mediautils::UidInfo::Info info = mUidInfo.getInfo(uid);
- sp<NotificationClient> notificationClient = sp<NotificationClient>::make(
- this, client, pid, uid, info.package);
- ALOGV("registerClient() pid %d, uid %u, package %s",
- pid, uid, info.package.c_str());
+ if (mNotificationClients.indexOfKey(pid) < 0) {
+ sp<NotificationClient> notificationClient = new NotificationClient(this,
+ client,
+ pid,
+ uid);
+ ALOGV("registerClient() client %p, pid %d, uid %u",
+ notificationClient.get(), pid, uid);
- mNotificationClients[pid] = notificationClient;
+ mNotificationClients.add(pid, notificationClient);
+
sp<IBinder> binder = IInterface::asBinder(client);
binder->linkToDeath(notificationClient);
}
@@ -2172,7 +2175,7 @@
audio_utils::lock_guard _l(mutex());
{
audio_utils::lock_guard _cl(clientMutex());
- mNotificationClients.erase(pid);
+ mNotificationClients.removeItem(pid);
}
ALOGV("%d died, releasing its sessions", pid);
@@ -2213,13 +2216,11 @@
legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(ioDesc));
audio_utils::lock_guard _l(clientMutex());
- if (pid != 0) {
- if (auto it = mNotificationClients.find(pid); it != mNotificationClients.end()) {
- it->second->audioFlingerClient()->ioConfigChanged(eventAidl, descAidl);
- }
- } else {
- for (const auto& [ client_pid, client] : mNotificationClients) {
- client->audioFlingerClient()->ioConfigChanged(eventAidl, descAidl);
+ size_t size = mNotificationClients.size();
+ for (size_t i = 0; i < size; i++) {
+ if ((pid == 0) || (mNotificationClients.keyAt(i) == pid)) {
+ mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(eventAidl,
+ descAidl);
}
}
}
@@ -2233,8 +2234,9 @@
audio_utils::lock_guard _l(clientMutex());
size_t size = mNotificationClients.size();
- for (const auto& [_, client] : mNotificationClients) {
- client->audioFlingerClient()->onSupportedLatencyModesChanged(outputAidl, modesAidl);
+ for (size_t i = 0; i < size; i++) {
+ mNotificationClients.valueAt(i)->audioFlingerClient()
+ ->onSupportedLatencyModesChanged(outputAidl, modesAidl);
}
}
@@ -2291,10 +2293,8 @@
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<media::IAudioFlingerClient>& client,
pid_t pid,
- uid_t uid,
- std::string_view packageName)
- : mAudioFlinger(audioFlinger), mPid(pid), mUid(uid)
- , mPackageName(packageName), mAudioFlingerClient(client)
+ uid_t uid)
+ : mAudioFlinger(audioFlinger), mPid(pid), mUid(uid), mAudioFlingerClient(client)
{
}
@@ -2304,7 +2304,7 @@
void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who __unused)
{
- const auto keep = sp<NotificationClient>::fromExisting(this);
+ sp<NotificationClient> keep(this);
mAudioFlinger->removeNotificationClient(mPid);
}
@@ -3571,7 +3571,7 @@
// is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be
// called from a different pid leaving a stale session reference. Also we don't know how
// to clear this reference if the client process dies.
- if (mNotificationClients.count(caller) == 0) {
+ if (mNotificationClients.indexOfKey(caller) < 0) {
ALOGW("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession);
return;
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b57a355..9513327 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -479,14 +479,12 @@
NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<media::IAudioFlingerClient>& client,
pid_t pid,
- uid_t uid,
- std::string_view packageName);
+ uid_t uid);
virtual ~NotificationClient();
sp<media::IAudioFlingerClient> audioFlingerClient() const { return mAudioFlingerClient; }
pid_t getPid() const { return mPid; }
uid_t getUid() const { return mUid; }
- const std::string& getPackageName() const { return mPackageName; }
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -497,7 +495,6 @@
const sp<AudioFlinger> mAudioFlinger;
const pid_t mPid;
const uid_t mUid;
- const std::string mPackageName;
const sp<media::IAudioFlingerClient> mAudioFlingerClient;
};
@@ -697,7 +694,8 @@
DefaultKeyedVector<audio_io_handle_t, sp<IAfRecordThread>> mRecordThreads GUARDED_BY(mutex());
- std::map<pid_t, sp<NotificationClient>> mNotificationClients GUARDED_BY(clientMutex());
+ DefaultKeyedVector<pid_t, sp<NotificationClient>> mNotificationClients
+ GUARDED_BY(clientMutex());
// updated by atomic_fetch_add_explicit
volatile atomic_uint_fast32_t mNextUniqueIds[AUDIO_UNIQUE_ID_USE_MAX]; // ctor init
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index 4006489..c600fb6 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -107,7 +107,7 @@
"-Werror",
],
- test_suites: ["device-tests"],
+ test_suites: ["general-tests"],
}
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 38476a4..a74b6d6 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -58,6 +58,7 @@
"libcamera_metadata",
"libfmq",
"libgui",
+ "libguiflags",
"libhardware",
"libhidlbase",
"libimage_io",
@@ -169,6 +170,7 @@
"device3/Camera3OutputStreamInterface.cpp",
"device3/Camera3OutputUtils.cpp",
"device3/Camera3DeviceInjectionMethods.cpp",
+ "device3/deprecated/DeprecatedCamera3StreamSplitter.cpp",
"device3/UHRCropAndMeteringRegionMapper.cpp",
"device3/PreviewFrameSpacer.cpp",
"device3/hidl/HidlCamera3Device.cpp",
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index 719ff2c..57df314 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -417,7 +417,8 @@
int processDepthPhotoFrame(DepthPhotoInputFrame inputFrame, size_t depthPhotoBufferSize,
void* depthPhotoBuffer /*out*/, size_t* depthPhotoActualSize /*out*/) {
if ((inputFrame.mMainJpegBuffer == nullptr) || (inputFrame.mDepthMapBuffer == nullptr) ||
- (depthPhotoBuffer == nullptr) || (depthPhotoActualSize == nullptr)) {
+ (depthPhotoBuffer == nullptr) || (depthPhotoActualSize == nullptr) ||
+ (inputFrame.mMaxJpegSize < MIN_JPEG_BUFFER_SIZE)) {
return BAD_VALUE;
}
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
index 09b6935..9e79fc0 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.h
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -23,6 +23,9 @@
namespace android {
namespace camera3 {
+// minimal jpeg buffer size: 256KB. Blob header is not included.
+constexpr const size_t MIN_JPEG_BUFFER_SIZE = 256 * 1024;
+
enum DepthPhotoOrientation {
DEPTH_ORIENTATION_0_DEGREES = 0,
DEPTH_ORIENTATION_90_DEGREES = 90,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 6f87ca3..e5ccbae 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -36,6 +36,7 @@
#include <aidl/android/hardware/camera/device/CameraBlob.h>
#include "common/CameraDeviceBase.h"
+#include "common/DepthPhotoProcessor.h"
#include "device3/BufferUtils.h"
#include "device3/StatusTracker.h"
#include "device3/Camera3BufferManager.h"
@@ -376,8 +377,8 @@
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
- static const ssize_t kMinJpegBufferSize =
- 256 * 1024 + sizeof(aidl::android::hardware::camera::device::CameraBlob);
+ static const ssize_t kMinJpegBufferSize = camera3::MIN_JPEG_BUFFER_SIZE +
+ sizeof(aidl::android::hardware::camera::device::CameraBlob);
// Constant to use for stream ID when one doesn't exist
static const int NO_STREAM = -1;
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 485f3f0..187bd93 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -18,6 +18,8 @@
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+#include "Flags.h"
+
#include "Camera3SharedOutputStream.h"
namespace android {
@@ -59,7 +61,11 @@
status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
status_t res = OK;
- mStreamSplitter = new Camera3StreamSplitter(mUseHalBufManager);
+#if USE_NEW_STREAM_SPLITTER
+ mStreamSplitter = sp<Camera3StreamSplitter>::make(mUseHalBufManager);
+#else
+ mStreamSplitter = sp<DeprecatedCamera3StreamSplitter>::make(mUseHalBufManager);
+#endif // USE_NEW_STREAM_SPLITTER
uint64_t usage = 0;
getEndpointUsage(&usage);
@@ -90,7 +96,11 @@
// Attach the buffer to the splitter output queues. This could block if
// the output queue doesn't have any empty slot. So unlock during the course
// of attachBufferToOutputs.
+#if USE_NEW_STREAM_SPLITTER
sp<Camera3StreamSplitter> splitter = mStreamSplitter;
+#else
+ sp<DeprecatedCamera3StreamSplitter> splitter = mStreamSplitter;
+#endif // USE_NEW_STREAM_SPLITTER
mLock.unlock();
res = splitter->attachBufferToOutputs(anb, surface_ids);
mLock.lock();
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 818ce17..ae11507 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -18,9 +18,17 @@
#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
#include <array>
-#include "Camera3StreamSplitter.h"
+
+#include "Flags.h"
+
#include "Camera3OutputStream.h"
+#if USE_NEW_STREAM_SPLITTER
+#include "Camera3StreamSplitter.h"
+#else
+#include "deprecated/DeprecatedCamera3StreamSplitter.h"
+#endif // USE_NEW_STREAM_SPLITTER
+
namespace android {
namespace camera3 {
@@ -106,8 +114,11 @@
* The Camera3StreamSplitter object this stream uses for stream
* sharing.
*/
+#if USE_NEW_STREAM_SPLITTER
sp<Camera3StreamSplitter> mStreamSplitter;
-
+#else
+ sp<DeprecatedCamera3StreamSplitter> mStreamSplitter;
+#endif // USE_NEW_STREAM_SPLITTER
/**
* Initialize stream splitter.
*/
diff --git a/services/camera/libcameraservice/device3/Flags.h b/services/camera/libcameraservice/device3/Flags.h
new file mode 100644
index 0000000..ca0006b
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Flags.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <com_android_graphics_libgui_flags.h>
+
+#ifndef USE_NEW_STREAM_SPLITTER
+
+#define USE_NEW_STREAM_SPLITTER \
+ COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_STREAM_SPLITTER) && \
+ COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
+
+#endif // USE_NEW_STREAM_SPLITTER
diff --git a/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.cpp
new file mode 100644
index 0000000..c1113e5
--- /dev/null
+++ b/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.cpp
@@ -0,0 +1,820 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "DeprecatedCamera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <camera/StringUtils.h>
+#include <gui/BufferItem.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include <cutils/atomic.h>
+
+#include "../Camera3Stream.h"
+
+#include "DeprecatedCamera3StreamSplitter.h"
+
+namespace android {
+
+status_t DeprecatedCamera3StreamSplitter::connect(
+ const std::unordered_map<size_t, sp<Surface>>& surfaces, uint64_t consumerUsage,
+ uint64_t producerUsage, size_t halMaxBuffers, uint32_t width, uint32_t height,
+ android::PixelFormat format, sp<Surface>* consumer, int64_t dynamicRangeProfile) {
+ ATRACE_CALL();
+ if (consumer == nullptr) {
+ SP_LOGE("%s: consumer pointer is NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+ status_t res = OK;
+
+ if (mOutputs.size() > 0 || mConsumer != nullptr) {
+ SP_LOGE("%s: already connected", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (mBuffers.size() > 0) {
+ SP_LOGE("%s: still has %zu pending buffers", __FUNCTION__, mBuffers.size());
+ return BAD_VALUE;
+ }
+
+ mMaxHalBuffers = halMaxBuffers;
+ mConsumerName = getUniqueConsumerName();
+ mDynamicRangeProfile = dynamicRangeProfile;
+ // Add output surfaces. This has to be before creating internal buffer queue
+ // in order to get max consumer side buffers.
+ for (auto& it : surfaces) {
+ if (it.second == nullptr) {
+ SP_LOGE("%s: Fatal: surface is NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ res = addOutputLocked(it.first, it.second);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to add output surface: %s(%d)", __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+
+ // Create BufferQueue for input
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+ // Allocate 1 extra buffer to handle the case where all buffers are detached
+ // from input, and attached to the outputs. In this case, the input queue's
+ // dequeueBuffer can still allocate 1 extra buffer before being blocked by
+ // the output's attachBuffer().
+ mMaxConsumerBuffers++;
+ mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage, mMaxConsumerBuffers);
+ if (mBufferItemConsumer == nullptr) {
+ return NO_MEMORY;
+ }
+ mConsumer->setConsumerName(toString8(mConsumerName));
+
+ *consumer = new Surface(mProducer);
+ if (*consumer == nullptr) {
+ return NO_MEMORY;
+ }
+
+ res = mProducer->setAsyncMode(true);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to enable input queue async mode: %s(%d)", __FUNCTION__, strerror(-res),
+ res);
+ return res;
+ }
+
+ res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+ mWidth = width;
+ mHeight = height;
+ mFormat = format;
+ mProducerUsage = producerUsage;
+ mAcquiredInputBuffers = 0;
+
+ SP_LOGV("%s: connected", __FUNCTION__);
+ return res;
+}
+
+status_t DeprecatedCamera3StreamSplitter::getOnFrameAvailableResult() {
+ ATRACE_CALL();
+ return mOnFrameAvailableRes.load();
+}
+
+void DeprecatedCamera3StreamSplitter::disconnect() {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ for (auto& notifier : mNotifiers) {
+ sp<IGraphicBufferProducer> producer = notifier.first;
+ sp<OutputListener> listener = notifier.second;
+ IInterface::asBinder(producer)->unlinkToDeath(listener);
+ }
+ mNotifiers.clear();
+
+ for (auto& output : mOutputs) {
+ if (output.second != nullptr) {
+ output.second->disconnect(NATIVE_WINDOW_API_CAMERA);
+ }
+ }
+ mOutputs.clear();
+ mOutputSurfaces.clear();
+ mOutputSlots.clear();
+ mConsumerBufferCount.clear();
+
+ if (mConsumer.get() != nullptr) {
+ mConsumer->consumerDisconnect();
+ }
+
+ if (mBuffers.size() > 0) {
+ SP_LOGW("%zu buffers still being tracked", mBuffers.size());
+ mBuffers.clear();
+ }
+
+ mMaxHalBuffers = 0;
+ mMaxConsumerBuffers = 0;
+ mAcquiredInputBuffers = 0;
+ SP_LOGV("%s: Disconnected", __FUNCTION__);
+}
+
+DeprecatedCamera3StreamSplitter::DeprecatedCamera3StreamSplitter(bool useHalBufManager)
+ : mUseHalBufManager(useHalBufManager) {}
+
+DeprecatedCamera3StreamSplitter::~DeprecatedCamera3StreamSplitter() {
+ disconnect();
+}
+
+status_t DeprecatedCamera3StreamSplitter::addOutput(size_t surfaceId,
+ const sp<Surface>& outputQueue) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+ status_t res = addOutputLocked(surfaceId, outputQueue);
+
+ if (res != OK) {
+ SP_LOGE("%s: addOutputLocked failed %d", __FUNCTION__, res);
+ return res;
+ }
+
+ if (mMaxConsumerBuffers > mAcquiredInputBuffers) {
+ res = mConsumer->setMaxAcquiredBufferCount(mMaxConsumerBuffers);
+ }
+
+ return res;
+}
+
+void DeprecatedCamera3StreamSplitter::setHalBufferManager(bool enabled) {
+ Mutex::Autolock lock(mMutex);
+ mUseHalBufManager = enabled;
+}
+
+status_t DeprecatedCamera3StreamSplitter::addOutputLocked(size_t surfaceId,
+ const sp<Surface>& outputQueue) {
+ ATRACE_CALL();
+ if (outputQueue == nullptr) {
+ SP_LOGE("addOutput: outputQueue must not be NULL");
+ return BAD_VALUE;
+ }
+
+ if (mOutputs[surfaceId] != nullptr) {
+ SP_LOGE("%s: surfaceId: %u already taken!", __FUNCTION__, (unsigned)surfaceId);
+ return BAD_VALUE;
+ }
+
+ status_t res = native_window_set_buffers_dimensions(outputQueue.get(), mWidth, mHeight);
+ if (res != NO_ERROR) {
+ SP_LOGE("addOutput: failed to set buffer dimensions (%d)", res);
+ return res;
+ }
+ res = native_window_set_buffers_format(outputQueue.get(), mFormat);
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream buffer format %#x for surfaceId %zu", __FUNCTION__,
+ mFormat, surfaceId);
+ return res;
+ }
+
+ sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+ // Connect to the buffer producer
+ sp<OutputListener> listener(new OutputListener(this, gbp));
+ IInterface::asBinder(gbp)->linkToDeath(listener);
+ res = outputQueue->connect(NATIVE_WINDOW_API_CAMERA, listener);
+ if (res != NO_ERROR) {
+ SP_LOGE("addOutput: failed to connect (%d)", res);
+ return res;
+ }
+
+ // Query consumer side buffer count, and update overall buffer count
+ int maxConsumerBuffers = 0;
+ res = static_cast<ANativeWindow*>(outputQueue.get())
+ ->query(outputQueue.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers);
+ if (res != OK) {
+ SP_LOGE("%s: Unable to query consumer undequeued buffer count"
+ " for surface",
+ __FUNCTION__);
+ return res;
+ }
+
+ SP_LOGV("%s: Consumer wants %d buffers, Producer wants %zu", __FUNCTION__, maxConsumerBuffers,
+ mMaxHalBuffers);
+ // The output slot count requirement can change depending on the current amount
+ // of outputs and incoming buffer consumption rate. To avoid any issues with
+ // insufficient slots, set their count to the maximum supported. The output
+ // surface buffer allocation is disabled so no real buffers will get allocated.
+ size_t totalBufferCount = BufferQueue::NUM_BUFFER_SLOTS;
+ res = native_window_set_buffer_count(outputQueue.get(), totalBufferCount);
+ if (res != OK) {
+ SP_LOGE("%s: Unable to set buffer count for surface %p", __FUNCTION__, outputQueue.get());
+ return res;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need skip these cases as timeout will disable the non-blocking (async) mode.
+ uint64_t usage = 0;
+ res = native_window_get_consumer_usage(static_cast<ANativeWindow*>(outputQueue.get()), &usage);
+ if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+ nsecs_t timeout =
+ mUseHalBufManager ? kHalBufMgrDequeueBufferTimeout : kNormalDequeueBufferTimeout;
+ outputQueue->setDequeueTimeout(timeout);
+ }
+
+ res = gbp->allowAllocation(false);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+ return res;
+ }
+
+ // Add new entry into mOutputs
+ mOutputs[surfaceId] = gbp;
+ mOutputSurfaces[surfaceId] = outputQueue;
+ mConsumerBufferCount[surfaceId] = maxConsumerBuffers;
+ if (mConsumerBufferCount[surfaceId] > mMaxHalBuffers) {
+ SP_LOGW("%s: Consumer buffer count %zu larger than max. Hal buffers: %zu", __FUNCTION__,
+ mConsumerBufferCount[surfaceId], mMaxHalBuffers);
+ }
+ mNotifiers[gbp] = listener;
+ mOutputSlots[gbp] = std::make_unique<OutputSlots>(totalBufferCount);
+
+ mMaxConsumerBuffers += maxConsumerBuffers;
+ return NO_ERROR;
+}
+
+status_t DeprecatedCamera3StreamSplitter::removeOutput(size_t surfaceId) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ status_t res = removeOutputLocked(surfaceId);
+ if (res != OK) {
+ SP_LOGE("%s: removeOutputLocked failed %d", __FUNCTION__, res);
+ return res;
+ }
+
+ if (mAcquiredInputBuffers < mMaxConsumerBuffers) {
+ res = mConsumer->setMaxAcquiredBufferCount(mMaxConsumerBuffers);
+ if (res != OK) {
+ SP_LOGE("%s: setMaxAcquiredBufferCount failed %d", __FUNCTION__, res);
+ return res;
+ }
+ }
+
+ return res;
+}
+
+status_t DeprecatedCamera3StreamSplitter::removeOutputLocked(size_t surfaceId) {
+ if (mOutputs[surfaceId] == nullptr) {
+ SP_LOGE("%s: output surface is not present!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ sp<IGraphicBufferProducer> gbp = mOutputs[surfaceId];
+ // Search and decrement the ref. count of any buffers that are
+ // still attached to the removed surface.
+ std::vector<uint64_t> pendingBufferIds;
+ auto& outputSlots = *mOutputSlots[gbp];
+ for (size_t i = 0; i < outputSlots.size(); i++) {
+ if (outputSlots[i] != nullptr) {
+ pendingBufferIds.push_back(outputSlots[i]->getId());
+ auto rc = gbp->detachBuffer(i);
+ if (rc != NO_ERROR) {
+ // Buffers that fail to detach here will be scheduled for detach in the
+ // input buffer queue and the rest of the registered outputs instead.
+ // This will help ensure that camera stops accessing buffers that still
+ // can get referenced by the disconnected output.
+ mDetachedBuffers.emplace(outputSlots[i]->getId());
+ }
+ }
+ }
+ mOutputs[surfaceId] = nullptr;
+ mOutputSurfaces[surfaceId] = nullptr;
+ mOutputSlots[gbp] = nullptr;
+ for (const auto& id : pendingBufferIds) {
+ decrementBufRefCountLocked(id, surfaceId);
+ }
+
+ auto res = IInterface::asBinder(gbp)->unlinkToDeath(mNotifiers[gbp]);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to unlink producer death listener: %d ", __FUNCTION__, res);
+ return res;
+ }
+
+ res = gbp->disconnect(NATIVE_WINDOW_API_CAMERA);
+ if (res != OK) {
+ SP_LOGE("%s: Unable disconnect from producer interface: %d ", __FUNCTION__, res);
+ return res;
+ }
+
+ mNotifiers[gbp] = nullptr;
+ mMaxConsumerBuffers -= mConsumerBufferCount[surfaceId];
+ mConsumerBufferCount[surfaceId] = 0;
+
+ return res;
+}
+
+status_t DeprecatedCamera3StreamSplitter::outputBufferLocked(
+ const sp<IGraphicBufferProducer>& output, const BufferItem& bufferItem, size_t surfaceId) {
+ ATRACE_CALL();
+ status_t res;
+ IGraphicBufferProducer::QueueBufferInput queueInput(
+ bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp, bufferItem.mDataSpace,
+ bufferItem.mCrop, static_cast<int32_t>(bufferItem.mScalingMode), bufferItem.mTransform,
+ bufferItem.mFence);
+
+ IGraphicBufferProducer::QueueBufferOutput queueOutput;
+
+ uint64_t bufferId = bufferItem.mGraphicBuffer->getId();
+ const BufferTracker& tracker = *(mBuffers[bufferId]);
+ int slot = getSlotForOutputLocked(output, tracker.getBuffer());
+
+ if (mOutputSurfaces[surfaceId] != nullptr) {
+ sp<ANativeWindow> anw = mOutputSurfaces[surfaceId];
+ camera3::Camera3Stream::queueHDRMetadata(
+ bufferItem.mGraphicBuffer->getNativeBuffer()->handle, anw, mDynamicRangeProfile);
+ } else {
+ SP_LOGE("%s: Invalid surface id: %zu!", __FUNCTION__, surfaceId);
+ }
+
+ // In case the output BufferQueue has its own lock, if we hold splitter lock while calling
+ // queueBuffer (which will try to acquire the output lock), the output could be holding its
+ // own lock calling releaseBuffer (which will try to acquire the splitter lock), running into
+ // circular lock situation.
+ mMutex.unlock();
+ res = output->queueBuffer(slot, queueInput, &queueOutput);
+ mMutex.lock();
+
+ SP_LOGV("%s: Queuing buffer to buffer queue %p slot %d returns %d", __FUNCTION__, output.get(),
+ slot, res);
+ // During buffer queue 'mMutex' is not held which makes the removal of
+ // "output" possible. Check whether this is the case and return.
+ if (mOutputSlots[output] == nullptr) {
+ return res;
+ }
+ if (res != OK) {
+ if (res != NO_INIT && res != DEAD_OBJECT) {
+ SP_LOGE("Queuing buffer to output failed (%d)", res);
+ }
+ // If we just discovered that this output has been abandoned, note
+ // that, increment the release count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ decrementBufRefCountLocked(bufferItem.mGraphicBuffer->getId(), surfaceId);
+ return res;
+ }
+
+ // If the queued buffer replaces a pending buffer in the async
+ // queue, no onBufferReleased is called by the buffer queue.
+ // Proactively trigger the callback to avoid buffer loss.
+ if (queueOutput.bufferReplaced) {
+ onBufferReplacedLocked(output, surfaceId);
+ }
+
+ return res;
+}
+
+std::string DeprecatedCamera3StreamSplitter::getUniqueConsumerName() {
+ static volatile int32_t counter = 0;
+ return fmt::sprintf("DeprecatedCamera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t DeprecatedCamera3StreamSplitter::notifyBufferReleased(const sp<GraphicBuffer>& buffer) {
+ ATRACE_CALL();
+
+ Mutex::Autolock lock(mMutex);
+
+ uint64_t bufferId = buffer->getId();
+ std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[bufferId]);
+ mBuffers.erase(bufferId);
+
+ return OK;
+}
+
+status_t DeprecatedCamera3StreamSplitter::attachBufferToOutputs(
+ ANativeWindowBuffer* anb, const std::vector<size_t>& surface_ids) {
+ ATRACE_CALL();
+ status_t res = OK;
+
+ Mutex::Autolock lock(mMutex);
+
+ sp<GraphicBuffer> gb(static_cast<GraphicBuffer*>(anb));
+ uint64_t bufferId = gb->getId();
+
+ // Initialize buffer tracker for this input buffer
+ auto tracker = std::make_unique<BufferTracker>(gb, surface_ids);
+
+ for (auto& surface_id : surface_ids) {
+ sp<IGraphicBufferProducer>& gbp = mOutputs[surface_id];
+ if (gbp.get() == nullptr) {
+ // Output surface got likely removed by client.
+ continue;
+ }
+ int slot = getSlotForOutputLocked(gbp, gb);
+ if (slot != BufferItem::INVALID_BUFFER_SLOT) {
+ // Buffer is already attached to this output surface.
+ continue;
+ }
+ // Temporarily unlock the mutex when trying to attachBuffer to the output
+ // queue, because attachBuffer could block in case of a slow consumer. If
+ // we block while holding the lock, onFrameAvailable and onBufferReleased
+ // will block as well because they need to acquire the same lock.
+ mMutex.unlock();
+ res = gbp->attachBuffer(&slot, gb);
+ mMutex.lock();
+ if (res != OK) {
+ SP_LOGE("%s: Cannot attachBuffer from GraphicBufferProducer %p: %s (%d)", __FUNCTION__,
+ gbp.get(), strerror(-res), res);
+ // TODO: might need to detach/cleanup the already attached buffers before return?
+ return res;
+ }
+ if ((slot < 0) || (slot > BufferQueue::NUM_BUFFER_SLOTS)) {
+ SP_LOGE("%s: Slot received %d either bigger than expected maximum %d or negative!",
+ __FUNCTION__, slot, BufferQueue::NUM_BUFFER_SLOTS);
+ return BAD_VALUE;
+ }
+ // During buffer attach 'mMutex' is not held which makes the removal of
+ // "gbp" possible. Check whether this is the case and continue.
+ if (mOutputSlots[gbp] == nullptr) {
+ continue;
+ }
+ auto& outputSlots = *mOutputSlots[gbp];
+ if (static_cast<size_t>(slot + 1) > outputSlots.size()) {
+ outputSlots.resize(slot + 1);
+ }
+ if (outputSlots[slot] != nullptr) {
+ // If the buffer is attached to a slot which already contains a buffer,
+ // the previous buffer will be removed from the output queue. Decrement
+ // the reference count accordingly.
+ decrementBufRefCountLocked(outputSlots[slot]->getId(), surface_id);
+ }
+ SP_LOGV("%s: Attached buffer %p to slot %d on output %p.", __FUNCTION__, gb.get(), slot,
+ gbp.get());
+ outputSlots[slot] = gb;
+ }
+
+ mBuffers[bufferId] = std::move(tracker);
+
+ return res;
+}
+
+void DeprecatedCamera3StreamSplitter::onFrameAvailable(const BufferItem& /*item*/) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ // Acquire and detach the buffer from the input
+ BufferItem bufferItem;
+ status_t res = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+ if (res != NO_ERROR) {
+ SP_LOGE("%s: Acquiring buffer from input failed (%d)", __FUNCTION__, res);
+ mOnFrameAvailableRes.store(res);
+ return;
+ }
+
+ uint64_t bufferId;
+ if (bufferItem.mGraphicBuffer != nullptr) {
+ mInputSlots[bufferItem.mSlot] = bufferItem;
+ } else if (bufferItem.mAcquireCalled) {
+ bufferItem.mGraphicBuffer = mInputSlots[bufferItem.mSlot].mGraphicBuffer;
+ mInputSlots[bufferItem.mSlot].mFrameNumber = bufferItem.mFrameNumber;
+ } else {
+ SP_LOGE("%s: Invalid input graphic buffer!", __FUNCTION__);
+ mOnFrameAvailableRes.store(BAD_VALUE);
+ return;
+ }
+ bufferId = bufferItem.mGraphicBuffer->getId();
+
+ if (mBuffers.find(bufferId) == mBuffers.end()) {
+ SP_LOGE("%s: Acquired buffer doesn't exist in attached buffer map", __FUNCTION__);
+ mOnFrameAvailableRes.store(INVALID_OPERATION);
+ return;
+ }
+
+ mAcquiredInputBuffers++;
+ SP_LOGV("acquired buffer %" PRId64 " from input at slot %d", bufferItem.mGraphicBuffer->getId(),
+ bufferItem.mSlot);
+
+ if (bufferItem.mTransformToDisplayInverse) {
+ bufferItem.mTransform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
+ }
+
+ // Attach and queue the buffer to each of the outputs
+ BufferTracker& tracker = *(mBuffers[bufferId]);
+
+ SP_LOGV("%s: BufferTracker for buffer %" PRId64 ", number of requests %zu", __FUNCTION__,
+ bufferItem.mGraphicBuffer->getId(), tracker.requestedSurfaces().size());
+ for (const auto id : tracker.requestedSurfaces()) {
+ if (mOutputs[id] == nullptr) {
+ // Output surface got likely removed by client.
+ continue;
+ }
+
+ res = outputBufferLocked(mOutputs[id], bufferItem, id);
+ if (res != OK) {
+ SP_LOGE("%s: outputBufferLocked failed %d", __FUNCTION__, res);
+ mOnFrameAvailableRes.store(res);
+ // If we fail to send buffer to certain output, keep sending to
+ // other outputs.
+ continue;
+ }
+ }
+
+ mOnFrameAvailableRes.store(res);
+}
+
+void DeprecatedCamera3StreamSplitter::onFrameReplaced(const BufferItem& item) {
+ ATRACE_CALL();
+ onFrameAvailable(item);
+}
+
+void DeprecatedCamera3StreamSplitter::decrementBufRefCountLocked(uint64_t id, size_t surfaceId) {
+ ATRACE_CALL();
+
+ if (mBuffers[id] == nullptr) {
+ return;
+ }
+
+ size_t referenceCount = mBuffers[id]->decrementReferenceCountLocked(surfaceId);
+ if (referenceCount > 0) {
+ return;
+ }
+
+ // We no longer need to track the buffer now that it is being returned to the
+ // input. Note that this should happen before we unlock the mutex and call
+ // releaseBuffer, to avoid the case where the same bufferId is acquired in
+ // attachBufferToOutputs resulting in a new BufferTracker with same bufferId
+ // overwrites the current one.
+ std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[id]);
+ mBuffers.erase(id);
+
+ uint64_t bufferId = tracker_ptr->getBuffer()->getId();
+ int consumerSlot = -1;
+ uint64_t frameNumber;
+ auto inputSlot = mInputSlots.begin();
+ for (; inputSlot != mInputSlots.end(); inputSlot++) {
+ if (inputSlot->second.mGraphicBuffer->getId() == bufferId) {
+ consumerSlot = inputSlot->second.mSlot;
+ frameNumber = inputSlot->second.mFrameNumber;
+ break;
+ }
+ }
+ if (consumerSlot == -1) {
+ SP_LOGE("%s: Buffer missing inside input slots!", __FUNCTION__);
+ return;
+ }
+
+ auto detachBuffer = mDetachedBuffers.find(bufferId);
+ bool detach = (detachBuffer != mDetachedBuffers.end());
+ if (detach) {
+ mDetachedBuffers.erase(detachBuffer);
+ mInputSlots.erase(inputSlot);
+ }
+ // Temporarily unlock mutex to avoid circular lock:
+ // 1. This function holds splitter lock, calls releaseBuffer which triggers
+ // onBufferReleased in Camera3OutputStream. onBufferReleased waits on the
+ // OutputStream lock
+ // 2. Camera3SharedOutputStream::getBufferLocked calls
+ // attachBufferToOutputs, which holds the stream lock, and waits for the
+ // splitter lock.
+ sp<IGraphicBufferConsumer> consumer(mConsumer);
+ mMutex.unlock();
+ int res = NO_ERROR;
+ if (consumer != nullptr) {
+ if (detach) {
+ res = consumer->detachBuffer(consumerSlot);
+ } else {
+ res = consumer->releaseBuffer(consumerSlot, frameNumber, EGL_NO_DISPLAY,
+ EGL_NO_SYNC_KHR, tracker_ptr->getMergedFence());
+ }
+ } else {
+ SP_LOGE("%s: consumer has become null!", __FUNCTION__);
+ }
+ mMutex.lock();
+
+ if (res != NO_ERROR) {
+ if (detach) {
+ SP_LOGE("%s: detachBuffer returns %d", __FUNCTION__, res);
+ } else {
+ SP_LOGE("%s: releaseBuffer returns %d", __FUNCTION__, res);
+ }
+ } else {
+ if (mAcquiredInputBuffers == 0) {
+ ALOGW("%s: Acquired input buffer count already at zero!", __FUNCTION__);
+ } else {
+ mAcquiredInputBuffers--;
+ }
+ }
+}
+
+void DeprecatedCamera3StreamSplitter::onBufferReleasedByOutput(
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ sp<Fence> fence;
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ auto res = from->dequeueBuffer(&slot, &fence, mWidth, mHeight, mFormat, mProducerUsage, nullptr,
+ nullptr);
+ Mutex::Autolock lock(mMutex);
+ handleOutputDequeueStatusLocked(res, slot);
+ if (res != OK) {
+ return;
+ }
+
+ size_t surfaceId = 0;
+ bool found = false;
+ for (const auto& it : mOutputs) {
+ if (it.second == from) {
+ found = true;
+ surfaceId = it.first;
+ break;
+ }
+ }
+ if (!found) {
+ SP_LOGV("%s: output surface not registered anymore!", __FUNCTION__);
+ return;
+ }
+
+ returnOutputBufferLocked(fence, from, surfaceId, slot);
+}
+
+void DeprecatedCamera3StreamSplitter::onBufferReplacedLocked(const sp<IGraphicBufferProducer>& from,
+ size_t surfaceId) {
+ ATRACE_CALL();
+ sp<Fence> fence;
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ auto res = from->dequeueBuffer(&slot, &fence, mWidth, mHeight, mFormat, mProducerUsage, nullptr,
+ nullptr);
+ handleOutputDequeueStatusLocked(res, slot);
+ if (res != OK) {
+ return;
+ }
+
+ returnOutputBufferLocked(fence, from, surfaceId, slot);
+}
+
+void DeprecatedCamera3StreamSplitter::returnOutputBufferLocked(
+ const sp<Fence>& fence, const sp<IGraphicBufferProducer>& from, size_t surfaceId,
+ int slot) {
+ sp<GraphicBuffer> buffer;
+
+ if (mOutputSlots[from] == nullptr) {
+ // Output surface got likely removed by client.
+ return;
+ }
+
+ auto outputSlots = *mOutputSlots[from];
+ buffer = outputSlots[slot];
+ BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+ // Merge the release fence of the incoming buffer so that the fence we send
+ // back to the input includes all of the outputs' fences
+ if (fence != nullptr && fence->isValid()) {
+ tracker.mergeFence(fence);
+ }
+
+ auto detachBuffer = mDetachedBuffers.find(buffer->getId());
+ bool detach = (detachBuffer != mDetachedBuffers.end());
+ if (detach) {
+ auto res = from->detachBuffer(slot);
+ if (res == NO_ERROR) {
+ outputSlots[slot] = nullptr;
+ } else {
+ SP_LOGE("%s: detach buffer from output failed (%d)", __FUNCTION__, res);
+ }
+ }
+
+ // Check to see if this is the last outstanding reference to this buffer
+ decrementBufRefCountLocked(buffer->getId(), surfaceId);
+}
+
+void DeprecatedCamera3StreamSplitter::handleOutputDequeueStatusLocked(status_t res, int slot) {
+ if (res == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note that,
+ // but we can't do anything else, since buffer is invalid
+ onAbandonedLocked();
+ } else if (res == IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
+ SP_LOGE("%s: Producer needs to re-allocate buffer!", __FUNCTION__);
+ SP_LOGE("%s: This should not happen with buffer allocation disabled!", __FUNCTION__);
+ } else if (res == IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
+ SP_LOGE("%s: All slot->buffer mapping should be released!", __FUNCTION__);
+ SP_LOGE("%s: This should not happen with buffer allocation disabled!", __FUNCTION__);
+ } else if (res == NO_MEMORY) {
+ SP_LOGE("%s: No free buffers", __FUNCTION__);
+ } else if (res == WOULD_BLOCK) {
+ SP_LOGE("%s: Dequeue call will block", __FUNCTION__);
+ } else if (res != OK || (slot == BufferItem::INVALID_BUFFER_SLOT)) {
+ SP_LOGE("%s: dequeue buffer from output failed (%d)", __FUNCTION__, res);
+ }
+}
+
+void DeprecatedCamera3StreamSplitter::onAbandonedLocked() {
+ // If this is called from binderDied callback, it means the app process
+ // holding the binder has died. CameraService will be notified of the binder
+ // death, and camera device will be closed, which in turn calls
+ // disconnect().
+ //
+ // If this is called from onBufferReleasedByOutput or onFrameAvailable, one
+ // consumer being abandoned shouldn't impact the other consumer. So we won't
+ // stop the buffer flow.
+ //
+ // In both cases, we don't need to do anything here.
+ SP_LOGV("One of my outputs has abandoned me");
+}
+
+int DeprecatedCamera3StreamSplitter::getSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
+ const sp<GraphicBuffer>& gb) {
+ auto& outputSlots = *mOutputSlots[gbp];
+
+ for (size_t i = 0; i < outputSlots.size(); i++) {
+ if (outputSlots[i] == gb) {
+ return (int)i;
+ }
+ }
+
+ SP_LOGV("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(), gbp.get());
+ return BufferItem::INVALID_BUFFER_SLOT;
+}
+
+DeprecatedCamera3StreamSplitter::OutputListener::OutputListener(
+ wp<DeprecatedCamera3StreamSplitter> splitter, wp<IGraphicBufferProducer> output)
+ : mSplitter(splitter), mOutput(output) {}
+
+void DeprecatedCamera3StreamSplitter::OutputListener::onBufferReleased() {
+ ATRACE_CALL();
+ sp<DeprecatedCamera3StreamSplitter> splitter = mSplitter.promote();
+ sp<IGraphicBufferProducer> output = mOutput.promote();
+ if (splitter != nullptr && output != nullptr) {
+ splitter->onBufferReleasedByOutput(output);
+ }
+}
+
+void DeprecatedCamera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+ sp<DeprecatedCamera3StreamSplitter> splitter = mSplitter.promote();
+ if (splitter != nullptr) {
+ Mutex::Autolock lock(splitter->mMutex);
+ splitter->onAbandonedLocked();
+ }
+}
+
+DeprecatedCamera3StreamSplitter::BufferTracker::BufferTracker(
+ const sp<GraphicBuffer>& buffer, const std::vector<size_t>& requestedSurfaces)
+ : mBuffer(buffer),
+ mMergedFence(Fence::NO_FENCE),
+ mRequestedSurfaces(requestedSurfaces),
+ mReferenceCount(requestedSurfaces.size()) {}
+
+void DeprecatedCamera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+ mMergedFence = Fence::merge(String8("DeprecatedCamera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t DeprecatedCamera3StreamSplitter::BufferTracker::decrementReferenceCountLocked(
+ size_t surfaceId) {
+ const auto& it = std::find(mRequestedSurfaces.begin(), mRequestedSurfaces.end(), surfaceId);
+ if (it == mRequestedSurfaces.end()) {
+ return mReferenceCount;
+ } else {
+ mRequestedSurfaces.erase(it);
+ }
+
+ if (mReferenceCount > 0) --mReferenceCount;
+ return mReferenceCount;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.h b/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.h
new file mode 100644
index 0000000..4610985
--- /dev/null
+++ b/services/camera/libcameraservice/device3/deprecated/DeprecatedCamera3StreamSplitter.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <unordered_set>
+
+#include <camera/CameraMetadata.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/IConsumerListener.h>
+#include <gui/Surface.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+#define SP_LOGV(x, ...) ALOGV("[%s] " x, mConsumerName.c_str(), ##__VA_ARGS__)
+#define SP_LOGI(x, ...) ALOGI("[%s] " x, mConsumerName.c_str(), ##__VA_ARGS__)
+#define SP_LOGW(x, ...) ALOGW("[%s] " x, mConsumerName.c_str(), ##__VA_ARGS__)
+#define SP_LOGE(x, ...) ALOGE("[%s] " x, mConsumerName.c_str(), ##__VA_ARGS__)
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// DeprecatedCamera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class DeprecatedCamera3StreamSplitter : public BnConsumerListener {
+ public:
+ // Constructor. When useHalBufManager is true, buffer attach operations use
+ // the much shorter dequeue timeout (see kHalBufMgrDequeueBufferTimeout).
+ DeprecatedCamera3StreamSplitter(bool useHalBufManager = false);
+
+ // Connect to the stream splitter by creating buffer queue and connecting it
+ // with output surfaces.
+ status_t connect(const std::unordered_map<size_t, sp<Surface>>& surfaces,
+ uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers,
+ uint32_t width, uint32_t height, android::PixelFormat format,
+ sp<Surface>* consumer, int64_t dynamicRangeProfile);
+
+ // addOutput adds an output BufferQueue to the splitter. The splitter
+ // connects to outputQueue as a CPU producer, and any buffers queued
+ // to the input will be queued to each output. If any output is abandoned
+ // by its consumer, the splitter will abandon its input queue (see onAbandoned).
+ //
+ // A return value other than NO_ERROR means that an error has occurred and
+ // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+ // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+ // of other error codes.
+ status_t addOutput(size_t surfaceId, const sp<Surface>& outputQueue);
+
+ // removeOutput will remove a BufferQueue that was previously added to
+ // the splitter outputs. Any pending buffers in the BufferQueue will get
+ // reclaimed.
+ status_t removeOutput(size_t surfaceId);
+
+ // Notification that the graphic buffer has been released to the input
+ // BufferQueue. The buffer should be reused by the camera device instead of
+ // queuing to the outputs.
+ status_t notifyBufferReleased(const sp<GraphicBuffer>& buffer);
+
+ // Attach a buffer to the specified outputs. This call reserves a buffer
+ // slot in the output queue.
+ status_t attachBufferToOutputs(ANativeWindowBuffer* anb,
+ const std::vector<size_t>& surface_ids);
+
+ // Get return value of onFrameAvailable to work around problem that
+ // onFrameAvailable is void. This function should be called by the producer
+ // right after calling queueBuffer().
+ status_t getOnFrameAvailableResult();
+
+ // Disconnect the buffer queue from output surfaces.
+ void disconnect();
+
+ // Enable or disable HAL buffer manager mode after construction.
+ void setHalBufferManager(bool enabled);
+
+ private:
+ // From IConsumerListener
+ //
+ // During this callback, we store some tracking information, detach the
+ // buffer from the input, and attach it to each of the outputs. This call
+ // can block if there are too many outstanding buffers. If it blocks, it
+ // will resume when onBufferReleasedByOutput releases a buffer back to the
+ // input.
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // From IConsumerListener
+ //
+ // Similar to onFrameAvailable, but buffer item is indeed replacing a buffer
+ // in the buffer queue. This can happen when buffer queue is in droppable
+ // mode.
+ void onFrameReplaced(const BufferItem& item) override;
+
+ // From IConsumerListener
+ // We don't care about released buffers because we detach each buffer as
+ // soon as we acquire it. See the comment for onBufferReleased below for
+ // some clarifying notes about the name.
+ void onBuffersReleased() override {}
+
+ // From IConsumerListener
+ // We don't care about sideband streams, since we won't be splitting them
+ void onSidebandStreamChanged() override {}
+
+ // This is the implementation of the onBufferReleased callback from
+ // IProducerListener. It gets called from an OutputListener (see below), and
+ // 'from' is which producer interface from which the callback was received.
+ //
+ // During this callback, we detach the buffer from the output queue that
+ // generated the callback, update our state tracking to see if this is the
+ // last output releasing the buffer, and if so, release it to the input.
+ // If we release the buffer to the input, we allow a blocked
+ // onFrameAvailable call to proceed.
+ void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+ // Called by outputBufferLocked when a buffer in the async buffer queue got replaced.
+ void onBufferReplacedLocked(const sp<IGraphicBufferProducer>& from, size_t surfaceId);
+
+ // When this is called, the splitter disconnects from (i.e., abandons) its
+ // input queue and signals any waiting onFrameAvailable calls to wake up.
+ // It still processes callbacks from other outputs, but only detaches their
+ // buffers so they can continue operating until they run out of buffers to
+ // acquire. This must be called with mMutex locked.
+ void onAbandonedLocked();
+
+ // Decrement the buffer's reference count. Once the reference count becomes
+ // 0, return the buffer back to the input BufferQueue.
+ void decrementBufRefCountLocked(uint64_t id, size_t surfaceId);
+
+ // Check for and handle any output surface dequeue errors.
+ void handleOutputDequeueStatusLocked(status_t res, int slot);
+
+ // Handles released output surface buffers.
+ void returnOutputBufferLocked(const sp<Fence>& fence, const sp<IGraphicBufferProducer>& from,
+ size_t surfaceId, int slot);
+
+ // This is a thin wrapper class that lets us determine which BufferQueue
+ // the IProducerListener::onBufferReleased callback is associated with. We
+ // create one of these per output BufferQueue, and then pass the producer
+ // into onBufferReleasedByOutput above.
+ class OutputListener : public SurfaceListener, public IBinder::DeathRecipient {
+ public:
+ OutputListener(wp<DeprecatedCamera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output);
+ virtual ~OutputListener() = default;
+
+ // From IProducerListener
+ void onBufferReleased() override;
+ bool needsReleaseNotify() override { return true; };
+ void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& /*buffers*/) override {};
+ void onBufferDetached(int /*slot*/) override {}
+
+ // From IBinder::DeathRecipient
+ void binderDied(const wp<IBinder>& who) override;
+
+ private:
+ wp<DeprecatedCamera3StreamSplitter> mSplitter;
+ wp<IGraphicBufferProducer> mOutput;
+ };
+
+ // Per-input-buffer bookkeeping: which surfaces still hold the buffer and
+ // the merged release fence accumulated from the outputs.
+ class BufferTracker {
+ public:
+ BufferTracker(const sp<GraphicBuffer>& buffer,
+ const std::vector<size_t>& requestedSurfaces);
+ ~BufferTracker() = default;
+
+ const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+ const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+ void mergeFence(const sp<Fence>& with);
+
+ // Returns the new value
+ // Only called while mMutex is held
+ size_t decrementReferenceCountLocked(size_t surfaceId);
+
+ // NOTE(review): returns by value, so every call copies the vector; a
+ // const& return would avoid the copy — confirm callers do not depend on
+ // snapshot semantics before changing.
+ const std::vector<size_t> requestedSurfaces() const { return mRequestedSurfaces; }
+
+ private:
+ // Disallow copying
+ BufferTracker(const BufferTracker& other);
+ BufferTracker& operator=(const BufferTracker& other);
+
+ sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+ sp<Fence> mMergedFence;
+
+ // Requested surfaces for this particular buffer. When the buffer becomes
+ // available from the input queue, the registered surfaces are used to
+ // decide which outputs the buffer is sent to.
+ std::vector<size_t> mRequestedSurfaces;
+ size_t mReferenceCount;
+ };
+
+ // Must be accessed through RefBase
+ virtual ~DeprecatedCamera3StreamSplitter();
+
+ status_t addOutputLocked(size_t surfaceId, const sp<Surface>& outputQueue);
+
+ status_t removeOutputLocked(size_t surfaceId);
+
+ // Send a buffer to particular output, and increment the reference count
+ // of the buffer. If this output is abandoned, the buffer's reference count
+ // won't be incremented.
+ status_t outputBufferLocked(const sp<IGraphicBufferProducer>& output,
+ const BufferItem& bufferItem, size_t surfaceId);
+
+ // Get unique name for the buffer queue consumer
+ std::string getUniqueConsumerName();
+
+ // Helper function to get the BufferQueue slot where a particular buffer is attached to.
+ int getSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp, const sp<GraphicBuffer>& gb);
+
+ // Sum of max consumer buffers for all outputs
+ size_t mMaxConsumerBuffers = 0;
+ size_t mMaxHalBuffers = 0;
+ uint32_t mWidth = 0;
+ uint32_t mHeight = 0;
+ android::PixelFormat mFormat = android::PIXEL_FORMAT_NONE;
+ uint64_t mProducerUsage = 0;
+ // NOTE(review): connect() takes an int64_t dynamicRangeProfile but this
+ // member is an int — confirm all profile values fit without narrowing.
+ int mDynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+
+ // The attachBuffer call happens on a different thread depending on
+ // mUseHalBufManager, and is therefore subject to a different timing
+ // constraint; hence the two timeouts below.
+ static const nsecs_t kNormalDequeueBufferTimeout = s2ns(1); // 1 sec
+ static const nsecs_t kHalBufMgrDequeueBufferTimeout = ms2ns(1); // 1 msec
+
+ Mutex mMutex;
+
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
+ sp<Surface> mSurface;
+
+ // Map graphic buffer ids -> buffer items
+ std::unordered_map<uint64_t, BufferItem> mInputSlots;
+
+ // Map surface ids -> gbp outputs
+ std::unordered_map<int, sp<IGraphicBufferProducer>> mOutputs;
+
+ // Map surface ids -> output Surfaces
+ std::unordered_map<int, sp<Surface>> mOutputSurfaces;
+
+ // Map surface ids -> consumer buffer count
+ std::unordered_map<int, size_t> mConsumerBufferCount;
+
+ // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+ // objects (which are mostly for counting how many outputs have released the
+ // buffer, but also contain merged release fences).
+ std::unordered_map<uint64_t, std::unique_ptr<BufferTracker>> mBuffers;
+
+ // Hash functor so sp<IGraphicBufferProducer> can key an unordered_map;
+ // hashes the raw pointer identity, not the object contents.
+ struct GBPHash {
+ std::size_t operator()(const sp<IGraphicBufferProducer>& producer) const {
+ return std::hash<IGraphicBufferProducer*>{}(producer.get());
+ }
+ };
+
+ std::unordered_map<sp<IGraphicBufferProducer>, sp<OutputListener>, GBPHash> mNotifiers;
+
+ typedef std::vector<sp<GraphicBuffer>> OutputSlots;
+ std::unordered_map<sp<IGraphicBufferProducer>, std::unique_ptr<OutputSlots>, GBPHash>
+ mOutputSlots;
+
+ // A set of buffers that could potentially stay in some of the outputs after removal
+ // and therefore should be detached from the input queue.
+ std::unordered_set<uint64_t> mDetachedBuffers;
+
+ // Latest onFrameAvailable return value
+ std::atomic<status_t> mOnFrameAvailableRes{0};
+
+ // Currently acquired input buffers
+ // NOTE(review): no in-class initializer — presumably zeroed in the
+ // constructor; verify.
+ size_t mAcquiredInputBuffers;
+
+ std::string mConsumerName;
+
+ bool mUseHalBufManager;
+};
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/tests/Camera3StreamSplitterTest.cpp b/services/camera/libcameraservice/tests/Camera3StreamSplitterTest.cpp
index 3d49ae5..5e32482 100644
--- a/services/camera/libcameraservice/tests/Camera3StreamSplitterTest.cpp
+++ b/services/camera/libcameraservice/tests/Camera3StreamSplitterTest.cpp
@@ -17,10 +17,9 @@
#define LOG_TAG "Camera3StreamSplitterTest"
// #define LOG_NDEBUG 0
-#include "../device3/Camera3StreamSplitter.h"
-
#include <android/hardware_buffer.h>
#include <com_android_graphics_libgui_flags.h>
+#include <com_android_internal_camera_flags.h>
#include <gui/BufferItemConsumer.h>
#include <gui/IGraphicBufferConsumer.h>
#include <gui/IGraphicBufferProducer.h>
@@ -35,6 +34,14 @@
#include <gtest/gtest.h>
+#include "../device3/Flags.h"
+
+#if USE_NEW_STREAM_SPLITTER
+#include "../device3/Camera3StreamSplitter.h"
+#else
+#include "../device3/deprecated/DeprecatedCamera3StreamSplitter.h"
+#endif // USE_NEW_STREAM_SPLITTER
+
using namespace android;
namespace {
@@ -62,10 +69,20 @@
class Camera3StreamSplitterTest : public testing::Test {
 public:
- void SetUp() override { mSplitter = sp<Camera3StreamSplitter>::make(); }
+ // Instantiate whichever splitter implementation the build-time flag
+ // selects, so one suite exercises both the new and the deprecated splitter.
+ void SetUp() override {
+#if USE_NEW_STREAM_SPLITTER
+ mSplitter = sp<Camera3StreamSplitter>::make();
+#else
+ mSplitter = sp<DeprecatedCamera3StreamSplitter>::make();
+#endif // USE_NEW_STREAM_SPLITTER
+ }
 protected:
+#if USE_NEW_STREAM_SPLITTER
 sp<Camera3StreamSplitter> mSplitter;
+#else
+ sp<DeprecatedCamera3StreamSplitter> mSplitter;
+#endif // USE_NEW_STREAM_SPLITTER
};
class TestSurfaceListener : public SurfaceListener {
@@ -102,7 +119,7 @@
} // namespace
-TEST_F(Camera3StreamSplitterTest, TestWithoutSurfaces_NoBuffersConsumed) {
+TEST_F(Camera3StreamSplitterTest, WithoutSurfaces_NoBuffersConsumed) {
sp<Surface> consumer;
EXPECT_EQ(OK, mSplitter->connect({}, kConsumerUsage, kProducerUsage, kHalMaxBuffers, kWidth,
kHeight, kFormat, &consumer, kDynamicRangeProfile));
diff --git a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
index 673c149..75cf21d 100644
--- a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
+++ b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
@@ -78,30 +78,30 @@
}
TEST(DepthProcessorTest, BadInput) {
+ static const size_t badInputBufferWidth = 17;
+ static const size_t badInputBufferHeight = 3;
+ static const size_t badInputJpegSize = 63;
+ static const size_t badInputBufferDepthSize = (badInputBufferWidth * badInputBufferHeight);
int jpegQuality = 95;
DepthPhotoInputFrame inputFrame;
+ std::vector<uint8_t> colorJpegBuffer(badInputJpegSize);
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
// Worst case both depth and confidence maps have the same size as the main color image.
inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
- std::vector<uint8_t> colorJpegBuffer;
- generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
- /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
-
- std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
- generateDepth16Buffer(&depth16Buffer);
+ std::array<uint16_t, badInputBufferDepthSize> depth16Buffer;
std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
size_t actualDepthPhotoSize = 0;
- inputFrame.mMainJpegWidth = kTestBufferWidth;
- inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mMainJpegWidth = badInputBufferWidth;
+ inputFrame.mMainJpegHeight = badInputBufferHeight;
inputFrame.mJpegQuality = jpegQuality;
ASSERT_NE(processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
&actualDepthPhotoSize), 0);
inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
- inputFrame.mMainJpegSize = colorJpegBuffer.size();
ASSERT_NE(processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
&actualDepthPhotoSize), 0);
@@ -113,6 +113,9 @@
ASSERT_NE(processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
nullptr), 0);
+
+ ASSERT_NE(processDepthPhotoFrame(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
}
TEST(DepthProcessorTest, BasicDepthPhotoValidation) {
diff --git a/services/camera/virtualcamera/util/Util.cc b/services/camera/virtualcamera/util/Util.cc
index 4aff60f..26015d1 100644
--- a/services/camera/virtualcamera/util/Util.cc
+++ b/services/camera/virtualcamera/util/Util.cc
@@ -89,13 +89,16 @@
return;
}
- const int32_t rawFence = fence != nullptr ? fence->get() : -1;
+ const int32_t rawFence = fence != nullptr ? dup(fence->get()) : -1;
mLockStatus = static_cast<status_t>(AHardwareBuffer_lockPlanes(
hwBuffer.get(), usageFlags, rawFence, nullptr, &mPlanes));
if (mLockStatus != OK) {
ALOGE("%s: Failed to lock graphic buffer: %s", __func__,
statusToString(mLockStatus).c_str());
}
+ if (rawFence >= 0) {
+ close(rawFence);
+ }
}
PlanesLockGuard::~PlanesLockGuard() {