[automerger skipped] Merge "Camera: listen to provider instance names from hwservicemanager" into qt-qpr1-dev am: 3ef81e1acc
am: 022afb0ad8 -s ours
am skip reason: change_id Ib57fd84ad8f22aac2a82920e03148cff2592daae with SHA1 177b0c1ed3 is in history
Change-Id: I3261f16e3ae76838019caa45012937b3b0ee65c7
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 8dd6e00..4a801a7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1126,10 +1126,17 @@
* </ul>
* <p>For devices at the LIMITED level or above:</p>
* <ul>
- * <li>For YUV_420_888 burst capture use case, this list will always include (<code>min</code>, <code>max</code>)
- * and (<code>max</code>, <code>max</code>) where <code>min</code> <= 15 and <code>max</code> = the maximum output frame rate of the
+ * <li>For devices that advertise NIR color filter arrangement in
+ * ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, this list will always include
+ * (<code>max</code>, <code>max</code>) where <code>max</code> = the maximum output frame rate of the maximum YUV_420_888
+ * output size.</li>
+ * <li>For devices advertising any color filter arrangement other than NIR, or devices not
+ * advertising color filter arrangement, this list will always include (<code>min</code>, <code>max</code>) and
+ * (<code>max</code>, <code>max</code>) where <code>min</code> <= 15 and <code>max</code> = the maximum output frame rate of the
* maximum YUV_420_888 output size.</li>
* </ul>
+ *
+ * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
*/
ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES = // int32[2*n]
ACAMERA_CONTROL_START + 20,
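Note: the documented guarantee above can be checked at runtime by reading the tag from the camera characteristics. A hedged sketch follows; the `chars` pointer (as obtained from ACameraManager_getCameraCharacteristics()) and the surrounding error handling are assumptions, not part of this change.

    // Hedged sketch (not part of this patch): enumerating the advertised
    // (min, max) AE target FPS ranges via the NDK, assuming a valid
    // ACameraMetadata* named `chars`.
    ACameraMetadata_const_entry entry;
    camera_status_t status = ACameraMetadata_getConstEntry(
            chars, ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, &entry);
    if (status == ACAMERA_OK) {
        for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
            int32_t fpsMin = entry.data.i32[i];
            int32_t fpsMax = entry.data.i32[i + 1];
            // Entries come in (min, max) pairs. On LIMITED+ devices (max, max)
            // is always present; non-NIR sensors additionally guarantee a
            // (min, max) range with min <= 15.
        }
    }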
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 8fe029a..f07a1e6 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -57,7 +57,7 @@
#include <algorithm>
using namespace android;
-using ::android::hardware::ICameraServiceDefault;
+using ::android::hardware::ICameraService;
using ::android::hardware::camera2::ICameraDeviceUser;
#define ASSERT_NOT_NULL(x) \
@@ -507,7 +507,7 @@
bool queryStatus;
res = device->isSessionConfigurationSupported(sessionConfiguration, &queryStatus);
EXPECT_TRUE(res.isOk() ||
- (res.serviceSpecificErrorCode() == ICameraServiceDefault::ERROR_INVALID_OPERATION))
+ (res.serviceSpecificErrorCode() == ICameraService::ERROR_INVALID_OPERATION))
<< res;
if (res.isOk()) {
EXPECT_TRUE(queryStatus);
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 7aa655f..98164fd 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -37,6 +37,7 @@
#include <binder/IPCThreadState.h>
#include <utils/Errors.h>
+#include <utils/SystemClock.h>
#include <utils/Timers.h>
#include <utils/Trace.h>
@@ -95,6 +96,8 @@
static const uint32_t kFallbackWidth = 1280; // 720p
static const uint32_t kFallbackHeight = 720;
static const char* kMimeTypeAvc = "video/avc";
+static const char* kMimeTypeApplicationOctetstream = "application/octet-stream";
+static const char* kWinscopeMagicString = "#VV1NSC0PET1ME!#";
// Command-line parameters.
static bool gVerbose = false; // chatty on stdout
@@ -350,6 +353,50 @@
}
/*
+ * Writes an unsigned integer byte-by-byte in little endian order regardless
+ * of the platform endianness.
+ */
+template <typename UINT>
+static void writeValueLE(UINT value, uint8_t* buffer) {
+ for (int i = 0; i < sizeof(UINT); ++i) {
+ buffer[i] = static_cast<uint8_t>(value);
+ value >>= 8;
+ }
+}
+
+/*
+ * Saves the frames' presentation times, relative to the elapsed realtime clock and in
+ * microseconds, preceded by a Winscope magic string and frame count, to a metadata track.
+ * This metadata is used by the Winscope tool to sync video with SurfaceFlinger
+ * and WindowManager traces.
+ *
+ * The metadata is written as a binary array as follows:
+ * - winscope magic string (kWinscopeMagicString constant), without trailing null char,
+ * - the number of recorded frames (as little endian uint32),
+ * - for every frame its presentation time relative to the elapsed realtime clock in microseconds
+ * (as little endian uint64).
+ */
+static status_t writeWinscopeMetadata(const Vector<int64_t>& timestamps,
+ const ssize_t metaTrackIdx, const sp<MediaMuxer>& muxer) {
+ ALOGV("Writing metadata");
+ int64_t systemTimeToElapsedTimeOffsetMicros = (android::elapsedRealtimeNano()
+ - systemTime(SYSTEM_TIME_MONOTONIC)) / 1000;
+ sp<ABuffer> buffer = new ABuffer(timestamps.size() * sizeof(int64_t)
+ + sizeof(uint32_t) + strlen(kWinscopeMagicString));
+ uint8_t* pos = buffer->data();
+ strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicString);
+ pos += strlen(kWinscopeMagicString);
+ writeValueLE<uint32_t>(timestamps.size(), pos);
+ pos += sizeof(uint32_t);
+ for (size_t idx = 0; idx < timestamps.size(); ++idx) {
+ writeValueLE<uint64_t>(static_cast<uint64_t>(timestamps[idx]
+ + systemTimeToElapsedTimeOffsetMicros), pos);
+ pos += sizeof(uint64_t);
+ }
+ return muxer->writeSampleData(buffer, metaTrackIdx, timestamps[0], 0);
+}
+
+/*
* Runs the MediaCodec encoder, sending the output to the MediaMuxer. The
* input frames are coming from the virtual display as fast as SurfaceFlinger
* wants to send them.
@@ -364,10 +411,12 @@
static int kTimeout = 250000; // be responsive on signal
status_t err;
ssize_t trackIdx = -1;
+ ssize_t metaTrackIdx = -1;
uint32_t debugNumFrames = 0;
int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
DisplayInfo mainDpyInfo;
+ Vector<int64_t> timestamps;
assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
@@ -465,6 +514,9 @@
"Failed writing data to muxer (err=%d)\n", err);
return err;
}
+ if (gOutputFormat == FORMAT_MP4) {
+ timestamps.add(ptsUsec);
+ }
}
debugNumFrames++;
}
@@ -491,6 +543,11 @@
encoder->getOutputFormat(&newFormat);
if (muxer != NULL) {
trackIdx = muxer->addTrack(newFormat);
+ if (gOutputFormat == FORMAT_MP4) {
+ sp<AMessage> metaFormat = new AMessage;
+ metaFormat->setString(KEY_MIME, kMimeTypeApplicationOctetstream);
+ metaTrackIdx = muxer->addTrack(metaFormat);
+ }
ALOGV("Starting muxer");
err = muxer->start();
if (err != NO_ERROR) {
@@ -527,6 +584,13 @@
systemTime(CLOCK_MONOTONIC) - startWhenNsec));
fflush(stdout);
}
+ if (metaTrackIdx >= 0 && !timestamps.isEmpty()) {
+ err = writeWinscopeMetadata(timestamps, metaTrackIdx, muxer);
+ if (err != NO_ERROR) {
+ fprintf(stderr, "Failed writing metadata to muxer (err=%d)\n", err);
+ return err;
+ }
+ }
return NO_ERROR;
}
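Note: the comment on writeWinscopeMetadata() above fixes the binary layout of the metadata sample: the magic string, a little-endian uint32 frame count, then one little-endian uint64 elapsed-realtime timestamp per frame. Below is a hedged stand-alone sketch of a reader for that layout, useful for sanity-checking the writer; the function names and use of plain C++ containers are assumptions, not part of this patch.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    static const char kMagic[] = "#VV1NSC0PET1ME!#";

    // Mirrors writeValueLE(): reads an unsigned integer stored little-endian.
    template <typename UINT>
    static UINT readValueLE(const uint8_t* buffer) {
        UINT value = 0;
        for (size_t i = 0; i < sizeof(UINT); ++i) {
            value |= static_cast<UINT>(buffer[i]) << (8 * i);
        }
        return value;
    }

    // Returns the per-frame timestamps (microseconds, elapsed-realtime base),
    // or an empty vector if the sample does not start with the magic string.
    static std::vector<uint64_t> parseWinscopeSample(const uint8_t* data, size_t size) {
        std::vector<uint64_t> timestamps;
        const size_t magicLen = strlen(kMagic);
        if (size < magicLen + sizeof(uint32_t) || memcmp(data, kMagic, magicLen) != 0) {
            return timestamps;
        }
        const uint8_t* pos = data + magicLen;
        const uint32_t count = readValueLE<uint32_t>(pos);
        pos += sizeof(uint32_t);
        for (uint32_t i = 0; i < count && pos + sizeof(uint64_t) <= data + size; ++i) {
            timestamps.push_back(readValueLE<uint64_t>(pos));
            pos += sizeof(uint64_t);
        }
        return timestamps;
    }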
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index d62ccd6..954608f 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -19,9 +19,9 @@
#include <utils/Log.h>
#include <android/hardware/drm/1.0/types.h>
-#include <android/hidl/manager/1.0/IServiceManager.h>
-
+#include <android/hidl/manager/1.2/IServiceManager.h>
#include <binder/IMemory.h>
+#include <hidl/ServiceManagement.h>
#include <hidlmemory/FrameworkUtils.h>
#include <media/hardware/CryptoAPI.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -47,7 +47,6 @@
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ::android::hidl::manager::V1_0::IServiceManager;
using ::android::sp;
typedef drm::V1_2::Status Status_V1_2;
@@ -129,9 +128,9 @@
Vector<sp<ICryptoFactory>> CryptoHal::makeCryptoFactories() {
Vector<sp<ICryptoFactory>> factories;
- auto manager = ::IServiceManager::getService();
+ auto manager = hardware::defaultServiceManager1_2();
if (manager != NULL) {
- manager->listByInterface(drm::V1_0::ICryptoFactory::descriptor,
+ manager->listManifestByInterface(drm::V1_0::ICryptoFactory::descriptor,
[&factories](const hidl_vec<hidl_string> ®istered) {
for (const auto &instance : registered) {
auto factory = drm::V1_0::ICryptoFactory::getService(instance);
@@ -142,7 +141,7 @@
}
}
);
- manager->listByInterface(drm::V1_1::ICryptoFactory::descriptor,
+ manager->listManifestByInterface(drm::V1_1::ICryptoFactory::descriptor,
[&factories](const hidl_vec<hidl_string> ®istered) {
for (const auto &instance : registered) {
auto factory = drm::V1_1::ICryptoFactory::getService(instance);
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 919f4ee..7cfe900 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -26,7 +26,6 @@
#include <android/hardware/drm/1.2/types.h>
#include <android/hidl/manager/1.2/IServiceManager.h>
#include <hidl/ServiceManagement.h>
-
#include <media/EventMetric.h>
#include <media/PluginMetricsReporting.h>
#include <media/drm/DrmAPI.h>
@@ -57,7 +56,6 @@
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ::android::hidl::manager::V1_0::IServiceManager;
using ::android::os::PersistableBundle;
using ::android::sp;
@@ -394,7 +392,7 @@
}
}
);
- manager->listByInterface(drm::V1_2::IDrmFactory::descriptor,
+ manager->listManifestByInterface(drm::V1_2::IDrmFactory::descriptor,
[&factories](const hidl_vec<hidl_string> ®istered) {
for (const auto &instance : registered) {
auto factory = drm::V1_2::IDrmFactory::getService(instance);
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
index de839c6..85ee950 120000
--- a/include/media/AudioMixer.h
+++ b/include/media/AudioMixer.h
@@ -1 +1 @@
-../../media/libaudioclient/include/media/AudioMixer.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index 779bb15..778e1d8 120000
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -1 +1 @@
-../../media/libmedia/include/media/BufferProviders.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 969f2ee..573c415 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -9,6 +9,7 @@
libaaudioservice \
libaudioflinger \
libaudiopolicyservice \
+ libaudioprocessing \
libbinder \
libcutils \
liblog \
diff --git a/media/bufferpool/1.0/AccessorImpl.cpp b/media/bufferpool/1.0/AccessorImpl.cpp
index fa17f15..6b90088 100644
--- a/media/bufferpool/1.0/AccessorImpl.cpp
+++ b/media/bufferpool/1.0/AccessorImpl.cpp
@@ -247,7 +247,7 @@
ALOGD("Destruction - bufferpool %p "
"cached: %zu/%zuM, %zu/%d%% in use; "
"allocs: %zu, %d%% recycled; "
- "transfers: %zu, %d%% unfetced",
+ "transfers: %zu, %d%% unfetched",
this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 94cf006..32eaae9 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -303,7 +303,7 @@
ALOGD("Destruction - bufferpool2 %p "
"cached: %zu/%zuM, %zu/%d%% in use; "
"allocs: %zu, %d%% recycled; "
- "transfers: %zu, %d%% unfetced",
+ "transfers: %zu, %d%% unfetched",
this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
diff --git a/media/bufferpool/2.0/ClientManager.cpp b/media/bufferpool/2.0/ClientManager.cpp
index c31d313..48c2da4 100644
--- a/media/bufferpool/2.0/ClientManager.cpp
+++ b/media/bufferpool/2.0/ClientManager.cpp
@@ -351,7 +351,17 @@
}
client = it->second;
}
- return client->allocate(params, handle, buffer);
+ native_handle_t *origHandle;
+ ResultStatus res = client->allocate(params, &origHandle, buffer);
+ if (res != ResultStatus::OK) {
+ return res;
+ }
+ *handle = native_handle_clone(origHandle);
+ if (*handle == NULL) {
+ buffer->reset();
+ return ResultStatus::NO_MEMORY;
+ }
+ return ResultStatus::OK;
}
ResultStatus ClientManager::Impl::receive(
@@ -367,7 +377,18 @@
}
client = it->second;
}
- return client->receive(transactionId, bufferId, timestampUs, handle, buffer);
+ native_handle_t *origHandle;
+ ResultStatus res = client->receive(
+ transactionId, bufferId, timestampUs, &origHandle, buffer);
+ if (res != ResultStatus::OK) {
+ return res;
+ }
+ *handle = native_handle_clone(origHandle);
+ if (*handle == NULL) {
+ buffer->reset();
+ return ResultStatus::NO_MEMORY;
+ }
+ return ResultStatus::OK;
}
ResultStatus ClientManager::Impl::postSend(
diff --git a/media/bufferpool/2.0/include/bufferpool/ClientManager.h b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
index 953c304..24b61f4 100644
--- a/media/bufferpool/2.0/include/bufferpool/ClientManager.h
+++ b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
@@ -104,7 +104,9 @@
ResultStatus flush(ConnectionId connectionId);
/**
- * Allocates a buffer from the specified connection.
+ * Allocates a buffer from the specified connection. The output parameter
+ * handle is cloned from the internal handle. So it is safe to use directly,
+ * and it should be deleted and destroyed after use.
*
* @param connectionId The id of the connection.
* @param params The allocation parameters.
@@ -123,7 +125,9 @@
std::shared_ptr<BufferPoolData> *buffer);
/**
- * Receives a buffer for the transaction.
+ * Receives a buffer for the transaction. The output parameter handle is
+ * cloned from the internal handle. So it is safe to use directly, and it
+ * should be deleted and destroyed after use.
*
* @param connectionId The id of the receiving connection.
* @param transactionId The id for the transaction.
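Note: the ownership contract documented above (the returned handle is a clone) implies the caller must release it explicitly. Below is a hedged caller-side sketch, assuming the surrounding bufferpool types and an existing `clientManager`, `connectionId`, and `params`; none of these names are introduced by this change.

    // Hedged caller-side sketch of the documented contract: allocate() hands
    // back a cloned native_handle_t, so the caller closes and deletes it.
    native_handle_t* handle = nullptr;
    std::shared_ptr<BufferPoolData> buffer;
    ResultStatus status = clientManager->allocate(connectionId, params, &handle, &buffer);
    if (status == ResultStatus::OK && handle != nullptr) {
        // ... import/use the handle ...
        native_handle_close(handle);   // close the duplicated file descriptors
        native_handle_delete(handle);  // free the cloned handle structure itself
    }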
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 769895c..0cf277f 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -340,6 +340,7 @@
aom_codec_flags_t flags;
memset(&flags, 0, sizeof(aom_codec_flags_t));
+ ALOGV("Using libaom AV1 software decoder.");
aom_codec_err_t err;
if ((err = aom_codec_dec_init(mCodecCtx, aom_codec_av1_dx(), &cfg, 0))) {
ALOGE("av1 decoder failed to initialize. (%d)", err);
diff --git a/media/codec2/components/flac/Android.bp b/media/codec2/components/flac/Android.bp
index e5eb51d..48cc51b 100644
--- a/media/codec2/components/flac/Android.bp
+++ b/media/codec2/components/flac/Android.bp
@@ -23,8 +23,11 @@
srcs: ["C2SoftFlacEnc.cpp"],
- static_libs: [
+ shared_libs: [
"libaudioutils",
+ ],
+
+ static_libs: [
"libFLAC",
],
}
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
new file mode 100644
index 0000000..0a0545d
--- /dev/null
+++ b/media/codec2/components/gav1/Android.bp
@@ -0,0 +1,14 @@
+cc_library_shared {
+ name: "libcodec2_soft_gav1dec",
+ defaults: [
+ "libcodec2_soft-defaults",
+ "libcodec2_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["C2SoftGav1Dec.cpp"],
+ static_libs: ["libgav1"],
+
+ include_dirs: [
+ "external/libgav1/libgav1/",
+ ],
+}
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
new file mode 100644
index 0000000..f5321ba
--- /dev/null
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -0,0 +1,791 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftGav1Dec"
+#include "C2SoftGav1Dec.h"
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+#include <log/log.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+namespace android {
+
+constexpr char COMPONENT_NAME[] = "c2.android.gav1.decoder";
+
+class C2SoftGav1Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
+ public:
+ explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+ : SimpleInterface<void>::BaseParams(
+ helper, COMPONENT_NAME, C2Component::KIND_DECODER,
+ C2Component::DOMAIN_VIDEO, MEDIA_MIMETYPE_VIDEO_AV1) {
+ noPrivateBuffers(); // TODO: account for our buffers here.
+ noInputReferences();
+ noOutputReferences();
+ noInputLatency();
+ noTimeStretch();
+
+ addParameter(DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
+ .withConstValue(new C2ComponentAttributesSetting(
+ C2Component::ATTRIB_IS_TEMPORAL))
+ .build());
+
+ addParameter(
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(2, 2048, 2),
+ C2F(mSize, height).inRange(2, 2048, 2),
+ })
+ .withSetter(SizeSetter)
+ .build());
+
+ addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::input(
+ 0u, C2Config::PROFILE_AV1_0, C2Config::LEVEL_AV1_2_1))
+ .withFields({C2F(mProfileLevel, profile)
+ .oneOf({C2Config::PROFILE_AV1_0,
+ C2Config::PROFILE_AV1_1}),
+ C2F(mProfileLevel, level)
+ .oneOf({
+ C2Config::LEVEL_AV1_2,
+ C2Config::LEVEL_AV1_2_1,
+ C2Config::LEVEL_AV1_2_2,
+ C2Config::LEVEL_AV1_3,
+ C2Config::LEVEL_AV1_3_1,
+ C2Config::LEVEL_AV1_3_2,
+ })})
+ .withSetter(ProfileLevelSetter, mSize)
+ .build());
+
+ mHdr10PlusInfoInput = C2StreamHdr10PlusInfo::input::AllocShared(0);
+ addParameter(
+ DefineParam(mHdr10PlusInfoInput, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO)
+ .withDefault(mHdr10PlusInfoInput)
+ .withFields({
+ C2F(mHdr10PlusInfoInput, m.value).any(),
+ })
+ .withSetter(Hdr10PlusInfoInputSetter)
+ .build());
+
+ mHdr10PlusInfoOutput = C2StreamHdr10PlusInfo::output::AllocShared(0);
+ addParameter(
+ DefineParam(mHdr10PlusInfoOutput, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO)
+ .withDefault(mHdr10PlusInfoOutput)
+ .withFields({
+ C2F(mHdr10PlusInfoOutput, m.value).any(),
+ })
+ .withSetter(Hdr10PlusInfoOutputSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
+ .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(2, 2048, 2),
+ C2F(mSize, height).inRange(2, 2048, 2),
+ })
+ .withSetter(MaxPictureSizeSetter, mSize)
+ .build());
+
+ addParameter(DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+ .withDefault(new C2StreamMaxBufferSizeInfo::input(
+ 0u, 320 * 240 * 3 / 4))
+ .withFields({
+ C2F(mMaxInputSize, value).any(),
+ })
+ .calculatedAs(MaxInputSizeSetter, mMaxSize)
+ .build());
+
+ C2ChromaOffsetStruct locations[1] = {C2ChromaOffsetStruct::ITU_YUV_420_0()};
+ std::shared_ptr<C2StreamColorInfo::output> defaultColorInfo =
+ C2StreamColorInfo::output::AllocShared(1u, 0u, 8u /* bitDepth */,
+ C2Color::YUV_420);
+ memcpy(defaultColorInfo->m.locations, locations, sizeof(locations));
+
+ defaultColorInfo = C2StreamColorInfo::output::AllocShared(
+ {C2ChromaOffsetStruct::ITU_YUV_420_0()}, 0u, 8u /* bitDepth */,
+ C2Color::YUV_420);
+ helper->addStructDescriptors<C2ChromaOffsetStruct>();
+
+ addParameter(DefineParam(mColorInfo, C2_PARAMKEY_CODED_COLOR_INFO)
+ .withConstValue(defaultColorInfo)
+ .build());
+
+ addParameter(
+ DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+ .withDefault(new C2StreamColorAspectsTuning::output(
+ 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+ .withFields(
+ {C2F(mDefaultColorAspects, range)
+ .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
+ C2F(mDefaultColorAspects, primaries)
+ .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::PRIMARIES_OTHER),
+ C2F(mDefaultColorAspects, transfer)
+ .inRange(C2Color::TRANSFER_UNSPECIFIED,
+ C2Color::TRANSFER_OTHER),
+ C2F(mDefaultColorAspects, matrix)
+ .inRange(C2Color::MATRIX_UNSPECIFIED,
+ C2Color::MATRIX_OTHER)})
+ .withSetter(DefaultColorAspectsSetter)
+ .build());
+
+ // TODO: support more formats?
+ addParameter(DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
+ .withConstValue(new C2StreamPixelFormatInfo::output(
+ 0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
+ .build());
+ }
+
+ static C2R SizeSetter(bool mayBlock,
+ const C2P<C2StreamPictureSizeInfo::output> &oldMe,
+ C2P<C2StreamPictureSizeInfo::output> &me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+ me.set().width = oldMe.v.width;
+ }
+ if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+ me.set().height = oldMe.v.height;
+ }
+ return res;
+ }
+
+ static C2R MaxPictureSizeSetter(
+ bool mayBlock, C2P<C2StreamMaxPictureSizeTuning::output> &me,
+ const C2P<C2StreamPictureSizeInfo::output> &size) {
+ (void)mayBlock;
+ // TODO: get max width/height from the size's field helpers vs.
+ // hardcoding
+ me.set().width = c2_min(c2_max(me.v.width, size.v.width), 4096u);
+ me.set().height = c2_min(c2_max(me.v.height, size.v.height), 4096u);
+ return C2R::Ok();
+ }
+
+ static C2R MaxInputSizeSetter(
+ bool mayBlock, C2P<C2StreamMaxBufferSizeInfo::input> &me,
+ const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
+ (void)mayBlock;
+ // assume compression ratio of 2
+ me.set().value =
+ (((maxSize.v.width + 63) / 64) * ((maxSize.v.height + 63) / 64) * 3072);
+ return C2R::Ok();
+ }
+
+ static C2R DefaultColorAspectsSetter(
+ bool mayBlock, C2P<C2StreamColorAspectsTuning::output> &me) {
+ (void)mayBlock;
+ if (me.v.range > C2Color::RANGE_OTHER) {
+ me.set().range = C2Color::RANGE_OTHER;
+ }
+ if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+ me.set().primaries = C2Color::PRIMARIES_OTHER;
+ }
+ if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+ me.set().transfer = C2Color::TRANSFER_OTHER;
+ }
+ if (me.v.matrix > C2Color::MATRIX_OTHER) {
+ me.set().matrix = C2Color::MATRIX_OTHER;
+ }
+ return C2R::Ok();
+ }
+
+ static C2R ProfileLevelSetter(
+ bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me,
+ const C2P<C2StreamPictureSizeInfo::output> &size) {
+ (void)mayBlock;
+ (void)size;
+ (void)me; // TODO: validate
+ return C2R::Ok();
+ }
+
+ std::shared_ptr<C2StreamColorAspectsTuning::output>
+ getDefaultColorAspects_l() {
+ return mDefaultColorAspects;
+ }
+
+ static C2R Hdr10PlusInfoInputSetter(bool mayBlock,
+ C2P<C2StreamHdr10PlusInfo::input> &me) {
+ (void)mayBlock;
+ (void)me; // TODO: validate
+ return C2R::Ok();
+ }
+
+ static C2R Hdr10PlusInfoOutputSetter(bool mayBlock,
+ C2P<C2StreamHdr10PlusInfo::output> &me) {
+ (void)mayBlock;
+ (void)me; // TODO: validate
+ return C2R::Ok();
+ }
+
+ private:
+ std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
+ std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+ std::shared_ptr<C2StreamMaxPictureSizeTuning::output> mMaxSize;
+ std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
+ std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
+ std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+ std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
+ std::shared_ptr<C2StreamHdr10PlusInfo::input> mHdr10PlusInfoInput;
+ std::shared_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfoOutput;
+};
+
+C2SoftGav1Dec::C2SoftGav1Dec(const char *name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl> &intfImpl)
+ : SimpleC2Component(
+ std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+ mIntf(intfImpl),
+ mCodecCtx(nullptr) {
+ gettimeofday(&mTimeStart, nullptr);
+ gettimeofday(&mTimeEnd, nullptr);
+}
+
+C2SoftGav1Dec::~C2SoftGav1Dec() { onRelease(); }
+
+c2_status_t C2SoftGav1Dec::onInit() {
+ return initDecoder() ? C2_OK : C2_CORRUPTED;
+}
+
+c2_status_t C2SoftGav1Dec::onStop() {
+ mSignalledError = false;
+ mSignalledOutputEos = false;
+ return C2_OK;
+}
+
+void C2SoftGav1Dec::onReset() {
+ (void)onStop();
+ c2_status_t err = onFlush_sm();
+ if (err != C2_OK) {
+ ALOGW("Failed to flush the av1 decoder. Trying to hard reset.");
+ destroyDecoder();
+ if (!initDecoder()) {
+ ALOGE("Hard reset failed.");
+ }
+ }
+}
+
+void C2SoftGav1Dec::onRelease() { destroyDecoder(); }
+
+c2_status_t C2SoftGav1Dec::onFlush_sm() {
+ Libgav1StatusCode status =
+ mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+ /*user_private_data=*/0);
+ if (status != kLibgav1StatusOk) {
+ ALOGE("Failed to flush av1 decoder. status: %d.", status);
+ return C2_CORRUPTED;
+ }
+
+ // Dequeue frame (if any) that was enqueued previously.
+ const libgav1::DecoderBuffer *buffer;
+ status = mCodecCtx->DequeueFrame(&buffer);
+ if (status != kLibgav1StatusOk) {
+ ALOGE("Failed to dequeue frame after flushing the av1 decoder. status: %d",
+ status);
+ return C2_CORRUPTED;
+ }
+
+ mSignalledError = false;
+ mSignalledOutputEos = false;
+
+ return C2_OK;
+}
+
+static int GetCPUCoreCount() {
+ int cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+ cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ // _SC_NPROC_ONLN must be defined...
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+ CHECK(cpuCoreCount >= 1);
+ ALOGV("Number of CPU cores: %d", cpuCoreCount);
+ return cpuCoreCount;
+}
+
+bool C2SoftGav1Dec::initDecoder() {
+ mSignalledError = false;
+ mSignalledOutputEos = false;
+ mCodecCtx.reset(new libgav1::Decoder());
+
+ if (mCodecCtx == nullptr) {
+ ALOGE("mCodecCtx is null");
+ return false;
+ }
+
+ libgav1::DecoderSettings settings = {};
+ settings.threads = GetCPUCoreCount();
+
+ ALOGV("Using libgav1 AV1 software decoder.");
+ Libgav1StatusCode status = mCodecCtx->Init(&settings);
+ if (status != kLibgav1StatusOk) {
+ ALOGE("av1 decoder failed to initialize. status: %d.", status);
+ return false;
+ }
+
+ return true;
+}
+
+void C2SoftGav1Dec::destroyDecoder() { mCodecCtx = nullptr; }
+
+void fillEmptyWork(const std::unique_ptr<C2Work> &work) {
+ uint32_t flags = 0;
+ if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+ flags |= C2FrameData::FLAG_END_OF_STREAM;
+ ALOGV("signalling eos");
+ }
+ work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->workletsProcessed = 1u;
+}
+
+void C2SoftGav1Dec::finishWork(uint64_t index,
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2GraphicBlock> &block) {
+ std::shared_ptr<C2Buffer> buffer =
+ createGraphicBuffer(block, C2Rect(mWidth, mHeight));
+ auto fillWork = [buffer, index](const std::unique_ptr<C2Work> &work) {
+ uint32_t flags = 0;
+ if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+ (c2_cntr64_t(index) == work->input.ordinal.frameIndex)) {
+ flags |= C2FrameData::FLAG_END_OF_STREAM;
+ ALOGV("signalling eos");
+ }
+ work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.buffers.push_back(buffer);
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->workletsProcessed = 1u;
+ };
+ if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
+ fillWork(work);
+ } else {
+ finish(index, fillWork);
+ }
+}
+
+void C2SoftGav1Dec::process(const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ work->result = C2_OK;
+ work->workletsProcessed = 0u;
+ work->worklets.front()->output.configUpdate.clear();
+ work->worklets.front()->output.flags = work->input.flags;
+ if (mSignalledError || mSignalledOutputEos) {
+ work->result = C2_BAD_VALUE;
+ return;
+ }
+
+ size_t inOffset = 0u;
+ size_t inSize = 0u;
+ C2ReadView rView = mDummyReadView;
+ if (!work->input.buffers.empty()) {
+ rView = work->input.buffers[0]->data().linearBlocks().front().map().get();
+ inSize = rView.capacity();
+ if (inSize && rView.error()) {
+ ALOGE("read view map failed %d", rView.error());
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ }
+
+ bool codecConfig =
+ ((work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) != 0);
+ bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+
+ ALOGV("in buffer attr. size %zu timestamp %d frameindex %d, flags %x", inSize,
+ (int)work->input.ordinal.timestamp.peeku(),
+ (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+
+ if (codecConfig) {
+ fillEmptyWork(work);
+ return;
+ }
+
+ int64_t frameIndex = work->input.ordinal.frameIndex.peekll();
+ if (inSize) {
+ uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
+ int32_t decodeTime = 0;
+ int32_t delay = 0;
+
+ GETTIME(&mTimeStart, nullptr);
+ TIME_DIFF(mTimeEnd, mTimeStart, delay);
+
+ const Libgav1StatusCode status =
+ mCodecCtx->EnqueueFrame(bitstream, inSize, frameIndex);
+
+ GETTIME(&mTimeEnd, nullptr);
+ TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
+ ALOGV("decodeTime=%4d delay=%4d\n", decodeTime, delay);
+
+ if (status != kLibgav1StatusOk) {
+ ALOGE("av1 decoder failed to decode frame. status: %d.", status);
+ work->result = C2_CORRUPTED;
+ work->workletsProcessed = 1u;
+ mSignalledError = true;
+ return;
+ }
+
+ } else {
+ const Libgav1StatusCode status =
+ mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+ /*user_private_data=*/0);
+ if (status != kLibgav1StatusOk) {
+ ALOGE("Failed to flush av1 decoder. status: %d.", status);
+ work->result = C2_CORRUPTED;
+ work->workletsProcessed = 1u;
+ mSignalledError = true;
+ return;
+ }
+ }
+
+ (void)outputBuffer(pool, work);
+
+ if (eos) {
+ drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
+ mSignalledOutputEos = true;
+ } else if (!inSize) {
+ fillEmptyWork(work);
+ }
+}
+
+static void copyOutputBufferToYV12Frame(uint8_t *dst, const uint8_t *srcY,
+ const uint8_t *srcU,
+ const uint8_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride,
+ uint32_t width, uint32_t height) {
+ const size_t dstYStride = align(width, 16);
+ const size_t dstUVStride = align(dstYStride / 2, 16);
+ uint8_t *const dstStart = dst;
+
+ for (size_t i = 0; i < height; ++i) {
+ memcpy(dst, srcY, width);
+ srcY += srcYStride;
+ dst += dstYStride;
+ }
+
+ dst = dstStart + dstYStride * height;
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dst, srcV, width / 2);
+ srcV += srcVStride;
+ dst += dstUVStride;
+ }
+
+ dst = dstStart + (dstYStride * height) + (dstUVStride * height / 2);
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dst, srcU, width / 2);
+ srcU += srcUStride;
+ dst += dstUVStride;
+ }
+}
+
+static void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride,
+ size_t dstStride, size_t width,
+ size_t height) {
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < height; y += 2) {
+ uint32_t *dstTop = (uint32_t *)dst;
+ uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+ uint16_t *ySrcTop = (uint16_t *)srcY;
+ uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+ uint16_t *uSrc = (uint16_t *)srcU;
+ uint16_t *vSrc = (uint16_t *)srcV;
+
+ uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+ size_t x = 0;
+ for (; x < width - 3; x += 4) {
+ u01 = *((uint32_t *)uSrc);
+ uSrc += 2;
+ v01 = *((uint32_t *)vSrc);
+ vSrc += 2;
+
+ y01 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y23 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y45 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+ y67 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+ *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+ *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+ *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+ *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+ }
+
+ // There should be at most 2 more pixels to process. Note that we don't
+ // need to consider the odd case as the buffer is always aligned to even.
+ if (x < width) {
+ u01 = *uSrc;
+ v01 = *vSrc;
+ y01 = *((uint32_t *)ySrcTop);
+ y45 = *((uint32_t *)ySrcBot);
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = ((y01 >> 16) << 10) | uv0;
+ *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = ((y45 >> 16) << 10) | uv0;
+ }
+
+ srcY += srcYStride * 2;
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dst += dstStride * 2;
+ }
+}
+
+static void convertYUV420Planar16ToYUV420Planar(
+ uint8_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+ uint8_t *dstY = (uint8_t *)dst;
+ size_t dstYSize = dstStride * height;
+ size_t dstUVStride = align(dstStride / 2, 16);
+ size_t dstUVSize = dstUVStride * height / 2;
+ uint8_t *dstV = dstY + dstYSize;
+ uint8_t *dstU = dstV + dstUVSize;
+
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = (uint8_t)(srcY[x] >> 2);
+ }
+
+ srcY += srcYStride;
+ dstY += dstStride;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstU[x] = (uint8_t)(srcU[x] >> 2);
+ dstV[x] = (uint8_t)(srcV[x] >> 2);
+ }
+
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstU += dstUVStride;
+ dstV += dstUVStride;
+ }
+}
+
+bool C2SoftGav1Dec::outputBuffer(const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work) {
+ if (!(work && pool)) return false;
+
+ const libgav1::DecoderBuffer *buffer;
+ const Libgav1StatusCode status = mCodecCtx->DequeueFrame(&buffer);
+
+ if (status != kLibgav1StatusOk) {
+ ALOGE("av1 decoder DequeueFrame failed. status: %d.", status);
+ return false;
+ }
+
+ // |buffer| can be NULL even when status is kLibgav1StatusOk. This is not
+ // an error and could mean one of two things:
+ // - The EnqueueFrame() call was a flush (called with nullptr).
+ // - The enqueued frame did not produce any displayable frames.
+ if (!buffer) {
+ return false;
+ }
+
+ const int width = buffer->displayed_width[0];
+ const int height = buffer->displayed_height[0];
+ if (width != mWidth || height != mHeight) {
+ mWidth = width;
+ mHeight = height;
+
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
+ if (err == C2_OK) {
+ work->worklets.front()->output.configUpdate.push_back(
+ C2Param::Copy(size));
+ } else {
+ ALOGE("Config update size failed");
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ work->workletsProcessed = 1u;
+ return false;
+ }
+ }
+
+ // TODO(vigneshv): Add support for monochrome videos since AV1 supports it.
+ CHECK(buffer->image_format == libgav1::kImageFormatYuv420);
+
+ std::shared_ptr<C2GraphicBlock> block;
+ uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ if (buffer->bitdepth == 10) {
+ IntfImpl::Lock lock = mIntf->lock();
+ std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects =
+ mIntf->getDefaultColorAspects_l();
+
+ if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
+ defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
+ defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
+ format = HAL_PIXEL_FORMAT_RGBA_1010102;
+ }
+ }
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+
+ c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format,
+ usage, &block);
+
+ if (err != C2_OK) {
+ ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+ work->result = err;
+ return false;
+ }
+
+ C2GraphicView wView = block->map().get();
+
+ if (wView.error()) {
+ ALOGE("graphic view map failed %d", wView.error());
+ work->result = C2_CORRUPTED;
+ return false;
+ }
+
+ ALOGV("provided (%dx%d) required (%dx%d), out frameindex %d", block->width(),
+ block->height(), mWidth, mHeight, (int)buffer->user_private_data);
+
+ uint8_t *dst = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
+ size_t srcYStride = buffer->stride[0];
+ size_t srcUStride = buffer->stride[1];
+ size_t srcVStride = buffer->stride[2];
+
+ if (buffer->bitdepth == 10) {
+ const uint16_t *srcY = (const uint16_t *)buffer->plane[0];
+ const uint16_t *srcU = (const uint16_t *)buffer->plane[1];
+ const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
+
+ if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
+ convertYUV420Planar16ToY410(
+ (uint32_t *)dst, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
+ srcVStride / 2, align(mWidth, 16), mWidth, mHeight);
+ } else {
+ convertYUV420Planar16ToYUV420Planar(dst, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ align(mWidth, 16), mWidth, mHeight);
+ }
+ } else {
+ const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
+ const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
+ const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
+ copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, mWidth, mHeight);
+ }
+ finishWork(buffer->user_private_data, work, std::move(block));
+ block = nullptr;
+ return true;
+}
+
+c2_status_t C2SoftGav1Dec::drainInternal(
+ uint32_t drainMode, const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work) {
+ if (drainMode == NO_DRAIN) {
+ ALOGW("drain with NO_DRAIN: no-op");
+ return C2_OK;
+ }
+ if (drainMode == DRAIN_CHAIN) {
+ ALOGW("DRAIN_CHAIN not supported");
+ return C2_OMITTED;
+ }
+
+ Libgav1StatusCode status =
+ mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+ /*user_private_data=*/0);
+ if (status != kLibgav1StatusOk) {
+ ALOGE("Failed to flush av1 decoder. status: %d.", status);
+ return C2_CORRUPTED;
+ }
+
+ while (outputBuffer(pool, work)) {
+ }
+
+ if (drainMode == DRAIN_COMPONENT_WITH_EOS && work &&
+ work->workletsProcessed == 0u) {
+ fillEmptyWork(work);
+ }
+
+ return C2_OK;
+}
+
+c2_status_t C2SoftGav1Dec::drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ return drainInternal(drainMode, pool, nullptr);
+}
+
+class C2SoftGav1Factory : public C2ComponentFactory {
+ public:
+ C2SoftGav1Factory()
+ : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+ GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *const component,
+ std::function<void(C2Component *)> deleter) override {
+ *component = std::shared_ptr<C2Component>(
+ new C2SoftGav1Dec(COMPONENT_NAME, id,
+ std::make_shared<C2SoftGav1Dec::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *const interface,
+ std::function<void(C2ComponentInterface *)> deleter) override {
+ *interface = std::shared_ptr<C2ComponentInterface>(
+ new SimpleInterface<C2SoftGav1Dec::IntfImpl>(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftGav1Dec::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual ~C2SoftGav1Factory() override = default;
+
+ private:
+ std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory *CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
+ return new ::android::C2SoftGav1Factory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory *factory) {
+ ALOGV("in %s", __func__);
+ delete factory;
+}
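Note: the 10-bit output path in convertYUV420Planar16ToY410() above packs samples into 32-bit words. A hedged single-pixel sketch of that layout follows; the helper name is hypothetical and not part of this patch.

    // Hedged single-pixel sketch of the packing used above: 10-bit U in bits
    // 0-9, Y in bits 10-19, V in bits 20-29, and a constant 2-bit alpha (3)
    // in bits 30-31.
    static inline uint32_t packY410(uint16_t y, uint16_t u, uint16_t v) {
        return (3u << 30) | (static_cast<uint32_t>(v & 0x3FF) << 20)
                | (static_cast<uint32_t>(y & 0x3FF) << 10)
                | static_cast<uint32_t>(u & 0x3FF);
    }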
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
new file mode 100644
index 0000000..a7c08bb
--- /dev/null
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_GAV1_DEC_H_
+#define ANDROID_C2_SOFT_GAV1_DEC_H_
+
+#include <SimpleC2Component.h>
+#include "libgav1/src/decoder.h"
+#include "libgav1/src/decoder_settings.h"
+
+#define GETTIME(a, b) gettimeofday(a, b);
+#define TIME_DIFF(start, end, diff) \
+ diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
+ ((end).tv_usec - (start).tv_usec);
+
+namespace android {
+
+struct C2SoftGav1Dec : public SimpleC2Component {
+ class IntfImpl;
+
+ C2SoftGav1Dec(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl);
+ ~C2SoftGav1Dec();
+
+ // Begin SimpleC2Component overrides.
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+ c2_status_t drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+ // End SimpleC2Component overrides.
+
+ private:
+ std::shared_ptr<IntfImpl> mIntf;
+ std::unique_ptr<libgav1::Decoder> mCodecCtx;
+
+ uint32_t mWidth;
+ uint32_t mHeight;
+ bool mSignalledOutputEos;
+ bool mSignalledError;
+
+ struct timeval mTimeStart; // Time at the start of decode()
+ struct timeval mTimeEnd; // Time at the end of decode()
+
+ bool initDecoder();
+ void destroyDecoder();
+ void finishWork(uint64_t index, const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2GraphicBlock>& block);
+ bool outputBuffer(const std::shared_ptr<C2BlockPool>& pool,
+ const std::unique_ptr<C2Work>& work);
+ c2_status_t drainInternal(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool,
+ const std::unique_ptr<C2Work>& work);
+
+ C2_DO_NOT_COPY(C2SoftGav1Dec);
+};
+
+} // namespace android
+
+#endif // ANDROID_C2_SOFT_GAV1_DEC_H_
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index a3ebadb..60ae93c 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -1309,69 +1309,84 @@
&ui_exec_done);
RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
- if (ui_exec_done != 1) {
- VOID* p_array; // ITTIAM:buffer to handle gain payload
- WORD32 buf_size = 0; // ITTIAM:gain payload length
- WORD32 bit_str_fmt = 1;
- WORD32 gain_stream_flag = 1;
-
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
- IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
- RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
-
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
- IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
- RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
-
- if (buf_size > 0) {
- /*Set bitstream_split_format */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
- IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
- memcpy(mDrcInBuf, p_array, buf_size);
- /* Set number of bytes to be processed */
- err_code =
- ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
- IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
- /* Execute process */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
- IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
- mMpegDDRCPresent = 1;
- }
- }
-
- /* How much buffer is used in input buffers */
+ int32_t num_preroll = 0;
err_code = ixheaacd_dec_api(mXheaacCodecHandle,
- IA_API_CMD_GET_CURIDX_INPUT_BUF,
- 0,
- bytesConsumed);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+ &num_preroll);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
- /* Get the output bytes */
- err_code = ixheaacd_dec_api(mXheaacCodecHandle,
- IA_API_CMD_GET_OUTPUT_BYTES,
- 0,
- outBytes);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+ {
+ int32_t preroll_frame_offset = 0;
- if (mMpegDDRCPresent == 1) {
- memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+ do {
+ if (ui_exec_done != 1) {
+ VOID* p_array; // ITTIAM:buffer to handle gain payload
+ WORD32 buf_size = 0; // ITTIAM:gain payload length
+ WORD32 bit_str_fmt = 1;
+ WORD32 gain_stream_flag = 1;
- err_code =
- ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
- RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
- memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+
+ if (buf_size > 0) {
+ /*Set bitstream_split_format */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ memcpy(mDrcInBuf, p_array, buf_size);
+ /* Set number of bytes to be processed */
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ mMpegDDRCPresent = 1;
+ }
+ }
+
+ /* How much buffer is used in input buffers */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CURIDX_INPUT_BUF,
+ 0,
+ bytesConsumed);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+ /* Get the output bytes */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_OUTPUT_BYTES,
+ 0,
+ outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+
+ if (mMpegDDRCPresent == 1) {
+ memcpy(mDrcInBuf, mOutputBuffer + preroll_frame_offset, *outBytes);
+ preroll_frame_offset += *outBytes;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+ memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+ }
+ num_preroll--;
+ } while (num_preroll > 0);
}
return IA_NO_ERROR;
}
diff --git a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
index a023a05..8c0d0a4 100644
--- a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
+++ b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
@@ -70,7 +70,7 @@
<< ".";
std::lock_guard<std::mutex> lock(mMutex);
- std::set<TrackedBuffer> &bufferIds =
+ std::set<TrackedBuffer*> &bufferIds =
mTrackedBuffersMap[listener][frameIndex];
for (size_t i = 0; i < input.buffers.size(); ++i) {
@@ -79,13 +79,14 @@
<< "Input buffer at index " << i << " is null.";
continue;
}
- const TrackedBuffer &bufferId =
- *bufferIds.emplace(listener, frameIndex, i, input.buffers[i]).
- first;
+ TrackedBuffer *bufferId =
+ new TrackedBuffer(listener, frameIndex, i, input.buffers[i]);
+ mTrackedBufferCache.emplace(bufferId);
+ bufferIds.emplace(bufferId);
c2_status_t status = input.buffers[i]->registerOnDestroyNotify(
onBufferDestroyed,
- const_cast<void*>(reinterpret_cast<const void*>(&bufferId)));
+ reinterpret_cast<void*>(bufferId));
if (status != C2_OK) {
LOG(DEBUG) << "InputBufferManager::_registerFrameData -- "
<< "registerOnDestroyNotify() failed "
@@ -119,31 +120,32 @@
auto findListener = mTrackedBuffersMap.find(listener);
if (findListener != mTrackedBuffersMap.end()) {
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+ std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
= findListener->second;
auto findFrameIndex = frameIndex2BufferIds.find(frameIndex);
if (findFrameIndex != frameIndex2BufferIds.end()) {
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- for (const TrackedBuffer& bufferId : bufferIds) {
- std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+ std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+ for (TrackedBuffer* bufferId : bufferIds) {
+ std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
if (buffer) {
c2_status_t status = buffer->unregisterOnDestroyNotify(
onBufferDestroyed,
- const_cast<void*>(
- reinterpret_cast<const void*>(&bufferId)));
+ reinterpret_cast<void*>(bufferId));
if (status != C2_OK) {
LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
<< "-- unregisterOnDestroyNotify() failed "
<< "(listener @ 0x"
<< std::hex
- << bufferId.listener.unsafe_get()
+ << bufferId->listener.unsafe_get()
<< ", frameIndex = "
- << std::dec << bufferId.frameIndex
- << ", bufferIndex = " << bufferId.bufferIndex
+ << std::dec << bufferId->frameIndex
+ << ", bufferIndex = " << bufferId->bufferIndex
<< ") => status = " << status
<< ".";
}
}
+ mTrackedBufferCache.erase(bufferId);
+ delete bufferId;
}
frameIndex2BufferIds.erase(findFrameIndex);
@@ -179,31 +181,32 @@
auto findListener = mTrackedBuffersMap.find(listener);
if (findListener != mTrackedBuffersMap.end()) {
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds =
+ std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds =
findListener->second;
for (auto findFrameIndex = frameIndex2BufferIds.begin();
findFrameIndex != frameIndex2BufferIds.end();
++findFrameIndex) {
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- for (const TrackedBuffer& bufferId : bufferIds) {
- std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+ std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+ for (TrackedBuffer* bufferId : bufferIds) {
+ std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
if (buffer) {
c2_status_t status = buffer->unregisterOnDestroyNotify(
onBufferDestroyed,
- const_cast<void*>(
- reinterpret_cast<const void*>(&bufferId)));
+ reinterpret_cast<void*>(bufferId));
if (status != C2_OK) {
LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
<< "-- unregisterOnDestroyNotify() failed "
<< "(listener @ 0x"
<< std::hex
- << bufferId.listener.unsafe_get()
+ << bufferId->listener.unsafe_get()
<< ", frameIndex = "
- << std::dec << bufferId.frameIndex
- << ", bufferIndex = " << bufferId.bufferIndex
+ << std::dec << bufferId->frameIndex
+ << ", bufferIndex = " << bufferId->bufferIndex
<< ") => status = " << status
<< ".";
}
+ mTrackedBufferCache.erase(bufferId);
+ delete bufferId;
}
}
}
@@ -236,50 +239,59 @@
<< std::dec << ".";
return;
}
- TrackedBuffer id(*reinterpret_cast<TrackedBuffer*>(arg));
+
+ std::lock_guard<std::mutex> lock(mMutex);
+ TrackedBuffer *bufferId = reinterpret_cast<TrackedBuffer*>(arg);
+
+ if (mTrackedBufferCache.find(bufferId) == mTrackedBufferCache.end()) {
+ LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
+ << "unregistered buffer: "
+ << "buf @ 0x" << std::hex << buf
+ << ", arg @ 0x" << std::hex << arg
+ << std::dec << ".";
+ return;
+ }
+
LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
<< "buf @ 0x" << std::hex << buf
<< ", arg @ 0x" << std::hex << arg
<< std::dec << " -- "
- << "listener @ 0x" << std::hex << id.listener.unsafe_get()
- << ", frameIndex = " << std::dec << id.frameIndex
- << ", bufferIndex = " << id.bufferIndex
+ << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+ << ", frameIndex = " << std::dec << bufferId->frameIndex
+ << ", bufferIndex = " << bufferId->bufferIndex
<< ".";
-
- std::lock_guard<std::mutex> lock(mMutex);
-
- auto findListener = mTrackedBuffersMap.find(id.listener);
+ auto findListener = mTrackedBuffersMap.find(bufferId->listener);
if (findListener == mTrackedBuffersMap.end()) {
- LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
- << "received invalid listener: "
- << "listener @ 0x" << std::hex << id.listener.unsafe_get()
- << " (frameIndex = " << std::dec << id.frameIndex
- << ", bufferIndex = " << id.bufferIndex
- << ").";
+ LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- "
+ << "received invalid listener: "
+ << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+ << " (frameIndex = " << std::dec << bufferId->frameIndex
+ << ", bufferIndex = " << bufferId->bufferIndex
+ << ").";
return;
}
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+ std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
= findListener->second;
- auto findFrameIndex = frameIndex2BufferIds.find(id.frameIndex);
+ auto findFrameIndex = frameIndex2BufferIds.find(bufferId->frameIndex);
if (findFrameIndex == frameIndex2BufferIds.end()) {
LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
<< "received invalid frame index: "
- << "frameIndex = " << id.frameIndex
- << " (listener @ 0x" << std::hex << id.listener.unsafe_get()
- << ", bufferIndex = " << std::dec << id.bufferIndex
+ << "frameIndex = " << bufferId->frameIndex
+ << " (listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+ << ", bufferIndex = " << std::dec << bufferId->bufferIndex
<< ").";
return;
}
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- auto findBufferId = bufferIds.find(id);
+ std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+ auto findBufferId = bufferIds.find(bufferId);
if (findBufferId == bufferIds.end()) {
LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
<< "received invalid buffer index: "
- << "bufferIndex = " << id.bufferIndex
- << " (frameIndex = " << id.frameIndex
- << ", listener @ 0x" << std::hex << id.listener.unsafe_get()
+ << "bufferIndex = " << bufferId->bufferIndex
+ << " (frameIndex = " << bufferId->frameIndex
+ << ", listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
<< std::dec << ").";
return;
}
@@ -292,10 +304,13 @@
}
}
- DeathNotifications &deathNotifications = mDeathNotifications[id.listener];
- deathNotifications.indices[id.frameIndex].emplace_back(id.bufferIndex);
+ DeathNotifications &deathNotifications = mDeathNotifications[bufferId->listener];
+ deathNotifications.indices[bufferId->frameIndex].emplace_back(bufferId->bufferIndex);
++deathNotifications.count;
mOnBufferDestroyed.notify_one();
+
+ mTrackedBufferCache.erase(bufferId);
+ delete bufferId;
}
// Notify the clients about buffer destructions.
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
index b6857d5..42fa557 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
@@ -196,13 +196,9 @@
frameIndex(frameIndex),
bufferIndex(bufferIndex),
buffer(buffer) {}
- TrackedBuffer(const TrackedBuffer&) = default;
- bool operator<(const TrackedBuffer& other) const {
- return bufferIndex < other.bufferIndex;
- }
};
- // Map: listener -> frameIndex -> set<TrackedBuffer>.
+ // Map: listener -> frameIndex -> set<TrackedBuffer*>.
// Essentially, this is used to store triples (listener, frameIndex,
// bufferIndex) that's searchable by listener and (listener, frameIndex).
// However, the value of the innermost map is TrackedBuffer, which also
@@ -210,7 +206,7 @@
// because onBufferDestroyed() needs to know listener and frameIndex too.
typedef std::map<wp<IComponentListener>,
std::map<uint64_t,
- std::set<TrackedBuffer>>> TrackedBuffersMap;
+ std::set<TrackedBuffer*>>> TrackedBuffersMap;
// Storage for pending (unsent) death notifications for one listener.
// Each pair in member named "indices" are (frameIndex, bufferIndex) from
@@ -247,6 +243,16 @@
// Mutex for the management of all input buffers.
std::mutex mMutex;
+ // Cache for all TrackedBuffers.
+ //
+ // Whenever registerOnDestroyNotify() is called, a new TrackedBuffer object
+ // is created and stored in this cache.
+ // Whenever unregisterOnDestroyNotify() or onBufferDestroyed() is called,
+ // the corresponding TrackedBuffer is removed from this cache and freed.
+ //
+ // mTrackedBuffersMap stores raw pointers to the TrackedBuffers owned by this cache.
+ std::set<TrackedBuffer*> mTrackedBufferCache;
+
// Tracked input buffers.
TrackedBuffersMap mTrackedBuffersMap;
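The new mTrackedBufferCache comment above describes an ownership split: the cache set owns heap-allocated TrackedBuffer objects, the listener/frameIndex maps hold only raw pointers into it, and _onBufferDestroyed() erases from both before deleting. A minimal standalone sketch of that pattern, using a made-up Entry/Tracker pair rather than the real InputBufferManager types:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <set>

    // Stand-in for TrackedBuffer: identity matters, so the index map holds
    // pointers and only the cache owns the allocations.
    struct Entry {
        uint64_t frameIndex;
        size_t bufferIndex;
    };

    class Tracker {
    public:
        Entry* track(uint64_t frameIndex, size_t bufferIndex) {
            Entry* e = new Entry{frameIndex, bufferIndex};
            mCache.insert(e);                    // cache owns the allocation
            mByFrame[frameIndex].insert(e);      // index stores the raw pointer
            return e;
        }
        void destroy(Entry* e) {                 // mirrors _onBufferDestroyed()
            auto it = mByFrame.find(e->frameIndex);
            if (it != mByFrame.end()) {
                it->second.erase(e);             // drop from the index first
            }
            mCache.erase(e);                     // then from the cache
            delete e;                            // finally free the object
        }
        ~Tracker() {
            for (Entry* e : mCache) delete e;    // free anything never destroyed
        }
    private:
        std::set<Entry*> mCache;                        // owner of all Entry objects
        std::map<uint64_t, std::set<Entry*>> mByFrame;  // frameIndex -> entries
    };

    int main() {
        Tracker t;
        Entry* e = t.track(1, 0);
        t.destroy(e);
        std::printf("done\n");
        return 0;
    }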
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 07dbf67..04fa59c 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -1434,6 +1434,11 @@
d->type = C2BaseBlock::GRAPHIC;
return true;
}
+ if (cHandle) {
+ // We received a cloned handle, but creating the block failed; release it here.
+ native_handle_close(cHandle);
+ native_handle_delete(cHandle);
+ }
LOG(ERROR) << "Unknown handle type in BaseBlock::pooledBlock.";
return false;
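The new branch above enforces the cleanup rule for handles returned by native_handle_clone(): on any failure path the clone must be closed (releasing its duplicated file descriptors) and then deleted (freeing the struct itself). A minimal sketch of that rule, assuming the Android libcutils headers are available; createBlock() is a hypothetical stand-in for the block-creation step that can fail:

    #include <cutils/native_handle.h>

    // Hypothetical block-creation step; returning false exercises the failure path.
    static bool createBlock(const native_handle_t* /* handle */) { return false; }

    // Returns true on success and never leaks the clone on failure.
    static bool adoptHandle(const native_handle_t* handle) {
        native_handle_t* cloned = native_handle_clone(handle);
        if (cloned == nullptr) {
            return false;
        }
        if (createBlock(cloned)) {
            return true;                     // ownership moved into the block
        }
        native_handle_close(cloned);         // release the duplicated fds
        native_handle_delete(cloned);        // free the handle struct itself
        return false;
    }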
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 0cbf62b..0e1bb0a 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1080,8 +1080,7 @@
outputGeneration);
}
- if (oStreamFormat.value == C2BufferData::LINEAR
- && mComponentName.find("c2.qti.") == std::string::npos) {
+ if (oStreamFormat.value == C2BufferData::LINEAR) {
// WORKAROUND: if we're using early CSD workaround we convert to
// array mode, to appease apps assuming the output
// buffers to be of the same size.
@@ -1133,8 +1132,9 @@
}
C2StreamBufferTypeSetting::output oStreamFormat(0u);
- c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
- if (err != C2_OK) {
+ C2PrependHeaderModeSetting prepend(PREPEND_HEADER_TO_NONE);
+ c2_status_t err = mComponent->query({ &oStreamFormat, &prepend }, {}, C2_DONT_BLOCK, nullptr);
+ if (err != C2_OK && err != C2_BAD_INDEX) {
return UNKNOWN_ERROR;
}
size_t numInputSlots = mInput.lock()->numSlots;
@@ -1174,7 +1174,7 @@
mName, buffer->capacity(), config->size());
}
} else if (oStreamFormat.value == C2BufferData::LINEAR && i == 0
- && mComponentName.find("c2.qti.") == std::string::npos) {
+ && (!prepend || prepend.value == PREPEND_HEADER_TO_NONE)) {
// WORKAROUND: Some apps expect CSD available without queueing
// any input. Queue an empty buffer to get the CSD.
buffer->setRange(0, 0);
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 40160c7..7334834 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -629,7 +629,7 @@
// static
std::shared_ptr<C2Mapper::ProfileLevelMapper>
C2Mapper::GetProfileLevelMapper(std::string mediaType) {
- std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+ std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
if (mediaType == MIMETYPE_AUDIO_AAC) {
return std::make_shared<AacProfileLevelMapper>();
} else if (mediaType == MIMETYPE_VIDEO_AVC) {
@@ -657,7 +657,7 @@
// static
std::shared_ptr<C2Mapper::ProfileLevelMapper>
C2Mapper::GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus) {
- std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+ std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
if (mediaType == MIMETYPE_VIDEO_HEVC) {
return std::make_shared<HevcProfileLevelMapper>(true, isHdr10Plus);
} else if (mediaType == MIMETYPE_VIDEO_VP9) {
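The two Codec2Mapper fixes above correct a swapped std::transform argument list: the broken call passed begin() twice as the input range and end() as the destination, so the media type string was never lower-cased. A standalone sketch of the corrected idiom (the unsigned char cast guards against negative char values):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>

    // Lower-cases the string in place: input range is [begin, end) and the
    // destination iterator is begin(), so every character is overwritten.
    static std::string toLower(std::string s) {
        std::transform(s.begin(), s.end(), s.begin(),
                       [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
        return s;
    }

    int main() {
        std::cout << toLower("Video/AVC") << "\n";   // prints "video/avc"
        return 0;
    }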
diff --git a/media/codec2/vndk/C2Buffer.cpp b/media/codec2/vndk/C2Buffer.cpp
index 710b536..2d99b53 100644
--- a/media/codec2/vndk/C2Buffer.cpp
+++ b/media/codec2/vndk/C2Buffer.cpp
@@ -413,17 +413,14 @@
std::shared_ptr<C2LinearAllocation> alloc;
if (C2AllocatorIon::isValid(cHandle)) {
- native_handle_t *handle = native_handle_clone(cHandle);
- if (handle) {
- c2_status_t err = sAllocator->priorLinearAllocation(handle, &alloc);
- const std::shared_ptr<C2PooledBlockPoolData> poolData =
- std::make_shared<C2PooledBlockPoolData>(data);
- if (err == C2_OK && poolData) {
- // TODO: config params?
- std::shared_ptr<C2LinearBlock> block =
- _C2BlockFactory::CreateLinearBlock(alloc, poolData);
- return block;
- }
+ c2_status_t err = sAllocator->priorLinearAllocation(cHandle, &alloc);
+ const std::shared_ptr<C2PooledBlockPoolData> poolData =
+ std::make_shared<C2PooledBlockPoolData>(data);
+ if (err == C2_OK && poolData) {
+ // TODO: config params?
+ std::shared_ptr<C2LinearBlock> block =
+ _C2BlockFactory::CreateLinearBlock(alloc, poolData);
+ return block;
}
}
return nullptr;
@@ -674,17 +671,14 @@
ResultStatus status = mBufferPoolManager->allocate(
mConnectionId, params, &cHandle, &bufferPoolData);
if (status == ResultStatus::OK) {
- native_handle_t *handle = native_handle_clone(cHandle);
- if (handle) {
- std::shared_ptr<C2LinearAllocation> alloc;
- std::shared_ptr<C2PooledBlockPoolData> poolData =
- std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
- c2_status_t err = mAllocator->priorLinearAllocation(handle, &alloc);
- if (err == C2_OK && poolData && alloc) {
- *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
- if (*block) {
- return C2_OK;
- }
+ std::shared_ptr<C2LinearAllocation> alloc;
+ std::shared_ptr<C2PooledBlockPoolData> poolData =
+ std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+ c2_status_t err = mAllocator->priorLinearAllocation(cHandle, &alloc);
+ if (err == C2_OK && poolData && alloc) {
+ *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
+ if (*block) {
+ return C2_OK;
}
}
return C2_NO_MEMORY;
@@ -710,19 +704,16 @@
ResultStatus status = mBufferPoolManager->allocate(
mConnectionId, params, &cHandle, &bufferPoolData);
if (status == ResultStatus::OK) {
- native_handle_t *handle = native_handle_clone(cHandle);
- if (handle) {
- std::shared_ptr<C2GraphicAllocation> alloc;
- std::shared_ptr<C2PooledBlockPoolData> poolData =
- std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
- c2_status_t err = mAllocator->priorGraphicAllocation(
- handle, &alloc);
- if (err == C2_OK && poolData && alloc) {
- *block = _C2BlockFactory::CreateGraphicBlock(
- alloc, poolData, C2Rect(width, height));
- if (*block) {
- return C2_OK;
- }
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ std::shared_ptr<C2PooledBlockPoolData> poolData =
+ std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+ c2_status_t err = mAllocator->priorGraphicAllocation(
+ cHandle, &alloc);
+ if (err == C2_OK && poolData && alloc) {
+ *block = _C2BlockFactory::CreateGraphicBlock(
+ alloc, poolData, C2Rect(width, height));
+ if (*block) {
+ return C2_OK;
}
}
return C2_NO_MEMORY;
@@ -1117,17 +1108,14 @@
std::shared_ptr<C2GraphicAllocation> alloc;
if (C2AllocatorGralloc::isValid(cHandle)) {
- native_handle_t *handle = native_handle_clone(cHandle);
- if (handle) {
- c2_status_t err = sAllocator->priorGraphicAllocation(handle, &alloc);
- const std::shared_ptr<C2PooledBlockPoolData> poolData =
- std::make_shared<C2PooledBlockPoolData>(data);
- if (err == C2_OK && poolData) {
- // TODO: config setup?
- std::shared_ptr<C2GraphicBlock> block =
- _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
- return block;
- }
+ c2_status_t err = sAllocator->priorGraphicAllocation(cHandle, &alloc);
+ const std::shared_ptr<C2PooledBlockPoolData> poolData =
+ std::make_shared<C2PooledBlockPoolData>(data);
+ if (err == C2_OK && poolData) {
+ // TODO: config setup?
+ std::shared_ptr<C2GraphicBlock> block =
+ _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
+ return block;
}
}
return nullptr;
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index f8afa7c..6b4ed35 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -849,6 +849,7 @@
emplace("libcodec2_soft_amrwbdec.so");
emplace("libcodec2_soft_amrwbenc.so");
emplace("libcodec2_soft_av1dec.so");
+ emplace("libcodec2_soft_gav1dec.so");
emplace("libcodec2_soft_avcdec.so");
emplace("libcodec2_soft_avcenc.so");
emplace("libcodec2_soft_flacdec.so");
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 1744d3d..38821fd 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -12,10 +12,10 @@
shared_libs: [
"liblog",
"libmediandk",
+ "libstagefright_flacdec",
],
static_libs: [
- "libstagefright_flacdec",
"libstagefright_foundation",
"libstagefright_metadatautils",
"libwebm",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 9d5890c..36cab1d 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -4993,8 +4993,11 @@
}
status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(
- off64_t offset, off64_t /* size */) {
+ off64_t offset, off64_t size) {
ALOGV("parseSampleAuxiliaryInformationSizes");
+ if (size < 9) {
+ return -EINVAL;
+ }
// 14496-12 8.7.12
uint8_t version;
if (mDataSource->readAt(
@@ -5007,25 +5010,32 @@
return ERROR_UNSUPPORTED;
}
offset++;
+ size--;
uint32_t flags;
if (!mDataSource->getUInt24(offset, &flags)) {
return ERROR_IO;
}
offset += 3;
+ size -= 3;
if (flags & 1) {
+ if (size < 13) {
+ return -EINVAL;
+ }
uint32_t tmp;
if (!mDataSource->getUInt32(offset, &tmp)) {
return ERROR_MALFORMED;
}
mCurrentAuxInfoType = tmp;
offset += 4;
+ size -= 4;
if (!mDataSource->getUInt32(offset, &tmp)) {
return ERROR_MALFORMED;
}
mCurrentAuxInfoTypeParameter = tmp;
offset += 4;
+ size -= 4;
}
uint8_t defsize;
@@ -5034,6 +5044,7 @@
}
mCurrentDefaultSampleInfoSize = defsize;
offset++;
+ size--;
uint32_t smplcnt;
if (!mDataSource->getUInt32(offset, &smplcnt)) {
@@ -5041,11 +5052,16 @@
}
mCurrentSampleInfoCount = smplcnt;
offset += 4;
-
+ size -= 4;
if (mCurrentDefaultSampleInfoSize != 0) {
ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize);
return OK;
}
+ if (smplcnt > size) {
+ ALOGW("b/124525515 - smplcnt(%u) > size(%lu)", (unsigned int)smplcnt, (unsigned long)size);
+ android_errorWriteLog(0x534e4554, "124525515");
+ return -EINVAL;
+ }
if (smplcnt > mCurrentSampleInfoAllocSize) {
uint8_t * newPtr = (uint8_t*) realloc(mCurrentSampleInfoSizes, smplcnt);
if (newPtr == NULL) {
@@ -5061,26 +5077,32 @@
}
status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(
- off64_t offset, off64_t /* size */) {
+ off64_t offset, off64_t size) {
ALOGV("parseSampleAuxiliaryInformationOffsets");
+ if (size < 8) {
+ return -EINVAL;
+ }
// 14496-12 8.7.13
uint8_t version;
if (mDataSource->readAt(offset, &version, sizeof(version)) != 1) {
return ERROR_IO;
}
offset++;
+ size--;
uint32_t flags;
if (!mDataSource->getUInt24(offset, &flags)) {
return ERROR_IO;
}
offset += 3;
+ size -= 3;
uint32_t entrycount;
if (!mDataSource->getUInt32(offset, &entrycount)) {
return ERROR_IO;
}
offset += 4;
+ size -= 4;
if (entrycount == 0) {
return OK;
}
@@ -5106,19 +5128,31 @@
for (size_t i = 0; i < entrycount; i++) {
if (version == 0) {
+ if (size < 4) {
+ ALOGW("b/124526959");
+ android_errorWriteLog(0x534e4554, "124526959");
+ return -EINVAL;
+ }
uint32_t tmp;
if (!mDataSource->getUInt32(offset, &tmp)) {
return ERROR_IO;
}
mCurrentSampleInfoOffsets[i] = tmp;
offset += 4;
+ size -= 4;
} else {
+ if (size < 8) {
+ ALOGW("b/124526959");
+ android_errorWriteLog(0x534e4554, "124526959");
+ return -EINVAL;
+ }
uint64_t tmp;
if (!mDataSource->getUInt64(offset, &tmp)) {
return ERROR_IO;
}
mCurrentSampleInfoOffsets[i] = tmp;
offset += 8;
+ size -= 8;
}
}
@@ -5405,20 +5439,30 @@
if (flags & kSampleSizePresent) {
bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
- sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
} else {
sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
+#ifdef VERY_VERY_VERBOSE_LOGGING
+ // We don't expect this, but also want to avoid spamming the log if
+ // we hit this case.
+ if (!(mTrackFragmentHeaderInfo.mFlags
+ & TrackFragmentHeaderInfo::kDefaultSampleSizePresent)) {
+ ALOGW("No sample size specified");
+ }
+#endif
}
if (flags & kSampleFlagsPresent) {
bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
- sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
} else {
sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
+#ifdef VERY_VERY_VERBOSE_LOGGING
+ // We don't expect this, but also want to avoid spamming the log if
+ // we hit this case.
+ if (!(mTrackFragmentHeaderInfo.mFlags
+ & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent)) {
+ ALOGW("No sample flags specified");
+ }
+#endif
}
if (flags & kSampleCompositionTimeOffsetPresent) {
@@ -5440,16 +5484,12 @@
// apply some sanity (vs strict legality) checks
//
- // clamp the count of entries in the trun box, to avoid spending forever parsing
- // this box. Clamping (vs error) lets us play *something*.
- // 1 million is about 400 msecs on a Pixel3, should be no more than a couple seconds
- // on the slowest devices.
- static constexpr uint32_t kMaxTrunSampleCount = 1000000;
+ static constexpr uint32_t kMaxTrunSampleCount = 10000;
if (sampleCount > kMaxTrunSampleCount) {
- ALOGW("b/123389881 clamp sampleCount(%u) @ kMaxTrunSampleCount(%u)",
+ ALOGW("b/123389881 sampleCount(%u) > kMaxTrunSampleCount(%u)",
sampleCount, kMaxTrunSampleCount);
android_errorWriteLog(0x534e4554, "124389881 count");
-
+ return -EINVAL;
}
}
@@ -5493,7 +5533,12 @@
tmp.duration = sampleDuration;
tmp.compositionOffset = sampleCtsOffset;
memset(tmp.iv, 0, sizeof(tmp.iv));
- mCurrentSamples.add(tmp);
+ if (mCurrentSamples.add(tmp) < 0) {
+ ALOGW("b/123389881 failed saving sample(n=%zu)", mCurrentSamples.size());
+ android_errorWriteLog(0x534e4554, "124389881 allocation");
+ mCurrentSamples.clear();
+ return NO_MEMORY;
+ }
dataOffset += sampleSize;
}
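The parseSampleAuxiliaryInformationSizes/Offsets changes above all follow one pattern: decrement a remaining-size counter in lockstep with the read offset, and fail with -EINVAL as soon as a field, or the declared entry count, no longer fits in the box. A hedged standalone sketch of that pattern over an in-memory buffer; readU32, parseEntries, and the field layout are illustrative, not the actual ISO 14496-12 box syntax:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Reads a big-endian u32, refusing to read past the remaining size.
    static bool readU32(const uint8_t*& p, size_t& remaining, uint32_t* out) {
        if (remaining < 4) {
            return false;                       // field would overflow the box
        }
        *out = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
               (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
        p += 4;
        remaining -= 4;
        return true;
    }

    // Parses a count followed by 'count' u32 entries, bailing out early if the
    // declared count cannot fit in what is left of the box.
    static bool parseEntries(const uint8_t* data, size_t size, std::vector<uint32_t>* out) {
        size_t remaining = size;
        const uint8_t* p = data;
        uint32_t count;
        if (!readU32(p, remaining, &count)) return false;
        if (count > remaining / 4) return false;    // same idea as smplcnt > size
        out->reserve(count);
        for (uint32_t i = 0; i < count; ++i) {
            uint32_t v;
            if (!readU32(p, remaining, &v)) return false;
            out->push_back(v);
        }
        return true;
    }

    int main() {
        const uint8_t box[] = {0, 0, 0, 2,  0, 0, 0, 7,  0, 0, 0, 9};
        std::vector<uint32_t> entries;
        std::printf("ok=%d entries=%zu\n",
                    parseEntries(box, sizeof(box), &entries) ? 1 : 0, entries.size());
        return 0;
    }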
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index bf29bf1..e7e8901 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -391,20 +391,11 @@
}
mTimeToSampleCount = U32_AT(&header[4]);
- if (mTimeToSampleCount > UINT32_MAX / (2 * sizeof(uint32_t))) {
- // Choose this bound because
- // 1) 2 * sizeof(uint32_t) is the amount of memory needed for one
- // time-to-sample entry in the time-to-sample table.
- // 2) mTimeToSampleCount is the number of entries of the time-to-sample
- // table.
- // 3) We hope that the table size does not exceed UINT32_MAX.
+ if (mTimeToSampleCount > (data_size - 8) / (2 * sizeof(uint32_t))) {
ALOGE("Time-to-sample table size too large.");
return ERROR_OUT_OF_RANGE;
}
- // Note: At this point, we know that mTimeToSampleCount * 2 will not
- // overflow because of the above condition.
-
uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
mTotalSize += allocSize;
if (mTotalSize > kMaxTotalSize) {
@@ -540,6 +531,12 @@
}
uint64_t allocSize = (uint64_t)numSyncSamples * sizeof(uint32_t);
+ if (allocSize > data_size - 8) {
+ ALOGW("b/124771364 - allocSize(%lu) > size(%lu)",
+ (unsigned long)allocSize, (unsigned long)(data_size - 8));
+ android_errorWriteLog(0x534e4554, "124771364");
+ return ERROR_MALFORMED;
+ }
if (allocSize > kMaxTotalSize) {
ALOGE("Sync sample table size too large.");
return ERROR_OUT_OF_RANGE;
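The SampleTable changes above replace a fixed UINT32_MAX-based bound with one derived from the remaining box payload, and they phrase the check as a division so the comparison itself cannot overflow. A small sketch of that idiom; countFitsInBox, headerBytes, and kEntrySize are illustrative names, not SampleTable API:

    #include <cstdint>
    #include <cstdio>

    // Each table entry costs kEntrySize bytes; headerBytes were already consumed.
    static bool countFitsInBox(uint64_t count, uint64_t boxSize, uint64_t headerBytes,
                               uint64_t kEntrySize) {
        if (boxSize < headerBytes) {
            return false;
        }
        // Dividing the payload by the entry size avoids computing count * kEntrySize,
        // which could overflow before the comparison.
        return count <= (boxSize - headerBytes) / kEntrySize;
    }

    int main() {
        std::printf("%d\n", countFitsInBox(1000, 4008, 8, 4));  // 1: exactly fits
        std::printf("%d\n", countFitsInBox(1001, 4008, 8, 4));  // 0: one entry too many
        return 0;
    }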
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 72b94bb..298dab1 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1062,8 +1062,15 @@
size_t size = buffer->range_length();
if (size < kOpusHeaderSize
- || memcmp(data, "OpusHead", 8)
- || /* version = */ data[8] != 1) {
+ || memcmp(data, "OpusHead", 8)) {
+ return AMEDIA_ERROR_MALFORMED;
+ }
+ // Allow both versions 0 and 1. Per the Opus specification:
+ // An earlier draft of the specification described a version 0, but the only difference
+ // between version 1 and version 0 is that version 0 did not specify the semantics for
+ // handling the version field
+ if ( /* version = */ data[8] > 1) {
+ ALOGW("no support for opus version %d", data[8]);
return AMEDIA_ERROR_MALFORMED;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index fb276c2..52eadd4 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -36,7 +36,6 @@
#include "binding/AAudioStreamConfiguration.h"
#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
-#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
#include "utility/AudioClock.h"
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
index 0e1dfac..cff72fd 100644
--- a/media/libaudioclient/AudioProductStrategy.cpp
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -70,6 +70,7 @@
return NO_ERROR;
}
+// Keep in sync with android/media/audiopolicy/AudioProductStrategy#attributeMatches
bool AudioProductStrategy::attributesMatches(const audio_attributes_t refAttributes,
const audio_attributes_t clientAttritubes)
{
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
deleted file mode 100644
index 783eef3..0000000
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_MIXER_H
-#define ANDROID_AUDIO_MIXER_H
-
-#include <map>
-#include <pthread.h>
-#include <sstream>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unordered_map>
-#include <vector>
-
-#include <android/os/IExternalVibratorService.h>
-#include <media/AudioBufferProvider.h>
-#include <media/AudioResampler.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/BufferProviders.h>
-#include <system/audio.h>
-#include <utils/Compat.h>
-#include <utils/threads.h>
-
-// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
-#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
-
-// This must match frameworks/av/services/audioflinger/Configuration.h
-#define FLOAT_AUX
-
-namespace android {
-
-namespace NBLog {
-class Writer;
-} // namespace NBLog
-
-// ----------------------------------------------------------------------------
-
-class AudioMixer
-{
-public:
- // Do not change these unless underlying code changes.
- // This mixer has a hard-coded upper limit of 8 channels for output.
- static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
- static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
- // maximum number of channels supported for the content
- static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
-
- static const uint16_t UNITY_GAIN_INT = 0x1000;
- static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
-
- enum { // names
- // setParameter targets
- TRACK = 0x3000,
- RESAMPLE = 0x3001,
- RAMP_VOLUME = 0x3002, // ramp to new volume
- VOLUME = 0x3003, // don't ramp
- TIMESTRETCH = 0x3004,
-
- // set Parameter names
- // for target TRACK
- CHANNEL_MASK = 0x4000,
- FORMAT = 0x4001,
- MAIN_BUFFER = 0x4002,
- AUX_BUFFER = 0x4003,
- DOWNMIX_TYPE = 0X4004,
- MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
- // for haptic
- HAPTIC_ENABLED = 0x4007, // Set haptic data from this track should be played or not.
- HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
- // for target RESAMPLE
- SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
- // parameter 'value' is the new sample rate in Hz.
- // Only creates a sample rate converter the first time that
- // the track sample rate is different from the mix sample rate.
- // If the new sample rate is the same as the mix sample rate,
- // and a sample rate converter already exists,
- // then the sample rate converter remains present but is a no-op.
- RESET = 0x4101, // Reset sample rate converter without changing sample rate.
- // This clears out the resampler's input buffer.
- REMOVE = 0x4102, // Remove the sample rate converter on this track name;
- // the track is restored to the mix sample rate.
- // for target RAMP_VOLUME and VOLUME (8 channels max)
- // FIXME use float for these 3 to improve the dynamic range
- VOLUME0 = 0x4200,
- VOLUME1 = 0x4201,
- AUXLEVEL = 0x4210,
- // for target TIMESTRETCH
- PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
- // parameter 'value' is a pointer to the new playback rate.
- };
-
- typedef enum { // Haptic intensity, should keep consistent with VibratorService
- HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
- HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
- HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
- HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
- HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
- HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
- } haptic_intensity_t;
- static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
- static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
- static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
-
- static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
- switch (hapticIntensity) {
- case HAPTIC_SCALE_MUTE:
- case HAPTIC_SCALE_VERY_LOW:
- case HAPTIC_SCALE_LOW:
- case HAPTIC_SCALE_NONE:
- case HAPTIC_SCALE_HIGH:
- case HAPTIC_SCALE_VERY_HIGH:
- return true;
- default:
- return false;
- }
- }
-
- AudioMixer(size_t frameCount, uint32_t sampleRate)
- : mSampleRate(sampleRate)
- , mFrameCount(frameCount) {
- pthread_once(&sOnceControl, &sInitRoutine);
- }
-
- // Create a new track in the mixer.
- //
- // \param name a unique user-provided integer associated with the track.
- // If name already exists, the function will abort.
- // \param channelMask output channel mask.
- // \param format PCM format
- // \param sessionId Session id for the track. Tracks with the same
- // session id will be submixed together.
- //
- // \return OK on success.
- // BAD_VALUE if the format does not satisfy isValidFormat()
- // or the channelMask does not satisfy isValidChannelMask().
- status_t create(
- int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
-
- bool exists(int name) const {
- return mTracks.count(name) > 0;
- }
-
- // Free an allocated track by name.
- void destroy(int name);
-
- // Enable or disable an allocated track by name
- void enable(int name);
- void disable(int name);
-
- void setParameter(int name, int target, int param, void *value);
-
- void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-
- void process() {
- for (const auto &pair : mTracks) {
- // Clear contracted buffer before processing if contracted channels are saved
- const std::shared_ptr<Track> &t = pair.second;
- if (t->mKeepContractedChannels) {
- t->clearContractedBuffer();
- }
- }
- (this->*mHook)();
- processHapticData();
- }
-
- size_t getUnreleasedFrames(int name) const;
-
- std::string trackNames() const {
- std::stringstream ss;
- for (const auto &pair : mTracks) {
- ss << pair.first << " ";
- }
- return ss.str();
- }
-
- void setNBLogWriter(NBLog::Writer *logWriter) {
- mNBLogWriter = logWriter;
- }
-
- static inline bool isValidFormat(audio_format_t format) {
- switch (format) {
- case AUDIO_FORMAT_PCM_8_BIT:
- case AUDIO_FORMAT_PCM_16_BIT:
- case AUDIO_FORMAT_PCM_24_BIT_PACKED:
- case AUDIO_FORMAT_PCM_32_BIT:
- case AUDIO_FORMAT_PCM_FLOAT:
- return true;
- default:
- return false;
- }
- }
-
- static inline bool isValidChannelMask(audio_channel_mask_t channelMask) {
- return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
- }
-
-private:
-
- /* For multi-format functions (calls template functions
- * in AudioMixerOps.h). The template parameters are as follows:
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-
- enum {
- // FIXME this representation permits up to 8 channels
- NEEDS_CHANNEL_COUNT__MASK = 0x00000007,
- };
-
- enum {
- NEEDS_CHANNEL_1 = 0x00000000, // mono
- NEEDS_CHANNEL_2 = 0x00000001, // stereo
-
- // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
-
- NEEDS_MUTE = 0x00000100,
- NEEDS_RESAMPLE = 0x00001000,
- NEEDS_AUX = 0x00010000,
- };
-
- // hook types
- enum {
- PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
- };
-
- enum {
- TRACKTYPE_NOP,
- TRACKTYPE_RESAMPLE,
- TRACKTYPE_NORESAMPLE,
- TRACKTYPE_NORESAMPLEMONO,
- };
-
- // process hook functionality
- using process_hook_t = void(AudioMixer::*)();
-
- struct Track;
- using hook_t = void(Track::*)(int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
-
- struct Track {
- Track()
- : bufferProvider(nullptr)
- {
- // TODO: move additional initialization here.
- }
-
- ~Track()
- {
- // bufferProvider, mInputBufferProvider need not be deleted.
- mResampler.reset(nullptr);
- // Ensure the order of destruction of buffer providers as they
- // release the upstream provider in the destructor.
- mTimestretchBufferProvider.reset(nullptr);
- mPostDownmixReformatBufferProvider.reset(nullptr);
- mDownmixerBufferProvider.reset(nullptr);
- mReformatBufferProvider.reset(nullptr);
- mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
- mAdjustChannelsBufferProvider.reset(nullptr);
- }
-
- bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
- bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
- bool doesResample() const { return mResampler.get() != nullptr; }
- void resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
- void adjustVolumeRamp(bool aux, bool useFloat = false);
- size_t getUnreleasedFrames() const { return mResampler.get() != nullptr ?
- mResampler->getUnreleasedFrames() : 0; };
-
- status_t prepareForDownmix();
- void unprepareForDownmix();
- status_t prepareForReformat();
- void unprepareForReformat();
- status_t prepareForAdjustChannels();
- void unprepareForAdjustChannels();
- status_t prepareForAdjustChannelsNonDestructive(size_t frames);
- void unprepareForAdjustChannelsNonDestructive();
- void clearContractedBuffer();
- bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
- void reconfigureBufferProviders();
-
- static hook_t getTrackHook(int trackType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
- void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
- template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
- typename TO, typename TI, typename TA>
- void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
-
- uint32_t needs;
-
- // TODO: Eventually remove legacy integer volume settings
- union {
- int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
- int32_t volumeRL;
- };
-
- int32_t prevVolume[MAX_NUM_VOLUMES];
- int32_t volumeInc[MAX_NUM_VOLUMES];
- int32_t auxInc;
- int32_t prevAuxLevel;
- int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
-
- uint16_t frameCount;
-
- uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
- uint8_t unused_padding; // formerly format, was always 16
- uint16_t enabled; // actually bool
- audio_channel_mask_t channelMask;
-
- // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
- // for how the Track buffer provider is wrapped by another one when dowmixing is required
- AudioBufferProvider* bufferProvider;
-
- mutable AudioBufferProvider::Buffer buffer; // 8 bytes
-
- hook_t hook;
- const void *mIn; // current location in buffer
-
- std::unique_ptr<AudioResampler> mResampler;
- uint32_t sampleRate;
- int32_t* mainBuffer;
- int32_t* auxBuffer;
-
- /* Buffer providers are constructed to translate the track input data as needed.
- *
- * TODO: perhaps make a single PlaybackConverterProvider class to move
- * all pre-mixer track buffer conversions outside the AudioMixer class.
- *
- * 1) mInputBufferProvider: The AudioTrack buffer provider.
- * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
- * channel format to another. Expanded channels are filled with zeros and put at the end
- * of each audio frame. Contracted channels are copied to the end of the buffer.
- * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
- * This is currently using at audio-haptic coupled playback to separate audio and haptic
- * data. Contracted channels could be written to given buffer.
- * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
- * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
- * requires reformat. For example, it may convert floating point input to
- * PCM_16_bit if that's required by the downmixer.
- * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
- * the number of channels required by the mixer sink.
- * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
- * the downmixer requirements to the mixer engine input requirements.
- * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
- */
- AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- // TODO: combine mAdjustChannelsBufferProvider and
- // mContractChannelsNonDestructiveBufferProvider
- std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
-
- int32_t sessionId;
-
- audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- audio_format_t mFormat; // input track format
- audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- // each track must be converted to this format.
- audio_format_t mDownmixRequiresFormat; // required downmixer format
- // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
- // AUDIO_FORMAT_INVALID if no required format
-
- float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
- float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
- float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment
-
- float mAuxLevel; // floating point set aux level
- float mPrevAuxLevel; // floating point prev aux level
- float mAuxInc; // floating point aux increment
-
- audio_channel_mask_t mMixerChannelMask;
- uint32_t mMixerChannelCount;
-
- AudioPlaybackRate mPlaybackRate;
-
- // Haptic
- bool mHapticPlaybackEnabled;
- haptic_intensity_t mHapticIntensity;
- audio_channel_mask_t mHapticChannelMask;
- uint32_t mHapticChannelCount;
- audio_channel_mask_t mMixerHapticChannelMask;
- uint32_t mMixerHapticChannelCount;
- uint32_t mAdjustInChannelCount;
- uint32_t mAdjustOutChannelCount;
- uint32_t mAdjustNonDestructiveInChannelCount;
- uint32_t mAdjustNonDestructiveOutChannelCount;
- bool mKeepContractedChannels;
-
- float getHapticScaleGamma() const {
- // Need to keep consistent with the value in VibratorService.
- switch (mHapticIntensity) {
- case HAPTIC_SCALE_VERY_LOW:
- return 2.0f;
- case HAPTIC_SCALE_LOW:
- return 1.5f;
- case HAPTIC_SCALE_HIGH:
- return 0.5f;
- case HAPTIC_SCALE_VERY_HIGH:
- return 0.25f;
- default:
- return 1.0f;
- }
- }
-
- float getHapticMaxAmplitudeRatio() const {
- // Need to keep consistent with the value in VibratorService.
- switch (mHapticIntensity) {
- case HAPTIC_SCALE_VERY_LOW:
- return HAPTIC_SCALE_VERY_LOW_RATIO;
- case HAPTIC_SCALE_LOW:
- return HAPTIC_SCALE_LOW_RATIO;
- case HAPTIC_SCALE_NONE:
- case HAPTIC_SCALE_HIGH:
- case HAPTIC_SCALE_VERY_HIGH:
- return 1.0f;
- default:
- return 0.0f;
- }
- }
-
- private:
- // hooks
- void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
- void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
- void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
-
- // multi-format track hooks
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
- };
-
- // TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
- static constexpr int BLOCKSIZE = 16;
-
- bool setChannelMasks(int name,
- audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
-
- // Called when track info changes and a new process hook should be determined.
- void invalidate() {
- mHook = &AudioMixer::process__validate;
- }
-
- void process__validate();
- void process__nop();
- void process__genericNoResampling();
- void process__genericResampling();
- void process__oneTrack16BitsStereoNoResampling();
-
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- void process__noResampleOneTrack();
-
- void processHapticData();
-
- static process_hook_t getProcessHook(int processType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
- static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
- void *in, audio_format_t mixerInFormat, size_t sampleCount);
-
- static void sInitRoutine();
-
- // initialization constants
- const uint32_t mSampleRate;
- const size_t mFrameCount;
-
- NBLog::Writer *mNBLogWriter = nullptr; // associated NBLog::Writer
-
- process_hook_t mHook = &AudioMixer::process__nop; // one of process__*, never nullptr
-
- // the size of the type (int32_t) should be the largest of all types supported
- // by the mixer.
- std::unique_ptr<int32_t[]> mOutputTemp;
- std::unique_ptr<int32_t[]> mResampleTemp;
-
- // track names grouped by main buffer, in no particular order of main buffer.
- // however names for a particular main buffer are in order (by construction).
- std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
-
- // track names that are enabled, in increasing order (by construction).
- std::vector<int /* name */> mEnabled;
-
- // track smart pointers, by name, in increasing order of name.
- std::map<int /* name */, std::shared_ptr<Track>> mTracks;
-
- static pthread_once_t sOnceControl; // initialized in constructor by first new
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif // ANDROID_AUDIO_MIXER_H
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 24837e3..7469976 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -67,9 +67,9 @@
// keyAudioLanguagePreferred: Preferred audio language
static const char * const keyAudioLanguagePreferred;
- // keyStreamConnect / Disconnect: value is an int in audio_devices_t
- static const char * const keyStreamConnect;
- static const char * const keyStreamDisconnect;
+ // keyDeviceConnect / Disconnect: value is an int in audio_devices_t
+ static const char * const keyDeviceConnect;
+ static const char * const keyDeviceDisconnect;
// For querying stream capabilities. All the returned values are lists.
// keyStreamSupportedFormats: audio_format_t
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
new file mode 100644
index 0000000..5045d87
--- /dev/null
+++ b/media/libaudiofoundation/Android.bp
@@ -0,0 +1,33 @@
+cc_library_headers {
+ name: "libaudiofoundation_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
+cc_library_shared {
+ name: "libaudiofoundation",
+ vendor_available: true,
+
+ srcs: [
+ "AudioGain.cpp",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libbinder",
+ "liblog",
+ "libutils",
+ ],
+
+ header_libs: [
+ "libaudio_system_headers",
+ "libaudiofoundation_headers",
+ ],
+
+ export_header_lib_headers: ["libaudiofoundation_headers"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
new file mode 100644
index 0000000..9d1d6db
--- /dev/null
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioGain"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include <android-base/stringprintf.h>
+#include <media/AudioGain.h>
+#include <utils/Log.h>
+
+#include <math.h>
+
+namespace android {
+
+AudioGain::AudioGain(int index, bool useInChannelMask)
+{
+ mIndex = index;
+ mUseInChannelMask = useInChannelMask;
+ memset(&mGain, 0, sizeof(struct audio_gain));
+}
+
+void AudioGain::getDefaultConfig(struct audio_gain_config *config)
+{
+ config->index = mIndex;
+ config->mode = mGain.mode;
+ config->channel_mask = mGain.channel_mask;
+ if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+ config->values[0] = mGain.default_value;
+ } else {
+ uint32_t numValues;
+ if (mUseInChannelMask) {
+ numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
+ } else {
+ numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
+ }
+ for (size_t i = 0; i < numValues; i++) {
+ config->values[i] = mGain.default_value;
+ }
+ }
+ if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+ config->ramp_duration_ms = mGain.min_ramp_ms;
+ }
+}
+
+status_t AudioGain::checkConfig(const struct audio_gain_config *config)
+{
+ if ((config->mode & ~mGain.mode) != 0) {
+ return BAD_VALUE;
+ }
+ if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+ if ((config->values[0] < mGain.min_value) ||
+ (config->values[0] > mGain.max_value)) {
+ return BAD_VALUE;
+ }
+ } else {
+ if ((config->channel_mask & ~mGain.channel_mask) != 0) {
+ return BAD_VALUE;
+ }
+ uint32_t numValues;
+ if (mUseInChannelMask) {
+ numValues = audio_channel_count_from_in_mask(config->channel_mask);
+ } else {
+ numValues = audio_channel_count_from_out_mask(config->channel_mask);
+ }
+ for (size_t i = 0; i < numValues; i++) {
+ if ((config->values[i] < mGain.min_value) ||
+ (config->values[i] > mGain.max_value)) {
+ return BAD_VALUE;
+ }
+ }
+ }
+ if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+ if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
+ (config->ramp_duration_ms > mGain.max_ramp_ms)) {
+ return BAD_VALUE;
+ }
+ }
+ return NO_ERROR;
+}
+
+void AudioGain::dump(std::string *dst, int spaces, int index) const
+{
+ dst->append(base::StringPrintf("%*sGain %d:\n", spaces, "", index+1));
+ dst->append(base::StringPrintf("%*s- mode: %08x\n", spaces, "", mGain.mode));
+ dst->append(base::StringPrintf("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask));
+ dst->append(base::StringPrintf("%*s- min_value: %d mB\n", spaces, "", mGain.min_value));
+ dst->append(base::StringPrintf("%*s- max_value: %d mB\n", spaces, "", mGain.max_value));
+ dst->append(base::StringPrintf("%*s- default_value: %d mB\n", spaces, "", mGain.default_value));
+ dst->append(base::StringPrintf("%*s- step_value: %d mB\n", spaces, "", mGain.step_value));
+ dst->append(base::StringPrintf("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms));
+ dst->append(base::StringPrintf("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms));
+}
+
+status_t AudioGain::writeToParcel(android::Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeInt32(mIndex)) != NO_ERROR) return status;
+ if ((status = parcel->writeBool(mUseInChannelMask)) != NO_ERROR) return status;
+ if ((status = parcel->writeBool(mUseForVolume)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.mode)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.channel_mask)) != NO_ERROR) return status;
+ if ((status = parcel->writeInt32(mGain.min_value)) != NO_ERROR) return status;
+ if ((status = parcel->writeInt32(mGain.max_value)) != NO_ERROR) return status;
+ if ((status = parcel->writeInt32(mGain.default_value)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.step_value)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.min_ramp_ms)) != NO_ERROR) return status;
+ status = parcel->writeUint32(mGain.max_ramp_ms);
+ return status;
+}
+
+status_t AudioGain::readFromParcel(const android::Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->readInt32(&mIndex)) != NO_ERROR) return status;
+ if ((status = parcel->readBool(&mUseInChannelMask)) != NO_ERROR) return status;
+ if ((status = parcel->readBool(&mUseForVolume)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+ if ((status = parcel->readInt32(&mGain.min_value)) != NO_ERROR) return status;
+ if ((status = parcel->readInt32(&mGain.max_value)) != NO_ERROR) return status;
+ if ((status = parcel->readInt32(&mGain.default_value)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.step_value)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.min_ramp_ms)) != NO_ERROR) return status;
+ status = parcel->readUint32(&mGain.max_ramp_ms);
+ return status;
+}
+
+status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeUint64(this->size())) != NO_ERROR) return status;
+ for (const auto &audioGain : *this) {
+ if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
+ break;
+ }
+ }
+ return status;
+}
+
+status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
+ status_t status = NO_ERROR;
+ uint64_t count;
+ if ((status = parcel->readUint64(&count)) != NO_ERROR) return status;
+ for (uint64_t i = 0; i < count; i++) {
+ sp<AudioGain> audioGain = new AudioGain(0, false);
+ if ((status = parcel->readParcelable(audioGain.get())) != NO_ERROR) {
+ this->clear();
+ break;
+ }
+ this->push_back(audioGain);
+ }
+ return status;
+}
+
+} // namespace android
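With AudioGain and AudioGains now Parcelable, the usual round trip is write, rewind, read. A hedged usage sketch, assuming it is compiled inside the Android tree where binder/Parcel.h and the new media/AudioGain.h are available; roundTripGain is an illustrative helper, not part of the library:

    #include <binder/Parcel.h>
    #include <media/AudioGain.h>
    #include <utils/Errors.h>
    #include <utils/StrongPointer.h>

    using namespace android;

    // Serializes 'in' and reads it back into a freshly constructed AudioGain.
    static bool roundTripGain(const sp<AudioGain>& in, sp<AudioGain>* out) {
        Parcel parcel;
        if (in->writeToParcel(&parcel) != NO_ERROR) {
            return false;
        }
        parcel.setDataPosition(0);           // rewind before reading back
        *out = new AudioGain(0 /* index */, false /* useInChannelMask */);
        return (*out)->readFromParcel(&parcel) == NO_ERROR;
    }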
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
similarity index 84%
rename from services/audiopolicy/common/managerdefinitions/include/AudioGain.h
rename to media/libaudiofoundation/include/media/AudioGain.h
index 4af93e1..6a7fb55 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,15 +16,17 @@
#pragma once
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
-#include <utils/String8.h>
#include <system/audio.h>
+#include <string>
#include <vector>
namespace android {
-class AudioGain: public RefBase
+class AudioGain: public RefBase, public Parcelable
{
public:
AudioGain(int index, bool useInChannelMask);
@@ -55,7 +57,7 @@
int getMaxRampInMs() const { return mGain.max_ramp_ms; }
// TODO: remove dump from here (split serialization)
- void dump(String8 *dst, int spaces, int index) const;
+ void dump(std::string *dst, int spaces, int index) const;
void getDefaultConfig(struct audio_gain_config *config);
status_t checkConfig(const struct audio_gain_config *config);
@@ -65,6 +67,9 @@
const struct audio_gain &getGain() const { return mGain; }
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
private:
int mIndex;
struct audio_gain mGain;
@@ -72,7 +77,7 @@
bool mUseForVolume = false;
};
-class AudioGains : public std::vector<sp<AudioGain> >
+class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
{
public:
bool canUseForVolume() const
@@ -90,6 +95,9 @@
push_back(gain);
return 0;
}
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
};
} // namespace android
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 584c2c0..9803473 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -13,12 +13,6 @@
],
shared_libs: [
- "android.hardware.audio.effect@2.0",
- "android.hardware.audio.effect@4.0",
- "android.hardware.audio.effect@5.0",
- "android.hardware.audio@2.0",
- "android.hardware.audio@4.0",
- "android.hardware.audio@5.0",
"libaudiohal@2.0",
"libaudiohal@4.0",
"libaudiohal@5.0",
@@ -26,7 +20,8 @@
],
header_libs: [
- "libaudiohal_headers"
+ "libaudiohal_headers",
+ "libbase_headers",
]
}
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index f86009c..d5336fa 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -14,26 +14,16 @@
* limitations under the License.
*/
-#include <android/hardware/audio/2.0/IDevicesFactory.h>
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <android/hardware/audio/5.0/IDevicesFactory.h>
-
#include <libaudiohal/FactoryHalHidl.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+
namespace android {
// static
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
- if (hardware::audio::V5_0::IDevicesFactory::getService() != nullptr) {
- return V5_0::createDevicesFactoryHal();
- }
- if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
- return V4_0::createDevicesFactoryHal();
- }
- if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
- return V2_0::createDevicesFactoryHal();
- }
- return nullptr;
+ return createPreferedImpl<DevicesFactoryHalInterface>();
}
} // namespace android
+
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index bd3ef61..d15b14e 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,26 +14,15 @@
* limitations under the License.
*/
-#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/5.0/IEffectsFactory.h>
-
#include <libaudiohal/FactoryHalHidl.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
namespace android {
// static
sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
- if (hardware::audio::effect::V5_0::IEffectsFactory::getService() != nullptr) {
- return effect::V5_0::createEffectsFactoryHal();
- }
- if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
- return effect::V4_0::createEffectsFactoryHal();
- }
- if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
- return effect::V2_0::createEffectsFactoryHal();
- }
- return nullptr;
+ return createPreferedImpl<EffectsFactoryHalInterface>();
}
// static
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index b25f82e..b07f21d 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -322,6 +322,14 @@
const struct audio_port_config *sinks,
audio_patch_handle_t *patch) {
if (mDevice == 0) return NO_INIT;
+ if (patch == nullptr) return BAD_VALUE;
+
+ if (*patch != AUDIO_PATCH_HANDLE_NONE) {
+ status_t status = releaseAudioPatch(*patch);
+ ALOGW_IF(status != NO_ERROR, "%s error %d releasing patch handle %d",
+ __func__, status, *patch);
+ }
+
hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
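The DeviceHalHidl::createAudioPatch change above makes the call safe to repeat with the same handle variable: an input handle that is not AUDIO_PATCH_HANDLE_NONE is released first (logging a warning on failure) before the new patch is created. A simplified sketch of that replace-in-place pattern outside the HIDL plumbing; Handle, kNoHandle, releasePatch, and makePatch are stand-ins, not the audio HAL API:

    #include <cstdio>

    using Handle = int;
    constexpr Handle kNoHandle = 0;   // stand-in for AUDIO_PATCH_HANDLE_NONE

    static bool releasePatch(Handle h) { std::printf("release %d\n", h); return true; }
    static Handle makePatch() { static Handle next = 1; return next++; }

    // Creates a patch, first tearing down whatever *handle already refers to.
    static void createOrReplacePatch(Handle* handle) {
        if (*handle != kNoHandle) {
            if (!releasePatch(*handle)) {
                std::printf("warning: failed to release old patch %d\n", *handle);
            }
        }
        *handle = makePatch();
    }

    int main() {
        Handle h = kNoHandle;
        createOrReplacePatch(&h);   // creates patch 1
        createOrReplacePatch(&h);   // releases 1, then creates 2
        return 0;
    }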
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 5e01e42..1335a0c 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -35,13 +35,10 @@
namespace android {
namespace CPP_VERSION {
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
- if (!defaultFactory) {
- ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
- exit(1);
- }
- mDeviceFactories.push_back(defaultFactory);
+DevicesFactoryHalHidl::DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory) {
+ ALOG_ASSERT(devicesFactory != nullptr, "Provided IDevicesFactory service is NULL");
+
+ mDeviceFactories.push_back(devicesFactory);
if (MAJOR_VERSION >= 4) {
// The MSD factory is optional and only available starting at HAL 4.0
sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 27e0649..8775e7b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -32,18 +32,14 @@
class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
{
public:
+ DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory);
+
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
private:
- friend class DevicesFactoryHalHybrid;
-
std::vector<sp<IDevicesFactory>> mDeviceFactories;
- // Can not be constructed directly by clients.
- DevicesFactoryHalHidl();
-
virtual ~DevicesFactoryHalHidl() = default;
};
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index f337a8b..0e1f1bb 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -17,16 +17,17 @@
#define LOG_TAG "DevicesFactoryHalHybrid"
//#define LOG_NDEBUG 0
+#include "DevicesFactoryHalHidl.h"
#include "DevicesFactoryHalHybrid.h"
#include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
+#include <libaudiohal/FactoryHalHidl.h>
namespace android {
namespace CPP_VERSION {
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory)
: mLocalFactory(new DevicesFactoryHalLocal()),
- mHidlFactory(new DevicesFactoryHalHidl()) {
+ mHidlFactory(new DevicesFactoryHalHidl(hidlFactory)) {
}
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
@@ -36,6 +37,12 @@
}
return mLocalFactory->openDevice(name, device);
}
-
} // namespace CPP_VERSION
+
+template <>
+sp<DevicesFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+ auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
+ return service ? new CPP_VERSION::DevicesFactoryHalHybrid(service) : nullptr;
+}
+
} // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 5ac0d0d..545bb70 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -17,17 +17,20 @@
#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
+#include PATH(android/hardware/audio/FILE_VERSION/IDevicesFactory.h)
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
+
namespace android {
namespace CPP_VERSION {
class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
{
public:
- DevicesFactoryHalHybrid();
+ DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory);
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
@@ -38,10 +41,6 @@
sp<DevicesFactoryHalInterface> mHidlFactory;
};
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
- return new DevicesFactoryHalHybrid();
-}
-
} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 7fd6bde..ba7b195 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -19,11 +19,12 @@
#include <cutils/native_handle.h>
-#include "EffectsFactoryHalHidl.h"
#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
#include "HidlUtils.h"
+#include <libaudiohal/FactoryHalHidl.h>
using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::Return;
@@ -35,12 +36,10 @@
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
- mEffectsFactory = IEffectsFactory::getService();
- if (mEffectsFactory == 0) {
- ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
- exit(1);
- }
+EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
+ : ConversionHelperHidl("EffectsFactory") {
+ ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
+ mEffectsFactory = effectsFactory;
}
status_t EffectsFactoryHalHidl::queryAllDescriptors() {
@@ -147,4 +146,11 @@
} // namespace CPP_VERSION
} // namespace effect
+
+template<>
+sp<EffectsFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+ auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
+ return service ? new effect::CPP_VERSION::EffectsFactoryHalHidl(service) : nullptr;
+}
+
} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 01178ff..2828513 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,7 +18,6 @@
#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
-#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include "ConversionHelperHidl.h"
@@ -34,7 +33,7 @@
class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
{
public:
- EffectsFactoryHalHidl();
+ EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory);
// Returns the number of different effects in all loaded libraries.
virtual status_t queryNumberEffects(uint32_t *pNumEffects);
@@ -66,10 +65,6 @@
status_t queryAllDescriptors();
};
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
- return new EffectsFactoryHalHidl();
-}
-
} // namespace CPP_VERSION
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index c7319d0..829f99c 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -23,33 +23,42 @@
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <utils/StrongPointer.h>
+#include <array>
+#include <utility>
+
namespace android {
-namespace effect {
-namespace V2_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V2_0
+/** Supported HAL versions, in order of preference.
+ * Implementations should specialize the `create*FactoryHal` template for their version.
+ * Clients should use `createPreferedImpl<*FactoryHal>()` to instantiate
+ * the preferred available impl.
+ */
+enum class AudioHALVersion {
+ V5_0,
+ V4_0,
+ V2_0,
+ end, // used for iterating over supported versions
+};
-namespace V4_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V4_0
+/** Template function to be fully specialized for each version and each Interface. */
+template <AudioHALVersion, class Interface>
+sp<Interface> createFactoryHal();
-namespace V5_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V5_0
-} // namespace effect
+/** @return the preferred available implementation, or nullptr if none is available. */
+template <class Interface, AudioHALVersion version = AudioHALVersion{}>
+static sp<Interface> createPreferedImpl() {
+ if constexpr (version == AudioHALVersion::end) {
+ return nullptr; // tried all versions, all returned nullptr
+ } else {
+ if (auto created = createFactoryHal<version, Interface>(); created != nullptr) {
+ return created;
+ }
-namespace V2_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V2_0
+ using Raw = std::underlying_type_t<AudioHALVersion>; // cast needed since enum classes do not support ++
+ return createPreferedImpl<Interface, AudioHALVersion(Raw(version) + 1)>();
+ }
+}
-namespace V4_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V4_0
-
-namespace V5_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V5_0
} // namespace android
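A minimal caller-side sketch of the fallback mechanism above (the helper name is illustrative and not part of the patch; assumes the per-version implementation files providing the createFactoryHal specializations are linked in):

    // Inside namespace android; hypothetical helper for illustration:
    sp<DevicesFactoryHalInterface> obtainDevicesFactory() {
        // createPreferedImpl() tries V5_0, then V4_0, then V2_0 (the order declared
        // in AudioHALVersion) and returns the first non-null factory, or nullptr
        // if no devices factory HAL service is registered at all.
        return createPreferedImpl<DevicesFactoryHalInterface>();
    }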
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index cb78063..e8aa700 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -3,20 +3,13 @@
export_include_dirs: ["include"],
+ header_libs: ["libaudioclient_headers"],
+
shared_libs: [
- "libaudiohal",
"libaudioutils",
"libcutils",
"liblog",
- "libnbaio",
- "libnblog",
- "libsonic",
"libutils",
- "libvibrator",
- ],
-
- header_libs: [
- "libbase_headers",
],
cflags: [
@@ -33,18 +26,31 @@
defaults: ["libaudioprocessing_defaults"],
srcs: [
+ "AudioMixer.cpp",
"BufferProviders.cpp",
"RecordBufferConverter.cpp",
],
- whole_static_libs: ["libaudioprocessing_arm"],
+
+ header_libs: [
+ "libbase_headers",
+ ],
+
+ shared_libs: [
+ "libaudiohal",
+ "libsonic",
+ "libvibrator",
+ ],
+
+ whole_static_libs: ["libaudioprocessing_base"],
}
cc_library_static {
- name: "libaudioprocessing_arm",
+ name: "libaudioprocessing_base",
defaults: ["libaudioprocessing_defaults"],
+ vendor_available: true,
srcs: [
- "AudioMixer.cpp",
+ "AudioMixerBase.cpp",
"AudioResampler.cpp",
"AudioResamplerCubic.cpp",
"AudioResamplerSinc.cpp",
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f7cc096..c0b11a4 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "AudioMixer"
//#define LOG_NDEBUG 0
+#include <sstream>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
@@ -27,9 +28,6 @@
#include <utils/Errors.h>
#include <utils/Log.h>
-#include <cutils/compiler.h>
-#include <utils/Debug.h>
-
#include <system/audio.h>
#include <audio_utils/primitives.h>
@@ -58,138 +56,15 @@
#define ALOGVV(a...) do { } while (0)
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
-// original code will be used for stereo sinks, the new mixer for multichannel.
-static constexpr bool kUseNewMixer = true;
-
-// Set kUseFloat to true to allow floating input into the mixer engine.
-// If kUseNewMixer is false, this is ignored or may be overridden internally
-// because of downmix/upmix support.
-static constexpr bool kUseFloat = true;
-
-#ifdef FLOAT_AUX
-using TYPE_AUX = float;
-static_assert(kUseNewMixer && kUseFloat,
- "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
-#else
-using TYPE_AUX = int32_t; // q4.27
-#endif
-
// Set to default copy buffer size in frames for input processing.
-static const size_t kCopyBufferFrameCount = 256;
+static constexpr size_t kCopyBufferFrameCount = 256;
namespace android {
// ----------------------------------------------------------------------------
-static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
- return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-}
-
-status_t AudioMixer::create(
- int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
-{
- LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
-
- if (!isValidChannelMask(channelMask)) {
- ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
- return BAD_VALUE;
- }
- if (!isValidFormat(format)) {
- ALOGE("%s invalid format: %#x", __func__, format);
- return BAD_VALUE;
- }
-
- auto t = std::make_shared<Track>();
- {
- // TODO: move initialization to the Track constructor.
- // assume default parameters for the track, except where noted below
- t->needs = 0;
-
- // Integer volume.
- // Currently integer volume is kept for the legacy integer mixer.
- // Will be removed when the legacy mixer path is removed.
- t->volume[0] = 0;
- t->volume[1] = 0;
- t->prevVolume[0] = 0 << 16;
- t->prevVolume[1] = 0 << 16;
- t->volumeInc[0] = 0;
- t->volumeInc[1] = 0;
- t->auxLevel = 0;
- t->auxInc = 0;
- t->prevAuxLevel = 0;
-
- // Floating point volume.
- t->mVolume[0] = 0.f;
- t->mVolume[1] = 0.f;
- t->mPrevVolume[0] = 0.f;
- t->mPrevVolume[1] = 0.f;
- t->mVolumeInc[0] = 0.;
- t->mVolumeInc[1] = 0.;
- t->mAuxLevel = 0.;
- t->mAuxInc = 0.;
- t->mPrevAuxLevel = 0.;
-
- // no initialization needed
- // t->frameCount
- t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
- t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
- channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
- t->channelCount = audio_channel_count_from_out_mask(channelMask);
- t->enabled = false;
- ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
- "Non-stereo channel mask: %d\n", channelMask);
- t->channelMask = channelMask;
- t->sessionId = sessionId;
- // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
- t->bufferProvider = NULL;
- t->buffer.raw = NULL;
- // no initialization needed
- // t->buffer.frameCount
- t->hook = NULL;
- t->mIn = NULL;
- t->sampleRate = mSampleRate;
- // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
- t->mainBuffer = NULL;
- t->auxBuffer = NULL;
- t->mInputBufferProvider = NULL;
- t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
- t->mFormat = format;
- t->mMixerInFormat = selectMixerInFormat(format);
- t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
- t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
- AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
- t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
- t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
- // haptic
- t->mHapticPlaybackEnabled = false;
- t->mHapticIntensity = HAPTIC_SCALE_NONE;
- t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
- t->mMixerHapticChannelCount = 0;
- t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
- t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
- t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
- t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
- t->mKeepContractedChannels = false;
- // Check the downmixing (or upmixing) requirements.
- status_t status = t->prepareForDownmix();
- if (status != OK) {
- ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
- return BAD_VALUE;
- }
- // prepareForDownmix() may change mDownmixRequiresFormat
- ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
- t->prepareForReformat();
- t->prepareForAdjustChannelsNonDestructive(mFrameCount);
- t->prepareForAdjustChannels();
-
- mTracks[name] = t;
- return OK;
- }
+bool AudioMixer::isValidChannelMask(audio_channel_mask_t channelMask) const {
+ return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
}
// Called when channel masks have changed for a track name
@@ -198,7 +73,7 @@
bool AudioMixer::setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- const std::shared_ptr<Track> &track = mTracks[name];
+ const std::shared_ptr<Track> &track = getTrack(name);
if (trackChannelMask == (track->channelMask | track->mHapticChannelMask)
&& mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
@@ -255,14 +130,8 @@
track->prepareForAdjustChannelsNonDestructive(mFrameCount);
track->prepareForAdjustChannels();
- if (track->mResampler.get() != nullptr) {
- // resampler channels may have changed.
- const uint32_t resetToSampleRate = track->sampleRate;
- track->mResampler.reset(nullptr);
- track->sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
- // recreate the resampler with updated format, channels, saved sampleRate.
- track->setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
- }
+ // Resampler channels may have changed.
+ track->recreateResampler(mSampleRate);
return true;
}
@@ -477,171 +346,10 @@
}
}
-void AudioMixer::destroy(int name)
-{
- LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- ALOGV("deleteTrackName(%d)", name);
-
- if (mTracks[name]->enabled) {
- invalidate();
- }
- mTracks.erase(name); // deallocate track
-}
-
-void AudioMixer::enable(int name)
-{
- LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- const std::shared_ptr<Track> &track = mTracks[name];
-
- if (!track->enabled) {
- track->enabled = true;
- ALOGV("enable(%d)", name);
- invalidate();
- }
-}
-
-void AudioMixer::disable(int name)
-{
- LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- const std::shared_ptr<Track> &track = mTracks[name];
-
- if (track->enabled) {
- track->enabled = false;
- ALOGV("disable(%d)", name);
- invalidate();
- }
-}
-
-/* Sets the volume ramp variables for the AudioMixer.
- *
- * The volume ramp variables are used to transition from the previous
- * volume to the set volume. ramp controls the duration of the transition.
- * Its value is typically one state framecount period, but may also be 0,
- * meaning "immediate."
- *
- * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
- * even if there is a nonzero floating point increment (in that case, the volume
- * change is immediate). This restriction should be changed when the legacy mixer
- * is removed (see #2).
- * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
- * when no longer needed.
- *
- * @param newVolume set volume target in floating point [0.0, 1.0].
- * @param ramp number of frames to increment over. if ramp is 0, the volume
- * should be set immediately. Currently ramp should not exceed 65535 (frames).
- * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
- * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
- * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
- * @param pSetVolume pointer to the float target volume, set on return.
- * @param pPrevVolume pointer to the float previous volume, set on return.
- * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
- * @return true if the volume has changed, false if volume is same.
- */
-static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
- int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
- float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
- // check floating point volume to see if it is identical to the previously
- // set volume.
- // We do not use a tolerance here (and reject changes too small)
- // as it may be confusing to use a different value than the one set.
- // If the resulting volume is too small to ramp, it is a direct set of the volume.
- if (newVolume == *pSetVolume) {
- return false;
- }
- if (newVolume < 0) {
- newVolume = 0; // should not have negative volumes
- } else {
- switch (fpclassify(newVolume)) {
- case FP_SUBNORMAL:
- case FP_NAN:
- newVolume = 0;
- break;
- case FP_ZERO:
- break; // zero volume is fine
- case FP_INFINITE:
- // Infinite volume could be handled consistently since
- // floating point math saturates at infinities,
- // but we limit volume to unity gain float.
- // ramp = 0; break;
- //
- newVolume = AudioMixer::UNITY_GAIN_FLOAT;
- break;
- case FP_NORMAL:
- default:
- // Floating point does not have problems with overflow wrap
- // that integer has. However, we limit the volume to
- // unity gain here.
- // TODO: Revisit the volume limitation and perhaps parameterize.
- if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
- newVolume = AudioMixer::UNITY_GAIN_FLOAT;
- }
- break;
- }
- }
-
- // set floating point volume ramp
- if (ramp != 0) {
- // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
- // is no computational mismatch; hence equality is checked here.
- ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
- " prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
- const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
- // could be inf, cannot be nan, subnormal
- const float maxv = std::max(newVolume, *pPrevVolume);
-
- if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
- && maxv + inc != maxv) { // inc must make forward progress
- *pVolumeInc = inc;
- // ramp is set now.
- // Note: if newVolume is 0, then near the end of the ramp,
- // it may be possible that the ramped volume may be subnormal or
- // temporarily negative by a small amount or subnormal due to floating
- // point inaccuracies.
- } else {
- ramp = 0; // ramp not allowed
- }
- }
-
- // compute and check integer volume, no need to check negative values
- // The integer volume is limited to "unity_gain" to avoid wrapping and other
- // audio artifacts, so it never reaches the range limit of U4.28.
- // We safely use signed 16 and 32 bit integers here.
- const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
- const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
- AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
-
- // set integer volume ramp
- if (ramp != 0) {
- // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
- // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
- // is no computational mismatch; hence equality is checked here.
- ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
- " prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
- const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
-
- if (inc != 0) { // inc must make forward progress
- *pIntVolumeInc = inc;
- } else {
- ramp = 0; // ramp not allowed
- }
- }
-
- // if no ramp, or ramp not allowed, then clear float and integer increments
- if (ramp == 0) {
- *pVolumeInc = 0;
- *pPrevVolume = newVolume;
- *pIntVolumeInc = 0;
- *pIntPrevVolume = intVolume << 16;
- }
- *pSetVolume = newVolume;
- *pIntSetVolume = intVolume;
- return true;
-}
-
void AudioMixer::setParameter(int name, int target, int param, void *value)
{
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- const std::shared_ptr<Track> &track = mTracks[name];
+ const std::shared_ptr<Track> &track = getTrack(name);
int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
@@ -670,11 +378,7 @@
}
break;
case AUX_BUFFER:
- if (track->auxBuffer != valueBuf) {
- track->auxBuffer = valueBuf;
- ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
- invalidate();
- }
+ AudioMixerBase::setParameter(name, target, param, value);
break;
case FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
@@ -730,127 +434,38 @@
break;
case RESAMPLE:
- switch (param) {
- case SAMPLE_RATE:
- ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
- if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
- ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
- uint32_t(valueInt));
- invalidate();
- }
- break;
- case RESET:
- track->resetResampler();
- invalidate();
- break;
- case REMOVE:
- track->mResampler.reset(nullptr);
- track->sampleRate = mSampleRate;
- invalidate();
- break;
- default:
- LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
- }
- break;
-
case RAMP_VOLUME:
case VOLUME:
+ AudioMixerBase::setParameter(name, target, param, value);
+ break;
+ case TIMESTRETCH:
switch (param) {
- case AUXLEVEL:
- if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mFrameCount : 0,
- &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
- &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
- ALOGV("setParameter(%s, AUXLEVEL: %04x)",
- target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
- invalidate();
+ case PLAYBACK_RATE: {
+ const AudioPlaybackRate *playbackRate =
+ reinterpret_cast<AudioPlaybackRate*>(value);
+ ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
+ "bad parameters speed %f, pitch %f",
+ playbackRate->mSpeed, playbackRate->mPitch);
+ if (track->setPlaybackRate(*playbackRate)) {
+ ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
+ "%f %f %d %d",
+ playbackRate->mSpeed,
+ playbackRate->mPitch,
+ playbackRate->mStretchMode,
+ playbackRate->mFallbackMode);
+ // invalidate(); (should not require reconfigure)
}
- break;
+ } break;
default:
- if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
- if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mFrameCount : 0,
- &track->volume[param - VOLUME0],
- &track->prevVolume[param - VOLUME0],
- &track->volumeInc[param - VOLUME0],
- &track->mVolume[param - VOLUME0],
- &track->mPrevVolume[param - VOLUME0],
- &track->mVolumeInc[param - VOLUME0])) {
- ALOGV("setParameter(%s, VOLUME%d: %04x)",
- target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
- track->volume[param - VOLUME0]);
- invalidate();
- }
- } else {
- LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
- }
+ LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
}
break;
- case TIMESTRETCH:
- switch (param) {
- case PLAYBACK_RATE: {
- const AudioPlaybackRate *playbackRate =
- reinterpret_cast<AudioPlaybackRate*>(value);
- ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
- "bad parameters speed %f, pitch %f",
- playbackRate->mSpeed, playbackRate->mPitch);
- if (track->setPlaybackRate(*playbackRate)) {
- ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
- "%f %f %d %d",
- playbackRate->mSpeed,
- playbackRate->mPitch,
- playbackRate->mStretchMode,
- playbackRate->mFallbackMode);
- // invalidate(); (should not require reconfigure)
- }
- } break;
- default:
- LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
- }
- break;
default:
LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
}
}
-bool AudioMixer::Track::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
-{
- if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
- if (sampleRate != trackSampleRate) {
- sampleRate = trackSampleRate;
- if (mResampler.get() == nullptr) {
- ALOGV("Creating resampler from track %d Hz to device %d Hz",
- trackSampleRate, devSampleRate);
- AudioResampler::src_quality quality;
- // force lowest quality level resampler if use case isn't music or video
- // FIXME this is flawed for dynamic sample rates, as we choose the resampler
- // quality level based on the initial ratio, but that could change later.
- // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
- if (isMusicRate(trackSampleRate)) {
- quality = AudioResampler::DEFAULT_QUALITY;
- } else {
- quality = AudioResampler::DYN_LOW_QUALITY;
- }
-
- // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
- // but if none exists, it is the channel count (1 for mono).
- const int resamplerChannelCount = mDownmixerBufferProvider.get() != nullptr
- ? mMixerChannelCount : channelCount;
- ALOGVV("Creating resampler:"
- " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
- mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
- mResampler.reset(AudioResampler::create(
- mMixerInFormat,
- resamplerChannelCount,
- devSampleRate, quality));
- }
- return true;
- }
- }
- return false;
-}
-
bool AudioMixer::Track::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
if ((mTimestretchBufferProvider.get() == nullptr &&
@@ -863,8 +478,7 @@
if (mTimestretchBufferProvider.get() == nullptr) {
// TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
// but if none exists, it is the channel count (1 for mono).
- const int timestretchChannelCount = mDownmixerBufferProvider.get() != nullptr
- ? mMixerChannelCount : channelCount;
+ const int timestretchChannelCount = getOutputChannelCount();
mTimestretchBufferProvider.reset(new TimestretchBufferProvider(timestretchChannelCount,
mMixerInFormat, sampleRate, playbackRate));
reconfigureBufferProviders();
@@ -875,84 +489,10 @@
return true;
}
-/* Checks to see if the volume ramp has completed and clears the increment
- * variables appropriately.
- *
- * FIXME: There is code to handle int/float ramp variable switchover should it not
- * complete within a mixer buffer processing call, but it is preferred to avoid switchover
- * due to precision issues. The switchover code is included for legacy code purposes
- * and can be removed once the integer volume is removed.
- *
- * It is not sufficient to clear only the volumeInc integer variable because
- * if one channel requires ramping, all channels are ramped.
- *
- * There is a bit of duplicated code here, but it keeps backward compatibility.
- */
-inline void AudioMixer::Track::adjustVolumeRamp(bool aux, bool useFloat)
-{
- if (useFloat) {
- for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
- if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
- (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
- volumeInc[i] = 0;
- prevVolume[i] = volume[i] << 16;
- mVolumeInc[i] = 0.;
- mPrevVolume[i] = mVolume[i];
- } else {
- //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
- prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
- }
- }
- } else {
- for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
- if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
- ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
- volumeInc[i] = 0;
- prevVolume[i] = volume[i] << 16;
- mVolumeInc[i] = 0.;
- mPrevVolume[i] = mVolume[i];
- } else {
- //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
- mPrevVolume[i] = float_from_u4_28(prevVolume[i]);
- }
- }
- }
-
- if (aux) {
-#ifdef FLOAT_AUX
- if (useFloat) {
- if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
- (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
- auxInc = 0;
- prevAuxLevel = auxLevel << 16;
- mAuxInc = 0.f;
- mPrevAuxLevel = mAuxLevel;
- }
- } else
-#endif
- if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
- (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
- auxInc = 0;
- prevAuxLevel = auxLevel << 16;
- mAuxInc = 0.f;
- mPrevAuxLevel = mAuxLevel;
- }
- }
-}
-
-size_t AudioMixer::getUnreleasedFrames(int name) const
-{
- const auto it = mTracks.find(name);
- if (it != mTracks.end()) {
- return it->second->getUnreleasedFrames();
- }
- return 0;
-}
-
void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
{
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
- const std::shared_ptr<Track> &track = mTracks[name];
+ const std::shared_ptr<Track> &track = getTrack(name);
if (track->mInputBufferProvider == bufferProvider) {
return; // don't reset any buffer providers if identical.
@@ -976,679 +516,6 @@
track->reconfigureBufferProviders();
}
-void AudioMixer::process__validate()
-{
- // TODO: fix all16BitsStereNoResample logic to
- // either properly handle muted tracks (it should ignore them)
- // or remove altogether as an obsolete optimization.
- bool all16BitsStereoNoResample = true;
- bool resampling = false;
- bool volumeRamp = false;
-
- mEnabled.clear();
- mGroups.clear();
- for (const auto &pair : mTracks) {
- const int name = pair.first;
- const std::shared_ptr<Track> &t = pair.second;
- if (!t->enabled) continue;
-
- mEnabled.emplace_back(name); // we add to mEnabled in order of name.
- mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
-
- uint32_t n = 0;
- // FIXME can overflow (mask is only 3 bits)
- n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
- if (t->doesResample()) {
- n |= NEEDS_RESAMPLE;
- }
- if (t->auxLevel != 0 && t->auxBuffer != NULL) {
- n |= NEEDS_AUX;
- }
-
- if (t->volumeInc[0]|t->volumeInc[1]) {
- volumeRamp = true;
- } else if (!t->doesResample() && t->volumeRL == 0) {
- n |= NEEDS_MUTE;
- }
- t->needs = n;
-
- if (n & NEEDS_MUTE) {
- t->hook = &Track::track__nop;
- } else {
- if (n & NEEDS_AUX) {
- all16BitsStereoNoResample = false;
- }
- if (n & NEEDS_RESAMPLE) {
- all16BitsStereoNoResample = false;
- resampling = true;
- t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
- t->mMixerInFormat, t->mMixerFormat);
- ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix + resample", name);
- } else {
- if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
- t->hook = Track::getTrackHook(
- (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
- && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
- ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
- t->mMixerChannelCount,
- t->mMixerInFormat, t->mMixerFormat);
- all16BitsStereoNoResample = false;
- }
- if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
- t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
- t->mMixerInFormat, t->mMixerFormat);
- ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix", name);
- }
- }
- }
- }
-
- // select the processing hooks
- mHook = &AudioMixer::process__nop;
- if (mEnabled.size() > 0) {
- if (resampling) {
- if (mOutputTemp.get() == nullptr) {
- mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
- }
- if (mResampleTemp.get() == nullptr) {
- mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
- }
- mHook = &AudioMixer::process__genericResampling;
- } else {
- // we keep temp arrays around.
- mHook = &AudioMixer::process__genericNoResampling;
- if (all16BitsStereoNoResample && !volumeRamp) {
- if (mEnabled.size() == 1) {
- const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
- if ((t->needs & NEEDS_MUTE) == 0) {
- // The check prevents a muted track from acquiring a process hook.
- //
- // This is dangerous if the track is MONO as that requires
- // special case handling due to implicit channel duplication.
- // Stereo or Multichannel should actually be fine here.
- mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
- }
- }
- }
- }
- }
-
- ALOGV("mixer configuration change: %zu "
- "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
- mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
-
- process();
-
- // Now that the volume ramp has been done, set optimal state and
- // track hooks for subsequent mixer process
- if (mEnabled.size() > 0) {
- bool allMuted = true;
-
- for (const int name : mEnabled) {
- const std::shared_ptr<Track> &t = mTracks[name];
- if (!t->doesResample() && t->volumeRL == 0) {
- t->needs |= NEEDS_MUTE;
- t->hook = &Track::track__nop;
- } else {
- allMuted = false;
- }
- }
- if (allMuted) {
- mHook = &AudioMixer::process__nop;
- } else if (all16BitsStereoNoResample) {
- if (mEnabled.size() == 1) {
- //const int i = 31 - __builtin_clz(enabledTracks);
- const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
- // Muted single tracks handled by allMuted above.
- mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
- }
- }
- }
-}
-
-void AudioMixer::Track::track__genericResample(
- int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
-{
- ALOGVV("track__genericResample\n");
- mResampler->setSampleRate(sampleRate);
-
- // ramp gain - resample to temp buffer and scale/mix in 2nd step
- if (aux != NULL) {
- // always resample with unity gain when sending to auxiliary buffer to be able
- // to apply send level after resampling
- mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
- mResampler->resample(temp, outFrameCount, bufferProvider);
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
- volumeRampStereo(out, outFrameCount, temp, aux);
- } else {
- volumeStereo(out, outFrameCount, temp, aux);
- }
- } else {
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
- mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
- mResampler->resample(temp, outFrameCount, bufferProvider);
- volumeRampStereo(out, outFrameCount, temp, aux);
- }
-
- // constant gain
- else {
- mResampler->setVolume(mVolume[0], mVolume[1]);
- mResampler->resample(out, outFrameCount, bufferProvider);
- }
- }
-}
-
-void AudioMixer::Track::track__nop(int32_t* out __unused,
- size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
-{
-}
-
-void AudioMixer::Track::volumeRampStereo(
- int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
- int32_t vl = prevVolume[0];
- int32_t vr = prevVolume[1];
- const int32_t vlInc = volumeInc[0];
- const int32_t vrInc = volumeInc[1];
-
- //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- // ramp volume
- if (CC_UNLIKELY(aux != NULL)) {
- int32_t va = prevAuxLevel;
- const int32_t vaInc = auxInc;
- int32_t l;
- int32_t r;
-
- do {
- l = (*temp++ >> 12);
- r = (*temp++ >> 12);
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * r;
- *aux++ += (va >> 17) * (l + r);
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
- prevAuxLevel = va;
- } else {
- do {
- *out++ += (vl >> 16) * (*temp++ >> 12);
- *out++ += (vr >> 16) * (*temp++ >> 12);
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
- }
- prevVolume[0] = vl;
- prevVolume[1] = vr;
- adjustVolumeRamp(aux != NULL);
-}
-
-void AudioMixer::Track::volumeStereo(
- int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
- const int16_t vl = volume[0];
- const int16_t vr = volume[1];
-
- if (CC_UNLIKELY(aux != NULL)) {
- const int16_t va = auxLevel;
- do {
- int16_t l = (int16_t)(*temp++ >> 12);
- int16_t r = (int16_t)(*temp++ >> 12);
- out[0] = mulAdd(l, vl, out[0]);
- int16_t a = (int16_t)(((int32_t)l + r) >> 1);
- out[1] = mulAdd(r, vr, out[1]);
- out += 2;
- aux[0] = mulAdd(a, va, aux[0]);
- aux++;
- } while (--frameCount);
- } else {
- do {
- int16_t l = (int16_t)(*temp++ >> 12);
- int16_t r = (int16_t)(*temp++ >> 12);
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(r, vr, out[1]);
- out += 2;
- } while (--frameCount);
- }
-}
-
-void AudioMixer::Track::track__16BitsStereo(
- int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
- ALOGVV("track__16BitsStereo\n");
- const int16_t *in = static_cast<const int16_t *>(mIn);
-
- if (CC_UNLIKELY(aux != NULL)) {
- int32_t l;
- int32_t r;
- // ramp gain
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
- int32_t vl = prevVolume[0];
- int32_t vr = prevVolume[1];
- int32_t va = prevAuxLevel;
- const int32_t vlInc = volumeInc[0];
- const int32_t vrInc = volumeInc[1];
- const int32_t vaInc = auxInc;
- // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- l = (int32_t)*in++;
- r = (int32_t)*in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * r;
- *aux++ += (va >> 17) * (l + r);
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
-
- prevVolume[0] = vl;
- prevVolume[1] = vr;
- prevAuxLevel = va;
- adjustVolumeRamp(true);
- }
-
- // constant gain
- else {
- const uint32_t vrl = volumeRL;
- const int16_t va = (int16_t)auxLevel;
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
- in += 2;
- out[0] = mulAddRL(1, rl, vrl, out[0]);
- out[1] = mulAddRL(0, rl, vrl, out[1]);
- out += 2;
- aux[0] = mulAdd(a, va, aux[0]);
- aux++;
- } while (--frameCount);
- }
- } else {
- // ramp gain
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
- int32_t vl = prevVolume[0];
- int32_t vr = prevVolume[1];
- const int32_t vlInc = volumeInc[0];
- const int32_t vrInc = volumeInc[1];
-
- // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- *out++ += (vl >> 16) * (int32_t) *in++;
- *out++ += (vr >> 16) * (int32_t) *in++;
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
-
- prevVolume[0] = vl;
- prevVolume[1] = vr;
- adjustVolumeRamp(false);
- }
-
- // constant gain
- else {
- const uint32_t vrl = volumeRL;
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- out[0] = mulAddRL(1, rl, vrl, out[0]);
- out[1] = mulAddRL(0, rl, vrl, out[1]);
- out += 2;
- } while (--frameCount);
- }
- }
- mIn = in;
-}
-
-void AudioMixer::Track::track__16BitsMono(
- int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
- ALOGVV("track__16BitsMono\n");
- const int16_t *in = static_cast<int16_t const *>(mIn);
-
- if (CC_UNLIKELY(aux != NULL)) {
- // ramp gain
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
- int32_t vl = prevVolume[0];
- int32_t vr = prevVolume[1];
- int32_t va = prevAuxLevel;
- const int32_t vlInc = volumeInc[0];
- const int32_t vrInc = volumeInc[1];
- const int32_t vaInc = auxInc;
-
- // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- int32_t l = *in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * l;
- *aux++ += (va >> 16) * l;
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
-
- prevVolume[0] = vl;
- prevVolume[1] = vr;
- prevAuxLevel = va;
- adjustVolumeRamp(true);
- }
- // constant gain
- else {
- const int16_t vl = volume[0];
- const int16_t vr = volume[1];
- const int16_t va = (int16_t)auxLevel;
- do {
- int16_t l = *in++;
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(l, vr, out[1]);
- out += 2;
- aux[0] = mulAdd(l, va, aux[0]);
- aux++;
- } while (--frameCount);
- }
- } else {
- // ramp gain
- if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
- int32_t vl = prevVolume[0];
- int32_t vr = prevVolume[1];
- const int32_t vlInc = volumeInc[0];
- const int32_t vrInc = volumeInc[1];
-
- // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- int32_t l = *in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * l;
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
-
- prevVolume[0] = vl;
- prevVolume[1] = vr;
- adjustVolumeRamp(false);
- }
- // constant gain
- else {
- const int16_t vl = volume[0];
- const int16_t vr = volume[1];
- do {
- int16_t l = *in++;
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(l, vr, out[1]);
- out += 2;
- } while (--frameCount);
- }
- }
- mIn = in;
-}
-
-// no-op case
-void AudioMixer::process__nop()
-{
- ALOGVV("process__nop\n");
-
- for (const auto &pair : mGroups) {
- // process by group of tracks with same output buffer to
- // avoid multiple memset() on same buffer
- const auto &group = pair.second;
-
- const std::shared_ptr<Track> &t = mTracks[group[0]];
- memset(t->mainBuffer, 0,
- mFrameCount * audio_bytes_per_frame(
- t->mMixerChannelCount + t->mMixerHapticChannelCount, t->mMixerFormat));
-
- // now consume data
- for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
- size_t outFrames = mFrameCount;
- while (outFrames) {
- t->buffer.frameCount = outFrames;
- t->bufferProvider->getNextBuffer(&t->buffer);
- if (t->buffer.raw == NULL) break;
- outFrames -= t->buffer.frameCount;
- t->bufferProvider->releaseBuffer(&t->buffer);
- }
- }
- }
-}
-
-// generic code without resampling
-void AudioMixer::process__genericNoResampling()
-{
- ALOGVV("process__genericNoResampling\n");
- int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
-
- for (const auto &pair : mGroups) {
- // process by group of tracks with same output main buffer to
- // avoid multiple memset() on same buffer
- const auto &group = pair.second;
-
- // acquire buffer
- for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
- t->buffer.frameCount = mFrameCount;
- t->bufferProvider->getNextBuffer(&t->buffer);
- t->frameCount = t->buffer.frameCount;
- t->mIn = t->buffer.raw;
- }
-
- int32_t *out = (int *)pair.first;
- size_t numFrames = 0;
- do {
- const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
- memset(outTemp, 0, sizeof(outTemp));
- for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
- int32_t *aux = NULL;
- if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
- aux = t->auxBuffer + numFrames;
- }
- for (int outFrames = frameCount; outFrames > 0; ) {
- // t->in == nullptr can happen if the track was flushed just after having
- // been enabled for mixing.
- if (t->mIn == nullptr) {
- break;
- }
- size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
- if (inFrames > 0) {
- (t.get()->*t->hook)(
- outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
- inFrames, mResampleTemp.get() /* naked ptr */, aux);
- t->frameCount -= inFrames;
- outFrames -= inFrames;
- if (CC_UNLIKELY(aux != NULL)) {
- aux += inFrames;
- }
- }
- if (t->frameCount == 0 && outFrames) {
- t->bufferProvider->releaseBuffer(&t->buffer);
- t->buffer.frameCount = (mFrameCount - numFrames) -
- (frameCount - outFrames);
- t->bufferProvider->getNextBuffer(&t->buffer);
- t->mIn = t->buffer.raw;
- if (t->mIn == nullptr) {
- break;
- }
- t->frameCount = t->buffer.frameCount;
- }
- }
- }
-
- const std::shared_ptr<Track> &t1 = mTracks[group[0]];
- convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
- frameCount * t1->mMixerChannelCount);
- // TODO: fix ugly casting due to choice of out pointer type
- out = reinterpret_cast<int32_t*>((uint8_t*)out
- + frameCount * t1->mMixerChannelCount
- * audio_bytes_per_sample(t1->mMixerFormat));
- numFrames += frameCount;
- } while (numFrames < mFrameCount);
-
- // release each track's buffer
- for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
- t->bufferProvider->releaseBuffer(&t->buffer);
- }
- }
-}
-
-// generic code with resampling
-void AudioMixer::process__genericResampling()
-{
- ALOGVV("process__genericResampling\n");
- int32_t * const outTemp = mOutputTemp.get(); // naked ptr
- size_t numFrames = mFrameCount;
-
- for (const auto &pair : mGroups) {
- const auto &group = pair.second;
- const std::shared_ptr<Track> &t1 = mTracks[group[0]];
-
- // clear temp buffer
- memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
- for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
- int32_t *aux = NULL;
- if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
- aux = t->auxBuffer;
- }
-
- // this is a little goofy, on the resampling case we don't
- // acquire/release the buffers because it's done by
- // the resampler.
- if (t->needs & NEEDS_RESAMPLE) {
- (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
- } else {
-
- size_t outFrames = 0;
-
- while (outFrames < numFrames) {
- t->buffer.frameCount = numFrames - outFrames;
- t->bufferProvider->getNextBuffer(&t->buffer);
- t->mIn = t->buffer.raw;
- // t->mIn == nullptr can happen if the track was flushed just after having
- // been enabled for mixing.
- if (t->mIn == nullptr) break;
-
- (t.get()->*t->hook)(
- outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
- mResampleTemp.get() /* naked ptr */,
- aux != nullptr ? aux + outFrames : nullptr);
- outFrames += t->buffer.frameCount;
-
- t->bufferProvider->releaseBuffer(&t->buffer);
- }
- }
- }
- convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
- outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
- }
-}
-
-// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__oneTrack16BitsStereoNoResampling()
-{
- ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
- LOG_ALWAYS_FATAL_IF(mEnabled.size() != 0,
- "%zu != 1 tracks enabled", mEnabled.size());
- const int name = mEnabled[0];
- const std::shared_ptr<Track> &t = mTracks[name];
-
- AudioBufferProvider::Buffer& b(t->buffer);
-
- int32_t* out = t->mainBuffer;
- float *fout = reinterpret_cast<float*>(out);
- size_t numFrames = mFrameCount;
-
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
- const uint32_t vrl = t->volumeRL;
- while (numFrames) {
- b.frameCount = numFrames;
- t->bufferProvider->getNextBuffer(&b);
- const int16_t *in = b.i16;
-
- // in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (in == NULL || (((uintptr_t)in) & 3)) {
- if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
- memset((char*)fout, 0, numFrames
- * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
- } else {
- memset((char*)out, 0, numFrames
- * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
- }
- ALOGE_IF((((uintptr_t)in) & 3),
- "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
- " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
- in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
- return;
- }
- size_t outFrames = b.frameCount;
-
- switch (t->mMixerFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl);
- int32_t r = mulRL(0, rl, vrl);
- *fout++ = float_from_q4_27(l);
- *fout++ = float_from_q4_27(r);
- // Note: In case of later int16_t sink output,
- // conversion and clamping is done by memcpy_to_i16_from_float().
- } while (--outFrames);
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
- // volume is boosted, so we might need to clamp even though
- // we process only one track.
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl) >> 12;
- int32_t r = mulRL(0, rl, vrl) >> 12;
- // clamping...
- l = clamp16(l);
- r = clamp16(r);
- *out++ = (r<<16) | (l & 0xFFFF);
- } while (--outFrames);
- } else {
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl) >> 12;
- int32_t r = mulRL(0, rl, vrl) >> 12;
- *out++ = (r<<16) | (l & 0xFFFF);
- } while (--outFrames);
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
- }
- numFrames -= b.frameCount;
- t->bufferProvider->releaseBuffer(&b);
- }
-}
-
/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
/*static*/ void AudioMixer::sInitRoutine()
@@ -1656,211 +523,71 @@
DownmixerBufferProvider::init(); // for the downmixer
}
-/* TODO: consider whether this level of optimization is necessary.
- * Perhaps just stick with a single for loop.
- */
-
-// Needs to derive a compile time constant (constexpr). Could be targeted to go
-// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
- (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
-
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
- typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
- const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixer::preCreateTrack()
{
- switch (channels) {
- case 1:
- volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 2:
- volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 3:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 4:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 5:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 6:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 7:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 8:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- }
+ return std::make_shared<Track>();
}
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
- typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
- const TI* in, TA* aux, const TV *vol, TAV vola)
+status_t AudioMixer::postCreateTrack(TrackBase *track)
{
- switch (channels) {
- case 1:
- volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
- break;
- case 2:
- volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
- break;
- case 3:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
- break;
- case 4:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
- break;
- case 5:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
- break;
- case 6:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
- break;
- case 7:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
- break;
- case 8:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
- break;
+ Track* t = static_cast<Track*>(track);
+
+ audio_channel_mask_t channelMask = t->channelMask;
+ t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
+ channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+ t->channelCount = audio_channel_count_from_out_mask(channelMask);
+ ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+ "Non-stereo channel mask: %d\n", channelMask);
+ t->channelMask = channelMask;
+ t->mInputBufferProvider = NULL;
+ t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
+ t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ // haptic
+ t->mHapticPlaybackEnabled = false;
+ t->mHapticIntensity = HAPTIC_SCALE_NONE;
+ t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
+ t->mMixerHapticChannelCount = 0;
+ t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
+ t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
+ t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
+ t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+ t->mKeepContractedChannels = false;
+ // Check the downmixing (or upmixing) requirements.
+ status_t status = t->prepareForDownmix();
+ if (status != OK) {
+ ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
+ return BAD_VALUE;
}
+ // prepareForDownmix() may change mDownmixRequiresFormat
+ ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
+ t->prepareForReformat();
+ t->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ t->prepareForAdjustChannels();
+ return OK;
}
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
- typename TO, typename TI, typename TA>
-void AudioMixer::Track::volumeMix(TO *out, size_t outFrames,
- const TI *in, TA *aux, bool ramp)
+void AudioMixer::preProcess()
{
- if (USEFLOATVOL) {
- if (ramp) {
- volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
- mPrevVolume, mVolumeInc,
-#ifdef FLOAT_AUX
- &mPrevAuxLevel, mAuxInc
-#else
- &prevAuxLevel, auxInc
-#endif
- );
- if (ADJUSTVOL) {
- adjustVolumeRamp(aux != NULL, true);
- }
- } else {
- volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
- mVolume,
-#ifdef FLOAT_AUX
- mAuxLevel
-#else
- auxLevel
-#endif
- );
- }
- } else {
- if (ramp) {
- volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
- prevVolume, volumeInc, &prevAuxLevel, auxInc);
- if (ADJUSTVOL) {
- adjustVolumeRamp(aux != NULL);
- }
- } else {
- volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
- volume, auxLevel);
+ for (const auto &pair : mTracks) {
+ // Clear contracted buffer before processing if contracted channels are saved
+ const std::shared_ptr<TrackBase> &tb = pair.second;
+ Track *t = static_cast<Track*>(tb.get());
+ if (t->mKeepContractedChannels) {
+ t->clearContractedBuffer();
}
}
}
-/* This process hook is called when there is a single track without
- * aux buffer, volume ramp, or resampling.
- * TODO: Update the hook selection: this can properly handle aux and ramp.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process__noResampleOneTrack()
+void AudioMixer::postProcess()
{
- ALOGVV("process__noResampleOneTrack\n");
- LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
- "%zu != 1 tracks enabled", mEnabled.size());
- const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
- const uint32_t channels = t->mMixerChannelCount;
- TO* out = reinterpret_cast<TO*>(t->mainBuffer);
- TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
- const bool ramp = t->needsRamp();
-
- for (size_t numFrames = mFrameCount; numFrames > 0; ) {
- AudioBufferProvider::Buffer& b(t->buffer);
- // get input buffer
- b.frameCount = numFrames;
- t->bufferProvider->getNextBuffer(&b);
- const TI *in = reinterpret_cast<TI*>(b.raw);
-
- // in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (in == NULL || (((uintptr_t)in) & 3)) {
- memset(out, 0, numFrames
- * channels * audio_bytes_per_sample(t->mMixerFormat));
- ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
- "buffer %p track %p, channels %d, needs %#x",
- in, &t, t->channelCount, t->needs);
- return;
- }
-
- const size_t outFrames = b.frameCount;
- t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
- out, outFrames, in, aux, ramp);
-
- out += outFrames * channels;
- if (aux != NULL) {
- aux += outFrames;
- }
- numFrames -= b.frameCount;
-
- // release buffer
- t->bufferProvider->releaseBuffer(&b);
- }
- if (ramp) {
- t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
- }
-}
-
-void AudioMixer::processHapticData()
-{
+ // Process haptic data.
// Need to keep consistent with VibrationEffect.scale(int, float, int)
for (const auto &pair : mGroups) {
// process by group of tracks with same output main buffer.
const auto &group = pair.second;
for (const int name : group) {
- const std::shared_ptr<Track> &t = mTracks[name];
+ const std::shared_ptr<Track> &t = getTrack(name);
if (t->mHapticPlaybackEnabled) {
size_t sampleCount = mFrameCount * t->mMixerHapticChannelCount;
float gamma = t->getHapticScaleGamma();
@@ -1887,225 +614,5 @@
}
}
-/* This track hook is called to do resampling then mixing,
- * pulling from the track's upstream AudioBufferProvider.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
-{
- ALOGVV("track__Resample\n");
- mResampler->setSampleRate(sampleRate);
- const bool ramp = needsRamp();
- if (ramp || aux != NULL) {
- // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
- // if aux != NULL: resample with unity gain to temp buffer then apply send level.
-
- mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
- mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
-
- volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
- out, outFrameCount, temp, aux, ramp);
-
- } else { // constant volume gain
- mResampler->setVolume(mVolume[0], mVolume[1]);
- mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
- }
-}
-
-/* This track hook is called to mix a track, when no resampling is required.
- * The input buffer should be present in in.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux)
-{
- ALOGVV("track__NoResample\n");
- const TI *in = static_cast<const TI *>(mIn);
-
- volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
- out, frameCount, in, aux, needsRamp());
-
- // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
- // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
- in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
- mIn = in;
-}
-
-/* The Mixer engine generates either int32_t (Q4_27) or float data.
- * We use this function to convert the engine buffers
- * to the desired mixer output format, either int16_t (Q.15) or float.
- */
-/* static */
-void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
- void *in, audio_format_t mixerInFormat, size_t sampleCount)
-{
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
-}
-
-/* Returns the proper track hook to use for mixing the track into the output buffer.
- */
-/* static */
-AudioMixer::hook_t AudioMixer::Track::getTrackHook(int trackType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
-{
- if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
- switch (trackType) {
- case TRACKTYPE_NOP:
- return &Track::track__nop;
- case TRACKTYPE_RESAMPLE:
- return &Track::track__genericResample;
- case TRACKTYPE_NORESAMPLEMONO:
- return &Track::track__16BitsMono;
- case TRACKTYPE_NORESAMPLE:
- return &Track::track__16BitsStereo;
- default:
- LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
- break;
- }
- }
- LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
- switch (trackType) {
- case TRACKTYPE_NOP:
- return &Track::track__nop;
- case TRACKTYPE_RESAMPLE:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t) &Track::track__Resample<
- MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t) &Track::track__Resample<
- MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- case TRACKTYPE_NORESAMPLEMONO:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t) &Track::track__NoResample<
- MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t) &Track::track__NoResample<
- MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- case TRACKTYPE_NORESAMPLE:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t) &Track::track__NoResample<
- MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t) &Track::track__NoResample<
- MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
- break;
- }
- return NULL;
-}
-
-/* Returns the proper process hook for mixing tracks. Currently works only for
- * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
- *
- * TODO: Due to the special mixing considerations of duplicating to
- * a stereo output track, the input track cannot be MONO. This should be
- * prevented by the caller.
- */
-/* static */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(
- int processType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
-{
- if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
- LOG_ALWAYS_FATAL("bad processType: %d", processType);
- return NULL;
- }
- if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
- return &AudioMixer::process__oneTrack16BitsStereoNoResampling;
- }
- LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return &AudioMixer::process__noResampleOneTrack<
- MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return &AudioMixer::process__noResampleOneTrack<
- MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return &AudioMixer::process__noResampleOneTrack<
- MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return &AudioMixer::process__noResampleOneTrack<
- MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- return NULL;
-}
-
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
new file mode 100644
index 0000000..75c077d
--- /dev/null
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -0,0 +1,1692 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioMixer"
+//#define LOG_NDEBUG 0
+
+#include <sstream>
+#include <string.h>
+
+#include <audio_utils/primitives.h>
+#include <cutils/compiler.h>
+#include <media/AudioMixerBase.h>
+#include <utils/Log.h>
+
+#include "AudioMixerOps.h"
+
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
+#ifndef FCC_2
+#define FCC_2 2
+#endif
+
+// Look for MONO_HACK for any Mono hack involving legacy mono channel to
+// stereo channel conversion.
+
+/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
+ * being used. This is a considerable amount of log spam, so don't enable unless you
+ * are verifying the hook based code.
+ */
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+//#define ALOGVV printf // for test-mixer.cpp
+#else
+#define ALOGVV(a...) do { } while (0)
+#endif
+
+// TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
+static constexpr int BLOCKSIZE = 16;
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+bool AudioMixerBase::isValidFormat(audio_format_t format) const
+{
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AudioMixerBase::isValidChannelMask(audio_channel_mask_t channelMask) const
+{
+ return audio_channel_count_from_out_mask(channelMask) <= MAX_NUM_CHANNELS;
+}
+
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixerBase::preCreateTrack()
+{
+ return std::make_shared<TrackBase>();
+}
+
+status_t AudioMixerBase::create(
+ int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
+{
+ LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
+
+ if (!isValidChannelMask(channelMask)) {
+ ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
+ return BAD_VALUE;
+ }
+ if (!isValidFormat(format)) {
+ ALOGE("%s invalid format: %#x", __func__, format);
+ return BAD_VALUE;
+ }
+
+ auto t = preCreateTrack();
+ {
+ // TODO: move initialization to the Track constructor.
+ // assume default parameters for the track, except where noted below
+ t->needs = 0;
+
+ // Integer volume.
+ // Currently integer volume is kept for the legacy integer mixer.
+ // Will be removed when the legacy mixer path is removed.
+ t->volume[0] = 0;
+ t->volume[1] = 0;
+ t->prevVolume[0] = 0 << 16;
+ t->prevVolume[1] = 0 << 16;
+ t->volumeInc[0] = 0;
+ t->volumeInc[1] = 0;
+ t->auxLevel = 0;
+ t->auxInc = 0;
+ t->prevAuxLevel = 0;
+
+ // Floating point volume.
+ t->mVolume[0] = 0.f;
+ t->mVolume[1] = 0.f;
+ t->mPrevVolume[0] = 0.f;
+ t->mPrevVolume[1] = 0.f;
+ t->mVolumeInc[0] = 0.;
+ t->mVolumeInc[1] = 0.;
+ t->mAuxLevel = 0.;
+ t->mAuxInc = 0.;
+ t->mPrevAuxLevel = 0.;
+
+ // no initialization needed
+ // t->frameCount
+ t->channelCount = audio_channel_count_from_out_mask(channelMask);
+ t->enabled = false;
+ ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+ "Non-stereo channel mask: %d\n", channelMask);
+ t->channelMask = channelMask;
+ t->sessionId = sessionId;
+ // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
+ t->bufferProvider = NULL;
+ t->buffer.raw = NULL;
+ // no initialization needed
+ // t->buffer.frameCount
+ t->hook = NULL;
+ t->mIn = NULL;
+ t->sampleRate = mSampleRate;
+ // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
+ t->mainBuffer = NULL;
+ t->auxBuffer = NULL;
+ t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
+ t->mFormat = format;
+ t->mMixerInFormat = kUseFloat && kUseNewMixer ?
+ AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+ AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+ t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+ status_t status = postCreateTrack(t.get());
+ if (status != OK) return status;
+ mTracks[name] = t;
+ return OK;
+ }
+}
+
+// Called when channel masks have changed for a track name
+bool AudioMixerBase::setChannelMasks(int name,
+ audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask)
+{
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+ if (trackChannelMask == track->channelMask && mixerChannelMask == track->mMixerChannelMask) {
+ return false; // no need to change
+ }
+ // always recompute for both channel masks even if only one has changed.
+ const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+ const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+
+ ALOG_ASSERT(trackChannelCount && mixerChannelCount);
+ track->channelMask = trackChannelMask;
+ track->channelCount = trackChannelCount;
+ track->mMixerChannelMask = mixerChannelMask;
+ track->mMixerChannelCount = mixerChannelCount;
+
+ // Resampler channels may have changed.
+ track->recreateResampler(mSampleRate);
+ return true;
+}
+
+void AudioMixerBase::destroy(int name)
+{
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ ALOGV("deleteTrackName(%d)", name);
+
+ if (mTracks[name]->enabled) {
+ invalidate();
+ }
+ mTracks.erase(name); // deallocate track
+}
+
+void AudioMixerBase::enable(int name)
+{
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+ if (!track->enabled) {
+ track->enabled = true;
+ ALOGV("enable(%d)", name);
+ invalidate();
+ }
+}
+
+void AudioMixerBase::disable(int name)
+{
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+ if (track->enabled) {
+ track->enabled = false;
+ ALOGV("disable(%d)", name);
+ invalidate();
+ }
+}
+
+/* Sets the volume ramp variables for the AudioMixer.
+ *
+ * The volume ramp variables are used to transition from the previous
+ * volume to the set volume. ramp controls the duration of the transition.
+ * Its value is typically one state framecount period, but may also be 0,
+ * meaning "immediate."
+ *
+ * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
+ * even if there is a nonzero floating point increment (in that case, the volume
+ * change is immediate). This restriction should be changed when the legacy mixer
+ * is removed (see #2).
+ * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
+ * when no longer needed.
+ *
+ * @param newVolume set volume target in floating point [0.0, 1.0].
+ * @param ramp number of frames to increment over. if ramp is 0, the volume
+ * should be set immediately. Currently ramp should not exceed 65535 (frames).
+ * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
+ * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
+ * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
+ * @param pSetVolume pointer to the float target volume, set on return.
+ * @param pPrevVolume pointer to the float previous volume, set on return.
+ * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
+ * @return true if the volume has changed, false if volume is same.
+ */
+static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
+ int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
+ float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+ // check floating point volume to see if it is identical to the previously
+ // set volume.
+ // We do not use a tolerance here (and reject changes too small)
+ // as it may be confusing to use a different value than the one set.
+ // If the resulting volume is too small to ramp, it is a direct set of the volume.
+ if (newVolume == *pSetVolume) {
+ return false;
+ }
+ if (newVolume < 0) {
+ newVolume = 0; // should not have negative volumes
+ } else {
+ switch (fpclassify(newVolume)) {
+ case FP_SUBNORMAL:
+ case FP_NAN:
+ newVolume = 0;
+ break;
+ case FP_ZERO:
+ break; // zero volume is fine
+ case FP_INFINITE:
+ // Infinite volume could be handled consistently since
+ // floating point math saturates at infinities,
+ // but we limit volume to unity gain float.
+ // ramp = 0; break;
+ //
+ newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+ break;
+ case FP_NORMAL:
+ default:
+ // Floating point does not have problems with overflow wrap
+ // that integer has. However, we limit the volume to
+ // unity gain here.
+ // TODO: Revisit the volume limitation and perhaps parameterize.
+ if (newVolume > AudioMixerBase::UNITY_GAIN_FLOAT) {
+ newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+ }
+ break;
+ }
+ }
+
+ // set floating point volume ramp
+ if (ramp != 0) {
+ // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+ " prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
+ const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+ // could be inf, cannot be nan, subnormal
+ const float maxv = std::max(newVolume, *pPrevVolume);
+
+ if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+ && maxv + inc != maxv) { // inc must make forward progress
+ *pVolumeInc = inc;
+ // ramp is set now.
+ // Note: if newVolume is 0, then near the end of the ramp,
+ // it may be possible that the ramped volume may be subnormal or
+ // temporarily negative by a small amount or subnormal due to floating
+ // point inaccuracies.
+ } else {
+ ramp = 0; // ramp not allowed
+ }
+ }
+
+ // compute and check integer volume, no need to check negative values
+ // The integer volume is limited to "unity_gain" to avoid wrapping and other
+ // audio artifacts, so it never reaches the range limit of U4.28.
+ // We safely use signed 16 and 32 bit integers here.
+ const float scaledVolume = newVolume * AudioMixerBase::UNITY_GAIN_INT; // not neg, subnormal, nan
+ const int32_t intVolume = (scaledVolume >= (float)AudioMixerBase::UNITY_GAIN_INT) ?
+ AudioMixerBase::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+ // set integer volume ramp
+ if (ramp != 0) {
+ // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+ // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+ " prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+ const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
+
+ if (inc != 0) { // inc must make forward progress
+ *pIntVolumeInc = inc;
+ } else {
+ ramp = 0; // ramp not allowed
+ }
+ }
+
+ // if no ramp, or ramp not allowed, then clear float and integer increments
+ if (ramp == 0) {
+ *pVolumeInc = 0;
+ *pPrevVolume = newVolume;
+ *pIntVolumeInc = 0;
+ *pIntPrevVolume = intVolume << 16;
+ }
+ *pSetVolume = newVolume;
+ *pIntSetVolume = intVolume;
+ return true;
+}
+
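+// Worked example of the scaling described above (illustrative only; the
+// concrete ramp length and volumes are assumptions, not values computed by
+// the mixer): a ramp from silence to unity over 256 frames uses a float
+// increment of 1/256 per frame, while the legacy path keeps the target in
+// U4.12 (unity == UNITY_GAIN_INT) and ramps in U4.28, i.e. the target
+// shifted up by 16 bits divided by the ramp length.
+static __unused void exampleVolumeRampIncrements()
+{
+    const int32_t ramp = 256;                                   // frames
+    const float floatInc = (1.0f - 0.0f) / ramp;                // ~0.0039 per frame
+    const int32_t intTarget = AudioMixerBase::UNITY_GAIN_INT;   // U4.12 unity (0x1000)
+    const int32_t intInc = (intTarget << 16) / ramp;            // U4.28 step (0x100000)
+    (void)floatInc;
+    (void)intInc;
+}
+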
+void AudioMixerBase::setParameter(int name, int target, int param, void *value)
+{
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+ int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
+ int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
+
+ switch (target) {
+
+ case TRACK:
+ switch (param) {
+ case CHANNEL_MASK: {
+ const audio_channel_mask_t trackChannelMask =
+ static_cast<audio_channel_mask_t>(valueInt);
+ if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
+ ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
+ invalidate();
+ }
+ } break;
+ case MAIN_BUFFER:
+ if (track->mainBuffer != valueBuf) {
+ track->mainBuffer = valueBuf;
+ ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+ invalidate();
+ }
+ break;
+ case AUX_BUFFER:
+ if (track->auxBuffer != valueBuf) {
+ track->auxBuffer = valueBuf;
+ ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
+ invalidate();
+ }
+ break;
+ case FORMAT: {
+ audio_format_t format = static_cast<audio_format_t>(valueInt);
+ if (track->mFormat != format) {
+ ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
+ track->mFormat = format;
+ ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
+ invalidate();
+ }
+ } break;
+ case MIXER_FORMAT: {
+ audio_format_t format = static_cast<audio_format_t>(valueInt);
+ if (track->mMixerFormat != format) {
+ track->mMixerFormat = format;
+ ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+ }
+ } break;
+ case MIXER_CHANNEL_MASK: {
+ const audio_channel_mask_t mixerChannelMask =
+ static_cast<audio_channel_mask_t>(valueInt);
+ if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
+ ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+ invalidate();
+ }
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
+ }
+ break;
+
+ case RESAMPLE:
+ switch (param) {
+ case SAMPLE_RATE:
+ ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
+ if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
+ ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
+ uint32_t(valueInt));
+ invalidate();
+ }
+ break;
+ case RESET:
+ track->resetResampler();
+ invalidate();
+ break;
+ case REMOVE:
+ track->mResampler.reset(nullptr);
+ track->sampleRate = mSampleRate;
+ invalidate();
+ break;
+ default:
+ LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
+ }
+ break;
+
+ case RAMP_VOLUME:
+ case VOLUME:
+ switch (param) {
+ case AUXLEVEL:
+ if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+ target == RAMP_VOLUME ? mFrameCount : 0,
+ &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
+ &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
+ ALOGV("setParameter(%s, AUXLEVEL: %04x)",
+ target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
+ invalidate();
+ }
+ break;
+ default:
+ if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+ if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+ target == RAMP_VOLUME ? mFrameCount : 0,
+ &track->volume[param - VOLUME0],
+ &track->prevVolume[param - VOLUME0],
+ &track->volumeInc[param - VOLUME0],
+ &track->mVolume[param - VOLUME0],
+ &track->mPrevVolume[param - VOLUME0],
+ &track->mVolumeInc[param - VOLUME0])) {
+ ALOGV("setParameter(%s, VOLUME%d: %04x)",
+ target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+ track->volume[param - VOLUME0]);
+ invalidate();
+ }
+ } else {
+ LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+ }
+ }
+ break;
+
+ default:
+ LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
+ }
+}
+
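+// Hedged caller-side sketch of the parameter API above (illustrative only;
+// it assumes the public TRACK/MAIN_BUFFER/VOLUME/VOLUME0 enums and the
+// setBufferProvider()/enable()/process() methods declared in
+// AudioMixerBase.h; the track name and buffers are placeholders):
+static __unused void exampleTrackSetup(AudioMixerBase& mixer, int name,
+        AudioBufferProvider* provider, int32_t* mainBuffer)
+{
+    mixer.create(name, AUDIO_CHANNEL_OUT_STEREO, AUDIO_FORMAT_PCM_16_BIT, AUDIO_SESSION_NONE);
+    mixer.setBufferProvider(name, provider);
+    mixer.setParameter(name, AudioMixerBase::TRACK, AudioMixerBase::MAIN_BUFFER, mainBuffer);
+    float unity = 1.0f;  // applied immediately since the target is VOLUME, not RAMP_VOLUME
+    mixer.setParameter(name, AudioMixerBase::VOLUME, AudioMixerBase::VOLUME0, &unity);
+    mixer.enable(name);
+    mixer.process();     // mixes one frame count's worth of data into mainBuffer
+}
+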
+bool AudioMixerBase::TrackBase::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
+{
+ if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
+ if (sampleRate != trackSampleRate) {
+ sampleRate = trackSampleRate;
+ if (mResampler.get() == nullptr) {
+ ALOGV("Creating resampler from track %d Hz to device %d Hz",
+ trackSampleRate, devSampleRate);
+ AudioResampler::src_quality quality;
+ // force lowest quality level resampler if use case isn't music or video
+ // FIXME this is flawed for dynamic sample rates, as we choose the resampler
+ // quality level based on the initial ratio, but that could change later.
+ // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
+ if (isMusicRate(trackSampleRate)) {
+ quality = AudioResampler::DEFAULT_QUALITY;
+ } else {
+ quality = AudioResampler::DYN_LOW_QUALITY;
+ }
+
+ // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+ // but if none exists, it is the channel count (1 for mono).
+ const int resamplerChannelCount = getOutputChannelCount();
+ ALOGVV("Creating resampler:"
+ " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+ mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
+ mResampler.reset(AudioResampler::create(
+ mMixerInFormat,
+ resamplerChannelCount,
+ devSampleRate, quality));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Checks to see if the volume ramp has completed and clears the increment
+ * variables appropriately.
+ *
+ * FIXME: There is code to handle int/float ramp variable switchover should it not
+ * complete within a mixer buffer processing call, but it is preferred to avoid switchover
+ * due to precision issues. The switchover code is included for legacy code purposes
+ * and can be removed once the integer volume is removed.
+ *
+ * It is not sufficient to clear only the volumeInc integer variable because
+ * if one channel requires ramping, all channels are ramped.
+ *
+ * There is a bit of duplicated code here, but it keeps backward compatibility.
+ */
+void AudioMixerBase::TrackBase::adjustVolumeRamp(bool aux, bool useFloat)
+{
+ if (useFloat) {
+ for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+ if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
+ (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
+ volumeInc[i] = 0;
+ prevVolume[i] = volume[i] << 16;
+ mVolumeInc[i] = 0.;
+ mPrevVolume[i] = mVolume[i];
+ } else {
+ //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
+ prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
+ }
+ }
+ } else {
+ for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+ if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
+ ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
+ volumeInc[i] = 0;
+ prevVolume[i] = volume[i] << 16;
+ mVolumeInc[i] = 0.;
+ mPrevVolume[i] = mVolume[i];
+ } else {
+ //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
+ mPrevVolume[i] = float_from_u4_28(prevVolume[i]);
+ }
+ }
+ }
+
+ if (aux) {
+#ifdef FLOAT_AUX
+ if (useFloat) {
+ if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
+ (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
+ auxInc = 0;
+ prevAuxLevel = auxLevel << 16;
+ mAuxInc = 0.f;
+ mPrevAuxLevel = mAuxLevel;
+ }
+ } else
+#endif
+ if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
+ (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
+ auxInc = 0;
+ prevAuxLevel = auxLevel << 16;
+ mAuxInc = 0.f;
+ mPrevAuxLevel = mAuxLevel;
+ }
+ }
+}
+
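+// Minimal sketch of the integer termination check above (the ramp length and
+// starting volume are assumed values): starting from 0 with an increment of
+// (UNITY_GAIN_INT << 16) / 4, the U4.28 previous volume crosses the U4.12
+// target on the fourth step, at which point the increment is cleared and the
+// previous volume snaps to target << 16.
+static __unused void exampleRampTermination()
+{
+    const int32_t target = AudioMixerBase::UNITY_GAIN_INT;  // U4.12
+    int32_t prev = 0;                                        // U4.28
+    int32_t inc = (target << 16) / 4;
+    while (inc != 0) {
+        if (((prev + inc) >> 16) >= target) {  // same comparison as the integer branch
+            inc = 0;
+            prev = target << 16;
+        } else {
+            prev += inc;
+        }
+    }
+    // here: inc == 0 and prev == target << 16
+}
+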
+void AudioMixerBase::TrackBase::recreateResampler(uint32_t devSampleRate)
+{
+ if (mResampler.get() != nullptr) {
+ const uint32_t resetToSampleRate = sampleRate;
+ mResampler.reset(nullptr);
+ sampleRate = devSampleRate; // without resampler, track rate is device sample rate.
+ // recreate the resampler with updated format, channels, saved sampleRate.
+ setResampler(resetToSampleRate /*trackSampleRate*/, devSampleRate);
+ }
+}
+
+size_t AudioMixerBase::getUnreleasedFrames(int name) const
+{
+ const auto it = mTracks.find(name);
+ if (it != mTracks.end()) {
+ return it->second->getUnreleasedFrames();
+ }
+ return 0;
+}
+
+std::string AudioMixerBase::trackNames() const
+{
+ std::stringstream ss;
+ for (const auto &pair : mTracks) {
+ ss << pair.first << " ";
+ }
+ return ss.str();
+}
+
+void AudioMixerBase::process__validate()
+{
+    // TODO: fix all16BitsStereoNoResample logic to
+ // either properly handle muted tracks (it should ignore them)
+ // or remove altogether as an obsolete optimization.
+ bool all16BitsStereoNoResample = true;
+ bool resampling = false;
+ bool volumeRamp = false;
+
+ mEnabled.clear();
+ mGroups.clear();
+ for (const auto &pair : mTracks) {
+ const int name = pair.first;
+ const std::shared_ptr<TrackBase> &t = pair.second;
+ if (!t->enabled) continue;
+
+ mEnabled.emplace_back(name); // we add to mEnabled in order of name.
+ mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
+
+ uint32_t n = 0;
+ // FIXME can overflow (mask is only 3 bits)
+ n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
+ if (t->doesResample()) {
+ n |= NEEDS_RESAMPLE;
+ }
+ if (t->auxLevel != 0 && t->auxBuffer != NULL) {
+ n |= NEEDS_AUX;
+ }
+
+ if (t->volumeInc[0]|t->volumeInc[1]) {
+ volumeRamp = true;
+ } else if (!t->doesResample() && t->volumeRL == 0) {
+ n |= NEEDS_MUTE;
+ }
+ t->needs = n;
+
+ if (n & NEEDS_MUTE) {
+ t->hook = &TrackBase::track__nop;
+ } else {
+ if (n & NEEDS_AUX) {
+ all16BitsStereoNoResample = false;
+ }
+ if (n & NEEDS_RESAMPLE) {
+ all16BitsStereoNoResample = false;
+ resampling = true;
+ t->hook = TrackBase::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
+ ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+ "Track %d needs downmix + resample", name);
+ } else {
+ if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
+ t->hook = TrackBase::getTrackHook(
+ (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
+ && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
+ ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+ t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
+ all16BitsStereoNoResample = false;
+ }
+ if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
+ t->hook = TrackBase::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
+ ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+ "Track %d needs downmix", name);
+ }
+ }
+ }
+ }
+
+ // select the processing hooks
+ mHook = &AudioMixerBase::process__nop;
+ if (mEnabled.size() > 0) {
+ if (resampling) {
+ if (mOutputTemp.get() == nullptr) {
+ mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+ }
+ if (mResampleTemp.get() == nullptr) {
+ mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+ }
+ mHook = &AudioMixerBase::process__genericResampling;
+ } else {
+ // we keep temp arrays around.
+ mHook = &AudioMixerBase::process__genericNoResampling;
+ if (all16BitsStereoNoResample && !volumeRamp) {
+ if (mEnabled.size() == 1) {
+ const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+ if ((t->needs & NEEDS_MUTE) == 0) {
+ // The check prevents a muted track from acquiring a process hook.
+ //
+ // This is dangerous if the track is MONO as that requires
+ // special case handling due to implicit channel duplication.
+ // Stereo or Multichannel should actually be fine here.
+ mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+ }
+ }
+ }
+ }
+ }
+
+ ALOGV("mixer configuration change: %zu "
+ "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
+ mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
+
+ process();
+
+ // Now that the volume ramp has been done, set optimal state and
+ // track hooks for subsequent mixer process
+ if (mEnabled.size() > 0) {
+ bool allMuted = true;
+
+ for (const int name : mEnabled) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ if (!t->doesResample() && t->volumeRL == 0) {
+ t->needs |= NEEDS_MUTE;
+ t->hook = &TrackBase::track__nop;
+ } else {
+ allMuted = false;
+ }
+ }
+ if (allMuted) {
+ mHook = &AudioMixerBase::process__nop;
+ } else if (all16BitsStereoNoResample) {
+ if (mEnabled.size() == 1) {
+ //const int i = 31 - __builtin_clz(enabledTracks);
+ const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+ // Muted single tracks handled by allMuted above.
+ mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+ }
+ }
+ }
+}
+
+void AudioMixerBase::TrackBase::track__genericResample(
+ int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+{
+ ALOGVV("track__genericResample\n");
+ mResampler->setSampleRate(sampleRate);
+
+ // ramp gain - resample to temp buffer and scale/mix in 2nd step
+ if (aux != NULL) {
+ // always resample with unity gain when sending to auxiliary buffer to be able
+ // to apply send level after resampling
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
+ mResampler->resample(temp, outFrameCount, bufferProvider);
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ volumeRampStereo(out, outFrameCount, temp, aux);
+ } else {
+ volumeStereo(out, outFrameCount, temp, aux);
+ }
+ } else {
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+ mResampler->resample(temp, outFrameCount, bufferProvider);
+ volumeRampStereo(out, outFrameCount, temp, aux);
+ }
+
+ // constant gain
+ else {
+ mResampler->setVolume(mVolume[0], mVolume[1]);
+ mResampler->resample(out, outFrameCount, bufferProvider);
+ }
+ }
+}
+
+void AudioMixerBase::TrackBase::track__nop(int32_t* out __unused,
+ size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
+{
+}
+
+void AudioMixerBase::TrackBase::volumeRampStereo(
+ int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+
+ //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ // ramp volume
+ if (CC_UNLIKELY(aux != NULL)) {
+ int32_t va = prevAuxLevel;
+ const int32_t vaInc = auxInc;
+ int32_t l;
+ int32_t r;
+
+ do {
+ l = (*temp++ >> 12);
+ r = (*temp++ >> 12);
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * r;
+ *aux++ += (va >> 17) * (l + r);
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+ prevAuxLevel = va;
+ } else {
+ do {
+ *out++ += (vl >> 16) * (*temp++ >> 12);
+ *out++ += (vr >> 16) * (*temp++ >> 12);
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+ }
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(aux != NULL);
+}
+
+void AudioMixerBase::TrackBase::volumeStereo(
+ int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ const int16_t va = auxLevel;
+ do {
+ int16_t l = (int16_t)(*temp++ >> 12);
+ int16_t r = (int16_t)(*temp++ >> 12);
+ out[0] = mulAdd(l, vl, out[0]);
+ int16_t a = (int16_t)(((int32_t)l + r) >> 1);
+ out[1] = mulAdd(r, vr, out[1]);
+ out += 2;
+ aux[0] = mulAdd(a, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ } else {
+ do {
+ int16_t l = (int16_t)(*temp++ >> 12);
+ int16_t r = (int16_t)(*temp++ >> 12);
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(r, vr, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+}
+
+void AudioMixerBase::TrackBase::track__16BitsStereo(
+ int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+ ALOGVV("track__16BitsStereo\n");
+ const int16_t *in = static_cast<const int16_t *>(mIn);
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ int32_t l;
+ int32_t r;
+ // ramp gain
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ int32_t va = prevAuxLevel;
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+ const int32_t vaInc = auxInc;
+ // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ l = (int32_t)*in++;
+ r = (int32_t)*in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * r;
+ *aux++ += (va >> 17) * (l + r);
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ prevAuxLevel = va;
+ adjustVolumeRamp(true);
+ }
+
+ // constant gain
+ else {
+ const uint32_t vrl = volumeRL;
+ const int16_t va = (int16_t)auxLevel;
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
+ in += 2;
+ out[0] = mulAddRL(1, rl, vrl, out[0]);
+ out[1] = mulAddRL(0, rl, vrl, out[1]);
+ out += 2;
+ aux[0] = mulAdd(a, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ }
+ } else {
+ // ramp gain
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+
+ // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ *out++ += (vl >> 16) * (int32_t) *in++;
+ *out++ += (vr >> 16) * (int32_t) *in++;
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(false);
+ }
+
+ // constant gain
+ else {
+ const uint32_t vrl = volumeRL;
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ out[0] = mulAddRL(1, rl, vrl, out[0]);
+ out[1] = mulAddRL(0, rl, vrl, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+ }
+ mIn = in;
+}
+
+void AudioMixerBase::TrackBase::track__16BitsMono(
+ int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+ ALOGVV("track__16BitsMono\n");
+ const int16_t *in = static_cast<int16_t const *>(mIn);
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ // ramp gain
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ int32_t va = prevAuxLevel;
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+ const int32_t vaInc = auxInc;
+
+ // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ int32_t l = *in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * l;
+ *aux++ += (va >> 16) * l;
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ prevAuxLevel = va;
+ adjustVolumeRamp(true);
+ }
+ // constant gain
+ else {
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
+ const int16_t va = (int16_t)auxLevel;
+ do {
+ int16_t l = *in++;
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(l, vr, out[1]);
+ out += 2;
+ aux[0] = mulAdd(l, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ }
+ } else {
+ // ramp gain
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+
+ // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ int32_t l = *in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * l;
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(false);
+ }
+ // constant gain
+ else {
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
+ do {
+ int16_t l = *in++;
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(l, vr, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+ }
+ mIn = in;
+}
+
+// no-op case
+void AudioMixerBase::process__nop()
+{
+ ALOGVV("process__nop\n");
+
+ for (const auto &pair : mGroups) {
+ // process by group of tracks with same output buffer to
+ // avoid multiple memset() on same buffer
+ const auto &group = pair.second;
+
+ const std::shared_ptr<TrackBase> &t = mTracks[group[0]];
+ memset(t->mainBuffer, 0,
+ mFrameCount * audio_bytes_per_frame(t->getMixerChannelCount(), t->mMixerFormat));
+
+ // now consume data
+ for (const int name : group) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ size_t outFrames = mFrameCount;
+ while (outFrames) {
+ t->buffer.frameCount = outFrames;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ if (t->buffer.raw == NULL) break;
+ outFrames -= t->buffer.frameCount;
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ }
+ }
+ }
+}
+
+// generic code without resampling
+void AudioMixerBase::process__genericNoResampling()
+{
+ ALOGVV("process__genericNoResampling\n");
+ int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
+
+ for (const auto &pair : mGroups) {
+ // process by group of tracks with same output main buffer to
+ // avoid multiple memset() on same buffer
+ const auto &group = pair.second;
+
+ // acquire buffer
+ for (const int name : group) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ t->buffer.frameCount = mFrameCount;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->frameCount = t->buffer.frameCount;
+ t->mIn = t->buffer.raw;
+ }
+
+ int32_t *out = (int *)pair.first;
+ size_t numFrames = 0;
+ do {
+ const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
+ memset(outTemp, 0, sizeof(outTemp));
+ for (const int name : group) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ int32_t *aux = NULL;
+ if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+ aux = t->auxBuffer + numFrames;
+ }
+ for (int outFrames = frameCount; outFrames > 0; ) {
+ // t->in == nullptr can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (t->mIn == nullptr) {
+ break;
+ }
+ size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
+ if (inFrames > 0) {
+ (t.get()->*t->hook)(
+ outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
+ inFrames, mResampleTemp.get() /* naked ptr */, aux);
+ t->frameCount -= inFrames;
+ outFrames -= inFrames;
+ if (CC_UNLIKELY(aux != NULL)) {
+ aux += inFrames;
+ }
+ }
+ if (t->frameCount == 0 && outFrames) {
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ t->buffer.frameCount = (mFrameCount - numFrames) -
+ (frameCount - outFrames);
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->mIn = t->buffer.raw;
+ if (t->mIn == nullptr) {
+ break;
+ }
+ t->frameCount = t->buffer.frameCount;
+ }
+ }
+ }
+
+ const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+ convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
+ frameCount * t1->mMixerChannelCount);
+ // TODO: fix ugly casting due to choice of out pointer type
+ out = reinterpret_cast<int32_t*>((uint8_t*)out
+ + frameCount * t1->mMixerChannelCount
+ * audio_bytes_per_sample(t1->mMixerFormat));
+ numFrames += frameCount;
+ } while (numFrames < mFrameCount);
+
+ // release each track's buffer
+ for (const int name : group) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ }
+ }
+}
+
+// generic code with resampling
+void AudioMixerBase::process__genericResampling()
+{
+ ALOGVV("process__genericResampling\n");
+ int32_t * const outTemp = mOutputTemp.get(); // naked ptr
+ size_t numFrames = mFrameCount;
+
+ for (const auto &pair : mGroups) {
+ const auto &group = pair.second;
+ const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+
+ // clear temp buffer
+ memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
+ for (const int name : group) {
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+ int32_t *aux = NULL;
+ if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+ aux = t->auxBuffer;
+ }
+
+ // this is a little goofy, on the resampling case we don't
+ // acquire/release the buffers because it's done by
+ // the resampler.
+ if (t->needs & NEEDS_RESAMPLE) {
+ (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
+ } else {
+
+ size_t outFrames = 0;
+
+ while (outFrames < numFrames) {
+ t->buffer.frameCount = numFrames - outFrames;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->mIn = t->buffer.raw;
+ // t->mIn == nullptr can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (t->mIn == nullptr) break;
+
+ (t.get()->*t->hook)(
+ outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
+ mResampleTemp.get() /* naked ptr */,
+ aux != nullptr ? aux + outFrames : nullptr);
+ outFrames += t->buffer.frameCount;
+
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ }
+ }
+ }
+ convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
+ outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
+ }
+}
+
+// one track, 16 bits stereo without resampling is the most common case
+void AudioMixerBase::process__oneTrack16BitsStereoNoResampling()
+{
+ ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
+    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+            "%zu != 1 tracks enabled", mEnabled.size());
+ const int name = mEnabled[0];
+ const std::shared_ptr<TrackBase> &t = mTracks[name];
+
+ AudioBufferProvider::Buffer& b(t->buffer);
+
+ int32_t* out = t->mainBuffer;
+ float *fout = reinterpret_cast<float*>(out);
+ size_t numFrames = mFrameCount;
+
+ const int16_t vl = t->volume[0];
+ const int16_t vr = t->volume[1];
+ const uint32_t vrl = t->volumeRL;
+ while (numFrames) {
+ b.frameCount = numFrames;
+ t->bufferProvider->getNextBuffer(&b);
+ const int16_t *in = b.i16;
+
+ // in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (in == NULL || (((uintptr_t)in) & 3)) {
+ if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
+ memset((char*)fout, 0, numFrames
+ * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+ } else {
+ memset((char*)out, 0, numFrames
+ * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+ }
+ ALOGE_IF((((uintptr_t)in) & 3),
+ "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
+ " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+ in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
+ return;
+ }
+ size_t outFrames = b.frameCount;
+
+ switch (t->mMixerFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl);
+ int32_t r = mulRL(0, rl, vrl);
+ *fout++ = float_from_q4_27(l);
+ *fout++ = float_from_q4_27(r);
+ // Note: In case of later int16_t sink output,
+ // conversion and clamping is done by memcpy_to_i16_from_float().
+ } while (--outFrames);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
+ // volume is boosted, so we might need to clamp even though
+ // we process only one track.
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl) >> 12;
+ int32_t r = mulRL(0, rl, vrl) >> 12;
+ // clamping...
+ l = clamp16(l);
+ r = clamp16(r);
+ *out++ = (r<<16) | (l & 0xFFFF);
+ } while (--outFrames);
+ } else {
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl) >> 12;
+ int32_t r = mulRL(0, rl, vrl) >> 12;
+ *out++ = (r<<16) | (l & 0xFFFF);
+ } while (--outFrames);
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
+ }
+ numFrames -= b.frameCount;
+ t->bufferProvider->releaseBuffer(&b);
+ }
+}
+
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Needs to derive a compile time constant (constexpr). Could be targeted to go
+// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+ (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
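+// Compile-time spot checks of the mapping above (illustrative; they rely only
+// on the MIXTYPE_* enumerators from AudioMixerOps.h): the two multi-channel
+// mix types are redirected to their MONOVOL variants and everything else
+// passes through unchanged.
+static_assert(MIXTYPE_MONOVOL(MIXTYPE_MULTI) == MIXTYPE_MULTI_MONOVOL,
+        "MIXTYPE_MULTI must map to its MONOVOL variant");
+static_assert(MIXTYPE_MONOVOL(MIXTYPE_MULTI_SAVEONLY) == MIXTYPE_MULTI_SAVEONLY_MONOVOL,
+        "MIXTYPE_MULTI_SAVEONLY must map to its MONOVOL variant");
+static_assert(MIXTYPE_MONOVOL(MIXTYPE_MONOEXPAND) == MIXTYPE_MONOEXPAND,
+        "non-multi mix types must pass through unchanged");
+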
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+ typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+ const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+ switch (channels) {
+ case 1:
+ volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 2:
+ volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 3:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 4:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 5:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 6:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 7:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 8:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ }
+}
+
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+ typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+ const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+ switch (channels) {
+ case 1:
+ volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 2:
+ volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 3:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 4:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 5:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 6:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 7:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 8:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+ break;
+ }
+}
+
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL  (set to true if volume ramp parameters need adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+ typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::volumeMix(TO *out, size_t outFrames,
+ const TI *in, TA *aux, bool ramp)
+{
+ if (USEFLOATVOL) {
+ if (ramp) {
+ volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ mPrevVolume, mVolumeInc,
+#ifdef FLOAT_AUX
+ &mPrevAuxLevel, mAuxInc
+#else
+ &prevAuxLevel, auxInc
+#endif
+ );
+ if (ADJUSTVOL) {
+ adjustVolumeRamp(aux != NULL, true);
+ }
+ } else {
+ volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ mVolume,
+#ifdef FLOAT_AUX
+ mAuxLevel
+#else
+ auxLevel
+#endif
+ );
+ }
+ } else {
+ if (ramp) {
+ volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ prevVolume, volumeInc, &prevAuxLevel, auxInc);
+ if (ADJUSTVOL) {
+ adjustVolumeRamp(aux != NULL);
+ }
+ } else {
+ volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ volume, auxLevel);
+ }
+ }
+}
+
+/* This process hook is called when there is a single track without
+ * aux buffer, volume ramp, or resampling.
+ * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::process__noResampleOneTrack()
+{
+ ALOGVV("process__noResampleOneTrack\n");
+ LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+ "%zu != 1 tracks enabled", mEnabled.size());
+ const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+ const uint32_t channels = t->mMixerChannelCount;
+ TO* out = reinterpret_cast<TO*>(t->mainBuffer);
+ TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
+ const bool ramp = t->needsRamp();
+
+ for (size_t numFrames = mFrameCount; numFrames > 0; ) {
+ AudioBufferProvider::Buffer& b(t->buffer);
+ // get input buffer
+ b.frameCount = numFrames;
+ t->bufferProvider->getNextBuffer(&b);
+ const TI *in = reinterpret_cast<TI*>(b.raw);
+
+ // in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (in == NULL || (((uintptr_t)in) & 3)) {
+ memset(out, 0, numFrames
+ * channels * audio_bytes_per_sample(t->mMixerFormat));
+ ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
+ "buffer %p track %p, channels %d, needs %#x",
+ in, &t, t->channelCount, t->needs);
+ return;
+ }
+
+ const size_t outFrames = b.frameCount;
+ t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
+ out, outFrames, in, aux, ramp);
+
+ out += outFrames * channels;
+ if (aux != NULL) {
+ aux += outFrames;
+ }
+ numFrames -= b.frameCount;
+
+ // release buffer
+ t->bufferProvider->releaseBuffer(&b);
+ }
+ if (ramp) {
+ t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
+ }
+}
+
+/* This track hook is called to do resampling then mixing,
+ * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
+{
+ ALOGVV("track__Resample\n");
+ mResampler->setSampleRate(sampleRate);
+ const bool ramp = needsRamp();
+ if (ramp || aux != NULL) {
+ // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
+ // if aux != NULL: resample with unity gain to temp buffer then apply send level.
+
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
+ mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
+
+ volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+ out, outFrameCount, temp, aux, ramp);
+
+ } else { // constant volume gain
+ mResampler->setVolume(mVolume[0], mVolume[1]);
+ mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
+ }
+}
+
+/* This track hook is called to mix a track, when no resampling is required.
+ * The input buffer should be present in mIn.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__NoResample(
+ TO* out, size_t frameCount, TO* temp __unused, TA* aux)
+{
+ ALOGVV("track__NoResample\n");
+ const TI *in = static_cast<const TI *>(mIn);
+
+ volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+ out, frameCount, in, aux, needsRamp());
+
+ // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
+ // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
+ in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
+ mIn = in;
+}
+
+/* The Mixer engine generates either int32_t (Q4_27) or float data.
+ * We use this function to convert the engine buffers
+ * to the desired mixer output format, either int16_t (Q.15) or float.
+ */
+/* static */
+void AudioMixerBase::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+ void *in, audio_format_t mixerInFormat, size_t sampleCount)
+{
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+}
+
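+// Worked single-sample example of the Q4.27 conversions used above (the
+// sample value is an assumption chosen for illustration): +1.0 in Q4.27 is
+// 1 << 27, which converts to 1.0f in float and to 0x7FFF in int16_t Q0.15
+// (shifted down by 12 bits, then clamped) via the audio_utils primitives.
+static __unused void exampleQ4_27Conversion()
+{
+    const int32_t q4_27 = 1 << 27;                    // +1.0 in Q4.27
+    float asFloat;
+    int16_t asI16;
+    memcpy_to_float_from_q4_27(&asFloat, &q4_27, 1);  // asFloat == 1.0f
+    memcpy_to_i16_from_q4_27(&asI16, &q4_27, 1);      // asI16 == 32767 after clamping
+    (void)asFloat;
+    (void)asI16;
+}
+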
+/* Returns the proper track hook to use for mixing the track into the output buffer.
+ */
+/* static */
+AudioMixerBase::hook_t AudioMixerBase::TrackBase::getTrackHook(int trackType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
+{
+ if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ switch (trackType) {
+ case TRACKTYPE_NOP:
+ return &TrackBase::track__nop;
+ case TRACKTYPE_RESAMPLE:
+ return &TrackBase::track__genericResample;
+ case TRACKTYPE_NORESAMPLEMONO:
+ return &TrackBase::track__16BitsMono;
+ case TRACKTYPE_NORESAMPLE:
+ return &TrackBase::track__16BitsStereo;
+ default:
+ LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+ break;
+ }
+ }
+ LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+ switch (trackType) {
+ case TRACKTYPE_NOP:
+ return &TrackBase::track__nop;
+ case TRACKTYPE_RESAMPLE:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+ MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+ MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ case TRACKTYPE_NORESAMPLEMONO:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+ MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+ MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ case TRACKTYPE_NORESAMPLE:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+ MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+ MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+ break;
+ }
+ return NULL;
+}
+
+/* Returns the proper process hook for mixing tracks. Currently works only for
+ * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO. This should be
+ * prevented by the caller.
+ */
+/* static */
+AudioMixerBase::process_hook_t AudioMixerBase::getProcessHook(
+ int processType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
+{
+ if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
+ LOG_ALWAYS_FATAL("bad processType: %d", processType);
+ return NULL;
+ }
+ if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ return &AudioMixerBase::process__oneTrack16BitsStereoNoResampling;
+ }
+ LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return &AudioMixerBase::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return &AudioMixerBase::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return &AudioMixerBase::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return &AudioMixerBase::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ return NULL;
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
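The 16-bit branches of convertMixerFormat() above lean on the memcpy_to_i16_from_q4_27() / memcpy_to_float_from_q4_27() primitives from libaudioutils. As a rough, hypothetical scalar sketch of what the Q4.27-to-Q0.15 step does (the real routines may be vectorized or otherwise optimized), one accumulator sample would be converted like this:

#include <cstdint>

// Sketch only: saturate a Q4.27 mixer accumulator sample to the int16_t range,
// then drop the 12 extra fractional bits (27 - 15 = 12).
static inline int16_t q4_27_to_i16(int32_t sample) {
    constexpr int32_t kMax = 32767 << 12;     // largest int16_t, expressed in Q4.27
    constexpr int32_t kMin = -(32768 << 12);  // smallest int16_t, expressed in Q4.27
    if (sample > kMax) sample = kMax;
    if (sample < kMin) sample = kMin;
    return static_cast<int16_t>(sample >> 12);
}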
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
new file mode 100644
index 0000000..3f7cd48
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -0,0 +1,238 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_H
+#define ANDROID_AUDIO_MIXER_H
+
+#include <pthread.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/os/IExternalVibratorService.h>
+#include <media/AudioMixerBase.h>
+#include <media/BufferProviders.h>
+#include <utils/threads.h>
+
+// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
+#define MAX_GAIN_INT AudioMixerBase::UNITY_GAIN_INT
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixer extends AudioMixerBase by adding support for down- and up-mixing
+// and time stretch that are implemented via Effects HAL, and adding support
+// for haptic channels which depends on Vibrator service. This is the version
+// that is used by Audioflinger.
+
+class AudioMixer : public AudioMixerBase
+{
+public:
+ // maximum number of channels supported for the content
+ static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
+
+ enum { // extension of AudioMixerBase parameters
+ DOWNMIX_TYPE = 0x4004,
+ // for haptic
+ HAPTIC_ENABLED = 0x4007, // Set whether haptic data from this track should be played or not.
+ HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+ // for target TIMESTRETCH
+ PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
+ // parameter 'value' is a pointer to the new playback rate.
+ };
+
+ typedef enum { // Haptic intensity, should be kept consistent with VibratorService
+ HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
+ HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
+ HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
+ HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
+ HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
+ HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
+ } haptic_intensity_t;
+ static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
+ static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
+ static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
+
+ static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
+ switch (hapticIntensity) {
+ case HAPTIC_SCALE_MUTE:
+ case HAPTIC_SCALE_VERY_LOW:
+ case HAPTIC_SCALE_LOW:
+ case HAPTIC_SCALE_NONE:
+ case HAPTIC_SCALE_HIGH:
+ case HAPTIC_SCALE_VERY_HIGH:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ AudioMixer(size_t frameCount, uint32_t sampleRate)
+ : AudioMixerBase(frameCount, sampleRate) {
+ pthread_once(&sOnceControl, &sInitRoutine);
+ }
+
+ bool isValidChannelMask(audio_channel_mask_t channelMask) const override;
+
+ void setParameter(int name, int target, int param, void *value) override;
+ void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
+
+private:
+
+ struct Track : public TrackBase {
+ Track() : TrackBase() {}
+
+ ~Track()
+ {
+ // mInputBufferProvider need not be deleted.
+ // Ensure the order of destruction of buffer providers as they
+ // release the upstream provider in the destructor.
+ mTimestretchBufferProvider.reset(nullptr);
+ mPostDownmixReformatBufferProvider.reset(nullptr);
+ mDownmixerBufferProvider.reset(nullptr);
+ mReformatBufferProvider.reset(nullptr);
+ mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
+ mAdjustChannelsBufferProvider.reset(nullptr);
+ }
+
+ uint32_t getOutputChannelCount() override {
+ return mDownmixerBufferProvider.get() != nullptr ? mMixerChannelCount : channelCount;
+ }
+ uint32_t getMixerChannelCount() override {
+ return mMixerChannelCount + mMixerHapticChannelCount;
+ }
+
+ status_t prepareForDownmix();
+ void unprepareForDownmix();
+ status_t prepareForReformat();
+ void unprepareForReformat();
+ status_t prepareForAdjustChannels();
+ void unprepareForAdjustChannels();
+ status_t prepareForAdjustChannelsNonDestructive(size_t frames);
+ void unprepareForAdjustChannelsNonDestructive();
+ void clearContractedBuffer();
+ bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
+ void reconfigureBufferProviders();
+
+ /* Buffer providers are constructed to translate the track input data as needed.
+ * See DownmixerBufferProvider below for how the Track buffer provider
+ * is wrapped by another one when downmixing is required.
+ *
+ * TODO: perhaps make a single PlaybackConverterProvider class to move
+ * all pre-mixer track buffer conversions outside the AudioMixer class.
+ *
+ * 1) mInputBufferProvider: The AudioTrack buffer provider.
+ * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+ * channel format to another. Expanded channels are filled with zeros and put at the end
+ * of each audio frame. Contracted channels are copied to the end of the buffer.
+ * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contracts sample data.
+ * This is currently used in audio-haptic coupled playback to separate audio and haptic
+ * data. Contracted channels may be written to a given buffer.
+ * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+ * requires reformat. For example, it may convert floating point input to
+ * PCM_16_bit if that's required by the downmixer.
+ * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * the number of channels required by the mixer sink.
+ * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * the downmixer requirements to the mixer engine input requirements.
+ * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+ */
+ AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
+ // TODO: combine mAdjustChannelsBufferProvider and
+ // mContractChannelsNonDestructiveBufferProvider
+ std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
+
+ audio_format_t mDownmixRequiresFormat; // required downmixer format
+ // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+ // AUDIO_FORMAT_INVALID if no required format
+
+ AudioPlaybackRate mPlaybackRate;
+
+ // Haptic
+ bool mHapticPlaybackEnabled;
+ haptic_intensity_t mHapticIntensity;
+ audio_channel_mask_t mHapticChannelMask;
+ uint32_t mHapticChannelCount;
+ audio_channel_mask_t mMixerHapticChannelMask;
+ uint32_t mMixerHapticChannelCount;
+ uint32_t mAdjustInChannelCount;
+ uint32_t mAdjustOutChannelCount;
+ uint32_t mAdjustNonDestructiveInChannelCount;
+ uint32_t mAdjustNonDestructiveOutChannelCount;
+ bool mKeepContractedChannels;
+
+ float getHapticScaleGamma() const {
+ // Needs to be kept consistent with the values in VibratorService.
+ switch (mHapticIntensity) {
+ case HAPTIC_SCALE_VERY_LOW:
+ return 2.0f;
+ case HAPTIC_SCALE_LOW:
+ return 1.5f;
+ case HAPTIC_SCALE_HIGH:
+ return 0.5f;
+ case HAPTIC_SCALE_VERY_HIGH:
+ return 0.25f;
+ default:
+ return 1.0f;
+ }
+ }
+
+ float getHapticMaxAmplitudeRatio() const {
+ // Needs to be kept consistent with the values in VibratorService.
+ switch (mHapticIntensity) {
+ case HAPTIC_SCALE_VERY_LOW:
+ return HAPTIC_SCALE_VERY_LOW_RATIO;
+ case HAPTIC_SCALE_LOW:
+ return HAPTIC_SCALE_LOW_RATIO;
+ case HAPTIC_SCALE_NONE:
+ case HAPTIC_SCALE_HIGH:
+ case HAPTIC_SCALE_VERY_HIGH:
+ return 1.0f;
+ default:
+ return 0.0f;
+ }
+ }
+ };
+
+ inline std::shared_ptr<Track> getTrack(int name) {
+ return std::static_pointer_cast<Track>(mTracks[name]);
+ }
+
+ std::shared_ptr<TrackBase> preCreateTrack() override;
+ status_t postCreateTrack(TrackBase *track) override;
+
+ void preProcess() override;
+ void postProcess() override;
+
+ bool setChannelMasks(int name,
+ audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) override;
+
+ static void sInitRoutine();
+
+ static pthread_once_t sOnceControl; // initialized in constructor by first new
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_AUDIO_MIXER_H
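A small, hypothetical helper shows how the validity check above might be used when translating a raw intensity value (for example, one received from the vibrator service) into the mixer's haptic_intensity_t; the fallback to HAPTIC_SCALE_NONE is an assumption of this sketch, not part of the change:

#include <media/AudioMixer.h>

// Sketch: clamp an out-of-range intensity to HAPTIC_SCALE_NONE.
android::AudioMixer::haptic_intensity_t toHapticIntensity(int rawIntensity) {
    const auto intensity =
            static_cast<android::AudioMixer::haptic_intensity_t>(rawIntensity);
    return android::AudioMixer::isValidHapticIntensity(intensity)
            ? intensity
            : android::AudioMixer::HAPTIC_SCALE_NONE;
}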
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
new file mode 100644
index 0000000..805b6d0
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -0,0 +1,359 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_BASE_H
+#define ANDROID_AUDIO_MIXER_BASE_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+#include <media/AudioResamplerPublic.h>
+#include <system/audio.h>
+#include <utils/Compat.h>
+
+// This must match frameworks/av/services/audioflinger/Configuration.h
+// when used with the Audio Framework.
+#define FLOAT_AUX
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixerBase is functional on its own if only mixing and resampling
+// is needed.
+
+class AudioMixerBase
+{
+public:
+ // Do not change these unless underlying code changes.
+ // This mixer has a hard-coded upper limit of 8 channels for output.
+ static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+ static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
+
+ static const uint16_t UNITY_GAIN_INT = 0x1000;
+ static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
+
+ enum { // names
+ // setParameter targets
+ TRACK = 0x3000,
+ RESAMPLE = 0x3001,
+ RAMP_VOLUME = 0x3002, // ramp to new volume
+ VOLUME = 0x3003, // don't ramp
+ TIMESTRETCH = 0x3004,
+
+ // set Parameter names
+ // for target TRACK
+ CHANNEL_MASK = 0x4000,
+ FORMAT = 0x4001,
+ MAIN_BUFFER = 0x4002,
+ AUX_BUFFER = 0x4003,
+ // 0x4004 reserved
+ MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+ // for target RESAMPLE
+ SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
+ // parameter 'value' is the new sample rate in Hz.
+ // Only creates a sample rate converter the first time that
+ // the track sample rate is different from the mix sample rate.
+ // If the new sample rate is the same as the mix sample rate,
+ // and a sample rate converter already exists,
+ // then the sample rate converter remains present but is a no-op.
+ RESET = 0x4101, // Reset sample rate converter without changing sample rate.
+ // This clears out the resampler's input buffer.
+ REMOVE = 0x4102, // Remove the sample rate converter on this track name;
+ // the track is restored to the mix sample rate.
+ // for target RAMP_VOLUME and VOLUME (8 channels max)
+ // FIXME use float for these 3 to improve the dynamic range
+ VOLUME0 = 0x4200,
+ VOLUME1 = 0x4201,
+ AUXLEVEL = 0x4210,
+ };
+
+ AudioMixerBase(size_t frameCount, uint32_t sampleRate)
+ : mSampleRate(sampleRate)
+ , mFrameCount(frameCount) {
+ }
+
+ virtual ~AudioMixerBase() {}
+
+ virtual bool isValidFormat(audio_format_t format) const;
+ virtual bool isValidChannelMask(audio_channel_mask_t channelMask) const;
+
+ // Create a new track in the mixer.
+ //
+ // \param name a unique user-provided integer associated with the track.
+ // If name already exists, the function will abort.
+ // \param channelMask output channel mask.
+ // \param format PCM format
+ // \param sessionId Session id for the track. Tracks with the same
+ // session id will be submixed together.
+ //
+ // \return OK on success.
+ // BAD_VALUE if the format does not satisfy isValidFormat()
+ // or the channelMask does not satisfy isValidChannelMask().
+ status_t create(
+ int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
+
+ bool exists(int name) const {
+ return mTracks.count(name) > 0;
+ }
+
+ // Free an allocated track by name.
+ void destroy(int name);
+
+ // Enable or disable an allocated track by name
+ void enable(int name);
+ void disable(int name);
+
+ virtual void setParameter(int name, int target, int param, void *value);
+
+ void process() {
+ preProcess();
+ (this->*mHook)();
+ postProcess();
+ }
+
+ size_t getUnreleasedFrames(int name) const;
+
+ std::string trackNames() const;
+
+ protected:
+ // Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
+ // original code will be used for stereo sinks, the new mixer for everything else.
+ static constexpr bool kUseNewMixer = true;
+
+ // Set kUseFloat to true to allow floating input into the mixer engine.
+ // If kUseNewMixer is false, this is ignored or may be overridden internally
+ static constexpr bool kUseFloat = true;
+
+#ifdef FLOAT_AUX
+ using TYPE_AUX = float;
+ static_assert(kUseNewMixer && kUseFloat,
+ "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
+#else
+ using TYPE_AUX = int32_t; // q4.27
+#endif
+
+ /* For multi-format functions (calls template functions
+ * in AudioMixerOps.h). The template parameters are as follows:
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL (set to true if the volume ramp parameters need adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+
+ enum {
+ // FIXME this representation permits up to 8 channels
+ NEEDS_CHANNEL_COUNT__MASK = 0x00000007,
+ };
+
+ enum {
+ NEEDS_CHANNEL_1 = 0x00000000, // mono
+ NEEDS_CHANNEL_2 = 0x00000001, // stereo
+
+ // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
+
+ NEEDS_MUTE = 0x00000100,
+ NEEDS_RESAMPLE = 0x00001000,
+ NEEDS_AUX = 0x00010000,
+ };
+
+ // hook types
+ enum {
+ PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
+ };
+
+ enum {
+ TRACKTYPE_NOP,
+ TRACKTYPE_RESAMPLE,
+ TRACKTYPE_NORESAMPLE,
+ TRACKTYPE_NORESAMPLEMONO,
+ };
+
+ // process hook functionality
+ using process_hook_t = void(AudioMixerBase::*)();
+
+ struct TrackBase;
+ using hook_t = void(TrackBase::*)(
+ int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+
+ struct TrackBase {
+ TrackBase()
+ : bufferProvider(nullptr)
+ {
+ // TODO: move additional initialization here.
+ }
+ virtual ~TrackBase() {}
+
+ virtual uint32_t getOutputChannelCount() { return channelCount; }
+ virtual uint32_t getMixerChannelCount() { return mMixerChannelCount; }
+
+ bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
+ bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
+ bool doesResample() const { return mResampler.get() != nullptr; }
+ void recreateResampler(uint32_t devSampleRate);
+ void resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
+ void adjustVolumeRamp(bool aux, bool useFloat = false);
+ size_t getUnreleasedFrames() const { return mResampler.get() != nullptr ?
+ mResampler->getUnreleasedFrames() : 0; };
+
+ static hook_t getTrackHook(int trackType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+ void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+ template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+ typename TO, typename TI, typename TA>
+ void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
+
+ uint32_t needs;
+
+ // TODO: Eventually remove legacy integer volume settings
+ union {
+ int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
+ int32_t volumeRL;
+ };
+
+ int32_t prevVolume[MAX_NUM_VOLUMES];
+ int32_t volumeInc[MAX_NUM_VOLUMES];
+ int32_t auxInc;
+ int32_t prevAuxLevel;
+ int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
+
+ uint16_t frameCount;
+
+ uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+ uint8_t unused_padding; // formerly format, was always 16
+ uint16_t enabled; // actually bool
+ audio_channel_mask_t channelMask;
+
+ // actual buffer provider used by the track hooks
+ AudioBufferProvider* bufferProvider;
+
+ mutable AudioBufferProvider::Buffer buffer; // 8 bytes
+
+ hook_t hook;
+ const void *mIn; // current location in buffer
+
+ std::unique_ptr<AudioResampler> mResampler;
+ uint32_t sampleRate;
+ int32_t* mainBuffer;
+ int32_t* auxBuffer;
+
+ int32_t sessionId;
+
+ audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ audio_format_t mFormat; // input track format
+ audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ // each track must be converted to this format.
+
+ float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
+ float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+ float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment
+
+ float mAuxLevel; // floating point set aux level
+ float mPrevAuxLevel; // floating point prev aux level
+ float mAuxInc; // floating point aux increment
+
+ audio_channel_mask_t mMixerChannelMask;
+ uint32_t mMixerChannelCount;
+
+ protected:
+
+ // hooks
+ void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+ void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+ void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+
+ // multi-format track hooks
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+ };
+
+ // preCreateTrack must create an instance of a proper TrackBase descendant.
+ // postCreateTrack is called after filling out fields of TrackBase. It can
+ // abort track creation by returning non-OK status. See the implementation
+ // of create() for details.
+ virtual std::shared_ptr<TrackBase> preCreateTrack();
+ virtual status_t postCreateTrack(TrackBase *track __unused) { return OK; }
+
+ // preProcess is called before the process hook, postProcess after,
+ // see the implementation of process() method.
+ virtual void preProcess() {}
+ virtual void postProcess() {}
+
+ virtual bool setChannelMasks(int name,
+ audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+ // Called when track info changes and a new process hook should be determined.
+ void invalidate() {
+ mHook = &AudioMixerBase::process__validate;
+ }
+
+ void process__validate();
+ void process__nop();
+ void process__genericNoResampling();
+ void process__genericResampling();
+ void process__oneTrack16BitsStereoNoResampling();
+
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ void process__noResampleOneTrack();
+
+ static process_hook_t getProcessHook(int processType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+ static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+ void *in, audio_format_t mixerInFormat, size_t sampleCount);
+
+ // initialization constants
+ const uint32_t mSampleRate;
+ const size_t mFrameCount;
+
+ process_hook_t mHook = &AudioMixerBase::process__nop; // one of process__*, never nullptr
+
+ // the size of the type (int32_t) should be the largest of all types supported
+ // by the mixer.
+ std::unique_ptr<int32_t[]> mOutputTemp;
+ std::unique_ptr<int32_t[]> mResampleTemp;
+
+ // track names grouped by main buffer, in no particular order of main buffer.
+ // However, names for a particular main buffer are in order (by construction).
+ std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
+
+ // track names that are enabled, in increasing order (by construction).
+ std::vector<int /* name */> mEnabled;
+
+ // track smart pointers, by name, in increasing order of name.
+ std::map<int /* name */, std::shared_ptr<TrackBase>> mTracks;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MIXER_BASE_H
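Putting the two new headers together, a minimal mixing loop might look like the sketch below. It is only an illustration under a few assumptions: the default 16-bit mixer output format, a caller-chosen track name, and an AudioBufferProvider that already delivers 16-bit stereo PCM at the mix sample rate; the parameter-passing conventions follow the comments in AudioMixerBase.h.

#include <media/AudioMixer.h>
#include <system/audio.h>

using namespace android;

// Sketch: create one track, attach its buffer provider and sink buffer, then
// mix mFrameCount frames into mainBuffer on every process() call.
void mixOneTrack(AudioMixer& mixer, AudioBufferProvider* provider,
                 int16_t* mainBuffer) {
    constexpr int kTrackName = 0;  // any unique, caller-chosen id
    if (!mixer.exists(kTrackName)) {
        mixer.create(kTrackName, AUDIO_CHANNEL_OUT_STEREO,
                     AUDIO_FORMAT_PCM_16_BIT, 0 /* sessionId */);
        mixer.setBufferProvider(kTrackName, provider);
        mixer.setParameter(kTrackName, AudioMixerBase::TRACK,
                           AudioMixerBase::MAIN_BUFFER, mainBuffer);
        float volume = AudioMixerBase::UNITY_GAIN_FLOAT;
        mixer.setParameter(kTrackName, AudioMixerBase::VOLUME,
                           AudioMixerBase::VOLUME0, &volume);
        mixer.setParameter(kTrackName, AudioMixerBase::VOLUME,
                           AudioMixerBase::VOLUME1, &volume);
    }
    mixer.enable(kTrackName);
    mixer.process();
}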
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
similarity index 100%
rename from media/libmedia/include/media/BufferProviders.h
rename to media/libaudioprocessing/include/media/BufferProviders.h
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index 9c82b1d..2a2f36e 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -6,6 +6,7 @@
srcs: ["EffectDownmix.c"],
shared_libs: [
+ "libaudioutils",
"libcutils",
"liblog",
],
@@ -23,5 +24,4 @@
"libaudioeffects",
"libhardware_headers",
],
- static_libs: ["libaudioutils" ],
}
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 7468a90..10eedd9 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -53,6 +53,7 @@
LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
void Copy_Float_Stereo_Mc( const LVM_FLOAT *src,
+ LVM_FLOAT *StereoOut,
LVM_FLOAT *dst,
LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index 3858450..3eb3c14 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -117,30 +117,31 @@
}
}
-// Merge a multichannel source with stereo contained in dst, to dst.
+// Merge a multichannel source with stereo contained in StereoOut, to dst.
void Copy_Float_Stereo_Mc(const LVM_FLOAT *src,
+ LVM_FLOAT *StereoOut,
LVM_FLOAT *dst,
LVM_INT16 NrFrames, /* Number of frames*/
LVM_INT32 NrChannels)
{
LVM_INT16 ii, jj;
- LVM_FLOAT *src_st = dst + 2 * (NrFrames - 1);
- // repack dst which carries stereo information
+ // pack dst with stereo information of StereoOut
// together with the upper channels of src.
+ StereoOut += 2 * (NrFrames - 1);
dst += NrChannels * (NrFrames - 1);
src += NrChannels * (NrFrames - 1);
for (ii = NrFrames; ii != 0; ii--)
{
- dst[1] = src_st[1];
- dst[0] = src_st[0]; // copy 1 before 0 is required for NrChannels == 3.
+ dst[1] = StereoOut[1];
+ dst[0] = StereoOut[0]; // copy 1 before 0 is required for NrChannels == 3.
for (jj = 2; jj < NrChannels; jj++)
{
dst[jj] = src[jj];
}
dst -= NrChannels;
src -= NrChannels;
- src_st -= 2;
+ StereoOut -= 2;
}
}
#endif
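To make the new repacking behaviour concrete: the routine walks all three buffers backwards (so dst may alias src), takes each output frame's first two samples from StereoOut, and takes the remaining channels from src. A rough check, assuming a float build with SUPPORT_MC defined and the header path from this tree, would be:

#include "VectorArithmetic.h"   // declares Copy_Float_Stereo_Mc

void copyFloatStereoMcExample() {
    // Sketch: 4 channels, 2 frames.
    // dst frame i == { StereoOut[2*i], StereoOut[2*i + 1], src[4*i + 2], src[4*i + 3] }
    LVM_FLOAT src[8]       = { 0, 1, 2, 3,   10, 11, 12, 13 };
    LVM_FLOAT stereoOut[4] = { 100, 101,     110, 111 };
    LVM_FLOAT dst[8]       = { 0 };
    Copy_Float_Stereo_Mc(src, stereoOut, dst, 2 /* NrFrames */, 4 /* NrChannels */);
    // dst now holds { 100, 101, 2, 3,   110, 111, 12, 13 }
}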
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index ab8ccd1..c8df8e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -60,7 +60,11 @@
#define LVCS_COMPGAINFRAME 64 /* Compressor gain update interval */
/* Memory */
+#ifdef SUPPORT_MC
+#define LVCS_SCRATCHBUFFERS 8 /* Number of buffers required for inplace processing */
+#else
#define LVCS_SCRATCHBUFFERS 6 /* Number of buffers required for inplace processing */
+#endif
#ifdef SUPPORT_MC
/*
* The Concert Surround module applies processing only on the first two
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
index ef1d9eb..56fb04f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
@@ -106,7 +106,7 @@
* The Concert Surround module carries out processing only on L, R.
*/
pInput = pScratch + (2 * NrFrames);
- pStIn = pScratch + (LVCS_SCRATCHBUFFERS * NrFrames);
+ pStIn = pScratch + ((LVCS_SCRATCHBUFFERS - 2) * NrFrames);
/* The first two channel data is extracted from the input data and
* copied into pInput buffer
*/
@@ -303,13 +303,45 @@
*/
if (pInstance->Params.OperatingMode != LVCS_OFF)
{
+#ifdef SUPPORT_MC
+ LVM_FLOAT *pStereoOut;
+ /*
+ * LVCS_Process_CS uses the output buffer to store intermediate outputs of the
+ * StereoEnhancer, Equalizer, ReverbGenerator and BypassMixer.
+ * So, to avoid overlapping input and output data when the i/o buffers are the same,
+ * use the scratch buffer to store the intermediate outputs.
+ */
+ if (pOutData == pInData)
+ {
+ /*
+ * Scratch memory is used in 4 chunks of (2 * NrFrames) size.
+ * First chunk of memory is used by LVCS_StereoEnhancer and LVCS_ReverbGenerator,
+ * second and fourth are used as input buffers by pInput and pStIn in LVCS_Process_CS.
+ * Hence, pStereoOut points at the unused third chunk of scratch memory.
+ */
+ pStereoOut = (LVM_FLOAT *) \
+ pInstance->MemoryTable. \
+ Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress +
+ ((LVCS_SCRATCHBUFFERS - 4) * NrFrames);
+ }
+ else
+ {
+ pStereoOut = pOutData;
+ }
+
/*
* Call CS process function
*/
err = LVCS_Process_CS(hInstance,
pInData,
+ pStereoOut,
+ NrFrames);
+#else
+ err = LVCS_Process_CS(hInstance,
+ pInData,
pOutData,
NumSamples);
+#endif
/*
@@ -329,10 +361,17 @@
if(NumSamples < LVCS_COMPGAINFRAME)
{
+#ifdef SUPPORT_MC
+ NonLinComp_Float(Gain, /* Compressor gain setting */
+ pStereoOut,
+ pStereoOut,
+ (LVM_INT32)(2 * NrFrames));
+#else
NonLinComp_Float(Gain, /* Compressor gain setting */
pOutData,
pOutData,
(LVM_INT32)(2 * NumSamples));
+#endif
}
else
{
@@ -361,7 +400,11 @@
FinalGain = Gain;
Gain = pInstance->CompressGain;
+#ifdef SUPPORT_MC
+ pOutPtr = pStereoOut;
+#else
pOutPtr = pOutData;
+#endif
while(SampleToProcess > 0)
{
@@ -428,6 +471,7 @@
}
#ifdef SUPPORT_MC
Copy_Float_Stereo_Mc(pInData,
+ pStereoOut,
pOutData,
NrFrames,
channels);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 10dda19..0a2850f 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -2710,7 +2710,7 @@
name[*pValueSize - 1] = 0;
*pValueSize = strlen(name) + 1;
ALOGVV("%s EQ_PARAM_GET_PRESET_NAME preset %d, name %s len %d",
- __func__, preset, gEqualizerPresets[preset].name, *pValueSize);
+ __func__, preset, name, *pValueSize);
} break;
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 1c95e27..060b92b 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -40,8 +40,8 @@
AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED;
const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
-const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
-const char * const AudioParameter::keyStreamDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
+const char * const AudioParameter::keyDeviceConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
+const char * const AudioParameter::keyDeviceDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
const char * const AudioParameter::keyStreamSupportedFormats = AUDIO_PARAMETER_STREAM_SUP_FORMATS;
const char * const AudioParameter::keyStreamSupportedChannels = AUDIO_PARAMETER_STREAM_SUP_CHANNELS;
const char * const AudioParameter::keyStreamSupportedSamplingRates =
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index f9fa86e..d95bc8e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -109,7 +109,7 @@
data.writeInt32(0);
} else {
// serialize the headers
- data.writeInt64(headers->size());
+ data.writeInt32(headers->size());
for (size_t i = 0; i < headers->size(); ++i) {
data.writeString8(headers->keyAt(i));
data.writeString8(headers->valueAt(i));
@@ -213,15 +213,14 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
- status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly)
+ sp<IMemory> getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly)
{
- ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
- frameIndex, numFrames, colorFormat, metaOnly);
+ ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+ index, colorFormat, metaOnly);
Parcel data, reply;
data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
- data.writeInt32(frameIndex);
- data.writeInt32(numFrames);
+ data.writeInt32(index);
data.writeInt32(colorFormat);
data.writeInt32(metaOnly);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
@@ -230,16 +229,9 @@
remote()->transact(GET_FRAME_AT_INDEX, data, &reply);
status_t ret = reply.readInt32();
if (ret != NO_ERROR) {
- return ret;
+ return NULL;
}
- int retNumFrames = reply.readInt32();
- if (retNumFrames < numFrames) {
- numFrames = retNumFrames;
- }
- for (int i = 0; i < numFrames; i++) {
- frames->push_back(interface_cast<IMemory>(reply.readStrongBinder()));
- }
- return OK;
+ return interface_cast<IMemory>(reply.readStrongBinder());
}
sp<IMemory> extractAlbumArt()
@@ -318,11 +310,22 @@
}
KeyedVector<String8, String8> headers;
- size_t numHeaders = (size_t) data.readInt64();
+ size_t numHeaders = (size_t) data.readInt32();
for (size_t i = 0; i < numHeaders; ++i) {
- String8 key = data.readString8();
- String8 value = data.readString8();
- headers.add(key, value);
+ String8 key;
+ String8 value;
+ status_t status;
+ status = data.readString8(&key);
+ if (status != OK) {
+ return status;
+ }
+ status = data.readString8(&value);
+ if (status != OK) {
+ return status;
+ }
+ if (headers.add(key, value) < 0) {
+ return UNKNOWN_ERROR;
+ }
}
reply->writeInt32(
@@ -431,24 +434,20 @@
case GET_FRAME_AT_INDEX: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
- int frameIndex = data.readInt32();
- int numFrames = data.readInt32();
+ int index = data.readInt32();
int colorFormat = data.readInt32();
bool metaOnly = (data.readInt32() != 0);
- ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
- frameIndex, numFrames, colorFormat, metaOnly);
+ ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+ index, colorFormat, metaOnly);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
setSchedPolicy(data);
#endif
- std::vector<sp<IMemory> > frames;
- status_t err = getFrameAtIndex(
- &frames, frameIndex, numFrames, colorFormat, metaOnly);
- reply->writeInt32(err);
- if (OK == err) {
- reply->writeInt32(frames.size());
- for (size_t i = 0; i < frames.size(); i++) {
- reply->writeStrongBinder(IInterface::asBinder(frames[i]));
- }
+ sp<IMemory> frame = getFrameAtIndex(index, colorFormat, metaOnly);
+ if (frame != nullptr) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(frame));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
}
#ifndef DISABLE_GROUP_SCHEDULE_HACK
restoreSchedPolicy();
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index c6f422d..28d2192 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -48,9 +48,8 @@
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) = 0;
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> > *frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+ virtual sp<IMemory> getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 98d300f..37dc401 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -49,9 +49,8 @@
int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom) = 0;
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> >* frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+ virtual sp<IMemory> getFrameAtIndex(
+ int frameIndex, int colorFormat, bool metaOnly) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index d29e97d..138a014 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -98,9 +98,8 @@
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
- status_t getFrameAtIndex(
- std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
- int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+ sp<IMemory> getFrameAtIndex(
+ int index, int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> extractAlbumArt();
const char* extractMetadata(int keyCode);
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index e61b04d..2ae76b3 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -179,18 +179,16 @@
index, colorFormat, left, top, right, bottom);
}
-status_t MediaMetadataRetriever::getFrameAtIndex(
- std::vector<sp<IMemory> > *frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
- ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
- frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> MediaMetadataRetriever::getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+ index, colorFormat, metaOnly);
Mutex::Autolock _l(mLock);
if (mRetriever == 0) {
ALOGE("retriever is not initialized");
- return INVALID_OPERATION;
+ return NULL;
}
- return mRetriever->getFrameAtIndex(
- frames, frameIndex, numFrames, colorFormat, metaOnly);
+ return mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
}
const char* MediaMetadataRetriever::extractMetadata(int keyCode)
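On the client side, the single-frame API removes the frame vector entirely. A hypothetical caller is sketched below; the include paths, helper name, and error handling are assumptions of this sketch, while colorFormat and metaOnly fall back to the declaration's defaults.

#include <media/mediametadataretriever.h>
#include <private/media/VideoFrame.h>   // header path assumed

using namespace android;

// Sketch: fetch one decoded frame by index with the new single-frame API.
VideoFrame* fetchFrame(MediaMetadataRetriever& retriever, int index,
                       sp<IMemory>* outMem) {
    *outMem = retriever.getFrameAtIndex(index);  // RGB_565, metaOnly = false
    if (*outMem == nullptr) {
        return nullptr;  // decode failed; the service side already logged the index
    }
    // The IMemory wraps a VideoFrame header followed by the flattened pixel data.
    return static_cast<VideoFrame*>((*outMem)->pointer());
}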
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 15ea578..50f18f4 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -37,6 +37,11 @@
"1" ,
]
},
+
+ header_abi_checker: {
+ enabled: true,
+ symbol_file: "libmediametrics.map.txt",
+ },
}
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 40b17bf..4a3c65e 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -242,31 +242,27 @@
sp<IMemory> frame = mRetriever->getImageRectAtIndex(
index, colorFormat, left, top, right, bottom);
if (frame == NULL) {
- ALOGE("failed to extract image");
- return NULL;
+ ALOGE("failed to extract image at index %d", index);
}
return frame;
}
-status_t MetadataRetrieverClient::getFrameAtIndex(
- std::vector<sp<IMemory> > *frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
- ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
- frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> MetadataRetrieverClient::getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+ index, colorFormat, metaOnly);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
if (mRetriever == NULL) {
ALOGE("retriever is not initialized");
- return INVALID_OPERATION;
+ return NULL;
}
- status_t err = mRetriever->getFrameAtIndex(
- frames, frameIndex, numFrames, colorFormat, metaOnly);
- if (err != OK) {
- frames->clear();
- return err;
+ sp<IMemory> frame = mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+ if (frame == NULL) {
+ ALOGE("failed to extract frame at index %d", index);
}
- return OK;
+ return frame;
}
sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index 272d093..8020441 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -56,9 +56,8 @@
int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> > *frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly);
virtual sp<IMemory> extractAlbumArt();
virtual const char* extractMetadata(int keyCode);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2f0da2d..ee463ce 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -106,16 +106,17 @@
releaseAndResetMediaBuffers();
}
-sp<AMessage> NuPlayer::Decoder::getStats() const {
+sp<AMessage> NuPlayer::Decoder::getStats() {
+ Mutex::Autolock autolock(mStatsLock);
mStats->setInt64("frames-total", mNumFramesTotal);
mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
mStats->setFloat("frame-rate-total", mFrameRateTotal);
- // i'm mutexed right now.
// make our own copy, so we aren't victim to any later changes.
sp<AMessage> copiedStats = mStats->dup();
+
return copiedStats;
}
@@ -362,13 +363,17 @@
CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
- mStats->setString("mime", mime.c_str());
- mStats->setString("component-name", mComponentName.c_str());
+ {
+ Mutex::Autolock autolock(mStatsLock);
+ mStats->setString("mime", mime.c_str());
+ mStats->setString("component-name", mComponentName.c_str());
+ }
if (!mIsAudio) {
int32_t width, height;
if (mOutputFormat->findInt32("width", &width)
&& mOutputFormat->findInt32("height", &height)) {
+ Mutex::Autolock autolock(mStatsLock);
mStats->setInt32("width", width);
mStats->setInt32("height", height);
}
@@ -799,6 +804,7 @@
int32_t width, height;
if (format->findInt32("width", &width)
&& format->findInt32("height", &height)) {
+ Mutex::Autolock autolock(mStatsLock);
mStats->setInt32("width", width);
mStats->setInt32("height", height);
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 3da2f0b..4a52b0c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -34,7 +34,7 @@
const sp<Surface> &surface = NULL,
const sp<CCDecoder> &ccDecoder = NULL);
- virtual sp<AMessage> getStats() const;
+ virtual sp<AMessage> getStats();
// sets the output surface of video decoders.
virtual status_t setVideoSurface(const sp<Surface> &surface);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index d44c396..a3e0046 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -47,7 +47,7 @@
void signalResume(bool notifyComplete);
void initiateShutdown();
- virtual sp<AMessage> getStats() const {
+ virtual sp<AMessage> getStats() {
return mStats;
}
@@ -88,6 +88,7 @@
int32_t mBufferGeneration;
bool mPaused;
sp<AMessage> mStats;
+ Mutex mStatsLock;
private:
enum {
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 18a6bd8..c6ec6de 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -21,6 +21,7 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
#include <inttypes.h>
#include <media/ICrypto.h>
#include <media/IMediaSource.h>
@@ -28,6 +29,7 @@
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
@@ -44,7 +46,7 @@
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
- int32_t dstBpp, bool metaOnly = false) {
+ int32_t dstBpp, bool allocRotated, bool metaOnly) {
int32_t rotationAngle;
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
@@ -74,6 +76,14 @@
displayHeight = height;
}
+ if (allocRotated && (rotationAngle == 90 || rotationAngle == 270)) {
+ int32_t tmp;
+ tmp = width; width = height; height = tmp;
+ tmp = displayWidth; displayWidth = displayHeight; displayHeight = tmp;
+ tmp = tileWidth; tileWidth = tileHeight; tileHeight = tmp;
+ rotationAngle = 0;
+ }
+
VideoFrame frame(width, height, displayWidth, displayHeight,
tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
@@ -94,6 +104,20 @@
return frameMem;
}
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
+ int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+ int32_t dstBpp, bool allocRotated = false) {
+ return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+ allocRotated, false /*metaOnly*/);
+}
+
+sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
+ int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+ int32_t dstBpp) {
+ return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+ false /*allocRotated*/, true /*metaOnly*/);
+}
+
bool findThumbnailInfo(
const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
@@ -117,23 +141,27 @@
bool getDstColorFormat(
android_pixel_format_t colorFormat,
OMX_COLOR_FORMATTYPE *dstFormat,
+ ui::PixelFormat *captureFormat,
int32_t *dstBpp) {
switch (colorFormat) {
case HAL_PIXEL_FORMAT_RGB_565:
{
*dstFormat = OMX_COLOR_Format16bitRGB565;
+ *captureFormat = ui::PixelFormat::RGB_565;
*dstBpp = 2;
return true;
}
case HAL_PIXEL_FORMAT_RGBA_8888:
{
*dstFormat = OMX_COLOR_Format32BitRGBA8888;
+ *captureFormat = ui::PixelFormat::RGBA_8888;
*dstBpp = 4;
return true;
}
case HAL_PIXEL_FORMAT_BGRA_8888:
{
*dstFormat = OMX_COLOR_Format32bitBGRA8888;
+ *captureFormat = ui::PixelFormat::BGRA_8888;
*dstBpp = 4;
return true;
}
@@ -150,9 +178,10 @@
sp<IMemory> FrameDecoder::getMetadataOnly(
const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
OMX_COLOR_FORMATTYPE dstFormat;
+ ui::PixelFormat captureFormat;
int32_t dstBpp;
- if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+ if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+ &dstFormat, &captureFormat, &dstBpp)) {
return NULL;
}
@@ -170,8 +199,7 @@
tileWidth = tileHeight = 0;
}
}
- return allocVideoFrame(trackMeta,
- width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
+ return allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
}
FrameDecoder::FrameDecoder(
@@ -194,15 +222,30 @@
}
}
+bool isHDR(const sp<AMessage> &format) {
+ uint32_t standard, range, transfer;
+ if (!format->findInt32("color-standard", (int32_t*)&standard)) {
+ standard = 0;
+ }
+ if (!format->findInt32("color-range", (int32_t*)&range)) {
+ range = 0;
+ }
+ if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
+ transfer = 0;
+ }
+ return standard == ColorUtils::kColorStandardBT2020 &&
+ transfer == ColorUtils::kColorTransferST2084;
+}
+
status_t FrameDecoder::init(
- int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
- if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
+ int64_t frameTimeUs, int option, int colorFormat) {
+ if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+ &mDstFormat, &mCaptureFormat, &mDstBpp)) {
return ERROR_UNSUPPORTED;
}
sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
- frameTimeUs, numFrames, option, &mReadOptions);
+ frameTimeUs, option, &mReadOptions, &mSurface);
if (videoFormat == NULL) {
ALOGE("video format or seek mode not supported");
return ERROR_UNSUPPORTED;
@@ -219,7 +262,7 @@
}
err = decoder->configure(
- videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+ videoFormat, mSurface, NULL /* crypto */, 0 /* flags */);
if (err != OK) {
ALOGW("configure returned error %d (%s)", err, asString(err));
decoder->release();
@@ -253,19 +296,7 @@
return NULL;
}
- return mFrames.size() > 0 ? mFrames[0] : NULL;
-}
-
-status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
- status_t err = extractInternal();
- if (err != OK) {
- return err;
- }
-
- for (size_t i = 0; i < mFrames.size(); i++) {
- frames->push_back(mFrames[i]);
- }
- return OK;
+ return mFrameMemory;
}
status_t FrameDecoder::extractInternal() {
@@ -379,8 +410,13 @@
ALOGE("failed to get output buffer %zu", index);
break;
}
- err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
- mDecoder->releaseOutputBuffer(index);
+ if (mSurface != nullptr) {
+ mDecoder->renderOutputBufferAndRelease(index);
+ err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ } else {
+ err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ mDecoder->releaseOutputBuffer(index);
+ }
} else {
ALOGW("Received error %d (%s) instead of output", err, asString(err));
done = true;
@@ -404,22 +440,22 @@
const sp<MetaData> &trackMeta,
const sp<IMediaSource> &source)
: FrameDecoder(componentName, trackMeta, source),
+ mFrame(NULL),
mIsAvcOrHevc(false),
mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
- mTargetTimeUs(-1LL),
- mNumFrames(0),
- mNumFramesDecoded(0) {
+ mTargetTimeUs(-1LL) {
}
sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
- int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
+ int64_t frameTimeUs, int seekMode,
+ MediaSource::ReadOptions *options,
+ sp<Surface> *window) {
mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
ALOGE("Unknown seek mode: %d", mSeekMode);
return NULL;
}
- mNumFrames = numFrames;
const char *mime;
if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
@@ -460,6 +496,16 @@
videoFormat->setInt32("android._num-input-buffers", 1);
videoFormat->setInt32("android._num-output-buffers", 1);
}
+
+ if (isHDR(videoFormat)) {
+ *window = initSurfaceControl();
+ if (*window == NULL) {
+ ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
+ } else {
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+ }
+ }
+
return videoFormat;
}
@@ -495,7 +541,7 @@
return OK;
}
- *done = (++mNumFramesDecoded >= mNumFrames);
+ *done = true;
if (outputFormat == NULL) {
return ERROR_MALFORMED;
@@ -504,13 +550,22 @@
int32_t width, height, stride, srcFormat;
if (!outputFormat->findInt32("width", &width) ||
!outputFormat->findInt32("height", &height) ||
- !outputFormat->findInt32("stride", &stride) ||
!outputFormat->findInt32("color-format", &srcFormat)) {
ALOGE("format missing dimension or color: %s",
outputFormat->debugString().c_str());
return ERROR_MALFORMED;
}
+ if (!outputFormat->findInt32("stride", &stride)) {
+ if (mSurfaceControl == NULL) {
+ ALOGE("format must have stride for byte buffer mode: %s",
+ outputFormat->debugString().c_str());
+ return ERROR_MALFORMED;
+ }
+ // for surface output, set stride to width, we don't actually need it.
+ stride = width;
+ }
+
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
crop_left = crop_top = 0;
@@ -518,15 +573,23 @@
crop_bottom = height - 1;
}
- sp<IMemory> frameMem = allocVideoFrame(
- trackMeta(),
- (crop_right - crop_left + 1),
- (crop_bottom - crop_top + 1),
- 0,
- 0,
- dstBpp());
- addFrame(frameMem);
- VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
+ if (mFrame == NULL) {
+ sp<IMemory> frameMem = allocVideoFrame(
+ trackMeta(),
+ (crop_right - crop_left + 1),
+ (crop_bottom - crop_top + 1),
+ 0,
+ 0,
+ dstBpp(),
+ mSurfaceControl != nullptr /*allocRotated*/);
+ mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+
+ setFrame(frameMem);
+ }
+
+ if (mSurfaceControl != nullptr) {
+ return captureSurfaceControl();
+ }
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
@@ -547,8 +610,8 @@
(const uint8_t *)videoFrameBuffer->data(),
width, height, stride,
crop_left, crop_top, crop_right, crop_bottom,
- frame->getFlattenedData(),
- frame->mWidth, frame->mHeight, frame->mRowBytes,
+ mFrame->getFlattenedData(),
+ mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
crop_left, crop_top, crop_right, crop_bottom);
return OK;
}
@@ -558,6 +621,101 @@
return ERROR_UNSUPPORTED;
}
+sp<Surface> VideoFrameDecoder::initSurfaceControl() {
+ sp<SurfaceComposerClient> client = new SurfaceComposerClient();
+ if (client->initCheck() != NO_ERROR) {
+ ALOGE("failed to get SurfaceComposerClient");
+ return NULL;
+ }
+
+ // create a container layer to hold the capture layer, so that we can
+ // use full frame drop. If without the container, the crop will be set
+ // to display size.
+ sp<SurfaceControl> parent = client->createSurface(
+ String8("parent"),
+ 0 /* width */, 0 /* height */,
+ PIXEL_FORMAT_RGBA_8888,
+ ISurfaceComposerClient::eFXSurfaceContainer );
+
+ if (!parent) {
+ ALOGE("failed to get surface control parent");
+ return NULL;
+ }
+
+ // create the surface with unknown size 1x1 for now, real size will
+ // be set before the capture when we have output format info.
+ sp<SurfaceControl> surfaceControl = client->createSurface(
+ String8("thumbnail"),
+ 1 /* width */, 1 /* height */,
+ PIXEL_FORMAT_RGBA_8888,
+ ISurfaceComposerClient::eFXSurfaceBufferQueue,
+ parent.get());
+
+ if (!surfaceControl) {
+ ALOGE("failed to get surface control");
+ return NULL;
+ }
+
+ SurfaceComposerClient::Transaction t;
+ t.hide(parent)
+ .show(surfaceControl)
+ .apply(true);
+
+ mSurfaceControl = surfaceControl;
+ mParent = parent;
+
+ return surfaceControl->getSurface();
+}
+
+status_t VideoFrameDecoder::captureSurfaceControl() {
+ // set the layer size to the output size before the capture
+ SurfaceComposerClient::Transaction()
+ .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
+ .apply(true);
+
+ sp<GraphicBuffer> outBuffer;
+ status_t err = ScreenshotClient::captureChildLayers(
+ mParent->getHandle(),
+ ui::Dataspace::V0_SRGB,
+ captureFormat(),
+ Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
+ {},
+ 1.0f /*frameScale*/,
+ &outBuffer);
+
+ if (err != OK) {
+ ALOGE("failed to captureLayers: err %d", err);
+ return err;
+ }
+
+ ALOGV("capture: %dx%d, format %d, stride %d",
+ outBuffer->getWidth(),
+ outBuffer->getHeight(),
+ outBuffer->getPixelFormat(),
+ outBuffer->getStride());
+
+ uint8_t *base;
+ int32_t outBytesPerPixel, outBytesPerStride;
+ err = outBuffer->lock(
+ GraphicBuffer::USAGE_SW_READ_OFTEN,
+ reinterpret_cast<void**>(&base),
+ &outBytesPerPixel,
+ &outBytesPerStride);
+ if (err != OK) {
+ ALOGE("failed to lock graphic buffer: err %d", err);
+ return err;
+ }
+
+ uint8_t *dst = mFrame->getFlattenedData();
+ for (size_t y = 0 ; y < fmin(mFrame->mHeight, outBuffer->getHeight()) ; y++) {
+ memcpy(dst, base, fmin(mFrame->mWidth, outBuffer->getWidth()) * mFrame->mBytesPerPixel);
+ dst += mFrame->mRowBytes;
+ base += outBuffer->getStride() * mFrame->mBytesPerPixel;
+ }
+ outBuffer->unlock();
+ return OK;
+}
+
////////////////////////////////////////////////////////////////////////
ImageDecoder::ImageDecoder(
@@ -577,8 +735,8 @@
}
sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
- int64_t frameTimeUs, size_t /*numFrames*/,
- int /*seekMode*/, MediaSource::ReadOptions *options) {
+ int64_t frameTimeUs, int /*seekMode*/,
+ MediaSource::ReadOptions *options, sp<Surface> * /*window*/) {
sp<MetaData> overrideMeta;
if (frameTimeUs < 0) {
uint32_t type;
@@ -705,7 +863,7 @@
trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
mFrame = static_cast<VideoFrame*>(frameMem->pointer());
- addFrame(frameMem);
+ setFrame(frameMem);
}
int32_t srcFormat;
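
The captureSurfaceControl() hunk above copies the locked GraphicBuffer into the destination VideoFrame one row at a time, stepping the source by the buffer stride (in pixels) and the destination by the frame's row bytes, clamped to the smaller of the two extents. A minimal standalone sketch of that stride-aware copy, using plain arrays and hypothetical parameter names rather than the GraphicBuffer/VideoFrame types:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    // Copy the overlapping region of two pixel buffers row by row. dstRowBytes
    // is the destination pitch in bytes; srcStridePx is the source stride in
    // pixels, as returned by GraphicBuffer::getStride(). Extents are clamped to
    // the smaller buffer, mirroring the fmin() clamping in the hunk above.
    static void copyRows(uint8_t* dst, size_t dstRowBytes, size_t dstWidthPx, size_t dstHeightPx,
                         const uint8_t* src, size_t srcStridePx, size_t srcWidthPx, size_t srcHeightPx,
                         size_t bytesPerPixel) {
        const size_t rows = std::min(dstHeightPx, srcHeightPx);
        const size_t rowBytes = std::min(dstWidthPx, srcWidthPx) * bytesPerPixel;
        for (size_t y = 0; y < rows; ++y) {
            memcpy(dst, src, rowBytes);
            dst += dstRowBytes;                  // destination pitch in bytes
            src += srcStridePx * bytesPerPixel;  // source stride given in pixels
        }
    }

The key detail is that the two pitches are independent: the screenshot buffer's stride need not match the heap-allocated frame's row bytes.
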
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index babdc7a..8b6262f 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -36,7 +36,7 @@
using namespace android;
const int64_t kTimeoutWaitForOutputUs = 500000; // 0.5 seconds
-const int64_t kTimeoutWaitForInputUs = 5000; // 5 milliseconds
+const int64_t kTimeoutWaitForInputUs = 0; // don't wait
const int kTimeoutMaxRetries = 20;
//static
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index fa3d372..6f536a9 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -44,7 +44,7 @@
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
mAlbumArt(NULL),
- mLastImageIndex(-1) {
+ mLastDecodedIndex(-1) {
ALOGV("StagefrightMetadataRetriever()");
}
@@ -143,8 +143,8 @@
FrameRect rect = {left, top, right, bottom};
- if (mImageDecoder != NULL && index == mLastImageIndex) {
- return mImageDecoder->extractFrame(&rect);
+ if (mDecoder != NULL && index == mLastDecodedIndex) {
+ return mDecoder->extractFrame(&rect);
}
return getImageInternal(
@@ -153,6 +153,8 @@
sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
+ mDecoder.clear();
+ mLastDecodedIndex = -1;
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
@@ -227,14 +229,14 @@
const AString &componentName = matchingCodecs[i];
sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
int64_t frameTimeUs = thumbnail ? -1 : 0;
- if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+ if (decoder->init(frameTimeUs, 0 /*option*/, colorFormat) == OK) {
sp<IMemory> frame = decoder->extractFrame(rect);
if (frame != NULL) {
if (rect != NULL) {
// keep the decoder if slice decoding
- mImageDecoder = decoder;
- mLastImageIndex = index;
+ mDecoder = decoder;
+ mLastDecodedIndex = index;
}
return frame;
}
@@ -242,6 +244,7 @@
ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
}
+ ALOGE("all codecs failed to extract frame.");
return NULL;
}
@@ -250,36 +253,40 @@
ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
timeUs, option, colorFormat, metaOnly);
- sp<IMemory> frame;
- status_t err = getFrameInternal(
- timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
- return (err == OK) ? frame : NULL;
+ return getFrameInternal(timeUs, option, colorFormat, metaOnly);
}
-status_t StagefrightMetadataRetriever::getFrameAtIndex(
- std::vector<sp<IMemory> >* frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
- ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
- frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtIndex(
+ int frameIndex, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex %d, colorFormat: %d, metaOnly: %d",
+ frameIndex, colorFormat, metaOnly);
+ if (mDecoder != NULL && frameIndex == mLastDecodedIndex + 1) {
+ sp<IMemory> frame = mDecoder->extractFrame();
+ if (frame != nullptr) {
+ mLastDecodedIndex = frameIndex;
+ }
+ return frame;
+ }
- return getFrameInternal(
- frameIndex, numFrames, MediaSource::ReadOptions::SEEK_FRAME_INDEX,
- colorFormat, metaOnly, NULL /*outFrame*/, frames);
+ return getFrameInternal(frameIndex,
+ MediaSource::ReadOptions::SEEK_FRAME_INDEX, colorFormat, metaOnly);
}
-status_t StagefrightMetadataRetriever::getFrameInternal(
- int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
- sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
+sp<IMemory> StagefrightMetadataRetriever::getFrameInternal(
+ int64_t timeUs, int option, int colorFormat, bool metaOnly) {
+ mDecoder.clear();
+ mLastDecodedIndex = -1;
+
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
- return NO_INIT;
+ return NULL;
}
sp<MetaData> fileMeta = mExtractor->getMetaData();
if (fileMeta == NULL) {
ALOGE("extractor doesn't publish metadata, failed to initialize?");
- return NO_INIT;
+ return NULL;
}
size_t n = mExtractor->countTracks();
@@ -300,30 +307,24 @@
if (i == n) {
ALOGE("no video track found.");
- return INVALID_OPERATION;
+ return NULL;
}
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
i, MediaExtractor::kIncludeExtensiveMetaData);
if (!trackMeta) {
- return UNKNOWN_ERROR;
+ return NULL;
}
if (metaOnly) {
- if (outFrame != NULL) {
- *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
- if (*outFrame != NULL) {
- return OK;
- }
- }
- return UNKNOWN_ERROR;
+ return FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
}
sp<IMediaSource> source = mExtractor->getTrack(i);
if (source.get() == NULL) {
ALOGV("unable to instantiate video track.");
- return UNKNOWN_ERROR;
+ return NULL;
}
const void *data;
@@ -350,24 +351,22 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
sp<VideoFrameDecoder> decoder = new VideoFrameDecoder(componentName, trackMeta, source);
- if (decoder->init(timeUs, numFrames, option, colorFormat) == OK) {
- if (outFrame != NULL) {
- *outFrame = decoder->extractFrame();
- if (*outFrame != NULL) {
- return OK;
+ if (decoder->init(timeUs, option, colorFormat) == OK) {
+ sp<IMemory> frame = decoder->extractFrame();
+ if (frame != nullptr) {
+ // keep the decoder if seeking by frame index
+ if (option == MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
+ mDecoder = decoder;
+ mLastDecodedIndex = timeUs;
}
- } else if (outFrames != NULL) {
- status_t err = decoder->extractFrames(outFrames);
- if (err == OK) {
- return OK;
- }
+ return frame;
}
}
ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
}
ALOGE("all codecs failed to extract frame.");
- return UNKNOWN_ERROR;
+ return NULL;
}
MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
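
The retriever now keeps a single FrameDecoder (mDecoder) plus mLastDecodedIndex, so a getFrameAtIndex() call for the next consecutive index can pull the next output from the already-configured codec instead of tearing it down and seeking again. A rough sketch of that caching pattern, with hypothetical stand-in types rather than the real sp<>/FrameDecoder classes:

    #include <memory>

    struct Frame {};                                   // stand-in for sp<IMemory>
    struct Decoder {                                   // stand-in for sp<FrameDecoder>
        std::unique_ptr<Frame> extractNextFrame() { return std::make_unique<Frame>(); }
    };

    class Retriever {
    public:
        std::unique_ptr<Frame> getFrameAtIndex(int index) {
            // Fast path: the next consecutive index reuses the cached decoder.
            if (mDecoder && index == mLastDecodedIndex + 1) {
                auto frame = mDecoder->extractNextFrame();
                if (frame) mLastDecodedIndex = index;
                return frame;
            }
            // Slow path: drop the cache and decode from scratch (codec setup elided).
            mDecoder = std::make_unique<Decoder>();
            auto frame = mDecoder->extractNextFrame();
            if (frame) mLastDecodedIndex = index;
            return frame;
        }
    private:
        std::unique_ptr<Decoder> mDecoder;
        int mLastDecodedIndex = -1;
    };
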
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index d7d871a..f35bce1 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -15,8 +15,10 @@
},
header_libs: ["libbase_headers"],
- static_libs: [
+ shared_libs: [
"libaudioutils",
+ ],
+ static_libs: [
"libFLAC",
],
}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index da86758..87e8fd4 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1426,75 +1426,90 @@
RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
UWORD32 ui_exec_done;
+ WORD32 i_num_preroll = 0;
/* Checking for end of processing */
err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DONE_QUERY,
&ui_exec_done);
RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
-#ifdef ENABLE_MPEG_D_DRC
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+ &i_num_preroll);
+
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
{
- if (ui_exec_done != 1) {
- VOID* p_array; // ITTIAM:buffer to handle gain payload
- WORD32 buf_size = 0; // ITTIAM:gain payload length
- WORD32 bit_str_fmt = 1;
- WORD32 gain_stream_flag = 1;
+ int32_t pi_preroll_frame_offset = 0;
+ do {
+#ifdef ENABLE_MPEG_D_DRC
+ if (ui_exec_done != 1) {
+ VOID* p_array; // ITTIAM:buffer to handle gain payload
+ WORD32 buf_size = 0; // ITTIAM:gain payload length
+ WORD32 bit_str_fmt = 1;
+ WORD32 gain_stream_flag = 1;
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
- IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
- RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
- IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
- RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
- if (buf_size > 0) {
- /*Set bitstream_split_format */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
- IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+ if (buf_size > 0) {
+ /*Set bitstream_split_format */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
- memcpy(mDrcInBuf, p_array, buf_size);
- /* Set number of bytes to be processed */
- err_code =
- ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+ memcpy(mDrcInBuf, p_array, buf_size);
+ /* Set number of bytes to be processed */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS,
+ 0, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
- IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG,
+ &gain_stream_flag);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
- /* Execute process */
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
- IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
- RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
- mMpegDDRCPresent = 1;
+ mMpegDDRCPresent = 1;
+ }
}
- }
- }
#endif
- /* How much buffer is used in input buffers */
- err_code =
- ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF, 0, bytesConsumed);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+ /* How much buffer is used in input buffers */
+ err_code =
+ ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF,
+ 0, bytesConsumed);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
- /* Get the output bytes */
- err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+ /* Get the output bytes */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
#ifdef ENABLE_MPEG_D_DRC
- if (mMpegDDRCPresent == 1) {
- memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
- err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
- RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+ if (mMpegDDRCPresent == 1) {
+ memcpy(mDrcInBuf, mOutputBuffer + pi_preroll_frame_offset, *outBytes);
+ pi_preroll_frame_offset += *outBytes;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES,
+ 0, outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
- err_code =
- ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, NULL);
- RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE,
+ IA_CMD_TYPE_DO_EXECUTE, NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
- memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
- }
+ memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+ }
#endif
+ i_num_preroll--;
+ } while (i_num_preroll > 0);
+ }
return IA_NO_ERROR;
}
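
The SoftXAAC hunk wraps the post-EXECUTE steps in a do/while driven by IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES, so every pre-roll frame is drained and its bytes are appended at a running offset (pi_preroll_frame_offset) before the next one is processed. A simplified sketch of that drain loop, with a hypothetical decodeOnce callable standing in for the ixheaacd/DRC sequence:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Run the per-frame work at least once, then keep going while pre-roll
    // frames remain, accumulating the output offset the way
    // pi_preroll_frame_offset does above. decodeOnce stands in for the
    // ixheaacd execute/DRC steps and returns the bytes it appended.
    static size_t drainPreroll(int32_t numPreroll,
                               const std::function<size_t(std::vector<uint8_t>&)>& decodeOnce,
                               std::vector<uint8_t>& output) {
        size_t offset = 0;
        do {
            offset += decodeOnce(output);
            --numPreroll;
        } while (numPreroll > 0);
        return offset;
    }
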
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index d685321..c7dc415 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -324,8 +324,8 @@
}
#define DECLARE_YUV2RGBFUNC(func, rgb) int (*func)( \
- const uint8*, int, const uint8*, int, \
- const uint8*, int, uint8*, int, int, int) \
+ const uint8_t*, int, const uint8_t*, int, \
+ const uint8_t*, int, uint8_t*, int, int, int) \
= mSrcColorSpace.isBt709() ? libyuv::H420To##rgb \
: mSrcColorSpace.isJpeg() ? libyuv::J420To##rgb \
: libyuv::I420To##rgb
@@ -350,7 +350,7 @@
{
DECLARE_YUV2RGBFUNC(func, RGB565);
(*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
- (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+ (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
break;
}
@@ -358,7 +358,7 @@
{
DECLARE_YUV2RGBFUNC(func, ABGR);
(*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
- (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+ (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
break;
}
@@ -366,7 +366,7 @@
{
DECLARE_YUV2RGBFUNC(func, ARGB);
(*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
- (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+ (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
break;
}
@@ -391,17 +391,17 @@
switch (mDstFormat) {
case OMX_COLOR_Format16bitRGB565:
- libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+ libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
dst.mStride, src.cropWidth(), src.cropHeight());
break;
case OMX_COLOR_Format32bitBGRA8888:
- libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+ libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
dst.mStride, src.cropWidth(), src.cropHeight());
break;
case OMX_COLOR_Format32BitRGBA8888:
- libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+ libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
dst.mStride, src.cropWidth(), src.cropHeight());
break;
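
The ColorConverter hunk is a mechanical rename of libyuv's old uint8 typedef to uint8_t; the selection logic itself (H420* for BT.709 sources, J420* for full-range JPEG, I420*/BT.601 otherwise) is unchanged. A small sketch of that selection in isolation, assuming the libyuv planar-to-ARGB entry points with the 10-argument signature shown in DECLARE_YUV2RGBFUNC (the include path may differ from what ColorConverter.cpp uses):

    #include <cstdint>

    #include "libyuv/convert_argb.h"  // assumed header for I420/J420/H420ToARGB

    // Pick the conversion matrix the way DECLARE_YUV2RGBFUNC does: H420* for
    // BT.709 sources, J420* for full-range JPEG, I420* (BT.601) otherwise.
    static int yuv420ToARGB(const uint8_t* y, int yStride,
                            const uint8_t* u, int uStride,
                            const uint8_t* v, int vStride,
                            uint8_t* dst, int dstStride,
                            int width, int height,
                            bool isBt709, bool isJpeg) {
        auto func = isBt709 ? libyuv::H420ToARGB
                  : isJpeg  ? libyuv::J420ToARGB
                            : libyuv::I420ToARGB;
        return func(y, yStride, u, uStride, v, vStride, dst, dstStride, width, height);
    }
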
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 04041eb..a07eb8c 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -77,7 +77,7 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.android.av1.decoder" type="video/av01">
+ <MediaCodec name="c2.android.gav1.decoder" type="video/av01">
<Limit name="size" min="96x96" max="1920x1080" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 67d3f1a..9532ba6 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -182,7 +182,7 @@
</Variant>
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
+ <MediaCodec name="c2.android.gav1.decoder" type="video/av01" variant="!slow-cpu">
<Limit name="size" min="2x2" max="1920x1080" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/exports.lds b/media/libstagefright/exports.lds
index aabc233..f5ddf1e 100644
--- a/media/libstagefright/exports.lds
+++ b/media/libstagefright/exports.lds
@@ -395,7 +395,6 @@
ScaleFilterCols_NEON*;
ScaleFilterReduce;
ScaleFilterRows_NEON*;
- ScaleOffset;
ScalePlane;
ScalePlane_16;
ScalePlaneBilinearDown;
@@ -505,4 +504,8 @@
YUY2ToYRow_Any_NEON*;
YUY2ToYRow_C;
YUY2ToYRow_NEON*;
+ ogg_packet_*;
+ ogg_page_*;
+ ogg_stream_*;
+ ogg_sync_*;
};
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index b494e16..7ebe71f 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -1,4 +1,4 @@
-cc_library {
+cc_library_shared {
name: "libstagefright_flacdec",
vendor_available: true,
@@ -18,29 +18,20 @@
cfi: true,
},
- static: {
- whole_static_libs: [
- "libFLAC",
- "libaudioutils",
- ],
- },
-
- shared: {
- static_libs: [
- "libFLAC",
- "libaudioutils",
- ],
- export_static_lib_headers: [
- "libFLAC",
- ],
- },
-
shared_libs: [
+ "libaudioutils",
"liblog",
],
+ static_libs: [
+ "libFLAC",
+ ],
+
+ export_static_lib_headers: [
+ "libFLAC",
+ ],
+
header_libs: [
"libmedia_headers",
- "libFLAC-headers",
],
}
diff --git a/media/libstagefright/foundation/avc_utils.cpp b/media/libstagefright/foundation/avc_utils.cpp
index e8a6083..f53d2c9 100644
--- a/media/libstagefright/foundation/avc_utils.cpp
+++ b/media/libstagefright/foundation/avc_utils.cpp
@@ -166,10 +166,21 @@
unsigned pic_height_in_map_units_minus1 = parseUE(&br);
unsigned frame_mbs_only_flag = br.getBits(1);
- *width = pic_width_in_mbs_minus1 * 16 + 16;
+ // *width = pic_width_in_mbs_minus1 * 16 + 16;
+ if (__builtin_mul_overflow(pic_width_in_mbs_minus1, 16, &pic_width_in_mbs_minus1) ||
+ __builtin_add_overflow(pic_width_in_mbs_minus1, 16, width)) {
+ *width = 0;
+ }
- *height = (2 - frame_mbs_only_flag)
- * (pic_height_in_map_units_minus1 * 16 + 16);
+ // *height = (2 - frame_mbs_only_flag) * (pic_height_in_map_units_minus1 * 16 + 16);
+ if (__builtin_mul_overflow(
+ pic_height_in_map_units_minus1, 16, &pic_height_in_map_units_minus1) ||
+ __builtin_add_overflow(
+ pic_height_in_map_units_minus1, 16, &pic_height_in_map_units_minus1) ||
+ __builtin_mul_overflow(
+ pic_height_in_map_units_minus1, (2 - frame_mbs_only_flag), height)) {
+ *height = 0;
+ }
if (!frame_mbs_only_flag) {
br.getBits(1); // mb_adaptive_frame_field_flag
@@ -202,17 +213,19 @@
// *width -= (frame_crop_left_offset + frame_crop_right_offset) * cropUnitX;
- if(__builtin_add_overflow(frame_crop_left_offset, frame_crop_right_offset, &frame_crop_left_offset) ||
- __builtin_mul_overflow(frame_crop_left_offset, cropUnitX, &frame_crop_left_offset) ||
- __builtin_sub_overflow(*width, frame_crop_left_offset, width) ||
+ if(__builtin_add_overflow(
+ frame_crop_left_offset, frame_crop_right_offset, &frame_crop_left_offset) ||
+ __builtin_mul_overflow(frame_crop_left_offset, cropUnitX, &frame_crop_left_offset) ||
+ __builtin_sub_overflow(*width, frame_crop_left_offset, width) ||
*width < 0) {
*width = 0;
}
//*height -= (frame_crop_top_offset + frame_crop_bottom_offset) * cropUnitY;
- if(__builtin_add_overflow(frame_crop_top_offset, frame_crop_bottom_offset, &frame_crop_top_offset) ||
- __builtin_mul_overflow(frame_crop_top_offset, cropUnitY, &frame_crop_top_offset) ||
- __builtin_sub_overflow(*height, frame_crop_top_offset, height) ||
+ if(__builtin_add_overflow(
+ frame_crop_top_offset, frame_crop_bottom_offset, &frame_crop_top_offset) ||
+ __builtin_mul_overflow(frame_crop_top_offset, cropUnitY, &frame_crop_top_offset) ||
+ __builtin_sub_overflow(*height, frame_crop_top_offset, height) ||
*height < 0) {
*height = 0;
}
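
Both avc_utils.cpp hunks replace plain `* 16 + 16` arithmetic with __builtin_mul_overflow/__builtin_add_overflow (GCC/Clang builtins), so a hostile SPS cannot wrap the width/height computation; on overflow the dimension collapses to 0 and is rejected downstream. A self-contained sketch of the same guard, with hypothetical names:

    #include <cstdint>
    #include <cstdio>

    // Compute mbsMinus1 * 16 + 16 with overflow detection; on wrap the result
    // is forced to 0 so the caller can reject the stream, which is what the
    // SPS parsing hunk above does for both width and height.
    static bool mbCountToPixels(unsigned mbsMinus1, int32_t* out) {
        unsigned tmp;
        if (__builtin_mul_overflow(mbsMinus1, 16u, &tmp) ||
            __builtin_add_overflow(tmp, 16u, out)) {
            *out = 0;
            return false;
        }
        return true;
    }

    int main() {
        int32_t width = 0;
        printf("ok=%d width=%d\n", mbCountToPixels(119, &width), (int)width);          // 1920
        printf("ok=%d width=%d\n", mbCountToPixels(0xFFFFFFFFu, &width), (int)width);   // overflow -> 0
        return 0;
    }
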
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 635ecfe..0950db0 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -2160,7 +2160,9 @@
return ERROR_MALFORMED;
}
- CHECK_LE(offset + aac_frame_length, buffer->size());
+ if (aac_frame_length > buffer->size() - offset) {
+ return ERROR_MALFORMED;
+ }
int64_t unitTimeUs = timeUs + numSamples * 1000000LL / sampleRate;
offset += aac_frame_length;
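
The PlaylistFetcher hunk swaps CHECK_LE(offset + aac_frame_length, buffer->size()) for `aac_frame_length > buffer->size() - offset`: offset is already within the buffer at this point in the parsing loop, so the subtraction cannot underflow, whereas the original addition could wrap for a crafted aac_frame_length, and a malformed stream now returns ERROR_MALFORMED instead of aborting. A tiny sketch of the safe form, with hypothetical sizes:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Assumes offset <= size, as in the fetcher's loop. Checking
    // `len <= size - offset` cannot wrap; `offset + len <= size` can when
    // len is attacker-controlled.
    static bool frameFits(size_t offset, size_t frameLen, size_t size) {
        return frameLen <= size - offset;
    }

    int main() {
        const size_t size = 1024, offset = 1000;
        printf("%d\n", frameFits(offset, 24, size));             // 1: exactly fits
        printf("%d\n", frameFits(offset, SIZE_MAX - 10, size));  // 0: additive form would wrap
        return 0;
    }
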
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index dc58c15..1af6276 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -24,15 +24,17 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/MediaSource.h>
#include <media/openmax/OMX_Video.h>
-#include <system/graphics-base.h>
+#include <ui/GraphicTypes.h>
namespace android {
struct AMessage;
-class MediaCodecBuffer;
-class IMediaSource;
-class VideoFrame;
struct MediaCodec;
+class IMediaSource;
+class MediaCodecBuffer;
+class Surface;
+class SurfaceControl;
+class VideoFrame;
struct FrameRect {
int32_t left, top, right, bottom;
@@ -44,13 +46,10 @@
const sp<MetaData> &trackMeta,
const sp<IMediaSource> &source);
- status_t init(
- int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
+ status_t init(int64_t frameTimeUs, int option, int colorFormat);
sp<IMemory> extractFrame(FrameRect *rect = NULL);
- status_t extractFrames(std::vector<sp<IMemory> >* frames);
-
static sp<IMemory> getMetadataOnly(
const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
@@ -59,9 +58,9 @@
virtual sp<AMessage> onGetFormatAndSeekOptions(
int64_t frameTimeUs,
- size_t numFrames,
int seekMode,
- MediaSource::ReadOptions *options) = 0;
+ MediaSource::ReadOptions *options,
+ sp<Surface> *window) = 0;
virtual status_t onExtractRect(FrameRect *rect) = 0;
@@ -79,24 +78,24 @@
sp<MetaData> trackMeta() const { return mTrackMeta; }
OMX_COLOR_FORMATTYPE dstFormat() const { return mDstFormat; }
+ ui::PixelFormat captureFormat() const { return mCaptureFormat; }
int32_t dstBpp() const { return mDstBpp; }
-
- void addFrame(const sp<IMemory> &frame) {
- mFrames.push_back(frame);
- }
+ void setFrame(const sp<IMemory> &frameMem) { mFrameMemory = frameMem; }
private:
AString mComponentName;
sp<MetaData> mTrackMeta;
sp<IMediaSource> mSource;
OMX_COLOR_FORMATTYPE mDstFormat;
+ ui::PixelFormat mCaptureFormat;
int32_t mDstBpp;
- std::vector<sp<IMemory> > mFrames;
+ sp<IMemory> mFrameMemory;
MediaSource::ReadOptions mReadOptions;
sp<MediaCodec> mDecoder;
sp<AMessage> mOutputFormat;
bool mHaveMoreInputs;
bool mFirstSample;
+ sp<Surface> mSurface;
status_t extractInternal();
@@ -112,9 +111,9 @@
protected:
virtual sp<AMessage> onGetFormatAndSeekOptions(
int64_t frameTimeUs,
- size_t numFrames,
int seekMode,
- MediaSource::ReadOptions *options) override;
+ MediaSource::ReadOptions *options,
+ sp<Surface> *window) override;
virtual status_t onExtractRect(FrameRect *rect) override {
// Rect extraction for sequences is not supported for now.
@@ -134,11 +133,15 @@
bool *done) override;
private:
+ sp<SurfaceControl> mSurfaceControl;
+ sp<SurfaceControl> mParent;
+ VideoFrame *mFrame;
bool mIsAvcOrHevc;
MediaSource::ReadOptions::SeekMode mSeekMode;
int64_t mTargetTimeUs;
- size_t mNumFrames;
- size_t mNumFramesDecoded;
+
+ sp<Surface> initSurfaceControl();
+ status_t captureSurfaceControl();
};
struct ImageDecoder : public FrameDecoder {
@@ -150,9 +153,9 @@
protected:
virtual sp<AMessage> onGetFormatAndSeekOptions(
int64_t frameTimeUs,
- size_t numFrames,
int seekMode,
- MediaSource::ReadOptions *options) override;
+ MediaSource::ReadOptions *options,
+ sp<Surface> *window) override;
virtual status_t onExtractRect(FrameRect *rect) override;
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index c50677a..ee51290 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -26,7 +26,7 @@
namespace android {
class DataSource;
-struct ImageDecoder;
+struct FrameDecoder;
struct FrameRect;
struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
@@ -47,9 +47,8 @@
int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual sp<IMemory> getImageRectAtIndex(
int index, int colorFormat, int left, int top, int right, int bottom);
- virtual status_t getFrameAtIndex(
- std::vector<sp<IMemory> >* frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getFrameAtIndex(
+ int index, int colorFormat, bool metaOnly);
virtual MediaAlbumArt *extractAlbumArt();
virtual const char *extractMetadata(int keyCode);
@@ -62,17 +61,17 @@
KeyedVector<int, String8> mMetaData;
MediaAlbumArt *mAlbumArt;
- sp<ImageDecoder> mImageDecoder;
- int mLastImageIndex;
+ sp<FrameDecoder> mDecoder;
+ int mLastDecodedIndex;
void parseMetaData();
void parseColorAspects(const sp<MetaData>& meta);
// Delete album art and clear metadata.
void clearMetadata();
- status_t getFrameInternal(
- int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
- sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
- virtual sp<IMemory> getImageInternal(
+ sp<IMemory> getFrameInternal(
+ int64_t timeUs, int option, int colorFormat, bool metaOnly);
+
+ sp<IMemory> getImageInternal(
int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 9263565..2b42040 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -141,6 +141,12 @@
AString key, value;
ssize_t equalPos = line.find("=");
+ /* The condition 'if (line.size() < 2 || line.c_str()[1] != '=')' a few lines above
+ * ensures '=' is at position 1. However, for robustness we do the following check.
+ */
+ if (equalPos < 0) {
+ return false;
+ }
key = AString(line, 0, equalPos + 1);
value = AString(line, equalPos + 1, line.size() - equalPos - 1);
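
The ASessionDescription hunk only adds a defensive bail-out when find("=") somehow fails, since the earlier length check should already guarantee '=' at position 1. For illustration, a free-standing version of the same split written against std::string (a hypothetical helper, not the AString-based code):

    #include <string>

    // Split "<type>=<value>" into key ("<type>=") and value, refusing lines
    // with no '=' at all -- the defensive case the hunk above adds before
    // indexing with equalPos.
    static bool splitSdpLine(const std::string& line, std::string* key, std::string* value) {
        const std::string::size_type equalPos = line.find('=');
        if (equalPos == std::string::npos) {
            return false;
        }
        *key = line.substr(0, equalPos + 1);
        *value = line.substr(equalPos + 1);
        return true;
    }
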
diff --git a/media/libstagefright/timedtext/TextDescriptions2.cpp b/media/libstagefright/timedtext/TextDescriptions2.cpp
index f48eacc..fd42d3a 100644
--- a/media/libstagefright/timedtext/TextDescriptions2.cpp
+++ b/media/libstagefright/timedtext/TextDescriptions2.cpp
@@ -145,7 +145,7 @@
tmpData += 8;
size_t remaining = size - 8;
- if (size < chunkSize) {
+ if (chunkSize <= 8 || size < chunkSize) {
return OK;
}
switch(chunkType) {
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index ca8cb78..6adf563 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -44,6 +44,7 @@
#include "MtpStringBuffer.h"
namespace android {
+static const int SN_EVENT_LOG_ID = 0x534e4554;
static const MtpOperationCode kSupportedOperationCodes[] = {
MTP_OPERATION_GET_DEVICE_INFO,
@@ -961,9 +962,20 @@
if (!parseDateTime(modified, modifiedTime))
modifiedTime = 0;
+ if ((strcmp(name, ".") == 0) || (strcmp(name, "..") == 0) ||
+ (strcmp(name, "/") == 0) || (strcmp(basename(name), name) != 0)) {
+ char errMsg[80];
+
+ snprintf(errMsg, sizeof(errMsg), "Invalid name: %s", (const char *) name);
+ ALOGE("%s (b/130656917)", errMsg);
+ android_errorWriteWithInfoLog(SN_EVENT_LOG_ID, "130656917", -1, errMsg,
+ strlen(errMsg));
+
+ return MTP_RESPONSE_INVALID_PARAMETER;
+ }
if (path[path.size() - 1] != '/')
path.append("/");
- path.append(name);
+ path.append(basename(name));
// check space first
if (mSendObjectFileSize > storage->getFreeSpace())
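
The MtpServer hunk (b/130656917) rejects SendObjectInfo names of ".", "..", "/" and anything whose basename differs from the name itself, then appends only basename(name) to the storage path, so a crafted name containing path components cannot escape the storage directory. An equivalent check written against std::string, as a hypothetical helper rather than the MTP code:

    #include <string>

    // Reject object names that are empty, ".", "..", or contain a path
    // separator; the MTP code reaches the same conclusion by comparing the
    // name against basename(name).
    static bool isSafeObjectName(const std::string& name) {
        if (name.empty() || name == "." || name == "..") {
            return false;
        }
        return name.find('/') == std::string::npos;
    }
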
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index 1f8799f..8cc9a9a 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -34,8 +34,11 @@
class IMtpDatabase;
class MtpStorage;
+class MtpMockServer;
class MtpServer {
+ // libFuzzer testing
+ friend class MtpMockServer;
private:
IMtpDatabase* mDatabase;
diff --git a/media/mtp/MtpStringBuffer.cpp b/media/mtp/MtpStringBuffer.cpp
index cd379bf..d8d425b 100644
--- a/media/mtp/MtpStringBuffer.cpp
+++ b/media/mtp/MtpStringBuffer.cpp
@@ -26,14 +26,31 @@
namespace {
-std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert;
+const char * utf16_cerror = "__CONVERSION_ERROR__";
+const char16_t * utf8_cerror = u"__CONVERSION_ERROR__";
+
+std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert(utf16_cerror, utf8_cerror);
static std::string utf16ToUtf8(std::u16string input_str) {
- return gConvert.to_bytes(input_str);
+ std::string conversion = gConvert.to_bytes(input_str);
+
+ if (conversion == utf16_cerror) {
+ ALOGE("Unable to convert UTF-16 string to UTF-8");
+ return "";
+ } else {
+ return conversion;
+ }
}
static std::u16string utf8ToUtf16(std::string input_str) {
- return gConvert.from_bytes(input_str);
+ std::u16string conversion = gConvert.from_bytes(input_str);
+
+ if (conversion == utf8_cerror) {
+ ALOGE("Unable to convert UTF-8 string to UTF-16");
+ return u"";
+ } else {
+ return conversion;
+ }
}
} // namespace
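
std::wstring_convert throws std::range_error on an invalid code sequence unless it is constructed with error strings, in which case to_bytes() returns the narrow error string and from_bytes() the wide one; the MtpStringBuffer hunk uses that to turn bad input into an empty string plus an ALOGE instead of an unhandled exception. A minimal sketch of the same idea (codecvt_utf8_utf16 is deprecated since C++17 but is what this code relies on):

    #include <codecvt>
    #include <locale>
    #include <string>

    static const char*     kNarrowErr = "__CONVERSION_ERROR__";
    static const char16_t* kWideErr   = u"__CONVERSION_ERROR__";

    // Constructed with error strings, the converter returns them on failure
    // instead of throwing: to_bytes() yields kNarrowErr, from_bytes() kWideErr.
    static std::string utf16ToUtf8(const std::u16string& in) {
        std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t>
                convert(kNarrowErr, kWideErr);
        std::string out = convert.to_bytes(in);
        return out == kNarrowErr ? std::string() : out;
    }
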
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 8564576..84a20d3 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -150,6 +150,7 @@
ret += copyFile(oldFile.c_str(), newFile.c_str());
}
}
+ closedir(dir);
return ret;
}
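
The MtpUtils hunk adds the closedir() that was missing from the copy helper, plugging a directory-handle leak on every call. In new code the same guarantee is easier to get with an RAII wrapper; a small sketch assuming POSIX <dirent.h>:

    #include <dirent.h>

    #include <memory>
    #include <string>

    // A unique_ptr with closedir as its deleter releases the handle on every
    // return path, so the fix in the hunk above cannot be forgotten again.
    using DirPtr = std::unique_ptr<DIR, int (*)(DIR*)>;

    static int countEntries(const std::string& path) {
        DirPtr dir(opendir(path.c_str()), closedir);
        if (!dir) {
            return -1;
        }
        int n = 0;
        while (readdir(dir.get()) != nullptr) {
            ++n;  // includes "." and ".."
        }
        return n;  // DIR* closed automatically here
    }
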
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0b745ac..355d945 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1357,8 +1357,8 @@
String8(AudioParameter::keyFrameCount),
String8(AudioParameter::keyInputSource),
String8(AudioParameter::keyMonoOutput),
- String8(AudioParameter::keyStreamConnect),
- String8(AudioParameter::keyStreamDisconnect),
+ String8(AudioParameter::keyDeviceConnect),
+ String8(AudioParameter::keyDeviceDisconnect),
String8(AudioParameter::keyStreamSupportedFormats),
String8(AudioParameter::keyStreamSupportedChannels),
String8(AudioParameter::keyStreamSupportedSamplingRates),
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c5b9953..3eacc8c 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -105,13 +105,8 @@
return mSQ.poll();
}
-void FastMixer::setNBLogWriter(NBLog::Writer *logWriter)
+void FastMixer::setNBLogWriter(NBLog::Writer *logWriter __unused)
{
- // FIXME If mMixer is set or changed prior to this, we don't inform correctly.
- // Should cache logWriter and re-apply it at the assignment to mMixer.
- if (mMixer != NULL) {
- mMixer->setNBLogWriter(logWriter);
- }
}
void FastMixer::onIdle()
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index 04b32c2..8b7a124 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -124,7 +124,7 @@
mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
tlNBLogWriter = next->mNBLogWriter != NULL ?
next->mNBLogWriter : mDummyNBLogWriter.get();
- setNBLogWriter(tlNBLogWriter); // FastMixer informs its AudioMixer, FastCapture ignores
+ setNBLogWriter(tlNBLogWriter); // This is used for debugging only
// We want to always have a valid reference to the previous (non-idle) state.
// However, the state queue only guarantees access to current and previous states.
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index bcd351d..cf15045 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2609,7 +2609,7 @@
LOG_ALWAYS_FATAL_IF(result != OK,
"Error when retrieving output stream buffer size: %d", result);
mFrameCount = mBufferSize / mFrameSize;
- if (mFrameCount & 15) {
+ if ((mType == MIXER || mType == DUPLICATING) && (mFrameCount & 15)) {
ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -5301,11 +5301,11 @@
return false;
}
// Check validity as we don't call AudioMixer::create() here.
- if (!AudioMixer::isValidFormat(format)) {
+ if (!mAudioMixer->isValidFormat(format)) {
ALOGW("%s: invalid format: %#x", __func__, format);
return false;
}
- if (!AudioMixer::isValidChannelMask(channelMask)) {
+ if (!mAudioMixer->isValidChannelMask(channelMask)) {
ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
return false;
}
diff --git a/services/audiopolicy/audio_policy.conf b/services/audiopolicy/audio_policy.conf
deleted file mode 100644
index 9b83fef..0000000
--- a/services/audiopolicy/audio_policy.conf
+++ /dev/null
@@ -1,145 +0,0 @@
-#
-# Template audio policy configuration file
-#
-
-# Global configuration section:
-# - before audio HAL version 3.0:
-# lists input and output devices always present on the device
-# as well as the output device selected by default.
-# Devices are designated by a string that corresponds to the enum in audio.h
-#
-# global_configuration {
-# attached_output_devices AUDIO_DEVICE_OUT_SPEAKER
-# default_output_device AUDIO_DEVICE_OUT_SPEAKER
-# attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_REMOTE_SUBMIX
-# }
-#
-# - after and including audio HAL 3.0 the global_configuration section is included in each
-# hardware module section.
-# it also includes the audio HAL version of this hw module:
-# global_configuration {
-# ...
-# audio_hal_version <major.minor> # audio HAL version in e.g. 3.0
-# }
-# other attributes (attached devices, default device) have to be included in the
-# global_configuration section of each hardware module
-
-
-# audio hardware module section: contains descriptors for all audio hw modules present on the
-# device. Each hw module node is named after the corresponding hw module library base name.
-# For instance, "primary" corresponds to audio.primary.<device>.so.
-# The "primary" module is mandatory and must include at least one output with
-# AUDIO_OUTPUT_FLAG_PRIMARY flag.
-# Each module descriptor contains one or more output profile descriptors and zero or more
-# input profile descriptors. Each profile lists all the parameters supported by a given output
-# or input stream category.
-# The "channel_masks", "formats", "devices" and "flags" are specified using strings corresponding
-# to enums in audio.h and audio_policy.h. They are concatenated by use of "|" without space or "\n".
-#
-# For audio HAL version posterior to 3.0 the following sections or sub sections can be present in
-# a hw module section:
-# - A "global_configuration" section: see above
-# - Optionally a "devices" section:
-# This section contains descriptors for audio devices with attributes like an address or a
-# gain controller. The syntax for the devices section and device descriptor is as follows:
-# devices {
-# <device name> { # <device name>: any string without space
-# type <device type> # <device type> e.g. AUDIO_DEVICE_OUT_SPEAKER
-# address <address> # optional: device address, char string less than 64 in length
-# }
-# }
-# - one or more "gains" sections can be present in a device descriptor section.
-# If present, they describe the capabilities of gain controllers attached to this input or
-# output device. e.g. :
-# <device name> { # <device name>: any string without space
-# type <device type> # <device type> e.g. AUDIO_DEVICE_OUT_SPEAKER
-# address <address> # optional: device address, char string less than 64 in length
-# gains {
-# <gain name> {
-# mode <gain modes supported> # e.g. AUDIO_GAIN_MODE_CHANNELS
-# channel_mask <controlled channels> # needed if mode AUDIO_GAIN_MODE_CHANNELS
-# min_value_mB <min value in millibel>
-# max_value_mB <max value in millibel>
-# default_value_mB <default value in millibel>
-# step_value_mB <step value in millibel>
-# min_ramp_ms <min duration in ms> # needed if mode AUDIO_GAIN_MODE_RAMP
-# max_ramp_ms <max duration ms> # needed if mode AUDIO_GAIN_MODE_RAMP
-# }
-# }
-# }
-# - when a device descriptor is present, output and input profiles can refer to this device by
-# its name in their "devices" section instead of specifying a device type. e.g. :
-# outputs {
-# primary {
-# sampling_rates 44100
-# channel_masks AUDIO_CHANNEL_OUT_STEREO
-# formats AUDIO_FORMAT_PCM_16_BIT
-# devices <device name>
-# flags AUDIO_OUTPUT_FLAG_PRIMARY
-# }
-# }
-# sample audio_policy.conf file below
-
-audio_hw_modules {
- primary {
- global_configuration {
- attached_output_devices AUDIO_DEVICE_OUT_SPEAKER
- default_output_device AUDIO_DEVICE_OUT_SPEAKER
- attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC
- audio_hal_version 3.0
- }
- devices {
- speaker {
- type AUDIO_DEVICE_OUT_SPEAKER
- gains {
- gain_1 {
- mode AUDIO_GAIN_MODE_JOINT
- min_value_mB -8400
- max_value_mB 4000
- default_value_mB 0
- step_value_mB 100
- }
- }
- }
- }
- outputs {
- primary {
- sampling_rates 48000
- channel_masks AUDIO_CHANNEL_OUT_STEREO
- formats AUDIO_FORMAT_PCM_16_BIT
- devices speaker
- flags AUDIO_OUTPUT_FLAG_PRIMARY
- }
- }
- inputs {
- primary {
- sampling_rates 8000|16000
- channel_masks AUDIO_CHANNEL_IN_MONO
- formats AUDIO_FORMAT_PCM_16_BIT
- devices AUDIO_DEVICE_IN_BUILTIN_MIC
- }
- }
- }
- r_submix {
- global_configuration {
- attached_input_devices AUDIO_DEVICE_IN_REMOTE_SUBMIX
- audio_hal_version 2.0
- }
- outputs {
- submix {
- sampling_rates 48000
- channel_masks AUDIO_CHANNEL_OUT_STEREO
- formats AUDIO_FORMAT_PCM_16_BIT
- devices AUDIO_DEVICE_OUT_REMOTE_SUBMIX
- }
- }
- inputs {
- submix {
- sampling_rates 48000
- channel_masks AUDIO_CHANNEL_IN_STEREO
- formats AUDIO_FORMAT_PCM_16_BIT
- devices AUDIO_DEVICE_IN_REMOTE_SUBMIX
- }
- }
- }
-}
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index f02f3cf..ebfba83 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -3,7 +3,6 @@
srcs: [
"src/AudioCollections.cpp",
- "src/AudioGain.cpp",
"src/AudioInputDescriptor.cpp",
"src/AudioOutputDescriptor.cpp",
"src/AudioPatch.cpp",
@@ -21,6 +20,7 @@
"src/TypeConverter.cpp",
],
shared_libs: [
+ "libaudiofoundation",
"libcutils",
"libhidlbase",
"liblog",
@@ -28,7 +28,10 @@
"libutils",
"libxml2",
],
- export_shared_lib_headers: ["libmedia"],
+ export_shared_lib_headers: [
+ "libaudiofoundation",
+ "libmedia",
+ ],
static_libs: [
"libaudioutils",
],
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 2264d8f..31c5041 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -19,7 +19,6 @@
#include <unordered_map>
#include <unordered_set>
-#include <AudioGain.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
@@ -40,7 +39,8 @@
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
sp<DeviceDescriptor> &defaultOutputDevice)
- : mHwModules(hwModules),
+ : mEngineLibraryNameSuffix(kDefaultEngineLibraryNameSuffix),
+ mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
mDefaultOutputDevice(defaultOutputDevice),
@@ -55,6 +55,14 @@
mSource = file;
}
+ const std::string& getEngineLibraryNameSuffix() const {
+ return mEngineLibraryNameSuffix;
+ }
+
+ void setEngineLibraryNameSuffix(const std::string& suffix) {
+ mEngineLibraryNameSuffix = suffix;
+ }
+
void setHwModules(const HwModuleCollection &hwModules)
{
mHwModules = hwModules;
@@ -108,6 +116,7 @@
void setDefault(void)
{
mSource = "AudioPolicyConfig::setDefault";
+ mEngineLibraryNameSuffix = kDefaultEngineLibraryNameSuffix;
mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
@@ -167,7 +176,10 @@
}
private:
+ static const constexpr char* const kDefaultEngineLibraryNameSuffix = "default";
+
std::string mSource;
+ std::string mEngineLibraryNameSuffix;
HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d906f11..2e9ddf4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -18,8 +18,8 @@
#include "AudioCollections.h"
#include "AudioProfile.h"
-#include "AudioGain.h"
#include "HandleGenerator.h"
+#include <media/AudioGain.h>
#include <utils/String8.h>
#include <utils/Vector.h>
#include <utils/RefBase.h>
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 33e506f..c7c1fee 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -116,6 +116,13 @@
DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
+ DeviceVector getFirstDevicesFromTypes(std::vector<audio_devices_t> orderedTypes) const;
+ sp<DeviceDescriptor> getFirstExistingDevice(std::vector<audio_devices_t> orderedTypes) const;
+
+ // If there are devices with the given type and the devices to add is not empty,
+ // remove all the devices with the given type and add all the devices to add.
+ void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
+
bool contains(const sp<DeviceDescriptor>& item) const { return indexOf(item) >= 0; }
/**
diff --git a/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
deleted file mode 100644
index 0a27947..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-
-/////////////////////////////////////////////////
-// Definitions for audio policy configuration file (audio_policy.conf)
-/////////////////////////////////////////////////
-
-#define AUDIO_HARDWARE_MODULE_ID_MAX_LEN 32
-
-#define AUDIO_POLICY_CONFIG_FILE "/system/etc/audio_policy.conf"
-#define AUDIO_POLICY_VENDOR_CONFIG_FILE "/vendor/etc/audio_policy.conf"
-
-// global configuration
-#define GLOBAL_CONFIG_TAG "global_configuration"
-
-#define ATTACHED_OUTPUT_DEVICES_TAG "attached_output_devices"
-#define DEFAULT_OUTPUT_DEVICE_TAG "default_output_device"
-#define ATTACHED_INPUT_DEVICES_TAG "attached_input_devices"
-#define SPEAKER_DRC_ENABLED_TAG "speaker_drc_enabled"
-#define AUDIO_HAL_VERSION_TAG "audio_hal_version"
-
-// hw modules descriptions
-#define AUDIO_HW_MODULE_TAG "audio_hw_modules"
-
-#define OUTPUTS_TAG "outputs"
-#define INPUTS_TAG "inputs"
-
-#define SAMPLING_RATES_TAG "sampling_rates"
-#define FORMATS_TAG "formats"
-#define CHANNELS_TAG "channel_masks"
-#define DEVICES_TAG "devices"
-#define FLAGS_TAG "flags"
-
-#define APM_DEVICES_TAG "devices"
-#define APM_DEVICE_TYPE "type"
-#define APM_DEVICE_ADDRESS "address"
-
-#define MIXERS_TAG "mixers"
-#define MIXER_TYPE "type"
-#define MIXER_TYPE_MUX "mux"
-#define MIXER_TYPE_MIX "mix"
-
-#define GAINS_TAG "gains"
-#define GAIN_MODE "mode"
-#define GAIN_CHANNELS "channel_mask"
-#define GAIN_MIN_VALUE "min_value_mB"
-#define GAIN_MAX_VALUE "max_value_mB"
-#define GAIN_DEFAULT_VALUE "default_value_mB"
-#define GAIN_STEP_VALUE "step_value_mB"
-#define GAIN_MIN_RAMP_MS "min_ramp_ms"
-#define GAIN_MAX_RAMP_MS "max_ramp_ms"
-
-#define DYNAMIC_VALUE_TAG "dynamic" // special value for "channel_masks", "sampling_rates" and
- // "formats" in outputs descriptors indicating that supported
- // values should be queried after opening the output.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index c90a582..e8cf485 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -21,7 +21,6 @@
#include "AudioPort.h"
#include "AudioRoute.h"
#include "HwModule.h"
-#include "AudioGain.h"
namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
deleted file mode 100644
index 2725870..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioGain"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "AudioGain.h"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <math.h>
-
-namespace android {
-
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
- mIndex = index;
- mUseInChannelMask = useInChannelMask;
- memset(&mGain, 0, sizeof(struct audio_gain));
-}
-
-void AudioGain::getDefaultConfig(struct audio_gain_config *config)
-{
- config->index = mIndex;
- config->mode = mGain.mode;
- config->channel_mask = mGain.channel_mask;
- if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
- config->values[0] = mGain.default_value;
- } else {
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
- }
- for (size_t i = 0; i < numValues; i++) {
- config->values[i] = mGain.default_value;
- }
- }
- if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
- config->ramp_duration_ms = mGain.min_ramp_ms;
- }
-}
-
-status_t AudioGain::checkConfig(const struct audio_gain_config *config)
-{
- if ((config->mode & ~mGain.mode) != 0) {
- return BAD_VALUE;
- }
- if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
- if ((config->values[0] < mGain.min_value) ||
- (config->values[0] > mGain.max_value)) {
- return BAD_VALUE;
- }
- } else {
- if ((config->channel_mask & ~mGain.channel_mask) != 0) {
- return BAD_VALUE;
- }
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(config->channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(config->channel_mask);
- }
- for (size_t i = 0; i < numValues; i++) {
- if ((config->values[i] < mGain.min_value) ||
- (config->values[i] > mGain.max_value)) {
- return BAD_VALUE;
- }
- }
- }
- if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
- if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
- (config->ramp_duration_ms > mGain.max_ramp_ms)) {
- return BAD_VALUE;
- }
- }
- return NO_ERROR;
-}
-
-void AudioGain::dump(String8 *dst, int spaces, int index) const
-{
- dst->appendFormat("%*sGain %d:\n", spaces, "", index+1);
- dst->appendFormat("%*s- mode: %08x\n", spaces, "", mGain.mode);
- dst->appendFormat("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
- dst->appendFormat("%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
- dst->appendFormat("%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
- dst->appendFormat("%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
- dst->appendFormat("%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
- dst->appendFormat("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
- dst->appendFormat("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index a096e8f..a9b87e3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -22,7 +22,6 @@
#include <policy.h>
#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
-#include "AudioGain.h"
#include "AudioPolicyMix.h"
#include "HwModule.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 8a60cf2..49524b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -21,10 +21,10 @@
#include "AudioOutputDescriptor.h"
#include "AudioPolicyMix.h"
#include "IOProfile.h"
-#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
#include "TypeConverter.h"
+#include <media/AudioGain.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 3a4db90..bf0cc94 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -18,7 +18,6 @@
//#define LOG_NDEBUG 0
#include "AudioPatch.h"
-#include "AudioGain.h"
#include "TypeConverter.h"
#include <log/log.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c42923a..0221348 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -22,7 +22,6 @@
#include "HwModule.h"
#include "AudioPort.h"
#include "IOProfile.h"
-#include "AudioGain.h"
#include <AudioOutputDescriptor.h>
namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index c11490a..68811e9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -19,7 +19,6 @@
#include "TypeConverter.h"
#include "AudioPort.h"
#include "HwModule.h"
-#include "AudioGain.h"
#include <policy.h>
#ifndef ARRAY_SIZE
@@ -366,7 +365,9 @@
if (mGains.size() != 0) {
dst->appendFormat("%*s- gains:\n", spaces, "");
for (size_t i = 0; i < mGains.size(); i++) {
- mGains[i]->dump(dst, spaces + 2, i);
+ std::string gainStr;
+ mGains[i]->dump(&gainStr, spaces + 2, i);
+ dst->append(gainStr.c_str());
}
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 69d6b0c..a5fe07b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -24,7 +24,6 @@
#include <media/AudioResamplerPublic.h>
#include <utils/Errors.h>
-#include "AudioGain.h"
#include "AudioPort.h"
#include "AudioProfile.h"
#include "HwModule.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 79f0919..92cbe4e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -19,7 +19,6 @@
#include "AudioRoute.h"
#include "HwModule.h"
-#include "AudioGain.h"
namespace android
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index ad07ab1..1dc7020 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -21,7 +21,6 @@
#include <utils/Log.h>
#include <utils/String8.h>
#include <TypeConverter.h>
-#include "AudioGain.h"
#include "AudioOutputDescriptor.h"
#include "AudioPatch.h"
#include "ClientDescriptor.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ecd5b34..57564e5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -22,7 +22,6 @@
#include <set>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
-#include "AudioGain.h"
#include "HwModule.h"
namespace android {
@@ -256,7 +255,6 @@
audio_devices_t curType = itemAt(i)->type() & ~AUDIO_DEVICE_BIT_IN;
if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
devices.add(itemAt(i));
- type &= ~curType;
ALOGV("DeviceVector::%s() for type %08x found %p",
__func__, itemAt(i)->type(), itemAt(i).get());
}
@@ -274,6 +272,38 @@
return nullptr;
}
+DeviceVector DeviceVector::getFirstDevicesFromTypes(
+ std::vector<audio_devices_t> orderedTypes) const
+{
+ DeviceVector devices;
+ for (auto deviceType : orderedTypes) {
+ if (!(devices = getDevicesFromTypeMask(deviceType)).isEmpty()) {
+ break;
+ }
+ }
+ return devices;
+}
+
+sp<DeviceDescriptor> DeviceVector::getFirstExistingDevice(
+ std::vector<audio_devices_t> orderedTypes) const {
+ sp<DeviceDescriptor> device;
+ for (auto deviceType : orderedTypes) {
+ if ((device = getDevice(deviceType, String8(""), AUDIO_FORMAT_DEFAULT)) != nullptr) {
+ break;
+ }
+ }
+ return device;
+}
+
+void DeviceVector::replaceDevicesByType(
+ audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
+ DeviceVector devicesToRemove = getDevicesFromTypeMask(typeToRemove);
+ if (!devicesToRemove.isEmpty() && !devicesToAdd.isEmpty()) {
+ remove(devicesToRemove);
+ add(devicesToAdd);
+ }
+}
+
void DeviceVector::dump(String8 *dst, const String8 &tag, int spaces, bool verbose) const
{
if (isEmpty()) {
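
getFirstDevicesFromTypes() and getFirstExistingDevice() both express an ordered preference: walk the candidate types in order and return the first lookup that yields something. A generic sketch of that pattern, detached from the audio-policy types (hypothetical template helper):

    #include <vector>

    // Walk an ordered list of candidate types and return the first lookup
    // result that is non-empty; empty if none match. This is the shape of
    // DeviceVector::getFirstDevicesFromTypes() above.
    template <typename Type, typename Lookup>
    static auto firstNonEmpty(const std::vector<Type>& orderedTypes, Lookup lookup)
            -> decltype(lookup(orderedTypes.front())) {
        for (const auto& type : orderedTypes) {
            auto result = lookup(type);
            if (!result.empty()) {
                return result;
            }
        }
        return {};
    }
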
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 1f9b725..99e282e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -19,7 +19,6 @@
#include "HwModule.h"
#include "IOProfile.h"
-#include "AudioGain.h"
#include <policy.h>
#include <system/audio.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index fe2eaee..5662dcf 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -20,7 +20,6 @@
#include <system/audio-base.h>
#include "IOProfile.h"
#include "HwModule.h"
-#include "AudioGain.h"
#include "TypeConverter.h"
namespace android {
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index ec64a7c..27bd3ff 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -44,7 +44,7 @@
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
- ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index cedc78f..fca9a60 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -17,18 +17,18 @@
#pragma once
#include <EngineConfig.h>
-#include <AudioPolicyManagerInterface.h>
+#include <EngineInterface.h>
#include <ProductStrategy.h>
#include <VolumeGroup.h>
namespace android {
namespace audio_policy {
-class EngineBase : public AudioPolicyManagerInterface
+class EngineBase : public EngineInterface
{
public:
///
- /// from AudioPolicyManagerInterface
+ /// from EngineInterface
///
android::status_t initCheck() override;
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 1a2a198..c538f52 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -19,7 +19,6 @@
#include "VolumeGroup.h"
#include <system/audio.h>
-#include <AudioPolicyManagerInterface.h>
#include <utils/RefBase.h>
#include <HandleGenerator.h>
#include <string>
@@ -27,6 +26,7 @@
#include <map>
#include <utils/Errors.h>
#include <utils/String8.h>
+#include <media/AudioAttributes.h>
namespace android {
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
index 54314e3..d3d0904 100644
--- a/services/audiopolicy/engine/common/include/VolumeCurve.h
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -18,7 +18,6 @@
#include "IVolumeCurves.h"
#include <policy.h>
-#include <AudioPolicyManagerInterface.h>
#include <utils/RefBase.h>
#include <HandleGenerator.h>
#include <utils/String8.h>
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
index c34b406..5378f64 100644
--- a/services/audiopolicy/engine/common/include/VolumeGroup.h
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -16,7 +16,6 @@
#pragma once
-#include <AudioPolicyManagerInterface.h>
#include <VolumeCurve.h>
#include <system/audio.h>
#include <utils/RefBase.h>
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index f74f190..ac3e462 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -19,6 +19,7 @@
#include "ProductStrategy.h"
+#include <media/AudioProductStrategy.h>
#include <media/TypeConverter.h>
#include <utils/String8.h>
#include <cstdint>
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 1ad7739..d47fbd2 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -32,9 +32,9 @@
#include <istream>
#include <cstdint>
+#include <stdarg.h>
#include <string>
-
namespace android {
using utilities::convertTo;
@@ -603,7 +603,39 @@
return NO_ERROR;
}
+namespace {
+
+class XmlErrorHandler {
+public:
+ XmlErrorHandler() {
+ xmlSetGenericErrorFunc(this, &xmlErrorHandler);
+ }
+ XmlErrorHandler(const XmlErrorHandler&) = delete;
+ XmlErrorHandler(XmlErrorHandler&&) = delete;
+ XmlErrorHandler& operator=(const XmlErrorHandler&) = delete;
+ XmlErrorHandler& operator=(XmlErrorHandler&&) = delete;
+ ~XmlErrorHandler() {
+ xmlSetGenericErrorFunc(NULL, NULL);
+ if (!mErrorMessage.empty()) {
+ ALOG(LOG_ERROR, "libxml2", "%s", mErrorMessage.c_str());
+ }
+ }
+ static void xmlErrorHandler(void* ctx, const char* msg, ...) {
+ char buffer[256];
+ va_list args;
+ va_start(args, msg);
+ vsnprintf(buffer, sizeof(buffer), msg, args);
+ va_end(args);
+ static_cast<XmlErrorHandler*>(ctx)->mErrorMessage += buffer;
+ }
+private:
+ std::string mErrorMessage;
+};
+
+} // namespace
+
ParsingResult parse(const char* path) {
+ XmlErrorHandler errorHandler;
xmlDocPtr doc;
doc = xmlParseFile(path);
if (doc == NULL) {
@@ -641,6 +673,7 @@
}
android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
+ XmlErrorHandler errorHandler;
xmlDocPtr doc;
doc = xmlParseFile(path);
if (doc == NULL) {
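
Both parse entry points above now instantiate XmlErrorHandler on the stack, so any libxml2 diagnostics emitted during xmlParseFile() are accumulated and flushed to the log when the handler goes out of scope. A cut-down sketch of the same RAII capture, with ALOG swapped for fprintf so it builds outside the Android tree (the libxml2 calls match the patch; everything else is illustrative):

```cpp
// Scoped libxml2 error capture, mirroring the XmlErrorHandler in the patch.
// ALOG is replaced by fprintf so the sketch builds outside the Android tree.
#include <cstdarg>
#include <cstdio>
#include <string>
#include <libxml/parser.h>
#include <libxml/xmlerror.h>

class ScopedXmlErrorCapture {
public:
    ScopedXmlErrorCapture() { xmlSetGenericErrorFunc(this, &onError); }
    ScopedXmlErrorCapture(const ScopedXmlErrorCapture&) = delete;
    ScopedXmlErrorCapture& operator=(const ScopedXmlErrorCapture&) = delete;
    ~ScopedXmlErrorCapture() {
        xmlSetGenericErrorFunc(nullptr, nullptr);  // restore the default handler
        if (!mMessage.empty()) {
            fprintf(stderr, "libxml2: %s\n", mMessage.c_str());
        }
    }

private:
    // libxml2 invokes this printf-style callback for every parser diagnostic.
    static void onError(void* ctx, const char* msg, ...) {
        char buffer[256];
        va_list args;
        va_start(args, msg);
        vsnprintf(buffer, sizeof(buffer), msg, args);
        va_end(args);
        static_cast<ScopedXmlErrorCapture*>(ctx)->mMessage += buffer;
    }

    std::string mMessage;
};

int main(int argc, char** argv) {
    ScopedXmlErrorCapture capture;  // errors from the parse below are logged on scope exit
    xmlDocPtr doc = xmlParseFile(argc > 1 ? argv[1] : "missing.xml");
    if (doc != nullptr) xmlFreeDoc(doc);
    return doc != nullptr ? 0 : 1;
}
```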
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index ebd82a7..ae3fc79 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
#pragma once
-#include <AudioGain.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <IOProfile.h>
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
similarity index 97%
rename from services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
rename to services/audiopolicy/engine/interface/EngineInterface.h
index b7fd031..0c58a7c 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -38,7 +38,7 @@
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
*/
-class AudioPolicyManagerInterface
+class EngineInterface
{
public:
/**
@@ -295,7 +295,13 @@
virtual void dump(String8 *dst) const = 0;
protected:
- virtual ~AudioPolicyManagerInterface() {}
+ virtual ~EngineInterface() {}
};
+__attribute__((visibility("default")))
+extern "C" EngineInterface* createEngineInstance();
+
+__attribute__((visibility("default")))
+extern "C" void destroyEngineInstance(EngineInterface *engine);
+
} // namespace android
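
These two exported functions form the entire ABI between the policy manager and an engine library: extern "C" keeps the symbol names unmangled so they can be resolved by literal string, and visibility("default") re-exports them from libraries otherwise built with -fvisibility=hidden (as the enginedefault Android.bp change below does). A generic sketch of the loading side under those assumptions; the plugin interface, library name, and symbol names here are illustrative, not the AOSP ones:

```cpp
// Generic sketch of the C-linkage factory ABI declared above: the loader
// resolves the two entry points by their literal, unmangled names.
#include <dlfcn.h>
#include <cstdio>

struct Plugin {                          // stand-in for EngineInterface
    virtual ~Plugin() = default;
};

using CreateFn  = Plugin* (*)();
using DestroyFn = void (*)(Plugin*);

int main() {
    void* handle = dlopen("libexampleplugin.so", RTLD_NOW);  // hypothetical library
    if (handle == nullptr) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    // extern "C" on the exported functions is what makes these literal-name
    // lookups possible; a mangled C++ symbol could not be found this way.
    auto create  = reinterpret_cast<CreateFn>(dlsym(handle, "createPluginInstance"));
    auto destroy = reinterpret_cast<DestroyFn>(dlsym(handle, "destroyPluginInstance"));
    if (create != nullptr && destroy != nullptr) {
        Plugin* p = create();
        destroy(p);                      // always return the object to its library
    }
    dlclose(handle);
    return 0;
}
```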
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index c27dc88..8f522f0 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -33,6 +33,7 @@
],
shared_libs: [
+ "libaudiofoundation",
"liblog",
"libcutils",
"libutils",
diff --git a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
index efc69da..f52de21 100644
--- a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
+++ b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
@@ -16,7 +16,7 @@
#pragma once
-class AudioPolicyManagerInterface;
+class EngineInterface;
class AudioPolicyPluginInterface;
namespace android {
@@ -69,7 +69,7 @@
* Compile time error will claim if invalid interface is requested.
*/
template <>
-AudioPolicyManagerInterface *EngineInstance::queryInterface() const;
+EngineInterface *EngineInstance::queryInterface() const;
template <>
AudioPolicyPluginInterface *EngineInstance::queryInterface() const;
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index cb45fcf..c37efca 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -361,7 +361,7 @@
}
template <>
-AudioPolicyManagerInterface *Engine::queryInterface()
+EngineInterface *Engine::queryInterface()
{
return this;
}
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 4662e7e..3b371d8 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -17,7 +17,7 @@
#pragma once
#include "EngineBase.h"
-#include <AudioPolicyManagerInterface.h>
+#include <EngineInterface.h>
#include <AudioPolicyPluginInterface.h>
#include "Collection.h"
diff --git a/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
index 2442590..b127796 100644
--- a/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
+++ b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include <AudioPolicyManagerInterface.h>
+#include <EngineInterface.h>
#include <AudioPolicyPluginInterface.h>
#include "AudioPolicyEngineInstance.h"
#include "Engine.h"
@@ -45,9 +45,9 @@
}
template <>
-AudioPolicyManagerInterface *EngineInstance::queryInterface() const
+EngineInterface *EngineInstance::queryInterface() const
{
- return getEngine()->queryInterface<AudioPolicyManagerInterface>();
+ return getEngine()->queryInterface<EngineInterface>();
}
template <>
@@ -57,5 +57,16 @@
}
} // namespace audio_policy
+
+extern "C" EngineInterface* createEngineInstance()
+{
+ return audio_policy::EngineInstance::getInstance()->queryInterface<EngineInterface>();
+}
+
+extern "C" void destroyEngineInstance(EngineInterface*)
+{
+ // The engine is a singleton.
+}
+
} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 5bfad29..72c8de1 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,7 +16,6 @@
#pragma once
-#include <AudioGain.h>
#include <AudioPort.h>
#include <HwModule.h>
#include <DeviceDescriptor.h>
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7b42c6a..aaf4158 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -1,16 +1,15 @@
cc_library_shared {
name: "libaudiopolicyenginedefault",
- export_include_dirs: ["include"],
srcs: [
"src/Engine.cpp",
"src/EngineInstance.cpp",
],
cflags: [
+ "-fvisibility=hidden",
"-Wall",
"-Werror",
"-Wextra",
],
- local_include_dirs: ["include"],
header_libs: [
"libbase_headers",
"libaudiopolicycommon",
@@ -22,6 +21,7 @@
"libaudiopolicyengine_config",
],
shared_libs: [
+ "libaudiofoundation",
"liblog",
"libcutils",
"libutils",
diff --git a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
deleted file mode 100644
index 1e329f0..0000000
--- a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-class AudioPolicyManagerInterface;
-
-namespace android
-{
-namespace audio_policy
-{
-
-class Engine;
-
-class EngineInstance
-{
-protected:
- EngineInstance();
-
-public:
- virtual ~EngineInstance();
-
- /**
- * Get Audio Policy Engine instance.
- *
- * @return pointer to Route Manager Instance object.
- */
- static EngineInstance *getInstance();
-
- /**
- * Interface query.
- * The first client of an interface of the policy engine will start the singleton.
- *
- * @tparam RequestedInterface: interface that the client is wishing to retrieve.
- *
- * @return interface handle.
- */
- template <class RequestedInterface>
- RequestedInterface *queryInterface() const;
-
-protected:
- /**
- * Get Audio Policy Engine instance.
- *
- * @return Audio Policy Engine singleton.
- */
- Engine *getEngine() const;
-
-private:
- /* Copy facilities are put private to disable copy. */
- EngineInstance(const EngineInstance &object);
- EngineInstance &operator=(const EngineInstance &object);
-};
-
-/**
- * Limit template instantation to supported type interfaces.
- * Compile time error will claim if invalid interface is requested.
- */
-template <>
-AudioPolicyManagerInterface *EngineInstance::queryInterface() const;
-
-} // namespace audio_policy
-} // namespace android
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 04170ac..cfb2206 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -136,27 +136,23 @@
return EngineBase::setForceUse(usage, config);
}
-audio_devices_t Engine::getDeviceForStrategyInt(legacy_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs,
- uint32_t outputDeviceTypesToIgnore) const
+DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs) const
{
- uint32_t device = AUDIO_DEVICE_NONE;
- uint32_t availableOutputDevicesType =
- availableOutputDevices.types() & ~outputDeviceTypesToIgnore;
+ DeviceVector devices;
switch (strategy) {
case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+ devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
if (isInCall() || outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
- device = getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ devices = getDevicesForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
} else {
bool media_active_locally =
outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_MUSIC),
@@ -165,17 +161,18 @@
toVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
// routing is same as media without the "remote" device
- device = getDeviceForStrategyInt(STRATEGY_MEDIA,
+ availableOutputDevices.remove(availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX));
+ devices = getDevicesForStrategyInt(STRATEGY_MEDIA,
availableOutputDevices,
- availableInputDevices, outputs,
- AUDIO_DEVICE_OUT_REMOTE_SUBMIX | outputDeviceTypesToIgnore);
+ availableInputDevices, outputs);
// if no media is playing on the device, check for mandatory use of "safe" speaker
// when media would have played on speaker, and the safe speaker path is available
- if (!media_active_locally
- && (device & AUDIO_DEVICE_OUT_SPEAKER)
- && (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
- device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
- device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ if (!media_active_locally) {
+ devices.replaceDevicesByType(
+ AUDIO_DEVICE_OUT_SPEAKER,
+ availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
}
break;
@@ -183,9 +180,8 @@
case STRATEGY_DTMF:
if (!isInCall()) {
// when off call, DTMF strategy follows the same rules as MEDIA strategy
- device = getDeviceForStrategyInt(
- STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ devices = getDevicesForStrategyInt(
+ STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
break;
}
// when in call, DTMF and PHONE strategies follow the same rules
@@ -197,24 +193,26 @@
// - cannot route from voice call RX OR
// - audio HAL version is < 3.0 and TX device is on the primary HW module
if (getPhoneState() == AUDIO_MODE_IN_CALL) {
- audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+ audio_devices_t txDevice = getDeviceForInputSource(
+ AUDIO_SOURCE_VOICE_COMMUNICATION)->type();
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
- audio_devices_t availPrimaryInputDevices =
- availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());
+ DeviceVector availPrimaryInputDevices =
+ availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
// TODO: getPrimaryOutput return only devices from first module in
// audio_policy_configuration.xml, hearing aid is not there, but it's
// a primary device
// FIXME: this is not the right way of solving this problem
- audio_devices_t availPrimaryOutputDevices =
- (primaryOutput->supportedDevices().types() | AUDIO_DEVICE_OUT_HEARING_AID) &
- availableOutputDevices.types();
+ DeviceVector availPrimaryOutputDevices = primaryOutput->supportedDevices();
+ availPrimaryOutputDevices.add(
+ availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID));
- if (((availableInputDevices.types() &
- AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
- (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
- availableOutputDevicesType = availPrimaryOutputDevices;
+ if ((availableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
+ String8(""), AUDIO_FORMAT_DEFAULT) == nullptr) ||
+ ((availPrimaryInputDevices.getDevice(
+ txDevice, String8(""), AUDIO_FORMAT_DEFAULT) != nullptr) &&
+ (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
+ availableOutputDevices = availPrimaryOutputDevices;
}
}
// for phone strategy, we first consider the forced use and then the available devices by
@@ -222,49 +220,40 @@
switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
case AUDIO_POLICY_FORCE_BT_SCO:
if (!isInCall() || strategy != STRATEGY_DTMF) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
- if (device) break;
+ devices = availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT);
+ if (!devices.isEmpty()) break;
}
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
- if (device) break;
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO});
+ if (!devices.isEmpty()) break;
// if SCO device is requested but no SCO device is available, fall back to default case
FALLTHROUGH_INTENDED;
default: // FORCE_NONE
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
- if (device) break;
+ devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
+ if (!devices.isEmpty()) break;
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
- if (device) break;
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES});
+ if (!devices.isEmpty()) break;
}
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
- if (device) break;
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+ AUDIO_DEVICE_OUT_LINE, AUDIO_DEVICE_OUT_USB_HEADSET,
+ AUDIO_DEVICE_OUT_USB_DEVICE});
+ if (!devices.isEmpty()) break;
if (!isInCall()) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
- if (device) break;
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+ AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
+ if (!devices.isEmpty()) break;
}
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
+ devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_EARPIECE);
break;
case AUDIO_POLICY_FORCE_SPEAKER:
@@ -273,22 +262,18 @@
if (!isInCall() &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
- if (device) break;
+ devices = availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER);
+ if (!devices.isEmpty()) break;
}
if (!isInCall()) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
- if (device) break;
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
- if (device) break;
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_DEVICE,
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_AUX_DIGITAL,
+ AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
+ if (!devices.isEmpty()) break;
}
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+ devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
break;
}
break;
@@ -298,9 +283,8 @@
// If incall, just select the STRATEGY_PHONE device
if (isInCall() ||
outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
- device = getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ devices = getDevicesForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
break;
}
FALLTHROUGH_INTENDED;
@@ -313,41 +297,37 @@
if ((strategy == STRATEGY_SONIFICATION) ||
(getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+ devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
}
// if SCO headset is connected and we are told to use it, play ringtone over
// speaker and BT SCO
- if ((availableOutputDevicesType & AUDIO_DEVICE_OUT_ALL_SCO) != 0) {
- uint32_t device2 = AUDIO_DEVICE_NONE;
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
- }
+ if (!availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_ALL_SCO).isEmpty()) {
+ DeviceVector devices2;
+ devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO});
// Use ONLY Bluetooth SCO output when ringing in vibration mode
if (!((getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
&& (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
if (getForceUse(AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING)
== AUDIO_POLICY_FORCE_BT_SCO) {
- if (device2 != AUDIO_DEVICE_NONE) {
- device = device2;
+ if (!devices2.isEmpty()) {
+ devices = devices2;
break;
}
}
}
// Use both Bluetooth SCO and phone default output when ringing in normal mode
if (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) {
- if ((strategy == STRATEGY_SONIFICATION) &&
- (device & AUDIO_DEVICE_OUT_SPEAKER) &&
- (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
- device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
- device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ if (strategy == STRATEGY_SONIFICATION) {
+ devices.replaceDevicesByType(
+ AUDIO_DEVICE_OUT_SPEAKER,
+ availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
- if (device2 != AUDIO_DEVICE_NONE) {
- device |= device2;
+ if (!devices2.isEmpty()) {
+ devices.add(devices2);
break;
}
}
@@ -361,25 +341,20 @@
// compressed format as they would likely not be mixed and dropped.
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
- audio_devices_t devices = desc->devices().types() &
- (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
- if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
- devices != AUDIO_DEVICE_NONE) {
- availableOutputDevicesType = availableOutputDevices.types() & ~devices;
+ if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat)) {
+ availableOutputDevices.remove(desc->devices().getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF
+ | AUDIO_DEVICE_OUT_HDMI_ARC));
}
}
- availableOutputDevices =
- availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
if (outputs.isActive(toVolumeSource(AUDIO_STREAM_RING)) ||
outputs.isActive(toVolumeSource(AUDIO_STREAM_ALARM))) {
- return getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ return getDevicesForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
}
if (isInCall()) {
- return getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ return getDevicesForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
}
}
// For other cases, STRATEGY_ACCESSIBILITY behaves like STRATEGY_MEDIA
@@ -388,128 +363,116 @@
// FIXME: STRATEGY_REROUTING follow STRATEGY_MEDIA for now
case STRATEGY_REROUTING:
case STRATEGY_MEDIA: {
- uint32_t device2 = AUDIO_DEVICE_NONE;
+ DeviceVector devices2;
if (strategy != STRATEGY_SONIFICATION) {
// no sonification on remote submix (e.g. WFD)
- if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
- device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+ sp<DeviceDescriptor> remoteSubmix;
+ if ((remoteSubmix = availableOutputDevices.getDevice(
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0"),
+ AUDIO_FORMAT_DEFAULT)) != nullptr) {
+ devices2.add(remoteSubmix);
}
}
if (isInCall() && (strategy == STRATEGY_MEDIA)) {
- device = getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
- outputDeviceTypesToIgnore);
+ devices = getDevicesForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
break;
}
// FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
- if ((device2 == AUDIO_DEVICE_NONE) &&
+ if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
+ devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
}
- if ((device2 == AUDIO_DEVICE_NONE) &&
+ if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
- }
+ devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER});
}
- if ((device2 == AUDIO_DEVICE_NONE) &&
+ if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+ devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
}
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+ if (devices2.isEmpty()) {
+ devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, AUDIO_DEVICE_OUT_LINE,
+ AUDIO_DEVICE_OUT_WIRED_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+ AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_DEVICE,
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET});
}
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
- }
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
- }
- if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+ if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION)) {
// no sonification on aux digital (e.g. HDMI)
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+ devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_AUX_DIGITAL);
}
- if ((device2 == AUDIO_DEVICE_NONE) &&
+ if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+ devices2 = availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET);
}
- if (device2 == AUDIO_DEVICE_NONE) {
- device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+ if (devices2.isEmpty()) {
+ devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
}
- int device3 = AUDIO_DEVICE_NONE;
+ DeviceVector devices3;
if (strategy == STRATEGY_MEDIA) {
// ARC, SPDIF and AUX_LINE can co-exist with others.
- device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
- device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
- device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
+ devices3 = availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_HDMI_ARC | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_AUX_LINE);
}
- device2 |= device3;
+ devices2.add(devices3);
// device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
// STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
- device |= device2;
+ devices.add(devices2);
// If hdmi system audio mode is on, remove speaker out of output list.
if ((strategy == STRATEGY_MEDIA) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
- device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ devices.remove(devices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER));
}
// for STRATEGY_SONIFICATION:
// if SPEAKER was selected, and SPEAKER_SAFE is available, use SPEAKER_SAFE instead
- if ((strategy == STRATEGY_SONIFICATION) &&
- (device & AUDIO_DEVICE_OUT_SPEAKER) &&
- (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
- device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
- device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ if (strategy == STRATEGY_SONIFICATION) {
+ devices.replaceDevicesByType(
+ AUDIO_DEVICE_OUT_SPEAKER,
+ availableOutputDevices.getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
} break;
default:
- ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+ ALOGW("getDevicesForStrategy() unknown strategy: %d", strategy);
break;
}
- if (device == AUDIO_DEVICE_NONE) {
- ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
- device = getApmObserver()->getDefaultOutputDevice()->type();
- ALOGE_IF(device == AUDIO_DEVICE_NONE,
- "getDeviceForStrategy() no default device defined");
+ if (devices.isEmpty()) {
+ ALOGV("getDevicesForStrategy() no device found for strategy %d", strategy);
+ sp<DeviceDescriptor> defaultOutputDevice = getApmObserver()->getDefaultOutputDevice();
+ if (defaultOutputDevice != nullptr) {
+ devices.add(defaultOutputDevice);
+ }
+ ALOGE_IF(devices.isEmpty(),
+ "getDevicesForStrategy() no default device defined");
}
- ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
- return device;
+
+ ALOGVV("getDevices"
+ "ForStrategy() strategy %d, device %x", strategy, devices.types());
+ return devices;
}
-audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
+sp<DeviceDescriptor> Engine::getDeviceForInputSource(audio_source_t inputSource) const
{
const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
const DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
- audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+ DeviceVector availableDevices = availableInputDevices;
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
- audio_devices_t availablePrimaryDeviceTypes = availableInputDevices.getDeviceTypesFromHwModule(
- primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
- uint32_t device = AUDIO_DEVICE_NONE;
+ DeviceVector availablePrimaryDevices = availableInputDevices.getDevicesFromHwModule(
+ primaryOutput->getModuleHandle());
+ sp<DeviceDescriptor> device;
// when a call is active, force device selection to match source VOICE_COMMUNICATION
// for most other input sources to avoid rerouting call TX audio
@@ -532,57 +495,47 @@
switch (inputSource) {
case AUDIO_SOURCE_DEFAULT:
case AUDIO_SOURCE_MIC:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
- device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
- } else if ((getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) &&
- (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
- device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
- device = AUDIO_DEVICE_IN_WIRED_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
- device = AUDIO_DEVICE_IN_USB_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
- device = AUDIO_DEVICE_IN_USB_DEVICE;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- }
- break;
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP, String8(""), AUDIO_FORMAT_DEFAULT);
+ if (device != nullptr) break;
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) {
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+ if (device != nullptr) break;
+ }
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
+ break;
case AUDIO_SOURCE_VOICE_COMMUNICATION:
// Allow only use of devices on primary input if in call and HAL does not support routing
// to voice call path.
if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
- (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
- availableDeviceTypes = availablePrimaryDeviceTypes;
+ (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
+ String8(""), AUDIO_FORMAT_DEFAULT)) == nullptr) {
+ availableDevices = availablePrimaryDevices;
}
switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
case AUDIO_POLICY_FORCE_BT_SCO:
// if SCO device is requested but no SCO device is available, fall back to default case
- if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
- device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+ if (device != nullptr) {
break;
}
FALLTHROUGH_INTENDED;
default: // FORCE_NONE
- if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
- device = AUDIO_DEVICE_IN_WIRED_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
- device = AUDIO_DEVICE_IN_USB_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
- device = AUDIO_DEVICE_IN_USB_DEVICE;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- }
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_POLICY_FORCE_SPEAKER:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
- device = AUDIO_DEVICE_IN_BACK_MIC;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- }
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_BACK_MIC, AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
}
break;
@@ -591,77 +544,60 @@
case AUDIO_SOURCE_UNPROCESSED:
case AUDIO_SOURCE_HOTWORD:
if (inputSource == AUDIO_SOURCE_HOTWORD) {
- availableDeviceTypes = availablePrimaryDeviceTypes;
+ availableDevices = availablePrimaryDevices;
}
- if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO &&
- availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
- device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
- device = AUDIO_DEVICE_IN_WIRED_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
- device = AUDIO_DEVICE_IN_USB_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
- device = AUDIO_DEVICE_IN_USB_DEVICE;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) {
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+ if (device != nullptr) break;
}
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_SOURCE_CAMCORDER:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
- device = AUDIO_DEVICE_IN_BACK_MIC;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
- // This is specifically for a device without built-in mic
- device = AUDIO_DEVICE_IN_USB_DEVICE;
- }
+ // For a device without a built-in mic, also consider a USB device
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_BACK_MIC, AUDIO_DEVICE_IN_BUILTIN_MIC,
+ AUDIO_DEVICE_IN_USB_DEVICE});
break;
case AUDIO_SOURCE_VOICE_DOWNLINK:
case AUDIO_SOURCE_VOICE_CALL:
case AUDIO_SOURCE_VOICE_UPLINK:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
- device = AUDIO_DEVICE_IN_VOICE_CALL;
- }
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_VOICE_CALL, String8(""), AUDIO_FORMAT_DEFAULT);
break;
case AUDIO_SOURCE_VOICE_PERFORMANCE:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
- device = AUDIO_DEVICE_IN_WIRED_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
- device = AUDIO_DEVICE_IN_USB_HEADSET;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
- device = AUDIO_DEVICE_IN_USB_DEVICE;
- } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
- device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- }
+ device = availableDevices.getFirstExistingDevice({
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_SOURCE_REMOTE_SUBMIX:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
- device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
- }
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8(""), AUDIO_FORMAT_DEFAULT);
break;
case AUDIO_SOURCE_FM_TUNER:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
- device = AUDIO_DEVICE_IN_FM_TUNER;
- }
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_FM_TUNER, String8(""), AUDIO_FORMAT_DEFAULT);
break;
case AUDIO_SOURCE_ECHO_REFERENCE:
- if (availableDeviceTypes & AUDIO_DEVICE_IN_ECHO_REFERENCE) {
- device = AUDIO_DEVICE_IN_ECHO_REFERENCE;
- }
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_ECHO_REFERENCE, String8(""), AUDIO_FORMAT_DEFAULT);
break;
default:
ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
break;
}
- if (device == AUDIO_DEVICE_NONE) {
+ if (device == nullptr) {
ALOGV("getDeviceForInputSource() no device found for source %d", inputSource);
- if (availableDeviceTypes & AUDIO_DEVICE_IN_STUB) {
- device = AUDIO_DEVICE_IN_STUB;
- }
- ALOGE_IF(device == AUDIO_DEVICE_NONE,
+ device = availableDevices.getDevice(
+ AUDIO_DEVICE_IN_STUB, String8(""), AUDIO_FORMAT_DEFAULT);
+ ALOGE_IF(device == nullptr,
"getDeviceForInputSource() no default device defined");
}
- ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+ ALOGV_IF(device != nullptr,
+ "getDeviceForInputSource()input source %d, device %08x",
+ inputSource, device->type());
return device;
}
@@ -684,11 +620,9 @@
auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
- audio_devices_t devices = getDeviceForStrategyInt(legacyStrategy,
- availableOutputDevices,
- availableInputDevices, outputs,
- (uint32_t)AUDIO_DEVICE_NONE);
- return availableOutputDevices.getDevicesFromTypeMask(devices);
+ return getDevicesForStrategyInt(legacyStrategy,
+ availableOutputDevices,
+ availableInputDevices, outputs);
}
DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
@@ -747,27 +681,25 @@
if (device != nullptr) {
return device;
}
- audio_devices_t deviceType = getDeviceForInputSource(attr.source);
- if (audio_is_remote_submix_device(deviceType)) {
- address = "0";
- std::size_t pos;
- std::string tags { attr.tags };
- if ((pos = tags.find("addr=")) != std::string::npos) {
- address = tags.substr(pos + std::strlen("addr="));
- }
+ device = getDeviceForInputSource(attr.source);
+ if (device == nullptr || !audio_is_remote_submix_device(device->type())) {
+ // Return immediately if the device is null or it is not a remote submix device.
+ return device;
}
- return availableInputDevices.getDevice(deviceType,
+
+ // For remote submix device, try to find the device by address.
+ address = "0";
+ std::size_t pos;
+ std::string tags { attr.tags };
+ if ((pos = tags.find("addr=")) != std::string::npos) {
+ address = tags.substr(pos + std::strlen("addr="));
+ }
+ return availableInputDevices.getDevice(device->type(),
String8(address.c_str()),
AUDIO_FORMAT_DEFAULT);
}
-template <>
-AudioPolicyManagerInterface *Engine::queryInterface()
-{
- return this;
-}
-
} // namespace audio_policy
} // namespace android
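
For remote submix inputs, getInputDeviceForAttributes() above now pulls the device address out of the attributes' tags string, expecting an "addr=<address>" tag and defaulting to "0". A standalone sketch of just that parsing step; the helper name is illustrative:

```cpp
// Extract the remote submix address from an audio_attributes_t-style tags
// string, defaulting to "0" as the patch does. Function name is illustrative.
#include <cassert>
#include <cstring>
#include <string>

static std::string remoteSubmixAddress(const char* tags) {
    std::string address = "0";                       // default address
    std::string t = tags != nullptr ? tags : "";
    std::size_t pos = t.find("addr=");
    if (pos != std::string::npos) {
        address = t.substr(pos + std::strlen("addr="));
    }
    return address;
}

int main() {
    assert(remoteSubmixAddress("addr=12") == "12");  // explicit address wins
    assert(remoteSubmixAddress("") == "0");          // no tag -> default "0"
    return 0;
}
```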
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index d5dfacc..4360c6f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -17,8 +17,7 @@
#pragma once
#include "EngineBase.h"
-#include "AudioPolicyManagerInterface.h"
-#include <AudioGain.h>
+#include "EngineInterface.h"
#include <policy.h>
namespace android
@@ -48,12 +47,9 @@
Engine();
virtual ~Engine() = default;
- template <class RequestedInterface>
- RequestedInterface *queryInterface();
-
private:
///
- /// from EngineBase, so from AudioPolicyManagerInterface
+ /// from EngineBase, so from EngineInterface
///
status_t setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config) override;
@@ -77,15 +73,14 @@
status_t setDefaultDevice(audio_devices_t device);
- audio_devices_t getDeviceForStrategyInt(legacy_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs,
- uint32_t outputDeviceTypesToIgnore) const;
+ DeviceVector getDevicesForStrategyInt(legacy_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs) const;
DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
- audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
+ sp<DeviceDescriptor> getDeviceForInputSource(audio_source_t inputSource) const;
DeviceStrategyMap mDevicesForStrategies;
diff --git a/services/audiopolicy/enginedefault/src/EngineInstance.cpp b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
index 17e9832..eeb3758 100644
--- a/services/audiopolicy/enginedefault/src/EngineInstance.cpp
+++ b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
@@ -14,41 +14,21 @@
* limitations under the License.
*/
-#include <AudioPolicyManagerInterface.h>
-#include "AudioPolicyEngineInstance.h"
+#include <EngineInterface.h>
#include "Engine.h"
-namespace android
-{
-namespace audio_policy
-{
+namespace android {
+namespace audio_policy {
-EngineInstance::EngineInstance()
+extern "C" EngineInterface* createEngineInstance()
{
+ return new (std::nothrow) Engine();
}
-EngineInstance *EngineInstance::getInstance()
+extern "C" void destroyEngineInstance(EngineInterface *engine)
{
- static EngineInstance instance;
- return &instance;
-}
-
-EngineInstance::~EngineInstance()
-{
-}
-
-Engine *EngineInstance::getEngine() const
-{
- static Engine engine;
- return &engine;
-}
-
-template <>
-AudioPolicyManagerInterface *EngineInstance::queryInterface() const
-{
- return getEngine()->queryInterface<AudioPolicyManagerInterface>();
+ delete static_cast<Engine*>(engine);
}
} // namespace audio_policy
} // namespace android
-
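
Note the asymmetry with the configurable engine earlier in the patch: here createEngineInstance() heap-allocates a fresh Engine and destroyEngineInstance() deletes it, while the configurable variant returns a singleton and its destroy function is a no-op. Both styles sit behind the same C ABI, so callers must always hand the pointer back to the matching destroy function and never assume which policy is in effect. A sketch of the two styles side by side, with illustrative stand-in types:

```cpp
// Two engine factories behind the same C ABI: heap-backed (like enginedefault
// above) and singleton-backed (like engineconfigurable). Callers treat them
// identically: always pass the pointer back to the destroy function.
// Types and names are illustrative stand-ins for the AOSP EngineInterface.
#include <cstdio>
#include <new>

struct EngineIface { virtual ~EngineIface() = default; };
struct HeapEngine : EngineIface {};
struct SingletonEngine : EngineIface {};

// Heap-backed: every create returns a new object, destroy deletes it.
extern "C" EngineIface* createHeapEngine() { return new (std::nothrow) HeapEngine(); }
extern "C" void destroyHeapEngine(EngineIface* e) { delete e; }

// Singleton-backed: create returns the one instance, destroy is a no-op.
extern "C" EngineIface* createSingletonEngine() {
    static SingletonEngine instance;
    return &instance;
}
extern "C" void destroySingletonEngine(EngineIface*) { /* singleton: nothing to free */ }

int main() {
    EngineIface* a = createHeapEngine();
    destroyHeapEngine(a);            // required: otherwise the object leaks
    EngineIface* b = createSingletonEngine();
    destroySingletonEngine(b);       // harmless no-op
    std::puts("both factory styles exercised");
    return 0;
}
```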
diff --git a/services/audiopolicy/manager/AudioPolicyFactory.cpp b/services/audiopolicy/manager/AudioPolicyFactory.cpp
index 7aff6a9..476a1ec 100644
--- a/services/audiopolicy/manager/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/manager/AudioPolicyFactory.cpp
@@ -21,7 +21,13 @@
extern "C" AudioPolicyInterface* createAudioPolicyManager(
AudioPolicyClientInterface *clientInterface)
{
- return new AudioPolicyManager(clientInterface);
+ AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);
+ status_t status = apm->initialize();
+ if (status != NO_ERROR) {
+ delete apm;
+ apm = nullptr;
+ }
+ return apm;
}
extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
new file mode 100644
index 0000000..1fa0d19
--- /dev/null
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -0,0 +1,44 @@
+cc_library_shared {
+ name: "libaudiopolicymanagerdefault",
+
+ srcs: [
+ "AudioPolicyManager.cpp",
+ "EngineLibrary.cpp",
+ ],
+
+ export_include_dirs: ["."],
+
+ shared_libs: [
+ "libaudiofoundation",
+ "libcutils",
+ "libdl",
+ "libutils",
+ "liblog",
+ "libaudiopolicy",
+ "libsoundtrigger",
+ "libmedia_helper",
+ "libmediametrics",
+ "libbinder",
+ "libhidlbase",
+ "libxml2",
+ // The default audio policy engine is always present in the system image.
+ // libaudiopolicyengineconfigurable can be built in addition by specifying
+ // a dependency on it in the device makefile. There will be no build time
+ // conflict with libaudiopolicyenginedefault.
+ "libaudiopolicyenginedefault",
+ ],
+
+ header_libs: [
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ ],
+
+ static_libs: ["libaudiopolicycomponents"],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+}
diff --git a/services/audiopolicy/managerdefault/Android.mk b/services/audiopolicy/managerdefault/Android.mk
deleted file mode 100644
index 684fc9f..0000000
--- a/services/audiopolicy/managerdefault/Android.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= AudioPolicyManager.cpp
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- liblog \
- libaudiopolicy \
- libsoundtrigger
-
-ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
-
-ifneq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-$(error Configurable policy does not support legacy conf file)
-endif #ifneq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-
-LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
-
-else
-
-LOCAL_SHARED_LIBRARIES += libaudiopolicyenginedefault
-
-endif # ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
-
-LOCAL_C_INCLUDES += \
- $(call include-path-for, audio-utils)
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiopolicycommon \
- libaudiopolicyengine_interface_headers \
- libaudiopolicymanager_interface_headers
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents
-
-LOCAL_SHARED_LIBRARIES += libmedia_helper
-LOCAL_SHARED_LIBRARIES += libmediametrics
-
-LOCAL_SHARED_LIBRARIES += libbinder libhidlbase libxml2
-
-ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
-endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-
-LOCAL_CFLAGS += -Wall -Werror
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE:= libaudiopolicymanagerdefault
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c048de3..83ae35e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -42,15 +42,12 @@
#include <set>
#include <unordered_set>
#include <vector>
-#include <AudioPolicyManagerInterface.h>
-#include <AudioPolicyEngineInstance.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <media/AudioParameter.h>
#include <private/android_filesystem_config.h>
#include <soundtrigger/SoundTrigger.h>
#include <system/audio.h>
-#include <audio_policy_conf.h>
#include "AudioPolicyManager.h"
#include <Serializer.h>
#include "TypeConverter.h"
@@ -97,7 +94,7 @@
{
AudioParameter param(device->address());
const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
- AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
+ AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
param.addInt(key, device->type());
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
}
@@ -475,6 +472,10 @@
std::unordered_set<audio_format_t> formatSet;
sp<HwModule> primaryModule =
mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+ if (primaryModule == nullptr) {
+ ALOGE("%s() unable to get primary module", __func__);
+ return NO_INIT;
+ }
DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
AUDIO_DEVICE_OUT_ALL_A2DP);
for (const auto& device : declaredDevices) {
@@ -839,7 +840,7 @@
// if explicitly requested
static const uint32_t kRelevantFlags =
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
- AUDIO_OUTPUT_FLAG_VOIP_RX);
+ AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
flags =
(audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
}
@@ -2239,16 +2240,22 @@
return status;
}
- // increment activity count before calling getNewInputDevice() below as only active sessions
+ // increment activity count before calling getNewInputDevice() below as only active sessions
// are considered for device selection
inputDesc->setClientActive(client, true);
// indicate active capture to sound trigger service if starting capture from a mic on
// primary HW module
sp<DeviceDescriptor> device = getNewInputDevice(inputDesc);
- setInputDevice(input, device, true /* force */);
+ if (device != nullptr) {
+ status = setInputDevice(input, device, true /* force */);
+ } else {
+ ALOGW("%s no new input device can be found for descriptor %d",
+ __FUNCTION__, inputDesc->getId());
+ status = BAD_VALUE;
+ }
- if (inputDesc->activeCount() == 1) {
+ if (status == NO_ERROR && inputDesc->activeCount() == 1) {
sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote();
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((policyMix != NULL)
@@ -2279,11 +2286,16 @@
address, "remote-submix", AUDIO_FORMAT_DEFAULT);
}
}
+ } else if (status != NO_ERROR) {
+ // Restore client activity state.
+ inputDesc->setClientActive(client, false);
+ inputDesc->stop();
}
- ALOGV("%s input %d source = %d exit", __FUNCTION__, input, client->source());
+ ALOGV("%s input %d source = %d status = %d exit",
+ __FUNCTION__, input, client->source(), status);
- return NO_ERROR;
+ return status;
}
status_t AudioPolicyManager::stopInput(audio_port_handle_t portId)
@@ -4294,17 +4306,8 @@
: AudioPolicyManager(clientInterface, false /*forTesting*/)
{
loadConfig();
- initialize();
}
-// This check is to catch any legacy platform updating to Q without having
-// switched to XML since its deprecation on O.
-// TODO: after Q release, remove this check and flag as XML is now the only
-// option and all legacy platform should have transitioned to XML.
-#ifndef USE_XML_AUDIO_POLICY_CONF
-#error Audio policy no longer supports legacy .conf configuration format
-#endif
-
void AudioPolicyManager::loadConfig() {
if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) {
ALOGE("could not load audio policy configuration file, setting defaults");
@@ -4313,17 +4316,18 @@
}
status_t AudioPolicyManager::initialize() {
- // Once policy config has been parsed, retrieve an instance of the engine and initialize it.
- audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
- if (!engineInstance) {
- ALOGE("%s: Could not get an instance of policy engine", __FUNCTION__);
- return NO_INIT;
- }
- // Retrieve the Policy Manager Interface
- mEngine = engineInstance->queryInterface<AudioPolicyManagerInterface>();
- if (mEngine == NULL) {
- ALOGE("%s: Failed to get Policy Engine Interface", __FUNCTION__);
- return NO_INIT;
+ {
+ auto engLib = EngineLibrary::load(
+ "libaudiopolicyengine" + getConfig().getEngineLibraryNameSuffix() + ".so");
+ if (!engLib) {
+ ALOGE("%s: Failed to load the engine library", __FUNCTION__);
+ return NO_INIT;
+ }
+ mEngine = engLib->createEngine();
+ if (mEngine == nullptr) {
+ ALOGE("%s: Failed to instantiate the APM engine", __FUNCTION__);
+ return NO_INIT;
+ }
}
mEngine->setObserver(this);
status_t status = mEngine->initCheck();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 612bd8f..d38176b 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -34,9 +34,7 @@
#include <media/PatchBuilder.h>
#include "AudioPolicyInterface.h"
-#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyManagerObserver.h>
-#include <AudioGain.h>
#include <AudioPolicyConfig.h>
#include <AudioPort.h>
#include <AudioPatch.h>
@@ -49,6 +47,7 @@
#include <AudioPolicyMix.h>
#include <EffectDescriptor.h>
#include <SoundTriggerSession.h>
+#include "EngineLibrary.h"
#include "TypeConverter.h"
namespace android {
@@ -307,6 +306,8 @@
return volumeGroup != VOLUME_GROUP_NONE ? NO_ERROR : BAD_VALUE;
}
+ status_t initialize();
+
protected:
// A constructor that allows more fine-grained control over initialization process,
// used in automatic tests.
@@ -321,7 +322,6 @@
// - initialize.
AudioPolicyConfig& getConfig() { return mConfig; }
void loadConfig();
- status_t initialize();
// From AudioPolicyManagerObserver
virtual const AudioPatchCollection &getAudioPatches() const
@@ -752,7 +752,7 @@
uint32_t nextAudioPortGeneration();
// Audio Policy Engine Interface.
- AudioPolicyManagerInterface *mEngine;
+ EngineInstance mEngine;
// Surround formats that are enabled manually. Taken into account when
// "encoded surround" is forced into "manual" mode.
diff --git a/services/audiopolicy/managerdefault/EngineLibrary.cpp b/services/audiopolicy/managerdefault/EngineLibrary.cpp
new file mode 100644
index 0000000..ef699aa
--- /dev/null
+++ b/services/audiopolicy/managerdefault/EngineLibrary.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM_EngineLoader"
+
+#include <dlfcn.h>
+#include <utils/Log.h>
+
+#include "EngineLibrary.h"
+
+namespace android {
+
+// static
+std::shared_ptr<EngineLibrary> EngineLibrary::load(std::string libraryPath)
+{
+ std::shared_ptr<EngineLibrary> engLib(new EngineLibrary());
+ return engLib->init(std::move(libraryPath)) ? engLib : nullptr;
+}
+
+EngineLibrary::~EngineLibrary()
+{
+ close();
+}
+
+bool EngineLibrary::init(std::string libraryPath)
+{
+ mLibraryHandle = dlopen(libraryPath.c_str(), 0);
+ if (mLibraryHandle == nullptr) {
+ ALOGE("Could not dlopen %s: %s", libraryPath.c_str(), dlerror());
+ return false;
+ }
+ mCreateEngineInstance = (EngineInterface* (*)())dlsym(mLibraryHandle, "createEngineInstance");
+ mDestroyEngineInstance = (void (*)(EngineInterface*))dlsym(
+ mLibraryHandle, "destroyEngineInstance");
+ if (mCreateEngineInstance == nullptr || mDestroyEngineInstance == nullptr) {
+ ALOGE("Could not find engine interface functions in %s", libraryPath.c_str());
+ close();
+ return false;
+ }
+ ALOGD("Loaded engine from %s", libraryPath.c_str());
+ return true;
+}
+
+EngineInstance EngineLibrary::createEngine()
+{
+ if (mCreateEngineInstance == nullptr || mDestroyEngineInstance == nullptr) {
+ return EngineInstance();
+ }
+ return EngineInstance(mCreateEngineInstance(),
+ [lib = shared_from_this(), destroy = mDestroyEngineInstance] (EngineInterface* e) {
+ destroy(e);
+ });
+}
+
+void EngineLibrary::close()
+{
+ if (mLibraryHandle != nullptr) {
+ dlclose(mLibraryHandle);
+ }
+ mLibraryHandle = nullptr;
+ mCreateEngineInstance = nullptr;
+ mDestroyEngineInstance = nullptr;
+}
+
+} // namespace android
diff --git a/services/audiopolicy/managerdefault/EngineLibrary.h b/services/audiopolicy/managerdefault/EngineLibrary.h
new file mode 100644
index 0000000..f143916
--- /dev/null
+++ b/services/audiopolicy/managerdefault/EngineLibrary.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+
+#include <EngineInterface.h>
+
+namespace android {
+
+using EngineInstance = std::unique_ptr<EngineInterface, std::function<void (EngineInterface*)>>;
+
+class EngineLibrary : public std::enable_shared_from_this<EngineLibrary> {
+public:
+ static std::shared_ptr<EngineLibrary> load(std::string libraryPath);
+ ~EngineLibrary();
+
+ EngineLibrary(const EngineLibrary&) = delete;
+ EngineLibrary(EngineLibrary&&) = delete;
+ EngineLibrary& operator=(const EngineLibrary&) = delete;
+ EngineLibrary& operator=(EngineLibrary&&) = delete;
+
+ EngineInstance createEngine();
+
+private:
+ EngineLibrary() = default;
+ bool init(std::string libraryPath);
+ void close();
+
+ void *mLibraryHandle = nullptr;
+ EngineInterface* (*mCreateEngineInstance)() = nullptr;
+ void (*mDestroyEngineInstance)(EngineInterface*) = nullptr;
+};
+
+} // namespace android
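
[Editor's note] The new EngineLibrary replaces link-time selection of the policy engine with a runtime dlopen(): the C factory symbols are resolved with dlsym(), and createEngine() hands out a unique_ptr whose deleter captures shared_from_this(), so the .so cannot be dlclose()d while any engine instance it produced is still alive. A stripped-down, self-contained sketch of that loader/deleter lifetime pattern (generic names, not the AOSP types; the library path and symbol names are hypothetical):

    #include <dlfcn.h>

    #include <functional>
    #include <memory>
    #include <string>

    struct Engine;  // opaque type implemented inside the plugin

    using EngineHandle = std::unique_ptr<Engine, std::function<void(Engine*)>>;

    class PluginLibrary : public std::enable_shared_from_this<PluginLibrary> {
    public:
        static std::shared_ptr<PluginLibrary> load(const std::string& path) {
            std::shared_ptr<PluginLibrary> lib(new PluginLibrary());
            lib->mHandle = dlopen(path.c_str(), RTLD_NOW);
            if (lib->mHandle == nullptr) return nullptr;
            lib->mCreate = reinterpret_cast<Engine* (*)()>(
                    dlsym(lib->mHandle, "createEngine"));
            lib->mDestroy = reinterpret_cast<void (*)(Engine*)>(
                    dlsym(lib->mHandle, "destroyEngine"));
            return (lib->mCreate && lib->mDestroy) ? lib : nullptr;
        }

        ~PluginLibrary() {
            if (mHandle != nullptr) dlclose(mHandle);
        }

        EngineHandle create() {
            if (mCreate == nullptr) return EngineHandle();
            // The deleter keeps a shared_ptr to this loader, so the library
            // stays mapped at least as long as the engine instance lives.
            return EngineHandle(mCreate(),
                    [self = shared_from_this()](Engine* e) { self->mDestroy(e); });
        }

    private:
        PluginLibrary() = default;
        void* mHandle = nullptr;
        Engine* (*mCreate)() = nullptr;
        void (*mDestroy)(Engine*) = nullptr;
    };

    // Usage: the engine safely outlives the local 'lib' variable.
    // EngineHandle makeEngine() {
    //     auto lib = PluginLibrary::load("libmyengine.so");
    //     return lib ? lib->create() : EngineHandle();
    // }
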
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index ab9f78b..c8d1459 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -7,6 +7,7 @@
$(call include-path-for, audio-utils) \
LOCAL_SHARED_LIBRARIES := \
+ libaudiofoundation \
libaudiopolicymanagerdefault \
libbase \
liblog \
@@ -41,6 +42,7 @@
include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := \
+ libaudiofoundation \
libbase \
liblog \
libmedia_helper \
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index de5670c..e10a716 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -30,7 +30,16 @@
using namespace android;
-TEST(AudioPolicyManagerTestInit, Failure) {
+TEST(AudioPolicyManagerTestInit, EngineFailure) {
+ AudioPolicyTestClient client;
+ AudioPolicyTestManager manager(&client);
+ manager.getConfig().setDefault();
+ manager.getConfig().setEngineLibraryNameSuffix("non-existent");
+ ASSERT_EQ(NO_INIT, manager.initialize());
+ ASSERT_EQ(NO_INIT, manager.initCheck());
+}
+
+TEST(AudioPolicyManagerTestInit, ClientFailure) {
AudioPolicyTestClient client;
AudioPolicyTestManager manager(&client);
manager.getConfig().setDefault();
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 17c2e02..e3893e5 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -43,6 +43,7 @@
"libcodec2_soft_vp8dec",
"libcodec2_soft_vp9dec",
"libcodec2_soft_av1dec",
+ "libcodec2_soft_gav1dec",
"libcodec2_soft_vp8enc",
"libcodec2_soft_vp9enc",
"libcodec2_soft_rawdec",
diff --git a/services/mediaextractor/Android.bp b/services/mediaextractor/Android.bp
index b812244..98cc69f 100644
--- a/services/mediaextractor/Android.bp
+++ b/services/mediaextractor/Android.bp
@@ -12,6 +12,7 @@
"libstagefright",
"libbinder",
"libutils",
+ "liblog",
],
}
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index e6a8375..af8c67b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -78,6 +78,11 @@
AAudioClientTracker::getInstance().registerClient(pid, client);
}
+bool AAudioService::isCallerInService() {
+ return mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
+ mAudioClient.clientUid == IPCThreadState::self()->getCallingUid();
+}
+
aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
aaudio::AAudioStreamConfiguration &configurationOutput) {
aaudio_result_t result = AAUDIO_OK;
@@ -105,8 +110,7 @@
if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
// only trust audioserver for in service indication
bool inService = false;
- if (mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
- mAudioClient.clientUid == IPCThreadState::self()->getCallingUid()) {
+ if (isCallerInService()) {
inService = request.isInService();
}
serviceStream = new AAudioServiceStreamMMAP(*this, inService);
@@ -274,12 +278,14 @@
result = AAUDIO_ERROR_INVALID_STATE;
} else {
const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ int32_t priority = isCallerInService()
+ ? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
serviceStream->setRegisteredThread(clientThreadId);
int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ priority, true /* isForApp */);
if (err != 0) {
ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ clientThreadId, errno, priority);
result = AAUDIO_ERROR_INTERNAL;
}
}
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index d21b1cd..43a59c3 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -87,6 +87,10 @@
private:
+ /** @return true if the client is the audioserver
+ */
+ bool isCallerInService();
+
/**
* Lookup stream and then validate access to the stream.
* @param streamHandle
@@ -106,9 +110,10 @@
aaudio::AAudioStreamTracker mStreamTracker;
- enum constants {
- DEFAULT_AUDIO_PRIORITY = 2
- };
+ // TODO Extract the priority constants from services/audioflinger/Threads.cpp
+ // and share them with this code. Look for "kPriorityFastMixer".
+ static constexpr int32_t kRealTimeAudioPriorityClient = 2;
+ static constexpr int32_t kRealTimeAudioPriorityService = 3;
};
} /* namespace android */
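
[Editor's note] For context on what these small integers mean: requestPriority() asks the privileged scheduling policy service to move the client's callback thread into a real-time scheduling class at the given priority, and the change above grants audioserver-internal clients (priority 3) a slightly higher level than app clients (priority 2). A generic pthreads sketch of elevating the current thread to SCHED_FIFO, shown only to illustrate the idea; it is not the AOSP mediautils/SchedulingPolicyService path, and it needs CAP_SYS_NICE or an RLIMIT_RTPRIO allowance to succeed:

    #include <pthread.h>
    #include <sched.h>

    #include <cstdio>

    // Promote the calling thread to SCHED_FIFO at the given RT priority.
    static int promoteToRealTime(int rtPriority) {
        sched_param param = {};
        param.sched_priority = rtPriority;  // e.g. 2 for a client, 3 for the service
        int err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
        if (err != 0) {
            fprintf(stderr, "SCHED_FIFO prio %d rejected: error %d\n", rtPriority, err);
        }
        return err;
    }

    int main() {
        return promoteToRealTime(2) == 0 ? 0 : 1;
    }
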
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
new file mode 100644
index 0000000..1b7a20c
--- /dev/null
+++ b/services/oboeservice/Android.bp
@@ -0,0 +1,57 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_shared {
+
+ name: "libaaudioservice",
+
+ srcs: [
+ "AAudioClientTracker.cpp",
+ "AAudioEndpointManager.cpp",
+ "AAudioMixer.cpp",
+ "AAudioService.cpp",
+ "AAudioServiceEndpoint.cpp",
+ "AAudioServiceEndpointCapture.cpp",
+ "AAudioServiceEndpointMMAP.cpp",
+ "AAudioServiceEndpointPlay.cpp",
+ "AAudioServiceEndpointShared.cpp",
+ "AAudioServiceStreamBase.cpp",
+ "AAudioServiceStreamMMAP.cpp",
+ "AAudioServiceStreamShared.cpp",
+ "AAudioStreamTracker.cpp",
+ "AAudioThread.cpp",
+ "SharedMemoryProxy.cpp",
+ "SharedRingBuffer.cpp",
+ "TimestampScheduler.cpp",
+ ],
+
+ cflags: [
+ "-Wno-unused-parameter",
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "libaaudio_internal",
+ "libaudioclient",
+ "libaudioflinger",
+ "libbase",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libmediautils",
+ "libutils",
+ ],
+
+}
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
deleted file mode 100644
index 5e4cd39..0000000
--- a/services/oboeservice/Android.mk
+++ /dev/null
@@ -1,60 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# AAudio Service
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := libaaudioservice
-LOCAL_MODULE_TAGS := optional
-
-LIBAAUDIO_DIR := ../../media/libaaudio
-LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
-
-LOCAL_C_INCLUDES := \
- $(TOPDIR)frameworks/av/services/audioflinger \
- $(call include-path-for, audio-utils) \
- frameworks/native/include \
- system/core/base/include \
- $(TOP)/frameworks/av/media/libaaudio/include \
- $(TOP)/frameworks/av/media/utils/include \
- frameworks/native/include \
- $(TOP)/external/tinyalsa/include \
- $(TOP)/frameworks/av/media/libaaudio/src
-
-LOCAL_SRC_FILES += \
- SharedMemoryProxy.cpp \
- SharedRingBuffer.cpp \
- AAudioClientTracker.cpp \
- AAudioEndpointManager.cpp \
- AAudioMixer.cpp \
- AAudioService.cpp \
- AAudioServiceEndpoint.cpp \
- AAudioServiceEndpointCapture.cpp \
- AAudioServiceEndpointMMAP.cpp \
- AAudioServiceEndpointPlay.cpp \
- AAudioServiceEndpointShared.cpp \
- AAudioServiceStreamBase.cpp \
- AAudioServiceStreamMMAP.cpp \
- AAudioServiceStreamShared.cpp \
- AAudioStreamTracker.cpp \
- TimestampScheduler.cpp \
- AAudioThread.cpp
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-# LOCAL_CFLAGS += -fvisibility=hidden
-LOCAL_CFLAGS += -Wno-unused-parameter
-LOCAL_CFLAGS += -Wall -Werror
-
-LOCAL_SHARED_LIBRARIES := \
- libaaudio_internal \
- libaudioflinger \
- libaudioclient \
- libbinder \
- libcutils \
- libmediautils \
- libutils \
- liblog
-
-include $(BUILD_SHARED_LIBRARY)
-
-