Merge "Fix strategy not used compiler warning"
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 78a77d4..3687b15 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -173,6 +173,13 @@
void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
+ // Change the brightness level of the flash unit associated with cameraId to strengthLevel.
+ // If the torch is in OFF state and strengthLevel > 0 then the torch will also be turned ON.
+ void turnOnTorchWithStrengthLevel(String cameraId, int strengthLevel, IBinder clientBinder);
+
+ // Get the brightness level of the flash unit associated with cameraId.
+ int getTorchStrengthLevel(String cameraId);
+
/**
* Notify the camera service of a system event. Should only be called from system_server.
*
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index c54813c..5f17f5b 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -83,6 +83,8 @@
oneway void onTorchStatusChanged(int status, String cameraId);
+ oneway void onTorchStrengthLevelChanged(String cameraId, int newTorchStrength);
+
/**
* Notify registered clients about camera access priority changes.
* Clients which were previously unable to open a certain camera device
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index 3d78aef..f5d0120 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -43,5 +43,5 @@
* {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
* {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
*/
- int getRotateAndCropOverride(String packageName, int lensFacing);
+ int getRotateAndCropOverride(String packageName, int lensFacing, int userId);
}
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index da887a2..d53d809 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -95,6 +95,9 @@
virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
return binder::Status::ok();
}
+ virtual binder::Status onTorchStrengthLevelChanged(const String16&, int32_t) {
+ return binder::Status::ok();
+ }
virtual binder::Status onCameraAccessPrioritiesChanged();
virtual binder::Status onCameraOpened(const String16&, const String16&) {
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0e9740a..4015417 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3660,7 +3660,8 @@
* YUV_420_888 | all output sizes available for JPEG, up to the maximum video size | LIMITED |
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
- * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,
+ * media performance class 12 or higher by setting
+ * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
* the primary camera devices (first rear/front camera in the camera ID list) will not
* support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
* smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3678,9 +3679,11 @@
* YUV_420_888 | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED |
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
- * to be media performance class S, or if the camera device isn't a primary rear/front
- * camera, the minimum required output stream configurations are the same as for applications
- * targeting SDK version older than 31.</p>
+ * to be media performance class 12 or better by setting
+ * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+ * or if the camera device isn't a primary rear/front camera, the minimum required output
+ * stream configurations are the same as for applications targeting SDK version older than
+ * 31.</p>
* <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
* mandatory stream configurations on a per-capability basis.</p>
* <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 9f2f430..17ea512 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -96,6 +96,12 @@
return binder::Status::ok();
};
+ virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+ int32_t /*torchStrength*/) {
+ // No op
+ return binder::Status::ok();
+ }
+
virtual binder::Status onCameraAccessPrioritiesChanged() {
// No op
return binder::Status::ok();
diff --git a/media/codec2/hidl/1.0/vts/OWNERS b/media/codec2/hidl/1.0/vts/OWNERS
index dbe89cf..32b11b8 100644
--- a/media/codec2/hidl/1.0/vts/OWNERS
+++ b/media/codec2/hidl/1.0/vts/OWNERS
@@ -1,8 +1,5 @@
+# Bug component: 25690
# Media team
lajos@google.com
-pawin@google.com
taklee@google.com
wonsik@google.com
-
-# VTS team
-dshi@google.com
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 70c63f2..b6024ff 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -430,6 +430,10 @@
LOG(DEBUG) << "WrappedDecoderInterface: FilterWrapper not found";
return C2_OK;
}
+ if (!filterWrapper->isFilteringEnabled(next)) {
+ LOG(VERBOSE) << "WrappedDecoderInterface: filtering not enabled";
+ return C2_OK;
+ }
std::vector<std::unique_ptr<C2Param>> params;
c2_status_t err = filterWrapper->queryParamsForPreviousComponent(next, &params);
if (err != C2_OK) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 23a326f..0de0b77 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1467,6 +1467,16 @@
std::list<std::unique_ptr<C2Work>> flushedConfigs;
mFlushedConfigs.lock()->swap(flushedConfigs);
if (!flushedConfigs.empty()) {
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ PipelineWatcher::Clock::time_point now = PipelineWatcher::Clock::now();
+ for (const std::unique_ptr<C2Work> &work : flushedConfigs) {
+ watcher->onWorkQueued(
+ work->input.ordinal.frameIndex.peeku(),
+ std::vector(work->input.buffers),
+ now);
+ }
+ }
err = mComponent->queue(&flushedConfigs);
if (err != C2_OK) {
ALOGW("[%s] Error while queueing a flushed config", mName);
@@ -1533,41 +1543,45 @@
setDescrambler(nullptr);
}
-
void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
ALOGV("[%s] flush", mName);
- std::vector<uint64_t> indices;
std::list<std::unique_ptr<C2Work>> configs;
mInput.lock()->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
- for (const std::unique_ptr<C2Work> &work : flushedWork) {
- indices.push_back(work->input.ordinal.frameIndex.peeku());
- if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
- continue;
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ for (const std::unique_ptr<C2Work> &work : flushedWork) {
+ uint64_t frameIndex = work->input.ordinal.frameIndex.peeku();
+ if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
+ watcher->onWorkDone(frameIndex);
+ continue;
+ }
+ if (work->input.buffers.empty()
+ || work->input.buffers.front() == nullptr
+ || work->input.buffers.front()->data().linearBlocks().empty()) {
+ ALOGD("[%s] no linear codec config data found", mName);
+ watcher->onWorkDone(frameIndex);
+ continue;
+ }
+ std::unique_ptr<C2Work> copy(new C2Work);
+ copy->input.flags = C2FrameData::flags_t(
+ work->input.flags | C2FrameData::FLAG_DROP_FRAME);
+ copy->input.ordinal = work->input.ordinal;
+ copy->input.ordinal.frameIndex = mFrameIndex++;
+ for (size_t i = 0; i < work->input.buffers.size(); ++i) {
+ copy->input.buffers.push_back(watcher->onInputBufferReleased(frameIndex, i));
+ }
+ for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
+ copy->input.configUpdate.push_back(C2Param::Copy(*param));
+ }
+ copy->input.infoBuffers.insert(
+ copy->input.infoBuffers.begin(),
+ work->input.infoBuffers.begin(),
+ work->input.infoBuffers.end());
+ copy->worklets.emplace_back(new C2Worklet);
+ configs.push_back(std::move(copy));
+ watcher->onWorkDone(frameIndex);
+ ALOGV("[%s] stashed flushed codec config data", mName);
}
- if (work->input.buffers.empty()
- || work->input.buffers.front() == nullptr
- || work->input.buffers.front()->data().linearBlocks().empty()) {
- ALOGD("[%s] no linear codec config data found", mName);
- continue;
- }
- std::unique_ptr<C2Work> copy(new C2Work);
- copy->input.flags = C2FrameData::flags_t(work->input.flags | C2FrameData::FLAG_DROP_FRAME);
- copy->input.ordinal = work->input.ordinal;
- copy->input.ordinal.frameIndex = mFrameIndex++;
- copy->input.buffers.insert(
- copy->input.buffers.begin(),
- work->input.buffers.begin(),
- work->input.buffers.end());
- for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
- copy->input.configUpdate.push_back(C2Param::Copy(*param));
- }
- copy->input.infoBuffers.insert(
- copy->input.infoBuffers.begin(),
- work->input.infoBuffers.begin(),
- work->input.infoBuffers.end());
- copy->worklets.emplace_back(new C2Worklet);
- configs.push_back(std::move(copy));
- ALOGV("[%s] stashed flushed codec config data", mName);
}
mFlushedConfigs.lock()->swap(configs);
{
@@ -1582,12 +1596,6 @@
output->buffers->flushStash();
}
}
- {
- Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
- for (uint64_t index : indices) {
- watcher->onWorkDone(index);
- }
- }
}
void CCodecBufferChannel::onWorkDone(
diff --git a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
index 7c660dc..3615289 100644
--- a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
@@ -224,6 +224,17 @@
Copy<C2StreamBitrateInfo::output, C2StreamBitrateInfo::input>,
mInputBitrate)
.build());
+
+ addParameter(
+ DefineParam(mOutputProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_UNUSED, LEVEL_UNUSED))
+ .withFields({
+ C2F(mOutputProfileLevel, profile).any(),
+ C2F(mOutputProfileLevel, level).any(),
+ })
+ .withSetter(Setter<C2StreamProfileLevelInfo::output>)
+ .build());
}
// TODO: more SDK params
@@ -241,6 +252,8 @@
std::shared_ptr<C2StreamPixelAspectRatioInfo::output> mPixelAspectRatio;
std::shared_ptr<C2StreamBitrateInfo::input> mInputBitrate;
std::shared_ptr<C2StreamBitrateInfo::output> mOutputBitrate;
+ std::shared_ptr<C2StreamProfileLevelInfo::input> mInputProfileLevel;
+ std::shared_ptr<C2StreamProfileLevelInfo::output> mOutputProfileLevel;
template<typename T>
static C2R Setter(bool, C2P<T> &) {
@@ -576,4 +589,51 @@
<< "mOutputFormat = " << mConfig.mOutputFormat->debugString().c_str();
}
+typedef std::tuple<std::string, C2Config::profile_t, int32_t> HdrProfilesParams;
+
+class HdrProfilesTest
+ : public CCodecConfigTest,
+ public ::testing::WithParamInterface<HdrProfilesParams> {
+};
+
+TEST_P(HdrProfilesTest, SetFromSdk) {
+ HdrProfilesParams params = GetParam();
+ std::string mediaType = std::get<0>(params);
+ C2Config::profile_t c2Profile = std::get<1>(params);
+ int32_t sdkProfile = std::get<2>(params);
+
+ init(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER, mediaType.c_str());
+
+ ASSERT_EQ(OK, mConfig.initialize(mReflector, mConfigurable));
+
+ sp<AMessage> format{new AMessage};
+ format->setInt32(KEY_PROFILE, sdkProfile);
+
+ std::vector<std::unique_ptr<C2Param>> configUpdate;
+ ASSERT_EQ(OK, mConfig.getConfigUpdateFromSdkParams(
+ mConfigurable, format, D::ALL, C2_MAY_BLOCK, &configUpdate));
+
+ ASSERT_EQ(1u, configUpdate.size());
+ C2StreamProfileLevelInfo::input *pl =
+ FindParam<std::remove_pointer<decltype(pl)>::type>(configUpdate);
+ ASSERT_NE(nullptr, pl);
+ ASSERT_EQ(c2Profile, pl->profile);
+}
+
+HdrProfilesParams kHdrProfilesParams[] = {
+ std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10),
+ std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus),
+ std::make_tuple(MIMETYPE_VIDEO_VP9, PROFILE_VP9_2, VP9Profile2HDR),
+ std::make_tuple(MIMETYPE_VIDEO_VP9, PROFILE_VP9_2, VP9Profile2HDR10Plus),
+ std::make_tuple(MIMETYPE_VIDEO_VP9, PROFILE_VP9_3, VP9Profile3HDR),
+ std::make_tuple(MIMETYPE_VIDEO_VP9, PROFILE_VP9_3, VP9Profile3HDR10Plus),
+ std::make_tuple(MIMETYPE_VIDEO_AV1, PROFILE_AV1_0, AV1ProfileMain10HDR10),
+ std::make_tuple(MIMETYPE_VIDEO_AV1, PROFILE_AV1_0, AV1ProfileMain10HDR10Plus),
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ CCodecConfig,
+ HdrProfilesTest,
+ ::testing::ValuesIn(kHdrProfilesParams));
+
} // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index ca6a328..f557830 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -255,6 +255,8 @@
{ C2Config::PROFILE_HEVC_MAIN_STILL, HEVCProfileMainStill },
{ C2Config::PROFILE_HEVC_MAIN_INTRA, HEVCProfileMain },
{ C2Config::PROFILE_HEVC_MAIN_10_INTRA, HEVCProfileMain10 },
+ { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10 },
+ { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus },
};
ALookup<C2Config::profile_t, int32_t> sHevcHdrProfiles = {
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 01aaa2a..38bcb7c 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -207,10 +207,15 @@
"binding/RingBufferParcelable.cpp",
"binding/SharedMemoryParcelable.cpp",
"binding/SharedRegionParcelable.cpp",
- "flowgraph/AudioProcessorBase.cpp",
+ "flowgraph/ChannelCountConverter.cpp",
"flowgraph/ClipToRange.cpp",
+ "flowgraph/FlowGraphNode.cpp",
+ "flowgraph/ManyToMultiConverter.cpp",
+ "flowgraph/MonoBlend.cpp",
"flowgraph/MonoToMultiConverter.cpp",
+ "flowgraph/MultiToMonoConverter.cpp",
"flowgraph/RampLinear.cpp",
+ "flowgraph/SampleRateConverter.cpp",
"flowgraph/SinkFloat.cpp",
"flowgraph/SinkI16.cpp",
"flowgraph/SinkI24.cpp",
@@ -219,6 +224,14 @@
"flowgraph/SourceI16.cpp",
"flowgraph/SourceI24.cpp",
"flowgraph/SourceI32.cpp",
+ "flowgraph/resampler/IntegerRatio.cpp",
+ "flowgraph/resampler/LinearResampler.cpp",
+ "flowgraph/resampler/MultiChannelResampler.cpp",
+ "flowgraph/resampler/PolyphaseResampler.cpp",
+ "flowgraph/resampler/PolyphaseResamplerMono.cpp",
+ "flowgraph/resampler/PolyphaseResamplerStereo.cpp",
+ "flowgraph/resampler/SincResampler.cpp",
+ "flowgraph/resampler/SincResamplerStereo.cpp",
],
sanitize: {
integer_overflow: true,
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 61b50f3..d3e2912 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -21,6 +21,7 @@
#include "AAudioFlowGraph.h"
#include <flowgraph/ClipToRange.h>
+#include <flowgraph/MonoBlend.h>
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/RampLinear.h>
#include <flowgraph/SinkFloat.h>
@@ -37,8 +38,9 @@
aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
audio_format_t sinkFormat,
- int32_t sinkChannelCount) {
- AudioFloatOutputPort *lastOutput = nullptr;
+ int32_t sinkChannelCount,
+ bool useMonoBlend) {
+ FlowGraphPortFloatOutput *lastOutput = nullptr;
// TODO change back to ALOGD
ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
@@ -76,6 +78,12 @@
lastOutput = &mClipper->output;
}
+ if (useMonoBlend) {
+ mMonoBlend = std::make_unique<MonoBlend>(sourceChannelCount);
+ lastOutput->connect(&mMonoBlend->input);
+ lastOutput = &mMonoBlend->output;
+ }
+
// Expand the number of channels if required.
if (sourceChannelCount == 1 && sinkChannelCount > 1) {
mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index a49f64e..e719d91 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -24,6 +24,7 @@
#include <aaudio/AAudio.h>
#include <flowgraph/ClipToRange.h>
+#include <flowgraph/MonoBlend.h>
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/RampLinear.h>
@@ -41,7 +42,8 @@
aaudio_result_t configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
audio_format_t sinkFormat,
- int32_t sinkChannelCount);
+ int32_t sinkChannelCount,
+ bool useMonoBlend);
void process(const void *source, void *destination, int32_t numFrames);
@@ -53,11 +55,12 @@
void setRampLengthInFrames(int32_t numFrames);
private:
- std::unique_ptr<flowgraph::AudioSource> mSource;
- std::unique_ptr<flowgraph::RampLinear> mVolumeRamp;
- std::unique_ptr<flowgraph::ClipToRange> mClipper;
- std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
- std::unique_ptr<flowgraph::AudioSink> mSink;
+ std::unique_ptr<flowgraph::FlowGraphSourceBuffered> mSource;
+ std::unique_ptr<flowgraph::MonoBlend> mMonoBlend;
+ std::unique_ptr<flowgraph::RampLinear> mVolumeRamp;
+ std::unique_ptr<flowgraph::ClipToRange> mClipper;
+ std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
+ std::unique_ptr<flowgraph::FlowGraphSink> mSink;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 89d42bf..1b8e224 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,7 @@
#include <aaudio/AAudio.h>
#include <cutils/properties.h>
+#include <media/AudioParameter.h>
#include <media/AudioSystem.h>
#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>
@@ -270,6 +271,15 @@
mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
}
+ // Exclusive output streams should combine channels when mono audio adjustment
+ // is enabled.
+ if ((getDirection() == AAUDIO_DIRECTION_OUTPUT) &&
+ (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE)) {
+ bool isMasterMono = false;
+ android::AudioSystem::getMasterMono(&isMasterMono);
+ setRequireMonoBlend(isMasterMono);
+ }
+
// For debugging and analyzing the distribution of MMAP timestamps.
// For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
// For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5921799..c17c7a0 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -52,7 +52,8 @@
result = mFlowGraph.configure(getFormat(),
getSamplesPerFrame(),
getDeviceFormat(),
- getDeviceChannelCount());
+ getDeviceChannelCount(),
+ getRequireMonoBlend());
if (result != AAUDIO_OK) {
safeReleaseClose();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index afb8551..a3af753 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -277,6 +277,10 @@
return mIsPrivacySensitive;
}
+ bool getRequireMonoBlend() const {
+ return mRequireMonoBlend;
+ }
+
/**
* This is only valid after setChannelMask() and setFormat()
* have been called.
@@ -631,6 +635,13 @@
mIsPrivacySensitive = privacySensitive;
}
+ /**
+ * This should not be called after the open() call.
+ */
+ void setRequireMonoBlend(bool requireMonoBlend) {
+ mRequireMonoBlend = requireMonoBlend;
+ }
+
std::string mMetricsId; // set once during open()
std::mutex mStreamLock;
@@ -672,6 +683,7 @@
aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
bool mIsPrivacySensitive = false;
+ bool mRequireMonoBlend = false;
int32_t mSessionId = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
deleted file mode 100644
index d8ffd00..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <sys/types.h>
-#include "AudioProcessorBase.h"
-
-using namespace flowgraph;
-
-/***************************************************************************/
-int32_t AudioProcessorBase::pullData(int64_t framePosition, int32_t numFrames) {
- if (framePosition > mLastFramePosition) {
- mLastFramePosition = framePosition;
- mFramesValid = onProcess(framePosition, numFrames);
- }
- return mFramesValid;
-}
-
-/***************************************************************************/
-AudioFloatBlockPort::AudioFloatBlockPort(AudioProcessorBase &parent,
- int32_t samplesPerFrame,
- int32_t framesPerBlock)
- : AudioPort(parent, samplesPerFrame)
- , mFramesPerBlock(framesPerBlock) {
- int32_t numFloats = framesPerBlock * getSamplesPerFrame();
- mSampleBlock = new float[numFloats]{0.0f};
-}
-
-AudioFloatBlockPort::~AudioFloatBlockPort() {
- delete[] mSampleBlock;
-}
-
-/***************************************************************************/
-int32_t AudioFloatOutputPort::pullData(int64_t framePosition, int32_t numFrames) {
- numFrames = std::min(getFramesPerBlock(), numFrames);
- return mParent.pullData(framePosition, numFrames);
-}
-
-// These need to be in the .cpp file because of forward cross references.
-void AudioFloatOutputPort::connect(AudioFloatInputPort *port) {
- port->connect(this);
-}
-
-void AudioFloatOutputPort::disconnect(AudioFloatInputPort *port) {
- port->disconnect(this);
-}
-
-/***************************************************************************/
-int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
- return (mConnected == nullptr)
- ? std::min(getFramesPerBlock(), numFrames)
- : mConnected->pullData(framePosition, numFrames);
-}
-
-float *AudioFloatInputPort::getBlock() {
- if (mConnected == nullptr) {
- return AudioFloatBlockPort::getBlock(); // loaded using setValue()
- } else {
- return mConnected->getBlock();
- }
-}
-
-/***************************************************************************/
-int32_t AudioSink::pull(int32_t numFrames) {
- int32_t actualFrames = input.pullData(mFramePosition, numFrames);
- mFramePosition += actualFrames;
- return actualFrames;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.h b/media/libaaudio/src/flowgraph/AudioProcessorBase.h
deleted file mode 100644
index 972932f..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * AudioProcessorBase.h
- *
- * Audio processing node and ports that can be used in a simple data flow graph.
- */
-
-#ifndef FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-#define FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-
-#include <cassert>
-#include <cstring>
-#include <math.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// TODO consider publishing all header files under "include/libaaudio/FlowGraph.h"
-
-namespace flowgraph {
-
-// Default block size that can be overridden when the AudioFloatBlockPort is created.
-// If it is too small then we will have too much overhead from switching between nodes.
-// If it is too high then we will thrash the caches.
-constexpr int kDefaultBlockSize = 8; // arbitrary
-
-class AudioFloatInputPort;
-
-/***************************************************************************/
-class AudioProcessorBase {
-public:
- virtual ~AudioProcessorBase() = default;
-
- /**
- * Perform custom function.
- *
- * @param framePosition index of first frame to be processed
- * @param numFrames maximum number of frames requested for processing
- * @return number of frames actually processed
- */
- virtual int32_t onProcess(int64_t framePosition, int32_t numFrames) = 0;
-
- /**
- * If the framePosition is at or after the last frame position then call onProcess().
- * This prevents infinite recursion in case of cyclic graphs.
- * It also prevents nodes upstream from a branch from being executed twice.
- *
- * @param framePosition
- * @param numFrames
- * @return
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-protected:
- int64_t mLastFramePosition = -1; // Start at -1 so that the first pull works.
-
-private:
- int32_t mFramesValid = 0; // num valid frames in the block
-};
-
-/***************************************************************************/
-/**
- * This is a connector that allows data to flow between modules.
- */
-class AudioPort {
-public:
- AudioPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : mParent(parent)
- , mSamplesPerFrame(samplesPerFrame) {
- }
-
- // Ports are often declared public. So let's make them non-copyable.
- AudioPort(const AudioPort&) = delete;
- AudioPort& operator=(const AudioPort&) = delete;
-
- int32_t getSamplesPerFrame() const {
- return mSamplesPerFrame;
- }
-
-protected:
- AudioProcessorBase &mParent;
-
-private:
- const int32_t mSamplesPerFrame = 1;
-};
-
-/***************************************************************************/
-/**
- * This port contains a float type buffer.
- * The size is framesPerBlock * samplesPerFrame).
- */
-class AudioFloatBlockPort : public AudioPort {
-public:
- AudioFloatBlockPort(AudioProcessorBase &mParent,
- int32_t samplesPerFrame,
- int32_t framesPerBlock = kDefaultBlockSize
- );
-
- virtual ~AudioFloatBlockPort();
-
- int32_t getFramesPerBlock() const {
- return mFramesPerBlock;
- }
-
-protected:
-
- /**
- * @return buffer internal to the port or from a connected port
- */
- virtual float *getBlock() {
- return mSampleBlock;
- }
-
-
-private:
- const int32_t mFramesPerBlock = 1;
- float *mSampleBlock = nullptr; // allocated in constructor
-};
-
-/***************************************************************************/
-/**
- * The results of a module are stored in the buffer of the output ports.
- */
-class AudioFloatOutputPort : public AudioFloatBlockPort {
-public:
- AudioFloatOutputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : AudioFloatBlockPort(parent, samplesPerFrame) {
- }
-
- virtual ~AudioFloatOutputPort() = default;
-
- using AudioFloatBlockPort::getBlock;
-
- /**
- * Call the parent module's onProcess() method.
- * That may pull data from its inputs and recursively
- * process the entire graph.
- * @return number of frames actually pulled
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
- /**
- * Connect to the input of another module.
- * An input port can only have one connection.
- * An output port can have multiple connections.
- * If you connect a second output port to an input port
- * then it overwrites the previous connection.
- *
- * This not thread safe. Do not modify the graph topology form another thread while running.
- */
- void connect(AudioFloatInputPort *port);
-
- /**
- * Disconnect from the input of another module.
- * This not thread safe.
- */
- void disconnect(AudioFloatInputPort *port);
-};
-
-/***************************************************************************/
-class AudioFloatInputPort : public AudioFloatBlockPort {
-public:
- AudioFloatInputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
- : AudioFloatBlockPort(parent, samplesPerFrame) {
- }
-
- virtual ~AudioFloatInputPort() = default;
-
- /**
- * If connected to an output port then this will return
- * that output ports buffers.
- * If not connected then it returns the input ports own buffer
- * which can be loaded using setValue().
- */
- float *getBlock() override;
-
- /**
- * Pull data from any output port that is connected.
- */
- int32_t pullData(int64_t framePosition, int32_t numFrames);
-
- /**
- * Write every value of the float buffer.
- * This value will be ignored if an output port is connected
- * to this port.
- */
- void setValue(float value) {
- int numFloats = kDefaultBlockSize * getSamplesPerFrame();
- float *buffer = getBlock();
- for (int i = 0; i < numFloats; i++) {
- *buffer++ = value;
- }
- }
-
- /**
- * Connect to the output of another module.
- * An input port can only have one connection.
- * An output port can have multiple connections.
- * This not thread safe.
- */
- void connect(AudioFloatOutputPort *port) {
- assert(getSamplesPerFrame() == port->getSamplesPerFrame());
- mConnected = port;
- }
-
- void disconnect(AudioFloatOutputPort *port) {
- assert(mConnected == port);
- (void) port;
- mConnected = nullptr;
- }
-
- void disconnect() {
- mConnected = nullptr;
- }
-
-private:
- AudioFloatOutputPort *mConnected = nullptr;
-};
-
-/***************************************************************************/
-class AudioSource : public AudioProcessorBase {
-public:
- explicit AudioSource(int32_t channelCount)
- : output(*this, channelCount) {
- }
-
- virtual ~AudioSource() = default;
-
- AudioFloatOutputPort output;
-
- void setData(const void *data, int32_t numFrames) {
- mData = data;
- mSizeInFrames = numFrames;
- mFrameIndex = 0;
- }
-
-protected:
- const void *mData = nullptr;
- int32_t mSizeInFrames = 0; // number of frames in mData
- int32_t mFrameIndex = 0; // index of next frame to be processed
-};
-
-/***************************************************************************/
-class AudioSink : public AudioProcessorBase {
-public:
- explicit AudioSink(int32_t channelCount)
- : input(*this, channelCount) {
- }
-
- virtual ~AudioSink() = default;
-
- AudioFloatInputPort input;
-
- /**
- * Do nothing. The work happens in the read() method.
- *
- * @param framePosition index of first frame to be processed
- * @param numFrames
- * @return number of frames actually processed
- */
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override {
- (void) framePosition;
- (void) numFrames;
- return 0;
- };
-
- virtual int32_t read(void *data, int32_t numFrames) = 0;
-
-protected:
- int32_t pull(int32_t numFrames);
-
-private:
- int64_t mFramePosition = 0;
-};
-
-} /* namespace flowgraph */
-
-#endif /* FLOWGRAPH_AUDIO_PROCESSOR_BASE_H */
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
new file mode 100644
index 0000000..351def2
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "ChannelCountConverter.h"
+
+using namespace flowgraph;
+
+ChannelCountConverter::ChannelCountConverter(
+ int32_t inputChannelCount,
+ int32_t outputChannelCount)
+ : input(*this, inputChannelCount)
+ , output(*this, outputChannelCount) {
+}
+
+ChannelCountConverter::~ChannelCountConverter() = default;
+
+int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
+ int32_t inputChannelCount = input.getSamplesPerFrame();
+ int32_t outputChannelCount = output.getSamplesPerFrame();
+ for (int i = 0; i < numFrames; i++) {
+ int inputChannel = 0;
+ for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
+ // Copy input channels to output channels.
+ // Wrap if we run out of inputs.
+ // Discard if we run out of outputs.
+ outputBuffer[outputChannel] = inputBuffer[inputChannel];
+ inputChannel = (inputChannel == inputChannelCount)
+ ? 0 : inputChannel + 1;
+ }
+ inputBuffer += inputChannelCount;
+ outputBuffer += outputChannelCount;
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.h b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
new file mode 100644
index 0000000..e4b6f4e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Change the number of channels without mixing.
+ * When increasing the channel count, duplicate input channels.
+ * When decreasing the channel count, drop input channels.
+ */
+ class ChannelCountConverter : public FlowGraphNode {
+ public:
+ explicit ChannelCountConverter(
+ int32_t inputChannelCount,
+ int32_t outputChannelCount);
+
+ virtual ~ChannelCountConverter();
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "ChannelCountConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+ };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.cpp b/media/libaaudio/src/flowgraph/ClipToRange.cpp
index bd9c22a..d2f8a02 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.cpp
+++ b/media/libaaudio/src/flowgraph/ClipToRange.cpp
@@ -16,25 +16,23 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "ClipToRange.h"
using namespace flowgraph;
ClipToRange::ClipToRange(int32_t channelCount)
- : input(*this, channelCount)
- , output(*this, channelCount) {
+ : FlowGraphFilter(channelCount) {
}
-int32_t ClipToRange::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t ClipToRange::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
- int32_t numSamples = framesToProcess * output.getSamplesPerFrame();
+ int32_t numSamples = numFrames * output.getSamplesPerFrame();
for (int32_t i = 0; i < numSamples; i++) {
*outputBuffer++ = std::min(mMaximum, std::max(mMinimum, *inputBuffer++));
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.h b/media/libaaudio/src/flowgraph/ClipToRange.h
index 9eef254..22b7804 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.h
+++ b/media/libaaudio/src/flowgraph/ClipToRange.h
@@ -21,7 +21,7 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
@@ -30,13 +30,13 @@
constexpr float kDefaultMaxHeadroom = 1.41253754f;
constexpr float kDefaultMinHeadroom = -kDefaultMaxHeadroom;
-class ClipToRange : public AudioProcessorBase {
+class ClipToRange : public FlowGraphFilter {
public:
explicit ClipToRange(int32_t channelCount);
virtual ~ClipToRange() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
void setMinimum(float min) {
mMinimum = min;
@@ -54,8 +54,9 @@
return mMaximum;
}
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "ClipToRange";
+ }
private:
float mMinimum = kDefaultMinHeadroom;
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.cpp b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
new file mode 100644
index 0000000..4c76e77
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stdio.h"
+#include <algorithm>
+#include <sys/types.h>
+#include "FlowGraphNode.h"
+
+using namespace flowgraph;
+
+/***************************************************************************/
+int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
+ int32_t frameCount = numFrames;
+ // Prevent recursion and multiple execution of nodes.
+ if (callCount > mLastCallCount) {
+ mLastCallCount = callCount;
+ if (mDataPulledAutomatically) {
+ // Pull from all the upstream nodes.
+ for (auto &port : mInputPorts) {
+ // TODO fix bug of leaving unused data in some ports if using multiple AudioSource
+ frameCount = port.get().pullData(callCount, frameCount);
+ }
+ }
+ if (frameCount > 0) {
+ frameCount = onProcess(frameCount);
+ }
+ mLastFrameCount = frameCount;
+ } else {
+ frameCount = mLastFrameCount;
+ }
+ return frameCount;
+}
+
+void FlowGraphNode::pullReset() {
+ if (!mBlockRecursion) {
+ mBlockRecursion = true; // for cyclic graphs
+ // Pull reset from all the upstream nodes.
+ for (auto &port : mInputPorts) {
+ port.get().pullReset();
+ }
+ mBlockRecursion = false;
+ reset();
+ }
+}
+
+void FlowGraphNode::reset() {
+ mLastFrameCount = 0;
+ mLastCallCount = kInitialCallCount;
+}
+
+/***************************************************************************/
+FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
+ int32_t samplesPerFrame,
+ int32_t framesPerBuffer)
+ : FlowGraphPort(parent, samplesPerFrame)
+ , mFramesPerBuffer(framesPerBuffer)
+ , mBuffer(nullptr) {
+ size_t numFloats = framesPerBuffer * getSamplesPerFrame();
+ mBuffer = std::make_unique<float[]>(numFloats);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
+ numFrames = std::min(getFramesPerBuffer(), numFrames);
+ return mContainingNode.pullData(numFrames, callCount);
+}
+
+void FlowGraphPortFloatOutput::pullReset() {
+ mContainingNode.pullReset();
+}
+
+// These need to be in the .cpp file because of forward cross references.
+void FlowGraphPortFloatOutput::connect(FlowGraphPortFloatInput *port) {
+ port->connect(this);
+}
+
+void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
+ port->disconnect(this);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
+ return (mConnected == nullptr)
+ ? std::min(getFramesPerBuffer(), numFrames)
+ : mConnected->pullData(callCount, numFrames);
+}
+void FlowGraphPortFloatInput::pullReset() {
+ if (mConnected != nullptr) mConnected->pullReset();
+}
+
+float *FlowGraphPortFloatInput::getBuffer() {
+ if (mConnected == nullptr) {
+ return FlowGraphPortFloat::getBuffer(); // loaded using setValue()
+ } else {
+ return mConnected->getBuffer();
+ }
+}
+
+int32_t FlowGraphSink::pullData(int32_t numFrames) {
+ return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
+}
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.h b/media/libaaudio/src/flowgraph/FlowGraphNode.h
new file mode 100644
index 0000000..69c83dd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * FlowGraph.h
+ *
+ * Processing node and ports that can be used in a simple data flow graph.
+ * This was designed to work with audio but could be used for other
+ * types of data.
+ */
+
+#ifndef FLOWGRAPH_FLOW_GRAPH_NODE_H
+#define FLOWGRAPH_FLOW_GRAPH_NODE_H
+
+#include <cassert>
+#include <cstring>
+#include <math.h>
+#include <memory>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <vector>
+
+// TODO Move these classes into separate files.
+// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
+// run-time deallocation in audio thread.
+
+// Set this to 1 if using it inside the Android framework.
+// This code is kept here so that it can be moved easily between Oboe and AAudio.
+#ifndef FLOWGRAPH_ANDROID_INTERNAL
+#define FLOWGRAPH_ANDROID_INTERNAL 0
+#endif
+
+namespace flowgraph {
+
+// Default block size that can be overridden when the FlowGraphPortFloat is created.
+// If it is too small then we will have too much overhead from switching between nodes.
+// If it is too high then we will thrash the caches.
+constexpr int kDefaultBufferSize = 8; // arbitrary
+
+class FlowGraphPort;
+class FlowGraphPortFloatInput;
+
+/***************************************************************************/
+/**
+ * Base class for all nodes in the flowgraph.
+ */
+class FlowGraphNode {
+public:
+ FlowGraphNode() = default;
+ virtual ~FlowGraphNode() = default;
+
+ /**
+ * Read from the input ports,
+ * generate multiple frames of data then write the results to the output ports.
+ *
+ * @param numFrames maximum number of frames requested for processing
+ * @return number of frames actually processed
+ */
+ virtual int32_t onProcess(int32_t numFrames) = 0;
+
+ /**
+     * If the callCount is greater than the previous callCount then call
+ * pullData on all of the upstreamNodes.
+ * Then call onProcess().
+ * This prevents infinite recursion in case of cyclic graphs.
+ * It also prevents nodes upstream from a branch from being executed twice.
+ *
+ * @param callCount
+ * @param numFrames
+ * @return number of frames valid
+ */
+ int32_t pullData(int32_t numFrames, int64_t callCount);
+
+ /**
+ * Recursively reset all the nodes in the graph, starting from a Sink.
+ *
+ * This must not be called at the same time as pullData!
+ */
+ void pullReset();
+
+ /**
+ * Reset framePosition counters.
+ */
+ virtual void reset();
+
+ void addInputPort(FlowGraphPort &port) {
+ mInputPorts.emplace_back(port);
+ }
+
+ bool isDataPulledAutomatically() const {
+ return mDataPulledAutomatically;
+ }
+
+ /**
+ * Set true if you want the data pulled through the graph automatically.
+ * This is the default.
+ *
+ * Set false if you want to pull the data from the input ports in the onProcess() method.
+ * You might do this, for example, in a sample rate converting node.
+ *
+ * @param automatic
+ */
+ void setDataPulledAutomatically(bool automatic) {
+ mDataPulledAutomatically = automatic;
+ }
+
+ virtual const char *getName() {
+ return "FlowGraph";
+ }
+
+ int64_t getLastCallCount() {
+ return mLastCallCount;
+ }
+
+protected:
+
+ static constexpr int64_t kInitialCallCount = -1;
+ int64_t mLastCallCount = kInitialCallCount;
+
+ std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
+
+private:
+ bool mDataPulledAutomatically = true;
+ bool mBlockRecursion = false;
+ int32_t mLastFrameCount = 0;
+
+};
+
+/***************************************************************************/
+/**
+ * This is a connector that allows data to flow between modules.
+ *
+ * The ports are the primary means of interacting with a module.
+ * So they are generally declared as public.
+ *
+ */
+class FlowGraphPort {
+public:
+ FlowGraphPort(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : mContainingNode(parent)
+ , mSamplesPerFrame(samplesPerFrame) {
+ }
+
+ virtual ~FlowGraphPort() = default;
+
+ // Ports are often declared public. So let's make them non-copyable.
+ FlowGraphPort(const FlowGraphPort&) = delete;
+ FlowGraphPort& operator=(const FlowGraphPort&) = delete;
+
+ int32_t getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ virtual int32_t pullData(int64_t framePosition, int32_t numFrames) = 0;
+
+ virtual void pullReset() {}
+
+protected:
+ FlowGraphNode &mContainingNode;
+
+private:
+ const int32_t mSamplesPerFrame = 1;
+};
+
+/***************************************************************************/
+/**
+ * This port contains a 32-bit float buffer that can contain several frames of data.
+ * Processing the data in a block improves performance.
+ *
+ * The size is framesPerBuffer * samplesPerFrame).
+ */
+class FlowGraphPortFloat : public FlowGraphPort {
+public:
+ FlowGraphPortFloat(FlowGraphNode &parent,
+ int32_t samplesPerFrame,
+ int32_t framesPerBuffer = kDefaultBufferSize
+ );
+
+ virtual ~FlowGraphPortFloat() = default;
+
+ int32_t getFramesPerBuffer() const {
+ return mFramesPerBuffer;
+ }
+
+protected:
+
+ /**
+ * @return buffer internal to the port or from a connected port
+ */
+ virtual float *getBuffer() {
+ return mBuffer.get();
+ }
+
+private:
+ const int32_t mFramesPerBuffer = 1;
+ std::unique_ptr<float[]> mBuffer; // allocated in constructor
+};
+
+/***************************************************************************/
+/**
+ * The results of a node's processing are stored in the buffers of the output ports.
+ */
+class FlowGraphPortFloatOutput : public FlowGraphPortFloat {
+public:
+ FlowGraphPortFloatOutput(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : FlowGraphPortFloat(parent, samplesPerFrame) {
+ }
+
+ virtual ~FlowGraphPortFloatOutput() = default;
+
+ using FlowGraphPortFloat::getBuffer;
+
+ /**
+ * Connect to the input of another module.
+ * An input port can only have one connection.
+ * An output port can have multiple connections.
+ * If you connect a second output port to an input port
+ * then it overwrites the previous connection.
+ *
+ * This not thread safe. Do not modify the graph topology from another thread while running.
+ * Also do not delete a module while it is connected to another port if the graph is running.
+ */
+ void connect(FlowGraphPortFloatInput *port);
+
+ /**
+ * Disconnect from the input of another module.
+ * This not thread safe.
+ */
+ void disconnect(FlowGraphPortFloatInput *port);
+
+ /**
+ * Call the parent module's onProcess() method.
+ * That may pull data from its inputs and recursively
+ * process the entire graph.
+ * @return number of frames actually pulled
+ */
+ int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+
+ void pullReset() override;
+
+};
+
+/***************************************************************************/
+
+/**
+ * An input port for streaming audio data.
+ * You can set a value that will be used for processing.
+ * If you connect an output port to this port then its value will be used instead.
+ */
+class FlowGraphPortFloatInput : public FlowGraphPortFloat {
+public:
+ FlowGraphPortFloatInput(FlowGraphNode &parent, int32_t samplesPerFrame)
+ : FlowGraphPortFloat(parent, samplesPerFrame) {
+ // Add to parent so it can pull data from each input.
+ parent.addInputPort(*this);
+ }
+
+ virtual ~FlowGraphPortFloatInput() = default;
+
+ /**
+ * If connected to an output port then this will return
+ * that output ports buffers.
+ * If not connected then it returns the input ports own buffer
+ * which can be loaded using setValue().
+ */
+ float *getBuffer() override;
+
+ /**
+ * Write every value of the float buffer.
+ * This value will be ignored if an output port is connected
+ * to this port.
+ */
+ void setValue(float value) {
+ int numFloats = kDefaultBufferSize * getSamplesPerFrame();
+ float *buffer = getBuffer();
+ for (int i = 0; i < numFloats; i++) {
+ *buffer++ = value;
+ }
+ }
+
+ /**
+ * Connect to the output of another module.
+ * An input port can only have one connection.
+ * An output port can have multiple connections.
+ * This not thread safe.
+ */
+ void connect(FlowGraphPortFloatOutput *port) {
+ assert(getSamplesPerFrame() == port->getSamplesPerFrame());
+ mConnected = port;
+ }
+
+ void disconnect(FlowGraphPortFloatOutput *port) {
+ assert(mConnected == port);
+ (void) port;
+ mConnected = nullptr;
+ }
+
+ void disconnect() {
+ mConnected = nullptr;
+ }
+
+ /**
+ * Pull data from any output port that is connected.
+ */
+ int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+ void pullReset() override;
+
+private:
+ FlowGraphPortFloatOutput *mConnected = nullptr;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSource : public FlowGraphNode {
+public:
+ explicit FlowGraphSource(int32_t channelCount)
+ : output(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphSource() = default;
+
+ FlowGraphPortFloatOutput output;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSourceBuffered : public FlowGraphSource {
+public:
+ explicit FlowGraphSourceBuffered(int32_t channelCount)
+ : FlowGraphSource(channelCount) {}
+
+ virtual ~FlowGraphSourceBuffered() = default;
+
+ /**
+ * Specify buffer that the node will read from.
+ *
+ * @param data TODO Consider using std::shared_ptr.
+ * @param numFrames
+ */
+ void setData(const void *data, int32_t numFrames) {
+ mData = data;
+ mSizeInFrames = numFrames;
+ mFrameIndex = 0;
+ }
+
+protected:
+ const void *mData = nullptr;
+ int32_t mSizeInFrames = 0; // number of frames in mData
+ int32_t mFrameIndex = 0; // index of next frame to be processed
+};
+
+/***************************************************************************/
+/**
+ * Base class for an edge node in a graph that has no downstream nodes.
+ * It consumes data but does not output data.
+ * This graph will be executed when data is read() from this node
+ * by pulling data from upstream nodes.
+ */
+class FlowGraphSink : public FlowGraphNode {
+public:
+ explicit FlowGraphSink(int32_t channelCount)
+ : input(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphSink() = default;
+
+ FlowGraphPortFloatInput input;
+
+ /**
+ * Do nothing. The work happens in the read() method.
+ *
+ * @param numFrames
+ * @return number of frames actually processed
+ */
+ int32_t onProcess(int32_t numFrames) override {
+ return numFrames;
+ }
+
+ virtual int32_t read(void *data, int32_t numFrames) = 0;
+
+protected:
+ /**
+ * Pull data through the graph using this nodes last callCount.
+ * @param numFrames
+ * @return
+ */
+ int32_t pullData(int32_t numFrames);
+};
+
+/***************************************************************************/
+/**
+ * Base class for a node that has an input and an output with the same number of channels.
+ * This may include traditional filters, eg. FIR, but also include
+ * any processing node that converts input to output.
+ */
+class FlowGraphFilter : public FlowGraphNode {
+public:
+ explicit FlowGraphFilter(int32_t channelCount)
+ : input(*this, channelCount)
+ , output(*this, channelCount) {
+ }
+
+ virtual ~FlowGraphFilter() = default;
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
new file mode 100644
index 0000000..879685e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "ManyToMultiConverter.h"
+
+using namespace flowgraph;
+
+ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
+ : inputs(channelCount)
+ , output(*this, channelCount) {
+ for (int i = 0; i < channelCount; i++) {
+ inputs[i] = std::make_unique<FlowGraphPortFloatInput>(*this, 1);
+ }
+}
+
+int32_t ManyToMultiConverter::onProcess(int32_t numFrames) {
+ int32_t channelCount = output.getSamplesPerFrame();
+
+ for (int ch = 0; ch < channelCount; ch++) {
+ const float *inputBuffer = inputs[ch]->getBuffer();
+ float *outputBuffer = output.getBuffer() + ch;
+
+ for (int i = 0; i < numFrames; i++) {
+ // read one, write into the proper interleaved output channel
+ float sample = *inputBuffer++;
+ *outputBuffer = sample;
+ outputBuffer += channelCount; // advance to next multichannel frame
+ }
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.h b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
new file mode 100644
index 0000000..c7460ff
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+#define FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine multiple mono inputs into one interleaved multi-channel output.
+ */
+class ManyToMultiConverter : public flowgraph::FlowGraphNode {
+public:
+    explicit ManyToMultiConverter(int32_t channelCount);
+
+    virtual ~ManyToMultiConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    void setEnabled(bool /*enabled*/) {}
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatInput>> inputs;
+    flowgraph::FlowGraphPortFloatOutput output;
+
+    const char *getName() override {
+        return "ManyToMultiConverter";
+    }
+
+private:
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.cpp b/media/libaaudio/src/flowgraph/MonoBlend.cpp
new file mode 100644
index 0000000..62e2809
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "MonoBlend.h"
+
+using namespace flowgraph;
+
+MonoBlend::MonoBlend(int32_t channelCount)
+ : FlowGraphFilter(channelCount)
+ , mInvChannelCount(1. / channelCount)
+{
+}
+
+int32_t MonoBlend::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+
+    // Use int32_t indices to match numFrames/channelCount and avoid
+    // signed/unsigned comparison warnings (-Wsign-compare).
+    for (int32_t i = 0; i < numFrames; ++i) {
+        float accum = 0;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            accum += *inputBuffer++;
+        }
+        accum *= mInvChannelCount;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            *outputBuffer++ = accum;
+        }
+    }
+    return numFrames;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.h b/media/libaaudio/src/flowgraph/MonoBlend.h
new file mode 100644
index 0000000..7e3c35b
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MONO_BLEND_H
+#define FLOWGRAPH_MONO_BLEND_H
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine data between multiple channels so each channel is an average
+ * of all channels.
+ */
+class MonoBlend : public FlowGraphFilter {
+public:
+ explicit MonoBlend(int32_t channelCount);
+
+ virtual ~MonoBlend() = default;
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "MonoBlend";
+ }
+private:
+ const float mInvChannelCount;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MONO_BLEND_H
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index c6fcac6..c8d60b9 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -14,32 +14,28 @@
* limitations under the License.
*/
-
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "MonoToMultiConverter.h"
using namespace flowgraph;
-MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
+MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
: input(*this, 1)
- , output(*this, channelCount) {
+ , output(*this, outputChannelCount) {
}
-int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
-
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
- // TODO maybe move to audio_util as audio_mono_to_multi()
- for (int i = 0; i < framesToProcess; i++) {
+ for (int i = 0; i < numFrames; i++) {
// read one, write many
float sample = *inputBuffer++;
for (int channel = 0; channel < channelCount; channel++) {
*outputBuffer++ = sample;
}
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 5058ae0..6e87ccb 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -14,27 +14,34 @@
* limitations under the License.
*/
-
#ifndef FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
#define FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class MonoToMultiConverter : public AudioProcessorBase {
+/**
+ * Convert a monophonic stream to a multi-channel interleaved stream
+ * with the same signal on each channel.
+ */
+class MonoToMultiConverter : public FlowGraphNode {
public:
- explicit MonoToMultiConverter(int32_t channelCount);
+ explicit MonoToMultiConverter(int32_t outputChannelCount);
virtual ~MonoToMultiConverter() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "MonoToMultiConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
new file mode 100644
index 0000000..c745108
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToMonoConverter.h"
+
+using namespace flowgraph;
+
+MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
+ : input(*this, inputChannelCount)
+ , output(*this, 1) {
+}
+
+MultiToMonoConverter::~MultiToMonoConverter() = default;
+
+int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
+ int32_t channelCount = input.getSamplesPerFrame();
+ for (int i = 0; i < numFrames; i++) {
+ // read channel[0] of the multi-channel input frame, write one mono sample
+ *outputBuffer++ = *inputBuffer;
+ inputBuffer += channelCount;
+ }
+ return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.h b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
new file mode 100644
index 0000000..37c53bd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to a monophonic stream
+ * by extracting channel[0].
+ */
+ class MultiToMonoConverter : public FlowGraphNode {
+ public:
+ explicit MultiToMonoConverter(int32_t inputChannelCount);
+
+ virtual ~MultiToMonoConverter();
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "MultiToMonoConverter";
+ }
+
+ FlowGraphPortFloatInput input;
+ FlowGraphPortFloatOutput output;
+ };
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index 0cc32e5..905ae07 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -14,20 +14,15 @@
* limitations under the License.
*/
-#define LOG_TAG "RampLinear"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "RampLinear.h"
using namespace flowgraph;
RampLinear::RampLinear(int32_t channelCount)
- : input(*this, channelCount)
- , output(*this, channelCount) {
+ : FlowGraphFilter(channelCount) {
mTarget.store(1.0f);
}
@@ -38,7 +33,7 @@
void RampLinear::setTarget(float target) {
mTarget.store(target);
// If the ramp has not been used then start immediately at this level.
- if (mLastFramePosition < 0) {
+ if (mLastCallCount == kInitialCallCount) {
forceCurrent(target);
}
}
@@ -47,10 +42,9 @@
return mLevelTo - (mRemaining * mScaler);
}
-int32_t RampLinear::onProcess(int64_t framePosition, int32_t numFrames) {
- int32_t framesToProcess = input.pullData(framePosition, numFrames);
- const float *inputBuffer = input.getBlock();
- float *outputBuffer = output.getBlock();
+int32_t RampLinear::onProcess(int32_t numFrames) {
+ const float *inputBuffer = input.getBuffer();
+ float *outputBuffer = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
float target = getTarget();
@@ -59,12 +53,10 @@
mLevelFrom = interpolateCurrent();
mLevelTo = target;
mRemaining = mLengthInFrames;
- ALOGV("%s() mLevelFrom = %f, mLevelTo = %f, mRemaining = %d, mScaler = %f",
- __func__, mLevelFrom, mLevelTo, mRemaining, mScaler);
mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames; // for interpolation
}
- int32_t framesLeft = framesToProcess;
+ int32_t framesLeft = numFrames;
if (mRemaining > 0) { // Ramping? This doesn't happen very often.
int32_t framesToRamp = std::min(framesLeft, mRemaining);
@@ -85,5 +77,5 @@
*outputBuffer++ = *inputBuffer++ * mLevelTo;
}
- return framesToProcess;
+ return numFrames;
}
diff --git a/media/libaaudio/src/flowgraph/RampLinear.h b/media/libaaudio/src/flowgraph/RampLinear.h
index bdc8f41..f285704 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.h
+++ b/media/libaaudio/src/flowgraph/RampLinear.h
@@ -21,17 +21,25 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class RampLinear : public AudioProcessorBase {
+/**
+ * When the target is modified then the output will ramp smoothly
+ * between the original and the new target value.
+ * This can be used to smooth out control values and reduce pops.
+ *
+ * The target may be updated while a ramp is in progress, which will trigger
+ * a new ramp from the current value.
+ */
+class RampLinear : public FlowGraphFilter {
public:
explicit RampLinear(int32_t channelCount);
virtual ~RampLinear() = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
/**
* This is used for the next ramp.
@@ -66,8 +74,9 @@
mLevelTo = level;
}
- AudioFloatInputPort input;
- AudioFloatOutputPort output;
+ const char *getName() override {
+ return "RampLinear";
+ }
private:
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.cpp b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
new file mode 100644
index 0000000..5c3ed1f
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleRateConverter.h"
+
+using namespace flowgraph;
+using namespace resampler;
+
+SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
+ : FlowGraphFilter(channelCount)
+ , mResampler(resampler) {
+ setDataPulledAutomatically(false);
+}
+
+void SampleRateConverter::reset() {
+ FlowGraphNode::reset();
+ mInputCursor = 0; mNumValidInputFrames = 0; mInputCallCount = kInitialCallCount;
+}
+
+// Return true if there is a sample available.
+bool SampleRateConverter::isInputAvailable() {
+ // If we have consumed all of the input data then go out and get some more.
+ if (mInputCursor >= mNumValidInputFrames) {
+ mInputCallCount++;
+ mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
+ mInputCursor = 0;
+ }
+ return (mInputCursor < mNumValidInputFrames);
+}
+
+const float *SampleRateConverter::getNextInputFrame() {
+ const float *inputBuffer = input.getBuffer();
+ return &inputBuffer[mInputCursor++ * input.getSamplesPerFrame()];
+}
+
+int32_t SampleRateConverter::onProcess(int32_t numFrames) {
+ float *outputBuffer = output.getBuffer();
+ int32_t channelCount = output.getSamplesPerFrame();
+ int framesLeft = numFrames;
+ while (framesLeft > 0) {
+ // Gather input samples as needed.
+ if (mResampler.isWriteNeeded()) {
+ if (isInputAvailable()) {
+ const float *frame = getNextInputFrame();
+ mResampler.writeNextFrame(frame);
+ } else {
+ break;
+ }
+ } else {
+ // Output frame is interpolated from input samples.
+ mResampler.readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ framesLeft--;
+ }
+ }
+ return numFrames - framesLeft;
+}
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.h b/media/libaaudio/src/flowgraph/SampleRateConverter.h
new file mode 100644
index 0000000..57d76a4
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
+#define OBOE_SAMPLE_RATE_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+#include "resampler/MultiChannelResampler.h"
+
+namespace flowgraph {
+
+class SampleRateConverter : public FlowGraphFilter {
+public:
+ explicit SampleRateConverter(int32_t channelCount,
+ resampler::MultiChannelResampler &resampler);
+
+ virtual ~SampleRateConverter() = default;
+
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SampleRateConverter";
+ }
+
+ void reset() override;
+
+private:
+
+ // Return true if there is a sample available.
+ bool isInputAvailable();
+
+ // This assumes data is available. Only call after calling isInputAvailable().
+ const float *getNextInputFrame();
+
+ resampler::MultiChannelResampler &mResampler;
+
+ int32_t mInputCursor = 0; // offset into the input port buffer
+ int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
+ // We need our own callCount for upstream calls because calls occur at a different rate.
+ // This means we cannot have cyclic graphs or merges that contain an SRC.
+ int64_t mInputCallCount = 0;
+
+};
+
+} /* namespace flowgraph */
+
+#endif //OBOE_SAMPLE_RATE_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.cpp b/media/libaaudio/src/flowgraph/SinkFloat.cpp
index fb3dcbc..0588848 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SinkFloat.cpp
@@ -16,31 +16,31 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SinkFloat.h"
using namespace flowgraph;
SinkFloat::SinkFloat(int32_t channelCount)
- : AudioSink(channelCount) {
+ : FlowGraphSink(channelCount) {
}
int32_t SinkFloat::read(void *data, int32_t numFrames) {
float *floatData = (float *) data;
- int32_t channelCount = input.getSamplesPerFrame();
+ const int32_t channelCount = input.getSamplesPerFrame();
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
- if (framesRead <= 0) {
+ int32_t framesPulled = pullData(framesLeft);
+ if (framesPulled <= 0) {
break;
}
- const float *signal = input.getBlock();
- int32_t numSamples = framesRead * channelCount;
+ const float *signal = input.getBuffer();
+ int32_t numSamples = framesPulled * channelCount;
memcpy(floatData, signal, numSamples * sizeof(float));
floatData += numSamples;
- framesLeft -= framesRead;
+ framesLeft -= framesPulled;
}
return numFrames - framesLeft;
}
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.h b/media/libaaudio/src/flowgraph/SinkFloat.h
index 7775c08..c812373 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.h
+++ b/media/libaaudio/src/flowgraph/SinkFloat.h
@@ -21,16 +21,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkFloat : public AudioSink {
+/**
+ * AudioSink that lets you read data as 32-bit floats.
+ */
+class SinkFloat : public FlowGraphSink {
public:
explicit SinkFloat(int32_t channelCount);
+ ~SinkFloat() override = default;
int32_t read(void *data, int32_t numFrames) override;
+ const char *getName() override {
+ return "SinkFloat";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI16.cpp b/media/libaaudio/src/flowgraph/SinkI16.cpp
index ffec8f5..da7fd6b 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI16.cpp
@@ -17,17 +17,16 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#include "SinkI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SinkI16.h"
-
using namespace flowgraph;
SinkI16::SinkI16(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI16::read(void *data, int32_t numFrames) {
int16_t *shortData = (int16_t *) data;
@@ -36,13 +35,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *signal = input.getBlock();
+ const float *signal = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_i16_from_float(shortData, signal, numSamples);
shortData += numSamples;
signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI16.h b/media/libaaudio/src/flowgraph/SinkI16.h
index 6d86266..1e1ce3a 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.h
+++ b/media/libaaudio/src/flowgraph/SinkI16.h
@@ -20,15 +20,22 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI16 : public AudioSink {
+/**
+ * AudioSink that lets you read data as 16-bit signed integers.
+ */
+class SinkI16 : public FlowGraphSink {
public:
explicit SinkI16(int32_t channelCount);
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI16";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI24.cpp b/media/libaaudio/src/flowgraph/SinkI24.cpp
index 0cb077d..a9fb5d2 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI24.cpp
@@ -15,19 +15,20 @@
*/
#include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
-#ifdef __ANDROID__
+
+#include "FlowGraphNode.h"
+#include "SinkI24.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SinkI24.h"
-
using namespace flowgraph;
SinkI24::SinkI24(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI24::read(void *data, int32_t numFrames) {
uint8_t *byteData = (uint8_t *) data;
@@ -36,13 +37,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *floatData = input.getBlock();
+ const float *floatData = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_p24_from_float(byteData, floatData, numSamples);
static const int kBytesPerI24Packed = 3;
byteData += numSamples * kBytesPerI24Packed;
diff --git a/media/libaaudio/src/flowgraph/SinkI24.h b/media/libaaudio/src/flowgraph/SinkI24.h
index 5b9b505..44078a9 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.h
+++ b/media/libaaudio/src/flowgraph/SinkI24.h
@@ -20,15 +20,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI24 : public AudioSink {
+/**
+ * AudioSink that lets you read data as packed 24-bit signed integers.
+ * The sample size is 3 bytes.
+ */
+class SinkI24 : public FlowGraphSink {
public:
explicit SinkI24(int32_t channelCount);
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI24";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI32.cpp b/media/libaaudio/src/flowgraph/SinkI32.cpp
index eab863d..9fd4e96 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI32.cpp
@@ -14,18 +14,18 @@
* limitations under the License.
*/
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "FlowgraphUtilities.h"
#include "SinkI32.h"
using namespace flowgraph;
SinkI32::SinkI32(int32_t channelCount)
- : AudioSink(channelCount) {}
+ : FlowGraphSink(channelCount) {}
int32_t SinkI32::read(void *data, int32_t numFrames) {
int32_t *intData = (int32_t *) data;
@@ -34,13 +34,13 @@
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
- int32_t framesRead = pull(framesLeft);
+ int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
- const float *signal = input.getBlock();
+ const float *signal = input.getBuffer();
int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_i32_from_float(intData, signal, numSamples);
intData += numSamples;
signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI32.h b/media/libaaudio/src/flowgraph/SinkI32.h
index 09d23b7..7456d5f 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.h
+++ b/media/libaaudio/src/flowgraph/SinkI32.h
@@ -19,16 +19,20 @@
#include <stdint.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SinkI32 : public AudioSink {
+class SinkI32 : public FlowGraphSink {
public:
explicit SinkI32(int32_t channelCount);
~SinkI32() override = default;
int32_t read(void *data, int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SinkI32";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 5b3a51e..1b3daf1 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -16,23 +16,22 @@
#include <algorithm>
#include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceFloat.h"
using namespace flowgraph;
SourceFloat::SourceFloat(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceFloat::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int32_t numFrames) {
+ float *outputBuffer = output.getBuffer();
+ const int32_t channelCount = output.getSamplesPerFrame();
- float *outputBuffer = output.getBlock();
- int32_t channelCount = output.getSamplesPerFrame();
-
- int32_t framesLeft = mSizeInFrames - mFrameIndex;
- int32_t framesToProcess = std::min(numFrames, framesLeft);
- int32_t numSamples = framesToProcess * channelCount;
+ const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+ const int32_t framesToProcess = std::min(numFrames, framesLeft);
+ const int32_t numSamples = framesToProcess * channelCount;
const float *floatBase = (float *) mData;
const float *floatData = &floatBase[mFrameIndex * channelCount];
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.h b/media/libaaudio/src/flowgraph/SourceFloat.h
index e6eed9f..4719669 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.h
+++ b/media/libaaudio/src/flowgraph/SourceFloat.h
@@ -20,15 +20,23 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceFloat : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined float data.
+ */
+class SourceFloat : public FlowGraphSourceBuffered {
public:
explicit SourceFloat(int32_t channelCount);
+ ~SourceFloat() override = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceFloat";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index a645cc2..8813023 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -17,21 +17,21 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#include "FlowGraphNode.h"
+#include "SourceI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
-#include "SourceI16.h"
-
using namespace flowgraph;
SourceI16::SourceI16(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI16::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
- float *floatData = output.getBlock();
+int32_t SourceI16::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -41,7 +41,7 @@
const int16_t *shortBase = static_cast<const int16_t *>(mData);
const int16_t *shortData = &shortBase[mFrameIndex * channelCount];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_i16(floatData, shortData, numSamples);
#else
for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI16.h b/media/libaaudio/src/flowgraph/SourceI16.h
index 2b116cf..fe440b2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.h
+++ b/media/libaaudio/src/flowgraph/SourceI16.h
@@ -20,15 +20,21 @@
#include <unistd.h>
#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-
-class SourceI16 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 16-bit integer data.
+ */
+class SourceI16 : public FlowGraphSourceBuffered {
public:
explicit SourceI16(int32_t channelCount);
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceI16";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 50fb98e..1975878 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -15,13 +15,13 @@
*/
#include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceI24.h"
using namespace flowgraph;
@@ -29,11 +29,11 @@
constexpr int kBytesPerI24Packed = 3;
SourceI24::SourceI24(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI24::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
- float *floatData = output.getBlock();
+int32_t SourceI24::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
int32_t channelCount = output.getSamplesPerFrame();
int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -43,7 +43,7 @@
const uint8_t *byteBase = (uint8_t *) mData;
const uint8_t *byteData = &byteBase[mFrameIndex * channelCount * kBytesPerI24Packed];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_p24(floatData, byteData, numSamples);
#else
static const float scale = 1. / (float)(1UL << 31);
diff --git a/media/libaaudio/src/flowgraph/SourceI24.h b/media/libaaudio/src/flowgraph/SourceI24.h
index 2ed6f18..3779534 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.h
+++ b/media/libaaudio/src/flowgraph/SourceI24.h
@@ -17,17 +17,25 @@
#ifndef FLOWGRAPH_SOURCE_I24_H
#define FLOWGRAPH_SOURCE_I24_H
-#include <stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceI24 : public AudioSource {
+/**
+ * AudioSource that reads a block of pre-defined 24-bit packed integer data.
+ */
+class SourceI24 : public FlowGraphSourceBuffered {
public:
explicit SourceI24(int32_t channelCount);
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+
+ const char *getName() override {
+ return "SourceI24";
+ }
};
} /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index 95bfd8f..4b2e8c4 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -17,31 +17,31 @@
#include <algorithm>
#include <unistd.h>
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
#include <audio_utils/primitives.h>
#endif
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
#include "SourceI32.h"
using namespace flowgraph;
SourceI32::SourceI32(int32_t channelCount)
- : AudioSource(channelCount) {
+ : FlowGraphSourceBuffered(channelCount) {
}
-int32_t SourceI32::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
- float *floatData = output.getBlock();
- int32_t channelCount = output.getSamplesPerFrame();
+int32_t SourceI32::onProcess(int32_t numFrames) {
+ float *floatData = output.getBuffer();
+ const int32_t channelCount = output.getSamplesPerFrame();
- int32_t framesLeft = mSizeInFrames - mFrameIndex;
- int32_t framesToProcess = std::min(numFrames, framesLeft);
- int32_t numSamples = framesToProcess * channelCount;
+ const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+ const int32_t framesToProcess = std::min(numFrames, framesLeft);
+ const int32_t numSamples = framesToProcess * channelCount;
const int32_t *intBase = static_cast<const int32_t *>(mData);
const int32_t *intData = &intBase[mFrameIndex * channelCount];
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
memcpy_to_float_from_i32(floatData, intData, numSamples);
#else
for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI32.h b/media/libaaudio/src/flowgraph/SourceI32.h
index e50f9be..b4e0d7b 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.h
+++ b/media/libaaudio/src/flowgraph/SourceI32.h
@@ -19,17 +19,20 @@
#include <stdint.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
namespace flowgraph {
-class SourceI32 : public AudioSource {
+class SourceI32 : public FlowGraphSourceBuffered {
public:
explicit SourceI32(int32_t channelCount);
~SourceI32() override = default;
- int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+ int32_t onProcess(int32_t numFrames) override;
+ const char *getName() override {
+ return "SourceI32";
+ }
private:
static constexpr float kScale = 1.0 / (1UL << 31);
};
diff --git a/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
new file mode 100644
index 0000000..f6479ae
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+#define RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a HyperbolicCosineWindow window centered at 0.
+ * This can be used in place of a Kaiser window.
+ *
+ * The code is based on an anonymous contribution by "a concerned citizen":
+ * https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ */
+class HyperbolicCosineWindow {
+public:
+ HyperbolicCosineWindow() {
+ setStopBandAttenuation(60);
+ }
+
+ /**
+ * @param attenuation typical values range from 30 to 90 dB
+ * @return alpha
+ */
+ double setStopBandAttenuation(double attenuation) {
+ double alpha = ((-325.1e-6 * attenuation + 0.1677) * attenuation) - 3.149;
+ setAlpha(alpha);
+ return alpha;
+ }
+
+ void setAlpha(double alpha) {
+ mAlpha = alpha;
+ mInverseCoshAlpha = 1.0 / cosh(alpha);
+ }
+
+ /**
+ * @param x ranges from -1.0 to +1.0
+ */
+ double operator()(double x) {
+ double x2 = x * x;
+ if (x2 >= 1.0) return 0.0;
+ double w = mAlpha * sqrt(1.0 - x2);
+ return cosh(w) * mInverseCoshAlpha;
+ }
+
+private:
+ double mAlpha = 0.0;
+ double mInverseCoshAlpha = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
new file mode 100644
index 0000000..4bd75b3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntegerRatio.h"
+
+using namespace resampler;
+
+// Enough primes to cover the common sample rates.
+static const int kPrimes[] = {
+ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+ 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+ 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+ 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199};
+
+void IntegerRatio::reduce() {
+ for (int prime : kPrimes) {
+ if (mNumerator < prime || mDenominator < prime) {
+ break;
+ }
+
+ // Find biggest prime factor for numerator.
+ while (true) {
+ int top = mNumerator / prime;
+ int bottom = mDenominator / prime;
+ if ((top >= 1)
+ && (bottom >= 1)
+ && (top * prime == mNumerator) // divided evenly?
+ && (bottom * prime == mDenominator)) {
+ mNumerator = top;
+ mDenominator = bottom;
+ } else {
+ break;
+ }
+ }
+
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
new file mode 100644
index 0000000..8c044d8
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_INTEGER_RATIO_H
+#define OBOE_INTEGER_RATIO_H
+
+#include <sys/types.h>
+
+namespace resampler {
+
+/**
+ * Represent the ratio of two integers.
+ */
+class IntegerRatio {
+public:
+ IntegerRatio(int32_t numerator, int32_t denominator)
+ : mNumerator(numerator), mDenominator(denominator) {}
+
+ /**
+ * Reduce by removing common prime factors.
+ */
+ void reduce();
+
+ int32_t getNumerator() {
+ return mNumerator;
+ }
+
+ int32_t getDenominator() {
+ return mDenominator;
+ }
+
+private:
+ int32_t mNumerator;
+ int32_t mDenominator;
+};
+
+} // namespace resampler
+
+#endif //OBOE_INTEGER_RATIO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
new file mode 100644
index 0000000..73dbc41
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_KAISER_WINDOW_H
+#define RESAMPLER_KAISER_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a Kaiser window centered at 0.
+ */
+class KaiserWindow {
+public:
+ KaiserWindow() {
+ setStopBandAttenuation(60);
+ }
+
+ /**
+ * @param attenuation typical values range from 30 to 90 dB
+ * @return beta
+ */
+ double setStopBandAttenuation(double attenuation) {
+ double beta = 0.0;
+ if (attenuation > 50) {
+ beta = 0.1102 * (attenuation - 8.7);
+ } else if (attenuation >= 21) {
+ double a21 = attenuation - 21;
+ beta = 0.5842 * pow(a21, 0.4) + (0.07886 * a21);
+ }
+ setBeta(beta);
+ return beta;
+ }
+
+ void setBeta(double beta) {
+ mBeta = beta;
+ mInverseBesselBeta = 1.0 / bessel(beta);
+ }
+
+ /**
+ * @param x ranges from -1.0 to +1.0
+ */
+ double operator()(double x) {
+ double x2 = x * x;
+ if (x2 >= 1.0) return 0.0;
+ double w = mBeta * sqrt(1.0 - x2);
+ return bessel(w) * mInverseBesselBeta;
+ }
+
+ // Approximation of a
+ // modified zero order Bessel function of the first kind.
+ // Based on a discussion at:
+ // https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ static double bessel(double x) {
+ double y = cosh(0.970941817426052 * x);
+ y += cosh(0.8854560256532099 * x);
+ y += cosh(0.7485107481711011 * x);
+ y += cosh(0.5680647467311558 * x);
+ y += cosh(0.3546048870425356 * x);
+ y += cosh(0.120536680255323 * x);
+ y *= 2;
+ y += cosh(x);
+ y /= 13;
+ return y;
+ }
+
+private:
+ double mBeta = 0.0;
+ double mInverseBesselBeta = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_KAISER_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
new file mode 100644
index 0000000..a7748c1
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearResampler.h"
+
+using namespace resampler;
+
+LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder) {
+ mPreviousFrame = std::make_unique<float[]>(getChannelCount());
+ mCurrentFrame = std::make_unique<float[]>(getChannelCount());
+}
+
+void LinearResampler::writeFrame(const float *frame) {
+ memcpy(mPreviousFrame.get(), mCurrentFrame.get(), sizeof(float) * getChannelCount());
+ memcpy(mCurrentFrame.get(), frame, sizeof(float) * getChannelCount());
+}
+
+void LinearResampler::readFrame(float *frame) {
+ float *previous = mPreviousFrame.get();
+ float *current = mCurrentFrame.get();
+ float phase = (float) getIntegerPhase() / mDenominator;
+ // iterate across samples in the frame
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float f0 = *previous++;
+ float f1 = *current++;
+ *frame++ = f0 + (phase * (f1 - f0));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.h b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
new file mode 100644
index 0000000..6bde81d
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_LINEAR_RESAMPLER_H
+#define OBOE_LINEAR_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Simple resampler that uses bi-linear interpolation.
+ */
+class LinearResampler : public MultiChannelResampler {
+public:
+ explicit LinearResampler(const MultiChannelResampler::Builder &builder);
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+
+private:
+ std::unique_ptr<float[]> mPreviousFrame;
+ std::unique_ptr<float[]> mCurrentFrame;
+};
+
+} // namespace resampler
+#endif //OBOE_LINEAR_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
new file mode 100644
index 0000000..d630520
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include "IntegerRatio.h"
+#include "LinearResampler.h"
+#include "MultiChannelResampler.h"
+#include "PolyphaseResampler.h"
+#include "PolyphaseResamplerMono.h"
+#include "PolyphaseResamplerStereo.h"
+#include "SincResampler.h"
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
+ : mNumTaps(builder.getNumTaps())
+ , mX(builder.getChannelCount() * builder.getNumTaps() * 2)
+ , mSingleFrame(builder.getChannelCount())
+ , mChannelCount(builder.getChannelCount())
+ {
+ // Reduce sample rates to the smallest ratio.
+ // For example 44100/48000 would become 147/160.
+ IntegerRatio ratio(builder.getInputRate(), builder.getOutputRate());
+ ratio.reduce();
+ mNumerator = ratio.getNumerator();
+ mDenominator = ratio.getDenominator();
+ mIntegerPhase = mDenominator;
+}
+
+// static factory method
+MultiChannelResampler *MultiChannelResampler::make(int32_t channelCount,
+ int32_t inputRate,
+ int32_t outputRate,
+ Quality quality) {
+ Builder builder;
+ builder.setInputRate(inputRate);
+ builder.setOutputRate(outputRate);
+ builder.setChannelCount(channelCount);
+
+ switch (quality) {
+ case Quality::Fastest:
+ builder.setNumTaps(2);
+ break;
+ case Quality::Low:
+ builder.setNumTaps(4);
+ break;
+ case Quality::Medium:
+ default:
+ builder.setNumTaps(8);
+ break;
+ case Quality::High:
+ builder.setNumTaps(16);
+ break;
+ case Quality::Best:
+ builder.setNumTaps(32);
+ break;
+ }
+
+ // Set the cutoff frequency so that we do not get aliasing when down-sampling.
+ if (inputRate > outputRate) {
+ builder.setNormalizedCutoff(kDefaultNormalizedCutoff);
+ }
+ return builder.build();
+}
+
+MultiChannelResampler *MultiChannelResampler::Builder::build() {
+ if (getNumTaps() == 2) {
+ // Note that this does not do low pass filtering.
+ return new LinearResampler(*this);
+ }
+ IntegerRatio ratio(getInputRate(), getOutputRate());
+ ratio.reduce();
+ bool usePolyphase = (getNumTaps() * ratio.getDenominator()) <= kMaxCoefficients;
+ if (usePolyphase) {
+ if (getChannelCount() == 1) {
+ return new PolyphaseResamplerMono(*this);
+ } else if (getChannelCount() == 2) {
+ return new PolyphaseResamplerStereo(*this);
+ } else {
+ return new PolyphaseResampler(*this);
+ }
+ } else {
+ // Use less optimized resampler that uses a float phaseIncrement.
+ // TODO mono resampler
+ if (getChannelCount() == 2) {
+ return new SincResamplerStereo(*this);
+ } else {
+ return new SincResampler(*this);
+ }
+ }
+}
+
+void MultiChannelResampler::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * getChannelCount()];
+ int offset = getNumTaps() * getChannelCount();
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ // Write twice so we avoid having to wrap when reading.
+ dest[channel] = dest[channel + offset] = frame[channel];
+ }
+}
+
+float MultiChannelResampler::sinc(float radians) {
+ if (abs(radians) < 1.0e-9) return 1.0f; // avoid divide by zero
+ return sinf(radians) / radians; // Sinc function
+}
+
+// Generate coefficients in the order they will be used by readFrame().
+// This is more complicated but readFrame() is called repeatedly and should be optimized.
+void MultiChannelResampler::generateCoefficients(int32_t inputRate,
+ int32_t outputRate,
+ int32_t numRows,
+ double phaseIncrement,
+ float normalizedCutoff) {
+ mCoefficients.resize(getNumTaps() * numRows);
+ int coefficientIndex = 0;
+ double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
+ // Stretch the sinc function for low pass filtering.
+ const float cutoffScaler = normalizedCutoff *
+ ((outputRate < inputRate)
+ ? ((float)outputRate / inputRate)
+ : ((float)inputRate / outputRate));
+ const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
+ const float numTapsHalfInverse = 1.0f / numTapsHalf;
+ for (int i = 0; i < numRows; i++) {
+ float tapPhase = phase - numTapsHalf;
+ float gain = 0.0; // sum of raw coefficients
+ int gainCursor = coefficientIndex;
+ for (int tap = 0; tap < getNumTaps(); tap++) {
+ float radians = tapPhase * M_PI;
+
+#if MCR_USE_KAISER
+ float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
+#else
+ float window = mCoshWindow(tapPhase * numTapsHalfInverse);
+#endif
+ float coefficient = sinc(radians * cutoffScaler) * window;
+ mCoefficients.at(coefficientIndex++) = coefficient;
+ gain += coefficient;
+ tapPhase += 1.0;
+ }
+ phase += phaseIncrement;
+ while (phase >= 1.0) {
+ phase -= 1.0;
+ }
+
+ // Correct for gain variations.
+ float gainCorrection = 1.0 / gain; // normalize the gain
+ for (int tap = 0; tap < getNumTaps(); tap++) {
+ mCoefficients.at(gainCursor + tap) *= gainCorrection;
+ }
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
new file mode 100644
index 0000000..da79cad
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
+#define OBOE_MULTICHANNEL_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef MCR_USE_KAISER
+// It appears from the spectrogram that the HyperbolicCosine window leads to fewer artifacts.
+// And it is faster to calculate.
+#define MCR_USE_KAISER 0
+#endif
+
+#if MCR_USE_KAISER
+#include "KaiserWindow.h"
+#else
+#include "HyperbolicCosineWindow.h"
+#endif
+
+namespace resampler {
+
+class MultiChannelResampler {
+
+public:
+
+ enum class Quality : int32_t {
+ Fastest,
+ Low,
+ Medium,
+ High,
+ Best,
+ };
+
+ class Builder {
+ public:
+ /**
+ * Construct an optimal resampler based on the specified parameters.
+ * @return address of a resampler
+ */
+ MultiChannelResampler *build();
+
+ /**
+ * The number of taps in the resampling filter.
+ * More taps gives better quality but uses more CPU time.
+ * This typically ranges from 4 to 64. Default is 16.
+ *
+ * For polyphase filters, numTaps must be a multiple of four for loop unrolling.
+ * @param numTaps number of taps for the filter
+ * @return address of this builder for chaining calls
+ */
+ Builder *setNumTaps(int32_t numTaps) {
+ mNumTaps = numTaps;
+ return this;
+ }
+
+ /**
+ * Use 1 for mono, 2 for stereo, etc. Default is 1.
+ *
+ * @param channelCount number of channels
+ * @return address of this builder for chaining calls
+ */
+ Builder *setChannelCount(int32_t channelCount) {
+ mChannelCount = channelCount;
+ return this;
+ }
+
+ /**
+ * Default is 48000.
+ *
+ * @param inputRate sample rate of the input stream
+ * @return address of this builder for chaining calls
+ */
+ Builder *setInputRate(int32_t inputRate) {
+ mInputRate = inputRate;
+ return this;
+ }
+
+ /**
+ * Default is 48000.
+ *
+ * @param outputRate sample rate of the output stream
+ * @return address of this builder for chaining calls
+ */
+ Builder *setOutputRate(int32_t outputRate) {
+ mOutputRate = outputRate;
+ return this;
+ }
+
+ /**
+ * Set cutoff frequency relative to the Nyquist rate of the output sample rate.
+ * Set to 1.0 to match the Nyquist frequency.
+ * Set lower to reduce aliasing.
+ * Default is 0.70.
+ *
+ * @param normalizedCutoff anti-aliasing filter cutoff
+ * @return address of this builder for chaining calls
+ */
+ Builder *setNormalizedCutoff(float normalizedCutoff) {
+ mNormalizedCutoff = normalizedCutoff;
+ return this;
+ }
+
+ int32_t getNumTaps() const {
+ return mNumTaps;
+ }
+
+ int32_t getChannelCount() const {
+ return mChannelCount;
+ }
+
+ int32_t getInputRate() const {
+ return mInputRate;
+ }
+
+ int32_t getOutputRate() const {
+ return mOutputRate;
+ }
+
+ float getNormalizedCutoff() const {
+ return mNormalizedCutoff;
+ }
+
+ protected:
+ int32_t mChannelCount = 1;
+ int32_t mNumTaps = 16;
+ int32_t mInputRate = 48000;
+ int32_t mOutputRate = 48000;
+ float mNormalizedCutoff = kDefaultNormalizedCutoff;
+ };
+
+ virtual ~MultiChannelResampler() = default;
+
+ /**
+ * Factory method for making a resampler that is optimal for the given inputs.
+ *
+ * @param channelCount number of channels, 2 for stereo
+ * @param inputRate sample rate of the input stream
+ * @param outputRate sample rate of the output stream
+ * @param quality higher quality sounds better but uses more CPU
+ * @return an optimal resampler
+ */
+ static MultiChannelResampler *make(int32_t channelCount,
+ int32_t inputRate,
+ int32_t outputRate,
+ Quality quality);
+
+ bool isWriteNeeded() const {
+ return mIntegerPhase >= mDenominator;
+ }
+
+ /**
+ * Write a frame containing N samples.
+ *
+ * @param frame pointer to the first sample in a frame
+ */
+ void writeNextFrame(const float *frame) {
+ writeFrame(frame);
+ advanceWrite();
+ }
+
+ /**
+ * Read a frame containing N samples.
+ *
+ * @param frame pointer to the first sample in a frame
+ */
+ void readNextFrame(float *frame) {
+ readFrame(frame);
+ advanceRead();
+ }
+
+ int getNumTaps() const {
+ return mNumTaps;
+ }
+
+ int getChannelCount() const {
+ return mChannelCount;
+ }
+
+ static float hammingWindow(float radians, float spread);
+
+ static float sinc(float radians);
+
+protected:
+
+ explicit MultiChannelResampler(const MultiChannelResampler::Builder &builder);
+
+ /**
+ * Write a frame containing N samples.
+ * Call advanceWrite() after calling this.
+ * @param frame pointer to the first sample in a frame
+ */
+ virtual void writeFrame(const float *frame);
+
+ /**
+ * Read a frame containing N samples using interpolation.
+ * Call advanceRead() after calling this.
+ * @param frame pointer to the first sample in a frame
+ */
+ virtual void readFrame(float *frame) = 0;
+
+ void advanceWrite() {
+ mIntegerPhase -= mDenominator;
+ }
+
+ void advanceRead() {
+ mIntegerPhase += mNumerator;
+ }
+
+ /**
+ * Generate the filter coefficients in optimal order.
+ * @param inputRate sample rate of the input stream
+ * @param outputRate sample rate of the output stream
+ * @param numRows number of rows in the array that contain a set of tap coefficients
+ * @param phaseIncrement how much to increment the phase between rows
+ * @param normalizedCutoff filter cutoff frequency normalized to Nyquist rate of output
+ */
+ void generateCoefficients(int32_t inputRate,
+ int32_t outputRate,
+ int32_t numRows,
+ double phaseIncrement,
+ float normalizedCutoff);
+
+
+ int32_t getIntegerPhase() {
+ return mIntegerPhase;
+ }
+
+ static constexpr int kMaxCoefficients = 8 * 1024;
+ std::vector<float> mCoefficients;
+
+ const int mNumTaps;
+ int mCursor = 0;
+ std::vector<float> mX; // delayed input values for the FIR
+ std::vector<float> mSingleFrame; // one frame for temporary use
+ int32_t mIntegerPhase = 0;
+ int32_t mNumerator = 0;
+ int32_t mDenominator = 0;
+
+
+private:
+
+#if MCR_USE_KAISER
+ KaiserWindow mKaiserWindow;
+#else
+ HyperbolicCosineWindow mCoshWindow;
+#endif
+
+ static constexpr float kDefaultNormalizedCutoff = 0.70f;
+
+ const int mChannelCount;
+};
+
+} // namespace resampler
+#endif //OBOE_MULTICHANNEL_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
new file mode 100644
index 0000000..aa4ffd9
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "IntegerRatio.h"
+#include "PolyphaseResampler.h"
+
+using namespace resampler;
+
+PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder)
+ {
+ assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+
+ int32_t inputRate = builder.getInputRate();
+ int32_t outputRate = builder.getOutputRate();
+
+ int32_t numRows = mDenominator;
+ double phaseIncrement = (double) inputRate / (double) outputRate;
+ generateCoefficients(inputRate, outputRate,
+ numRows, phaseIncrement,
+ builder.getNormalizedCutoff());
+}
+
+void PolyphaseResampler::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+
+ // Multiply input times windowed sinc function.
+ float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient = *coefficients++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ mSingleFrame[channel] += *xFrame++ * coefficient;
+ }
+ }
+
+ // Advance and wrap through coefficients.
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulator to output.
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ frame[channel] = mSingleFrame[channel];
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
new file mode 100644
index 0000000..1aeb680
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_H
+#define OBOE_POLYPHASE_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+/**
+ * Resampler that is optimized for a reduced ratio of sample rates.
+ * All of the coefficients for each possible phase value are pre-calculated.
+ */
+class PolyphaseResampler : public MultiChannelResampler {
+public:
+ /**
+ *
+ * @param builder containing lots of parameters
+ */
+ explicit PolyphaseResampler(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResampler() = default;
+
+ void readFrame(float *frame) override;
+
+protected:
+
+ int32_t mCoefficientCursor = 0;
+
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
new file mode 100644
index 0000000..c0e29b7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerMono.h"
+
+using namespace resampler;
+
+#define MONO 1
+
+PolyphaseResamplerMono::PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder)
+ : PolyphaseResampler(builder) {
+ assert(builder.getChannelCount() == MONO);
+}
+
+void PolyphaseResamplerMono::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * MONO];
+ const int offset = mNumTaps * MONO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float sample = frame[0];
+ // Put ordered writes together.
+ dest[0] = sample;
+ dest[offset] = sample;
+}
+
+void PolyphaseResamplerMono::readFrame(float *frame) {
+ // Clear accumulator.
+ float sum = 0.0;
+
+ // Multiply input times precomputed windowed sinc function.
+ const float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * MONO];
+ const int numLoops = mNumTaps >> 2; // n/4
+ for (int i = 0; i < numLoops; i++) {
+ // Manual loop unrolling, might get converted to SIMD.
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ sum += *xFrame++ * *coefficients++;
+ }
+
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulator to output.
+ frame[0] = sum;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
new file mode 100644
index 0000000..0a691a3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
+#define OBOE_POLYPHASE_RESAMPLER_MONO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerMono : public PolyphaseResampler {
+public:
+ explicit PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResamplerMono() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
new file mode 100644
index 0000000..e4bef74
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO 2
+
+PolyphaseResamplerStereo::PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder)
+ : PolyphaseResampler(builder) {
+ assert(builder.getChannelCount() == STEREO);
+}
+
+void PolyphaseResamplerStereo::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * STEREO];
+ const int offset = mNumTaps * STEREO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float left = frame[0];
+ const float right = frame[1];
+ // Put ordered writes together.
+ dest[0] = left;
+ dest[1] = right;
+ dest[offset] = left;
+ dest[1 + offset] = right;
+}
+
+void PolyphaseResamplerStereo::readFrame(float *frame) {
+ // Clear accumulators.
+ float left = 0.0;
+ float right = 0.0;
+
+ // Multiply input times precomputed windowed sinc function.
+ const float *coefficients = &mCoefficients[mCoefficientCursor];
+ float *xFrame = &mX[mCursor * STEREO];
+ const int numLoops = mNumTaps >> 2; // n/4
+ for (int i = 0; i < numLoops; i++) {
+ // Manual loop unrolling, might get converted to SIMD.
+ float coefficient = *coefficients++;
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+
+ coefficient = *coefficients++; // next tap
+ left += *xFrame++ * coefficient;
+ right += *xFrame++ * coefficient;
+ }
+
+ mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+ // Copy accumulators to output.
+ frame[0] = left;
+ frame[1] = right;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
new file mode 100644
index 0000000..e608483
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
+#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerStereo : public PolyphaseResampler {
+public:
+ explicit PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+ virtual ~PolyphaseResamplerStereo() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
new file mode 100644
index 0000000..05d8a89
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -0,0 +1,91 @@
+# Sample Rate Converter
+
+This folder contains a sample rate converter, or "resampler".
+
+The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
+We found this had fewer artifacts than the more traditional Kaiser window.
+
+## Creating a Resampler
+
+Include the [main header](MultiChannelResampler.h) for the resampler.
+
+ #include "resampler/MultiChannelResampler.h"
+
+Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
+Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
+
+ MultiChannelResampler *resampler = MultiChannelResampler::make(
+ 2, // channel count
+ 44100, // input sampleRate
+ 48000, // output sampleRate
+ MultiChannelResampler::Quality::Medium); // conversion quality
+
+Possible values for quality include { Fastest, Low, Medium, High, Best }.
+Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
+
+## Fractional Frame Counts
+
+Note that the number of output frames generated for a given number of input frames can vary.
+
+For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
+
+ 960 * 48000 / 44100 = 1044.897959...
+
+You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
+
+You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
+
+## Calling the Resampler with a fixed number of OUTPUT frames
+
+In this example, suppose we have a fixed number of output frames and a variable number of input frames.
+
+Assume you start with these variables and a method that returns the next input frame:
+
+ float *outputBuffer; // multi-channel buffer to be filled
+ int numOutputFrames; // number of frames of output
+
+The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
+
+ int outputFramesLeft = numOutputFrames;
+ while (outputFramesLeft > 0) {
+ if(resampler->isWriteNeeded()) {
+ const float *frame = getNextInputFrame(); // you provide this
+ resampler->writeNextFrame(frame);
+ } else {
+ resampler->readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ outputFramesLeft--;
+ }
+ }
+
+## Calling the Resampler with a fixed number of INPUT frames
+
+In this example, suppose we have a fixed number of input frames and a variable number of output frames.
+
+Assume you start with these variables:
+
+ float *inputBuffer; // multi-channel buffer to be consumed
+ float *outputBuffer; // multi-channel buffer to be filled
+ int numInputFrames; // number of frames of input
+ int numOutputFrames = 0;
+ int channelCount; // 1 for mono, 2 for stereo
+
+ int inputFramesLeft = numInputFrames;
+ while (inputFramesLeft > 0) {
+ if(resampler->isWriteNeeded()) {
+ resampler->writeNextFrame(inputBuffer);
+ inputBuffer += channelCount;
+ inputFramesLeft--;
+ } else {
+ resampler->readNextFrame(outputBuffer);
+ outputBuffer += channelCount;
+ numOutputFrames++;
+ }
+ }
+
+## Deleting the Resampler
+
+When you are done, you should delete the Resampler to avoid a memory leak.
+
+ delete resampler;
+
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
new file mode 100644
index 0000000..5e8a9e0
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "SincResampler.h"
+
+using namespace resampler;
+
+SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
+ : MultiChannelResampler(builder)
+ , mSingleFrame2(builder.getChannelCount()) {
+ assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+ mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
+ mPhaseScaler = (double) mNumRows / mDenominator;
+ double phaseIncrement = 1.0 / mNumRows;
+ generateCoefficients(builder.getInputRate(),
+ builder.getOutputRate(),
+ mNumRows,
+ phaseIncrement,
+ builder.getNormalizedCutoff());
+}
+
+void SincResampler::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+ std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+ // Determine indices into coefficients table.
+ double tablePhase = getIntegerPhase() * mPhaseScaler;
+ int index1 = static_cast<int>(floor(tablePhase));
+ if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
+ tablePhase -= mNumRows;
+ index1 -= mNumRows;
+ }
+
+ int index2 = index1 + 1;
+ if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+ index2 -= mNumRows;
+ }
+
+ float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+ float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient1 = *coefficients1++;
+ float coefficient2 = *coefficients2++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float sample = *xFrame++;
+ mSingleFrame[channel] += sample * coefficient1;
+ mSingleFrame2[channel] += sample * coefficient2;
+ }
+ }
+
+ // Interpolate and copy to output.
+ float fraction = tablePhase - index1;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float low = mSingleFrame[channel];
+ float high = mSingleFrame2[channel];
+ frame[channel] = low + (fraction * (high - low));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.h b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
new file mode 100644
index 0000000..b235188
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_H
+#define OBOE_SINC_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Resampler that can interpolate between coefficients.
+ * This can be used to support arbitrary ratios.
+ */
+class SincResampler : public MultiChannelResampler {
+public:
+ explicit SincResampler(const MultiChannelResampler::Builder &builder);
+
+ virtual ~SincResampler() = default;
+
+ void readFrame(float *frame) override;
+
+protected:
+
+ std::vector<float> mSingleFrame2; // for interpolation
+ int32_t mNumRows = 0;
+ double mPhaseScaler = 1.0;
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
new file mode 100644
index 0000000..ce00302
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO 2
+
+SincResamplerStereo::SincResamplerStereo(const MultiChannelResampler::Builder &builder)
+ : SincResampler(builder) {
+ assert(builder.getChannelCount() == STEREO);
+}
+
+void SincResamplerStereo::writeFrame(const float *frame) {
+ // Move cursor before write so that cursor points to last written frame in read.
+ if (--mCursor < 0) {
+ mCursor = getNumTaps() - 1;
+ }
+ float *dest = &mX[mCursor * STEREO];
+ const int offset = mNumTaps * STEREO;
+ // Write each channel twice so we avoid having to wrap when running the FIR.
+ const float left = frame[0];
+ const float right = frame[1];
+ // Put ordered writes together.
+ dest[0] = left;
+ dest[1] = right;
+ dest[offset] = left;
+ dest[1 + offset] = right;
+}
+
+// Multiply input times windowed sinc function.
+void SincResamplerStereo::readFrame(float *frame) {
+ // Clear accumulator for mixing.
+ std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+ std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+ // Determine indices into coefficients table.
+ double tablePhase = getIntegerPhase() * mPhaseScaler;
+ int index1 = static_cast<int>(floor(tablePhase));
+ float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+ int index2 = (index1 + 1);
+ if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+ index2 = 0;
+ }
+ float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+ float *xFrame = &mX[mCursor * getChannelCount()];
+ for (int i = 0; i < mNumTaps; i++) {
+ float coefficient1 = *coefficients1++;
+ float coefficient2 = *coefficients2++;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float sample = *xFrame++;
+ mSingleFrame[channel] += sample * coefficient1;
+ mSingleFrame2[channel] += sample * coefficient2;
+ }
+ }
+
+ // Interpolate and copy to output.
+ float fraction = tablePhase - index1;
+ for (int channel = 0; channel < getChannelCount(); channel++) {
+ float low = mSingleFrame[channel];
+ float high = mSingleFrame2[channel];
+ frame[channel] = low + (fraction * (high - low));
+ }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
new file mode 100644
index 0000000..7d49ec7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_STEREO_H
+#define OBOE_SINC_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "SincResampler.h"
+
+namespace resampler {
+
+class SincResamplerStereo : public SincResampler {
+public:
+ explicit SincResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+ virtual ~SincResamplerStereo() = default;
+
+ void writeFrame(const float *frame) override;
+
+ void readFrame(float *frame) override;
+
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index 611cbf7..0792fc5 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -23,6 +23,7 @@
#include <gtest/gtest.h>
#include "flowgraph/ClipToRange.h"
+#include "flowgraph/MonoBlend.h"
#include "flowgraph/MonoToMultiConverter.h"
#include "flowgraph/SourceFloat.h"
#include "flowgraph/RampLinear.h"
@@ -164,3 +165,29 @@
EXPECT_NEAR(expected[i], output[i], tolerance);
}
}
+
+TEST(test_flowgraph, module_mono_blend) {
+ // Two channel to two channel with 3 inputs and outputs.
+ constexpr int numChannels = 2;
+ constexpr int numFrames = 3;
+
+ static const float input[] = {-0.7, 0.5, -0.25, 1.25, 1000, 2000};
+ static const float expected[] = {-0.1, -0.1, 0.5, 0.5, 1500, 1500};
+ float output[100];
+ SourceFloat sourceFloat{numChannels};
+ MonoBlend monoBlend{numChannels};
+ SinkFloat sinkFloat{numChannels};
+
+ sourceFloat.setData(input, numFrames);
+
+ sourceFloat.output.connect(&monoBlend.input);
+ monoBlend.output.connect(&sinkFloat.input);
+
+ int32_t numRead = sinkFloat.read(output, numFrames);
+ ASSERT_EQ(numRead, numFrames);
+ constexpr float tolerance = 0.000001f; // arbitrary
+ for (int i = 0; i < numRead; i++) {
+ EXPECT_NEAR(expected[i], output[i], tolerance);
+ }
+}
+
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 4c83406..f81aa87 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -2301,6 +2301,8 @@
return AUDIO_FLAG_CONTENT_SPATIALIZED;
case media::AudioFlag::NEVER_SPATIALIZE:
return AUDIO_FLAG_NEVER_SPATIALIZE;
+ case media::AudioFlag::CALL_REDIRECTION:
+ return AUDIO_FLAG_CALL_REDIRECTION;
}
return unexpected(BAD_VALUE);
}
@@ -2342,6 +2344,8 @@
return media::AudioFlag::CONTENT_SPATIALIZED;
case AUDIO_FLAG_NEVER_SPATIALIZE:
return media::AudioFlag::NEVER_SPATIALIZE;
+ case AUDIO_FLAG_CALL_REDIRECTION:
+ return media::AudioFlag::CALL_REDIRECTION;
}
return unexpected(BAD_VALUE);
}
@@ -2957,11 +2961,8 @@
}));
legacy.num_gains = aidl.hal.gains.size();
- media::AudioPortConfig aidlPortConfig;
- aidlPortConfig.hal = aidl.hal.activeConfig;
- aidlPortConfig.sys = aidl.sys.activeConfig;
legacy.active_config = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfig_audio_port_config(aidlPortConfig));
+ aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
legacy.ext = VALUE_OR_RETURN(
aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
return legacy;
@@ -3007,10 +3008,9 @@
}));
aidl.sys.gains.resize(legacy.num_gains);
- media::AudioPortConfig aidlPortConfig = VALUE_OR_RETURN(
+ aidl.sys.activeConfig = VALUE_OR_RETURN(
legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
- aidl.hal.activeConfig = aidlPortConfig.hal;
- aidl.sys.activeConfig = aidlPortConfig.sys;
+ aidl.sys.activeConfig.hal.portId = aidl.hal.id;
RETURN_IF_ERROR(
legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
return aidl;
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index ac128e6..ea4faa8 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -238,16 +238,18 @@
// Otherwise the callback thread will never exit.
stop();
if (mAudioRecordThread != 0) {
- mProxy->interrupt();
mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
+ mProxy->interrupt();
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
- // No lock here: worst case we remove a NULL callback which will be a nop
+
+ AutoMutex lock(mLock);
if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
// This may not stop all of these device callbacks!
// TODO: Add some sort of protection.
AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
+ mDeviceCallback.clear();
}
}
namespace {
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 407b294..066a7ae 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -199,6 +199,7 @@
#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
+ // Do not change this without changing the MediaMetricsService side.
// Java API 28 entries, do not change.
mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
mMetricsItem->setCString(MM_PREFIX "type",
@@ -214,6 +215,7 @@
mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
mMetricsItem->setCString(MM_PREFIX "logSessionId", track->mLogSessionId.c_str());
+ mMetricsItem->setInt32(MM_PREFIX "underrunFrames", (int32_t)track->getUnderrunFrames());
}
// hand the user a snapshot of the metrics.
@@ -474,7 +476,8 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- // No lock here: worst case we remove a NULL callback which will be a nop
+
+ AutoMutex lock(mLock);
if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
// This may not stop all of these device callbacks!
// TODO: Add some sort of protection.
@@ -550,8 +553,18 @@
sessionId, transferType, attributionSource.uid, attributionSource.pid);
mThreadCanCallJava = threadCanCallJava;
+
+ // These variables are pulled in an error report, so we initialize them early.
mSelectedDeviceId = selectedDeviceId;
mSessionId = sessionId;
+ mChannelMask = channelMask;
+ mFormat = format;
+ mOrigFlags = mFlags = flags;
+ mReqFrameCount = mFrameCount = frameCount;
+ mSampleRate = sampleRate;
+ mOriginalSampleRate = sampleRate;
+ mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+ mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -626,7 +639,6 @@
} else {
// stream type shouldn't be looked at, this track has audio attributes
- memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
ALOGV("%s(): Building AudioTrack with attributes:"
" usage=%d content=%d flags=0x%x tags=[%s]",
__func__,
@@ -648,14 +660,12 @@
status = BAD_VALUE;
goto error;
}
- mFormat = format;
if (!audio_is_output_channel(channelMask)) {
errorMessage = StringPrintf("%s: Invalid channel mask %#x", __func__, channelMask);
status = BAD_VALUE;
goto error;
}
- mChannelMask = channelMask;
channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
@@ -697,9 +707,6 @@
status = BAD_VALUE;
goto error;
}
- mSampleRate = sampleRate;
- mOriginalSampleRate = sampleRate;
- mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
// 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
@@ -719,7 +726,6 @@
mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
mSendLevel = 0.0f;
// mFrameCount is initialized in createTrack_l
- mReqFrameCount = frameCount;
if (notificationFrames >= 0) {
mNotificationFramesReq = notificationFrames;
mNotificationsPerBufferReq = 0;
@@ -760,7 +766,6 @@
mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
}
mAuxEffectId = 0;
- mOrigFlags = mFlags = flags;
mCallback = callback;
if (_callback != nullptr) {
@@ -2155,7 +2160,8 @@
if (status == NO_ERROR) return;
// We report error on the native side because some callers do not come
// from Java.
- mediametrics::LogItem(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + "error")
+ // Ensure these variables are initialized in set().
+ mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR)
.set(AMEDIAMETRICS_PROP_EVENT, event)
.set(AMEDIAMETRICS_PROP_ERROR, mediametrics::statusToErrorString(status))
.set(AMEDIAMETRICS_PROP_ERRORMESSAGE, message)
@@ -2166,8 +2172,10 @@
.set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
.set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
.set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
- .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mReqFrameCount) // requested frame count
// the following are NOT immutable
+ // frame count is initially the requested frame count, but may be adjusted
+ // by AudioFlinger after creation.
+ .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
.set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
.set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
.set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 35719be..da27dc8 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -409,7 +409,7 @@
android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
// it seems that a FUTEX_WAKE_PRIVATE will not wake a FUTEX_WAIT, even within same process
(void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
- 1);
+ INT_MAX);
}
}
@@ -419,7 +419,7 @@
if (!(android_atomic_or(CBLK_INTERRUPT, &cblk->mFlags) & CBLK_INTERRUPT)) {
android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
(void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
- 1);
+ INT_MAX);
}
}
@@ -747,7 +747,7 @@
int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
if (!(old & CBLK_FUTEX_WAKE)) {
(void) syscall(__NR_futex, &cblk->mFutex,
- mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+ mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
}
}
mFlushed += (newFront - front) & mask;
@@ -917,7 +917,7 @@
int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
if (!(old & CBLK_FUTEX_WAKE)) {
(void) syscall(__NR_futex, &cblk->mFutex,
- mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+ mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
}
}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 91361fb..acf4e6d 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -36,4 +36,5 @@
CAPTURE_PRIVATE = 13,
CONTENT_SPATIALIZED = 14,
NEVER_SPATIALIZE = 15,
+ CALL_REDIRECTION = 16,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
index 27c0fe5..f3b5c19 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -17,7 +17,7 @@
package android.media;
import android.media.AudioGainSys;
-import android.media.AudioPortConfigSys;
+import android.media.AudioPortConfig;
import android.media.AudioPortExtSys;
import android.media.AudioPortRole;
import android.media.AudioPortType;
@@ -35,8 +35,8 @@
AudioProfileSys[] profiles;
/** System-only parameters for each AudioGain from 'port.gains'. */
AudioGainSys[] gains;
- /** System-only parameters for 'port.activeConfig'. */
- AudioPortConfigSys activeConfig;
+ /** Current audio port configuration. */
+ AudioPortConfig activeConfig;
/** System-only extra parameters for 'port.ext'. */
AudioPortExtSys ext;
}
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 3df9378..117d188 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -70,48 +70,34 @@
return audioDeviceOutAllBleSet;
}
-bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
if (deviceTypes.empty()) {
- str = "Empty device types";
- return true;
+ return "Empty device types";
}
- bool ret = true;
- for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
- std::string deviceTypeStr;
- ret = audio_is_output_device(*it) ?
- OutputDeviceConverter::toString(*it, deviceTypeStr) :
- InputDeviceConverter::toString(*it, deviceTypeStr);
- if (!ret) {
- break;
+ std::stringstream ss;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+ if (it != deviceTypes.begin()) {
+ ss << ", ";
}
- str.append(deviceTypeStr);
- if (++it != deviceTypes.end()) {
- str.append(" , ");
+ const char* strType = audio_device_to_string(*it);
+ if (strlen(strType) != 0) {
+ ss << strType;
+ } else {
+ ss << "unknown type:0x" << std::hex << *it;
}
}
- if (!ret) {
- str = "Unknown values";
- }
- return ret;
+ return ss.str();
}
std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
- std::string ret;
- for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
- std::stringstream ss;
- ss << "0x" << std::hex << (*it);
- ret.append(ss.str());
- if (++it != deviceTypes.end()) {
- ret.append(" , ");
+ std::stringstream ss;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+ if (it != deviceTypes.begin()) {
+ ss << ", ";
}
+ ss << "0x" << std::hex << (*it);
}
- return ret;
-}
-
-std::string toString(const DeviceTypeSet& deviceTypes) {
- std::string ret;
- deviceTypesToString(deviceTypes, ret);
- return ret;
+ return ss.str();
}
} // namespace android
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index 26eea87..4a7e956 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -100,10 +100,13 @@
std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
std::stringstream sstream;
- sstream << "type:0x" << std::hex << mType;
+ sstream << audio_device_to_string(mType);
+ if (sstream.str().empty()) {
+ sstream << "unknown type:0x" << std::hex << mType;
+ }
// IP and MAC address are sensitive information. The sensitive information will be suppressed
// is `includeSensitiveInfo` is false.
- sstream << ",@:"
+ sstream << ", @:"
<< (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
return sstream.str();
}
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index ec10bc9..9a67bb7 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -319,7 +319,7 @@
{
dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
for (size_t i = 0; i < size(); i++) {
- const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
dst->append(prefix);
std::string profileStr;
at(i)->dump(&profileStr, prefix.size());
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 88ba544..5ffbffc 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -110,27 +110,23 @@
return NO_ERROR;
}
-void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+void DeviceDescriptorBase::dump(std::string *dst, int spaces,
const char* extraInfo, bool verbose) const
{
- const std::string prefix = base::StringPrintf("%*s %d. ", spaces, "", index + 1);
- dst->append(prefix);
if (mId != 0) {
dst->append(base::StringPrintf("Port ID: %d; ", mId));
}
if (extraInfo != nullptr) {
dst->append(base::StringPrintf("%s; ", extraInfo));
}
- dst->append(base::StringPrintf("%s (%s)\n",
- audio_device_to_string(mDeviceTypeAddr.mType),
+ dst->append(base::StringPrintf("{%s}\n",
mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
dst->append(base::StringPrintf(
- "%*sEncapsulation modes: %u, metadata types: %u\n",
- static_cast<int>(prefix.size()), "",
+ "%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
mEncapsulationModes, mEncapsulationMetadataTypes));
- AudioPort::dump(dst, prefix.size(), nullptr, verbose);
+ AudioPort::dump(dst, spaces, nullptr, verbose);
}
std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
@@ -180,8 +176,9 @@
status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
AudioPort::writeToParcelable(parcelable);
- AudioPortConfig::writeToParcelable(&parcelable->hal.activeConfig, useInputChannelMask());
+ AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+ parcelable->sys.activeConfig.hal.portId = parcelable->hal.id;
media::audio::common::AudioPortDeviceExt deviceExt;
deviceExt.device = VALUE_OR_RETURN_STATUS(
@@ -205,7 +202,7 @@
}
status_t status = AudioPort::readFromParcelable(parcelable)
?: AudioPortConfig::readFromParcelable(
- parcelable.hal.activeConfig, useInputChannelMask());
+ parcelable.sys.activeConfig.hal, useInputChannelMask());
if (status != OK) {
return status;
}
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 60b42fb..707ab68 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -131,14 +131,16 @@
return deviceTypes;
}
-bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
+std::string deviceTypesToString(const DeviceTypeSet& deviceTypes);
std::string dumpDeviceTypes(const DeviceTypeSet& deviceTypes);
/**
* Return human readable string for device types.
*/
-std::string toString(const DeviceTypeSet& deviceTypes);
+inline std::string toString(const DeviceTypeSet& deviceTypes) {
+ return deviceTypesToString(deviceTypes);
+}
} // namespace android
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index b70da8a..1f0c768 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -65,7 +65,7 @@
status_t setEncapsulationModes(uint32_t encapsulationModes);
status_t setEncapsulationMetadataTypes(uint32_t encapsulationMetadataTypes);
- void dump(std::string *dst, int spaces, int index,
+ void dump(std::string *dst, int spaces,
const char* extraInfo = nullptr, bool verbose = true) const;
void log() const;
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 19a8b2f..61a2bf5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -150,6 +150,7 @@
bool sHasAuxChannels[PREPROC_NUM_EFFECTS] = {
false, // PREPROC_AGC
+ false, // PREPROC_AGC2
true, // PREPROC_AEC
true, // PREPROC_NS
};
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index 5d0eca0..aeaa49c 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -61,6 +61,9 @@
#define AMEDIAMETRICS_KEY_AUDIO_FLINGER AMEDIAMETRICS_KEY_PREFIX_AUDIO "flinger"
#define AMEDIAMETRICS_KEY_AUDIO_POLICY AMEDIAMETRICS_KEY_PREFIX_AUDIO "policy"
+// Error keys
+#define AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "error"
+
/*
* MediaMetrics Properties are unified space for consistency and readability.
*/
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index f2cd505..87f608f 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -27,6 +27,7 @@
#include <variant>
#include <binder/Parcel.h>
+#include <log/log.h>
#include <utils/Errors.h>
#include <utils/Timers.h> // nsecs_t
@@ -502,6 +503,7 @@
do {
if (ptr >= bufferptrmax) {
ALOGE("%s: buffer exceeded", __func__);
+ android_errorWriteLog(0x534e4554, "204445255");
return BAD_VALUE;
}
} while (*ptr++ != 0);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index bffd7b3..6347b7a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -16,6 +16,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "StagefrightRecorder"
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
#include <inttypes.h>
// TODO/workaround: including base logging now as it conflicts with ADebug.h
// and it must be included first.
@@ -1856,6 +1858,7 @@
// Set up the appropriate MediaSource depending on the chosen option
status_t StagefrightRecorder::setupMediaSource(
sp<MediaSource> *mediaSource) {
+ ATRACE_CALL();
if (mVideoSource == VIDEO_SOURCE_DEFAULT
|| mVideoSource == VIDEO_SOURCE_CAMERA) {
sp<CameraSource> cameraSource;
@@ -1936,6 +1939,7 @@
status_t StagefrightRecorder::setupVideoEncoder(
const sp<MediaSource> &cameraSource,
sp<MediaCodecSource> *source) {
+ ATRACE_CALL();
source->clear();
sp<AMessage> format = new AMessage();
@@ -2114,6 +2118,7 @@
}
status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
+ ATRACE_CALL();
status_t status = BAD_VALUE;
if (OK != (status = checkAudioEncoderCapabilities())) {
return status;
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
index 9311e2e..ea63da8 100644
--- a/media/libmediatranscoding/include/media/ControllerClientInterface.h
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -66,7 +66,7 @@
* Returns false if the session doesn't exist, or the client is already requesting the
* session. Returns true otherwise.
*/
- virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid);
+ virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) = 0;
/**
* Retrieves the (unsorted) list of all clients requesting the session identified by
@@ -81,7 +81,7 @@
* Returns false if the session doesn't exist. Returns true otherwise.
*/
virtual bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
- std::vector<int32_t>* out_clientUids);
+ std::vector<int32_t>* out_clientUids) = 0;
protected:
virtual ~ControllerClientInterface() = default;
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index 2c03f27..f070aac 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -105,7 +105,6 @@
if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
int32_t remoteRefcount =
reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->getRemoteRefcount();
- // Sanity check so that remoteRefCount() is non-negative.
return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
#else
return 0;
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 4f0909b..15d6d3697 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -45,6 +45,7 @@
static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
static const String16 sModifyAudioRouting("android.permission.MODIFY_AUDIO_ROUTING");
+static const String16 sCallAudioInterception("android.permission.CALL_AUDIO_INTERCEPTION");
static String16 resolveCallingPackage(PermissionController& permissionController,
const std::optional<String16> opPackageName, uid_t uid) {
@@ -309,6 +310,17 @@
return ok;
}
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+
+ // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+ bool ok = PermissionCache::checkPermission(sCallAudioInterception, pid, uid);
+ if (!ok) ALOGV("%s(): android.permission.CALL_AUDIO_INTERCEPTION denied for uid %d",
+ __func__, uid);
+ return ok;
+}
+
AttributionSourceState getCallingAttributionSource() {
AttributionSourceState attributionSource = AttributionSourceState();
attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 878ae8c..2b765cc 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "TimeCheck"
#include <optional>
+#include <sstream>
#include <mediautils/EventLog.h>
#include <mediautils/TimeCheck.h>
@@ -25,6 +26,15 @@
namespace android {
+namespace {
+
+std::string formatTime(std::chrono::system_clock::time_point t) {
+ auto msSinceEpoch = std::chrono::round<std::chrono::milliseconds>(t.time_since_epoch());
+ return std::to_string(msSinceEpoch.count());
+}
+
+} // namespace
+
// Audio HAL server pids vector used to generate audio HAL processes tombstone
// when audioserver watchdog triggers.
// We use a lockless storage to avoid potential deadlocks in the context of watchdog
@@ -66,15 +76,18 @@
}
TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
- : mTimerHandle(getTimeCheckThread()->scheduleTask([tag] { crash(tag); },
- std::chrono::milliseconds(timeoutMs))) {}
+ : mTimerHandle(getTimeCheckThread()->scheduleTask(
+ [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+ std::chrono::milliseconds(timeoutMs))) {}
TimeCheck::~TimeCheck() {
getTimeCheckThread()->cancelTask(mTimerHandle);
}
/* static */
-void TimeCheck::crash(const char* tag) {
+void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
+ std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+
// Generate audio HAL processes tombstones and allow time to complete
// before forcing restart
std::vector<pid_t> pids = getAudioHalPids();
@@ -88,7 +101,8 @@
ALOGI("No HAL process pid available, skipping tombstones");
}
LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
- LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+ LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
+ formatTime(startTime).c_str(), formatTime(endTime).c_str());
}
}; // namespace android
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 734313c..2fe2451 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -104,6 +104,7 @@
bool dumpAllowed();
bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource);
void purgePermissionCache();
int32_t getOpForSource(audio_source_t source);
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 2411f97..0d6e80d 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -38,7 +38,7 @@
private:
static TimerThread* getTimeCheckThread();
static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
- static void crash(const char* tag);
+ static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
const TimerThread::Handle mTimerHandle;
};
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 2e9ecb1..ca7ffdb 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -3258,6 +3258,8 @@
} else {
mHalEffect->setDevices({mDevice});
}
+ mHalEffect->configure();
+
*handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
mNotifyFramesProcessed);
status = (*handle)->initCheck();
@@ -3306,8 +3308,14 @@
}
void AudioFlinger::DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
- Mutex::Autolock _l(mProxyLock);
- mEffectHandles.erase(patchHandle);
+ sp<EffectHandle> effect;
+ {
+ Mutex::Autolock _l(mProxyLock);
+ if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
+ effect = mEffectHandles.at(patchHandle);
+ mEffectHandles.erase(patchHandle);
+ }
+ }
}
@@ -3315,6 +3323,7 @@
{
Mutex::Autolock _l(mProxyLock);
if (effect == mHalEffect) {
+ mHalEffect->release_l();
mHalEffect.clear();
mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
}
@@ -3462,7 +3471,7 @@
if (proxy == nullptr) {
return NO_INIT;
}
- return proxy->addEffectToHal(effect);
+ return proxy->removeEffectFromHal(effect);
}
bool AudioFlinger::DeviceEffectProxy::ProxyCallback::isOutput() const {
@@ -3514,4 +3523,22 @@
return proxy->channelCount();
}
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectEnable(
+ const sp<EffectBase>& effectBase) {
+ sp<EffectModule> effect = effectBase->asEffectModule();
+ if (effect == nullptr) {
+ return;
+ }
+ effect->start();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectDisable(
+ const sp<EffectBase>& effectBase) {
+ sp<EffectModule> effect = effectBase->asEffectModule();
+ if (effect == nullptr) {
+ return;
+ }
+ effect->stop();
+}
+
} // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 5ebf483..e2bea67 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -766,8 +766,8 @@
void resetVolume() override {}
product_strategy_t strategy() const override { return static_cast<product_strategy_t>(0); }
int32_t activeTrackCnt() const override { return 0; }
- void onEffectEnable(const sp<EffectBase>& effect __unused) override {}
- void onEffectDisable(const sp<EffectBase>& effect __unused) override {}
+ void onEffectEnable(const sp<EffectBase>& effect __unused) override;
+ void onEffectDisable(const sp<EffectBase>& effect __unused) override;
wp<EffectChain> chain() const override { return nullptr; }
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 43fa781..dd278f0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -5889,6 +5889,20 @@
return trackCount;
}
+bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
+{
+ uint64_t position = 0;
+ struct timespec unused;
+ const status_t ret = mOutput->getPresentationPosition(&position, &unused);
+ if (ret == NO_ERROR) {
+ if (position != mLastCheckedTimestampPosition) {
+ mLastCheckedTimestampPosition = position;
+ return true;
+ }
+ }
+ return false;
+}
+
// isTrackAllowed_l() must be called with ThreadBase::mLock held
bool AudioFlinger::MixerThread::isTrackAllowed_l(
audio_channel_mask_t channelMask, audio_format_t format,
@@ -6317,19 +6331,24 @@
// fill a buffer, then remove it from active list.
// Only consider last track started for mixer state control
if (--(track->mRetryCount) <= 0) {
- ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
- tracksToRemove->add(track);
- // indicate to client process that the track was disabled because of underrun;
- // it will then automatically call start() when data is available
- track->disable();
- // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
- // unlike mixerthread, HAL can be paused for direct output
- ALOGW("pause because of UNDERRUN, framesReady = %zu,"
- "minFrames = %u, mFormat = %#x",
- framesReady, minFrames, mFormat);
- if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
- doHwPause = true;
- mHwPaused = true;
+ const bool running = checkRunningTimestamp();
+ if (running) { // still running, give us more time.
+ track->mRetryCount = kMaxTrackRetriesOffload;
+ } else {
+ ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
+ tracksToRemove->add(track);
+ // indicate to client process that the track was disabled because of
+ // underrun; it will then automatically call start() when data is available
+ track->disable();
+ // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
+ // unlike mixerthread, HAL can be paused for direct output
+ ALOGW("pause because of UNDERRUN, framesReady = %zu,"
+ "minFrames = %u, mFormat = %#x",
+ framesReady, minFrames, mFormat);
+ if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
+ doHwPause = true;
+ mHwPaused = true;
+ }
}
} else if (last) {
mixerStatus = MIXER_TRACKS_ENABLED;
@@ -6540,6 +6559,7 @@
void AudioFlinger::DirectOutputThread::flushHw_l()
{
+ PlaybackThread::flushHw_l();
mOutput->flush();
mHwPaused = false;
mFlushPending = false;
@@ -6675,8 +6695,7 @@
AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
: DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
- mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
- mOffloadUnderrunPosition(~0LL)
+ mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
{
//FIXME: mStandby should be set to true by ThreadBase constructo
mStandby = true;
@@ -6893,19 +6912,7 @@
// No buffers for this track. Give it a few chances to
// fill a buffer, then remove it from active list.
if (--(track->mRetryCount) <= 0) {
- bool running = false;
- uint64_t position = 0;
- struct timespec unused;
- // The running check restarts the retry counter at least once.
- status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
- if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
- running = true;
- mOffloadUnderrunPosition = position;
- }
- if (ret == NO_ERROR) {
- ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
- (long long)position, (long long)mOffloadUnderrunPosition);
- }
+ const bool running = checkRunningTimestamp();
if (running) { // still running, give us more time.
track->mRetryCount = kMaxTrackRetriesOffload;
} else {
@@ -6976,7 +6983,6 @@
mPausedBytesRemaining = 0;
// reset bytes written count to reflect that DSP buffers are empty after flush.
mBytesWritten = 0;
- mOffloadUnderrunPosition = ~0LL;
if (mUseAsyncWrite) {
// discard any pending drain or write ack by incrementing sequence
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 43d1055..61537a8 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1376,6 +1376,14 @@
struct audio_patch mDownStreamPatch;
std::atomic_bool mCheckOutputStageEffects{};
+
+ // A differential check on the timestamps to see if there is a change in the
+ // timestamp frame position between the last call to checkRunningTimestamp.
+ uint64_t mLastCheckedTimestampPosition = ~0LL;
+
+ bool checkRunningTimestamp();
+
+ virtual void flushHw_l() { mLastCheckedTimestampPosition = ~0LL; }
};
class MixerThread : public PlaybackThread {
@@ -1493,7 +1501,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
- virtual void flushHw_l();
+ void flushHw_l() override;
void setMasterBalance(float balance) override;
@@ -1558,7 +1566,7 @@
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, bool systemReady);
virtual ~OffloadThread() {};
- virtual void flushHw_l();
+ void flushHw_l() override;
protected:
// threadLoop snippets
@@ -1575,10 +1583,6 @@
size_t mPausedWriteLength; // length in bytes of write interrupted by pause
size_t mPausedBytesRemaining; // bytes still waiting in mixbuffer after resume
bool mKeepWakeLock; // keep wake lock while waiting for write callback
- uint64_t mOffloadUnderrunPosition; // Current frame position for offloaded playback
- // used and valid only during underrun. ~0 if
- // no underrun has occurred during playback and
- // is not reset on standby.
};
class AsyncCallbackThread : public Thread {
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index a5de655..955b0cf 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -41,7 +41,7 @@
void setUid(uid_t uid) { mUid = uid; }
- void dump(String8 *dst, int spaces, int index) const;
+ void dump(String8 *dst, int spaces) const;
struct audio_patch mPatch;
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index e421c94..dc2403c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -16,19 +16,21 @@
#pragma once
-#include <vector>
-#include <map>
-#include <unistd.h>
#include <sys/types.h>
+#include <unistd.h>
-#include <system/audio.h>
+#include <map>
+#include <vector>
+
+#include <android-base/stringprintf.h>
#include <audiomanager/AudioManager.h>
#include <media/AudioProductStrategy.h>
+#include <policy.h>
+#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
-#include <policy.h>
#include <Volume.h>
#include "AudioPatch.h"
#include "EffectDescriptor.h"
@@ -52,7 +54,7 @@
mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
~ClientDescriptor() override = default;
- virtual void dump(String8 *dst, int spaces, int index) const;
+ virtual void dump(String8 *dst, int spaces) const;
virtual std::string toShortString() const;
audio_port_handle_t portId() const { return mPortId; }
@@ -100,7 +102,7 @@
~TrackClientDescriptor() override = default;
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
std::string toShortString() const override;
audio_output_flags_t flags() const { return mFlags; }
@@ -168,7 +170,7 @@
~RecordClientDescriptor() override = default;
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
audio_unique_id_t riid() const { return mRIId; }
audio_source_t source() const { return mSource; }
@@ -219,7 +221,7 @@
void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
using ClientDescriptor::dump;
- void dump(String8 *dst, int spaces, int index) const override;
+ void dump(String8 *dst, int spaces) const override;
private:
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -273,7 +275,9 @@
(void)extraInfo;
size_t index = 0;
for (const auto& client: getClientIterable()) {
- client->dump(dst, spaces, index++);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", ++index);
+ dst->appendFormat("%s", prefix.c_str());
+ client->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index b444fd7..4adc920 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -91,7 +91,7 @@
void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
- void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
+ void dump(String8 *dst, int spaces, bool verbose = true) const;
private:
template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index 0fe5c16..580938e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -44,7 +44,7 @@
}
dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
for (size_t i = 0; i < audioRouteVector.size(); i++) {
- const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
dst->append(prefix.c_str());
audioRouteVector.itemAt(i)->dump(dst, prefix.size());
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1ae66de..235e4aa 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -255,32 +255,35 @@
devices().toString(true /*includeSensitiveInfo*/).c_str());
dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
if (!mRoutingActivities.empty()) {
- dst->appendFormat("%*sProduct Strategies (%zu):\n", spaces, "", mRoutingActivities.size());
+ dst->appendFormat("%*s- Product Strategies (%zu):\n", spaces - 2, "",
+ mRoutingActivities.size());
for (const auto &iter : mRoutingActivities) {
dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
iter.second.dump(dst, 0);
}
}
if (!mVolumeActivities.empty()) {
- dst->appendFormat("%*sVolume Activities (%zu):\n", spaces, "", mVolumeActivities.size());
+ dst->appendFormat("%*s- Volume Activities (%zu):\n", spaces - 2, "",
+ mVolumeActivities.size());
for (const auto &iter : mVolumeActivities) {
dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
iter.second.dump(dst, 0);
}
}
if (getClientCount() != 0) {
- dst->appendFormat("%*sAudioTrack Clients (%zu):\n", spaces, "", getClientCount());
+ dst->appendFormat("%*s- AudioTrack clients (%zu):\n", spaces - 2, "", getClientCount());
ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
- dst->append("\n");
}
if (!mActiveClients.empty()) {
- dst->appendFormat("%*sAudioTrack active (stream) clients (%zu):\n", spaces, "",
+ dst->appendFormat("%*s- AudioTrack active (stream) clients (%zu):\n", spaces - 2, "",
mActiveClients.size());
size_t index = 0;
for (const auto& client : mActiveClients) {
- client->dump(dst, spaces, index++);
+ const std::string prefix = base::StringPrintf(
+ "%*sid %zu: ", spaces + 1, "", ++index);
+ dst->appendFormat("%s", prefix.c_str());
+ client->dump(dst, prefix.size());
}
- dst->append("\n");
}
}
@@ -708,7 +711,7 @@
{
AudioOutputDescriptor::dump(dst, spaces, extraInfo);
dst->appendFormat("%*sSource:\n", spaces, "");
- mSource->dump(dst, spaces, 0);
+ mSource->dump(dst, spaces);
}
void HwAudioOutputDescriptor::toAudioPortConfig(
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index d79110a..4f03db9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -20,7 +20,9 @@
#include "AudioPatch.h"
#include "TypeConverter.h"
+#include <android-base/stringprintf.h>
#include <log/log.h>
+#include <media/AudioDeviceTypeAddr.h>
#include <utils/String8.h>
namespace android {
@@ -37,20 +39,21 @@
{
for (int i = 0; i < count; ++i) {
const audio_port_config &cfg = cfgs[i];
- dst->appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
+ dst->appendFormat("%*s[%s %d] ", spaces, "", prefix, i + 1);
if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
- dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
+ AudioDeviceTypeAddr device(cfg.ext.device.type, cfg.ext.device.address);
+ dst->appendFormat("Device Port ID: %d; {%s}",
+ cfg.id, device.toString(true /*includeSensitiveInfo*/).c_str());
} else {
- dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+ dst->appendFormat("Mix Port ID: %d; I/O handle: %d;", cfg.id, cfg.ext.mix.handle);
}
dst->append("\n");
}
}
-void AudioPatch::dump(String8 *dst, int spaces, int index) const
+void AudioPatch::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
- spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+ dst->appendFormat("owner uid %4d; handle %2d; af handle %2d\n", mUid, mHandle, mAfPatchHandle);
dumpPatchEndpoints(dst, spaces, "src ", mPatch.num_sources, mPatch.sources);
dumpPatchEndpoints(dst, spaces, "sink", mPatch.num_sinks, mPatch.sinks);
}
@@ -135,9 +138,11 @@
void AudioPatchCollection::dump(String8 *dst) const
{
- dst->append("\nAudio Patches:\n");
+ dst->appendFormat("\n Audio Patches (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- valueAt(i)->dump(dst, 2, i);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b209a88..546f56b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -675,7 +675,7 @@
void AudioPolicyMixCollection::dump(String8 *dst) const
{
- dst->append("\nAudio Policy Mix:\n");
+ dst->append("\n Audio Policy Mix:\n");
for (size_t i = 0; i < size(); i++) {
itemAt(i)->dump(dst, 2, i);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index afc4d01..035bef2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -18,9 +18,12 @@
//#define LOG_NDEBUG 0
#include <sstream>
+
+#include <android-base/stringprintf.h>
+#include <TypeConverter.h>
#include <utils/Log.h>
#include <utils/String8.h>
-#include <TypeConverter.h>
+
#include "AudioOutputDescriptor.h"
#include "AudioPatch.h"
#include "AudioPolicyMix.h"
@@ -39,35 +42,36 @@
return ss.str();
}
-void ClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void ClientDescriptor::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sClient %d:\n", spaces, "", index+1);
- dst->appendFormat("%*s- Port Id: %d Session Id: %d UID: %d\n", spaces, "",
- mPortId, mSessionId, mUid);
- dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
- mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
- dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
- dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
- dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+ dst->appendFormat("Port ID: %d; Session ID: %d; uid %d; State: %s\n",
+ mPortId, mSessionId, mUid, mActive ? "Active" : "Inactive");
+ dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+ audio_format_to_string(mConfig.format), mConfig.sample_rate, mConfig.channel_mask);
+ dst->appendFormat("%*sAttributes: %s\n", spaces, "", toString(mAttributes).c_str());
+ if (mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE) {
+ dst->appendFormat("%*sPreferred Device Port ID: %d;\n", spaces, "", mPreferredDeviceId);
+ }
}
-void TrackClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void TrackClientDescriptor::dump(String8 *dst, int spaces) const
{
- ClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
- dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
- dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
- dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
- for (auto desc : mSecondaryOutputs) {
- dst->appendFormat("%*s - %d\n", spaces, "",
- desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+ ClientDescriptor::dump(dst, spaces);
+ dst->appendFormat("%*sStream: %d; Flags: %08x; Refcount: %d\n", spaces, "",
+ mStream, mFlags, mActivityCount);
+ dst->appendFormat("%*sDAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+ if (!mSecondaryOutputs.empty()) {
+ dst->appendFormat("%*sDAP Secondary Outputs: ", spaces - 2, "");
+ for (auto desc : mSecondaryOutputs) {
+ dst->appendFormat("%d, ", desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+ }
+ dst->append("\n");
}
}
std::string TrackClientDescriptor::toShortString() const
{
std::stringstream ss;
-
ss << ClientDescriptor::toShortString() << " Stream: " << mStream;
return ss.str();
}
@@ -81,10 +85,10 @@
}
}
-void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void RecordClientDescriptor::dump(String8 *dst, int spaces) const
{
- ClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+ ClientDescriptor::dump(dst, spaces);
+ dst->appendFormat("%*sSource: %d; Flags: %08x\n", spaces, "", mSource, mFlags);
mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
}
@@ -109,18 +113,21 @@
mHwOutput = hwOutput;
}
-void SourceClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void SourceClientDescriptor::dump(String8 *dst, int spaces) const
{
- TrackClientDescriptor::dump(dst, spaces, index);
- dst->appendFormat("%*s- Device:\n", spaces, "");
- mSrcDevice->dump(dst, 2, 0);
+ TrackClientDescriptor::dump(dst, spaces);
+ const std::string prefix = base::StringPrintf("%*sDevice: ", spaces, "");
+ dst->appendFormat("%s", prefix.c_str());
+ mSrcDevice->dump(dst, prefix.size());
}
void SourceClientCollection::dump(String8 *dst) const
{
- dst->append("\nAudio sources:\n");
+ dst->appendFormat("\n Audio sources (%zu):\n", size());
for (size_t i = 0; i < size(); i++) {
- valueAt(i)->dump(dst, 2, i);
+ const std::string prefix = base::StringPrintf(" %zu. ", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index d76d0c2..a909331 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -19,10 +19,11 @@
#include <set>
-#include <AudioPolicyInterface.h>
+#include <android-base/stringprintf.h>
#include <audio_utils/string.h>
#include <media/AudioParameter.h>
#include <media/TypeConverter.h>
+#include <AudioPolicyInterface.h>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
#include "HwModule.h"
@@ -176,7 +177,7 @@
}
}
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
{
String8 extraInfo;
if (!mTagName.empty()) {
@@ -184,7 +185,7 @@
}
std::string descBaseDumpStr;
- DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+ DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.string(), verbose);
dst->append(descBaseDumpStr.c_str());
}
@@ -449,7 +450,9 @@
}
dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
for (size_t i = 0; i < size(); i++) {
- itemAt(i)->dump(dst, spaces + 2, i, verbose);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ itemAt(i)->dump(dst, prefix.size(), verbose);
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 843f5da..3f9c8b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::EffectDescriptor"
//#define LOG_NDEBUG 0
+#include <android-base/stringprintf.h>
#include "EffectDescriptor.h"
#include <utils/String8.h>
@@ -24,13 +25,11 @@
void EffectDescriptor::dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*sID: %d\n", spaces, "", mId);
- dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
- dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
- dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
- dst->appendFormat("%*sName: %s\n", spaces, "", mDesc.name);
- dst->appendFormat("%*s%s\n", spaces, "", mEnabled ? "Enabled" : "Disabled");
- dst->appendFormat("%*s%s\n", spaces, "", mSuspended ? "Suspended" : "Active");
+ dst->appendFormat("Effect ID: %d; Attached to I/O handle: %d; Session: %d;\n",
+ mId, mIo, mSession);
+ dst->appendFormat("%*sMusic Effect? %s; \"%s\"; %s; %s\n", spaces, "",
+ isMusicEffect()? "yes" : "no", mDesc.name,
+ mEnabled ? "Enabled" : "Disabled", mSuspended ? "Suspended" : "Active");
}
EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -237,10 +236,14 @@
mTotalEffectsMemory,
mTotalEffectsMemoryMaxUsed);
}
- dst->appendFormat("%*sEffects:\n", spaces, "");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
- valueAt(i)->dump(dst, spaces + 2);
+ if (size() > 0) {
+ if (spaces > 1) spaces -= 2;
+ dst->appendFormat("%*s- Effects (%zu):\n", spaces, "", size());
+ for (size_t i = 0; i < size(); i++) {
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+ dst->appendFormat("%s", prefix.c_str());
+ valueAt(i)->dump(dst, prefix.size());
+ }
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 2977f38..418b7eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -255,7 +255,7 @@
if (mOutputProfiles.size()) {
dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
for (size_t i = 0; i < mOutputProfiles.size(); i++) {
- const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
dst->append(prefix.c_str());
mOutputProfiles[i]->dump(dst, prefix.size());
}
@@ -263,7 +263,7 @@
if (mInputProfiles.size()) {
dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
for (size_t i = 0; i < mInputProfiles.size(); i++) {
- const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+ const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
dst->append(prefix.c_str());
mInputProfiles[i]->dump(dst, prefix.size());
}
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index b3d144f..fbfcf72 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -150,12 +150,8 @@
void ProductStrategy::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
- std::string deviceLiteral;
- if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
- ALOGE("%s: failed to convert device %s",
- __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
- }
- dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+ std::string deviceLiteral = deviceTypesToString(mApplicableDevices);
+ dst->appendFormat("%*sSelected Device: {%s, @:%s}\n", spaces + 2, "",
deviceLiteral.c_str(), mDeviceAddress.c_str());
for (const auto &attr : mAttributesVector) {
@@ -333,4 +329,3 @@
dst->appendFormat("\n");
}
}
-
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 22eeadd..00c1f26 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -3608,12 +3608,14 @@
dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
- mAvailableOutputDevices.dump(dst, String8("\n Available output"));
- mAvailableInputDevices.dump(dst, String8("\n Available input"));
+ dst->append("\n");
+ mAvailableOutputDevices.dump(dst, String8("Available output"), 1);
+ dst->append("\n");
+ mAvailableInputDevices.dump(dst, String8("Available input"), 1);
mHwModulesAll.dump(dst);
mOutputs.dump(dst);
mInputs.dump(dst);
- mEffects.dump(dst);
+ mEffects.dump(dst, 1);
mAudioPatches.dump(dst);
mPolicyMixes.dump(dst);
mAudioSources.dump(dst);
@@ -5471,8 +5473,7 @@
if (!desc->isDuplicated()) {
// exact match on device
if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
- && desc->containsSingleDeviceSupportingEncodedFormats(device)
- && !mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
+ && desc->containsSingleDeviceSupportingEncodedFormats(device)) {
outputs.add(mOutputs.keyAt(i));
} else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index f7442cb..1fbea7d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -79,15 +79,22 @@
!= std::end(mSupportedSystemUsages);
}
-status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
- return validateUsage(usage, getCallingAttributionSource());
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+ return validateUsage(attr, getCallingAttributionSource());
}
-status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
const AttributionSourceState& attributionSource) {
- if (isSystemUsage(usage)) {
- if (isSupportedSystemUsage(usage)) {
- if (!modifyAudioRoutingAllowed(attributionSource)) {
+ if (isSystemUsage(attr.usage)) {
+ if (isSupportedSystemUsage(attr.usage)) {
+ if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
+ && ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
+ if (!callAudioInterceptionAllowed(attributionSource)) {
+ ALOGE(("permission denied: call audio interception not allowed "
+ "for attributionSource %s"), attributionSource.toString().c_str());
+ return PERMISSION_DENIED;
+ }
+ } else if (!modifyAudioRoutingAllowed(attributionSource)) {
ALOGE(("permission denied: modify audio routing not allowed "
"for attributionSource %s"), attributionSource.toString().c_str());
return PERMISSION_DENIED;
@@ -344,7 +351,7 @@
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
ALOGV("%s()", __func__);
Mutex::Autolock _l(mLock);
@@ -386,7 +393,12 @@
case AudioPolicyInterface::API_OUTPUT_LEGACY:
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
- if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+ if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
+ && !callAudioInterceptionAllowed(adjAttributionSource)) {
+ ALOGE("%s() permission denied: call redirection not allowed for uid %d",
+ __func__, adjAttributionSource.uid);
+ result = PERMISSION_DENIED;
+ } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
__func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
@@ -613,7 +625,7 @@
adjAttributionSource.pid = callingPid;
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
adjAttributionSource)));
// check calling permissions.
@@ -635,14 +647,18 @@
}
bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
- if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK
- || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
- || inputSource == AUDIO_SOURCE_VOICE_CALL
- || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
+ bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+ bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
+ || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+ || inputSource == AUDIO_SOURCE_VOICE_CALL;
+
+ if (isCallAudioSource && !canInterceptCallAudio && !canCaptureOutput) {
+ return binderStatusFromStatusT(PERMISSION_DENIED);
+ }
+ if (inputSource == AUDIO_SOURCE_ECHO_REFERENCE
&& !canCaptureOutput) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
-
if (inputSource == AUDIO_SOURCE_FM_TUNER
&& !canCaptureOutput
&& !captureTunerAudioInputAllowed(adjAttributionSource)) {
@@ -687,23 +703,30 @@
case AudioPolicyInterface::API_INPUT_LEGACY:
break;
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+ if ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+ && canInterceptCallAudio) {
+ break;
+ }
// FIXME: use the same permission as for remote submix for now.
+ FALLTHROUGH_INTENDED;
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
if (!canCaptureOutput) {
- ALOGE("getInputForAttr() permission denied: capture not allowed");
+ ALOGE("%s permission denied: capture not allowed", __func__);
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
- if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
- ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
+ if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+ || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+ && canInterceptCallAudio))) {
+ ALOGE("%s permission denied for remote submix capture", __func__);
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_INVALID:
default:
- LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
- (int)inputType);
+ LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
+ __func__, (int)inputType);
}
}
@@ -1489,7 +1512,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
Mutex::Autolock _l(mLock);
*_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1805,7 +1828,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
// startAudioSource should be created as the calling uid
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 0471ddc..ef7a83b 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -729,7 +729,8 @@
if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
onlyHotwordActive = false;
}
- if (currentUid == mPhoneStateOwnerUid) {
+ if (currentUid == mPhoneStateOwnerUid &&
+ !isVirtualSource(current->attributes.source)) {
isPhoneStateOwnerActive = true;
}
}
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8a42b7c..b3ac21b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -385,8 +385,9 @@
app_state_t apmStatFromAmState(int amState);
bool isSupportedSystemUsage(audio_usage_t usage);
- status_t validateUsage(audio_usage_t usage);
- status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
+ status_t validateUsage(const audio_attributes_t& attr);
+ status_t validateUsage(const audio_attributes_t& attr,
+ const AttributionSourceState& attributionSource);
void updateUidStates();
void updateUidStates_l() REQUIRES(mLock);
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index ccdd9e5..015ae2f 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -119,6 +119,59 @@
return res;
}
+status_t CameraFlashlight::turnOnTorchWithStrengthLevel(const String8& cameraId,
+ int32_t torchStrength) {
+ if (!mFlashlightMapInitialized) {
+ ALOGE("%s: findFlashUnits() must be called before this method.",
+ __FUNCTION__);
+ return NO_INIT;
+ }
+
+ ALOGV("%s: set torch strength of camera %s to %d", __FUNCTION__,
+ cameraId.string(), torchStrength);
+ status_t res = OK;
+ Mutex::Autolock l(mLock);
+
+ if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+ ALOGE("%s: Camera device %s is in use, cannot be turned ON.",
+ __FUNCTION__, cameraId.string());
+ return -EBUSY;
+ }
+
+ if (mFlashControl == NULL) {
+ res = createFlashlightControl(cameraId);
+ if (res) {
+ return res;
+ }
+ }
+
+ res = mFlashControl->turnOnTorchWithStrengthLevel(cameraId, torchStrength);
+ return res;
+}
+
+
+status_t CameraFlashlight::getTorchStrengthLevel(const String8& cameraId,
+ int32_t* torchStrength) {
+ status_t res = OK;
+ if (!mFlashlightMapInitialized) {
+ ALOGE("%s: findFlashUnits() must be called before this method.",
+ __FUNCTION__);
+ return NO_INIT;
+ }
+
+ Mutex::Autolock l(mLock);
+
+ if (mFlashControl == NULL) {
+ res = createFlashlightControl(cameraId);
+ if (res) {
+ return res;
+ }
+ }
+
+ res = mFlashControl->getTorchStrengthLevel(cameraId, torchStrength);
+ return res;
+}
+
status_t CameraFlashlight::findFlashUnits() {
Mutex::Autolock l(mLock);
status_t res;
@@ -306,6 +359,22 @@
return mProviderManager->setTorchMode(cameraId.string(), enabled);
}
+
+status_t ProviderFlashControl::turnOnTorchWithStrengthLevel(const String8& cameraId,
+ int32_t torchStrength) {
+ ALOGV("%s: change torch strength level of camera %s to %d", __FUNCTION__,
+ cameraId.string(), torchStrength);
+
+ return mProviderManager->turnOnTorchWithStrengthLevel(cameraId.string(), torchStrength);
+}
+
+status_t ProviderFlashControl::getTorchStrengthLevel(const String8& cameraId,
+ int32_t* torchStrength) {
+ ALOGV("%s: get torch strength level of camera %s", __FUNCTION__,
+ cameraId.string());
+
+ return mProviderManager->getTorchStrengthLevel(cameraId.string(), torchStrength);
+}
// ProviderFlashControl implementation ends
}
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index b97fa5f..1703ddc 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -44,6 +44,14 @@
// set the torch mode to on or off.
virtual status_t setTorchMode(const String8& cameraId,
bool enabled) = 0;
+
+ // Change the brightness level of the torch. If the torch is OFF and
+ // torchStrength >= 1, then the torch will also be turned ON.
+ virtual status_t turnOnTorchWithStrengthLevel(const String8& cameraId,
+ int32_t torchStrength) = 0;
+
+ // Returns the torch strength level.
+ virtual status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength) = 0;
};
/**
@@ -67,6 +75,12 @@
// set the torch mode to on or off.
status_t setTorchMode(const String8& cameraId, bool enabled);
+ // Change the torch strength level of the flash unit in torch mode.
+ status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+
+ // Get the torch strength level
+ status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
+
// Notify CameraFlashlight that camera service is going to open a camera
// device. CameraFlashlight will free the resources that may cause the
// camera open to fail. Camera service must call this function before
@@ -115,6 +129,8 @@
// FlashControlBase
status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
status_t setTorchMode(const String8& cameraId, bool enabled);
+ status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+ status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
private:
sp<CameraProviderManager> mProviderManager;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 2551ea5..5a18582 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -569,6 +569,15 @@
onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
}
+void CameraService::broadcastTorchStrengthLevel(const String8& cameraId,
+ int32_t newStrengthLevel) {
+ Mutex::Autolock lock(mStatusListenerLock);
+ for (auto& i : mListenerList) {
+ i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
+ newStrengthLevel);
+ }
+}
+
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -804,6 +813,31 @@
return ret;
}
+Status CameraService::getTorchStrengthLevel(const String16& cameraId,
+ int32_t* torchStrength) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mServiceLock);
+ if (!mInitialized) {
+ ALOGE("%s: Camera HAL couldn't be initialized.", __FUNCTION__);
+ return STATUS_ERROR(ERROR_DISCONNECTED, "Camera HAL couldn't be initialized.");
+ }
+
+ if(torchStrength == NULL) {
+ ALOGE("%s: strength level must not be null.", __FUNCTION__);
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Strength level should not be null.");
+ }
+
+ status_t res = mCameraProviderManager->getTorchStrengthLevel(String8(cameraId).string(),
+ torchStrength);
+ if (res != OK) {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve torch "
+ "strength level for device %s: %s (%d)", String8(cameraId).string(),
+ strerror(-res), res);
+ }
+ ALOGI("%s: Torch strength level is: %d", __FUNCTION__, *torchStrength);
+ return Status::ok();
+}
+
String8 CameraService::getFormattedCurrentTime() {
time_t now = time(nullptr);
char formattedTime[64];
@@ -1848,8 +1882,10 @@
if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
} else if (effectiveApiLevel == API_2) {
- client->setRotateAndCropOverride(CameraServiceProxyWrapper::getRotateAndCropOverride(
- clientPackageName, facing));
+
+ client->setRotateAndCropOverride(
+ CameraServiceProxyWrapper::getRotateAndCropOverride(
+ clientPackageName, facing, multiuser_get_user_id(clientUid)));
}
// Set camera muting behavior
@@ -1908,8 +1944,14 @@
status_t res = NO_ERROR;
auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
if (clientDescriptor != nullptr) {
- BasicClient* baseClientPtr = clientDescriptor->getValue().get();
- res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ sp<BasicClient> clientSp = clientDescriptor->getValue();
+ res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+ if(res != OK) {
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ mInjectionExternalCamId.string());
+ }
+ res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
if (res != OK) {
mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
}
@@ -1998,6 +2040,132 @@
return OK;
}
+Status CameraService::turnOnTorchWithStrengthLevel(const String16& cameraId, int32_t torchStrength,
+ const sp<IBinder>& clientBinder) {
+ Mutex::Autolock lock(mServiceLock);
+
+ ATRACE_CALL();
+ if (clientBinder == nullptr) {
+ ALOGE("%s: torch client binder is NULL", __FUNCTION__);
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+ "Torch client binder is null.");
+ }
+
+ String8 id = String8(cameraId.string());
+ int uid = CameraThreadState::getCallingUid();
+
+ if (shouldRejectSystemCameraConnection(id)) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to change the strength level "
+ "for system only device %s: ", id.string());
+ }
+
+ // verify id is valid
+ auto state = getCameraState(id);
+ if (state == nullptr) {
+ ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera ID \"%s\" is a not valid camera ID", id.string());
+ }
+
+ StatusInternal cameraStatus = state->getStatus();
+ if (cameraStatus != StatusInternal::NOT_AVAILABLE &&
+ cameraStatus != StatusInternal::PRESENT) {
+ ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(),
+ (int)cameraStatus);
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera ID \"%s\" is a not valid camera ID", id.string());
+ }
+
+ {
+ Mutex::Autolock al(mTorchStatusMutex);
+ TorchModeStatus status;
+ status_t err = getTorchStatusLocked(id, &status);
+ if (err != OK) {
+ if (err == NAME_NOT_FOUND) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera \"%s\" does not have a flash unit", id.string());
+ }
+ ALOGE("%s: getting current torch status failed for camera %s",
+ __FUNCTION__, id.string());
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error changing torch strength level for camera \"%s\": %s (%d)",
+ id.string(), strerror(-err), err);
+ }
+
+ if (status == TorchModeStatus::NOT_AVAILABLE) {
+ if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
+ ALOGE("%s: torch mode of camera %s is not available because "
+ "camera is in use.", __FUNCTION__, id.string());
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Torch for camera \"%s\" is not available due to an existing camera user",
+ id.string());
+ } else {
+ ALOGE("%s: torch mode of camera %s is not available due to "
+ "insufficient resources", __FUNCTION__, id.string());
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Torch for camera \"%s\" is not available due to insufficient resources",
+ id.string());
+ }
+ }
+ }
+
+ {
+ Mutex::Autolock al(mTorchUidMapMutex);
+ updateTorchUidMapLocked(cameraId, uid);
+ }
+ // Check if the current torch strength level is same as the new one.
+ bool shouldSkipTorchStrengthUpdates = mCameraProviderManager->shouldSkipTorchStrengthUpdate(
+ id.string(), torchStrength);
+
+ status_t err = mFlashlight->turnOnTorchWithStrengthLevel(id, torchStrength);
+
+ if (err != OK) {
+ int32_t errorCode;
+ String8 msg;
+ switch (err) {
+ case -ENOSYS:
+ msg = String8::format("Camera \"%s\" has no flashlight.",
+ id.string());
+ errorCode = ERROR_ILLEGAL_ARGUMENT;
+ break;
+ case -EBUSY:
+ msg = String8::format("Camera \"%s\" is in use",
+ id.string());
+ errorCode = ERROR_CAMERA_IN_USE;
+ break;
+ default:
+ msg = String8::format("Changing torch strength level failed.");
+ errorCode = ERROR_INVALID_OPERATION;
+
+ }
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(errorCode, msg.string());
+ }
+
+ {
+ // update the link to client's death
+ // Store the last client that turns on each camera's torch mode.
+ Mutex::Autolock al(mTorchClientMapMutex);
+ ssize_t index = mTorchClientMap.indexOfKey(id);
+ if (index == NAME_NOT_FOUND) {
+ mTorchClientMap.add(id, clientBinder);
+ } else {
+ mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+ mTorchClientMap.replaceValueAt(index, clientBinder);
+ }
+ clientBinder->linkToDeath(this);
+ }
+
+ int clientPid = CameraThreadState::getCallingPid();
+ const char *id_cstr = id.c_str();
+ ALOGI("%s: Torch strength for camera id %s changed to %d for client PID %d",
+ __FUNCTION__, id_cstr, torchStrength, clientPid);
+ if (!shouldSkipTorchStrengthUpdates) {
+ broadcastTorchStrengthLevel(id, torchStrength);
+ }
+ return Status::ok();
+}
+
Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder) {
Mutex::Autolock lock(mServiceLock);
@@ -2069,13 +2237,7 @@
// Update UID map - this is used in the torch status changed callbacks, so must be done
// before setTorchMode
Mutex::Autolock al(mTorchUidMapMutex);
- if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
- mTorchUidMap[id].first = uid;
- mTorchUidMap[id].second = uid;
- } else {
- // Set the pending UID
- mTorchUidMap[id].first = uid;
- }
+ updateTorchUidMapLocked(cameraId, uid);
}
status_t err = mFlashlight->setTorchMode(id, enabled);
@@ -2130,6 +2292,17 @@
return Status::ok();
}
+void CameraService::updateTorchUidMapLocked(const String16& cameraId, int uid) {
+ String8 id = String8(cameraId.string());
+ if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
+ mTorchUidMap[id].first = uid;
+ mTorchUidMap[id].second = uid;
+ } else {
+ // Set the pending UID
+ mTorchUidMap[id].first = uid;
+ }
+}
+
Status CameraService::notifySystemEvent(int32_t eventId,
const std::vector<int32_t>& args) {
const int pid = CameraThreadState::getCallingPid();
@@ -2252,9 +2425,11 @@
if (current != nullptr) {
const auto basicClient = current->getValue();
if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
- basicClient->setRotateAndCropOverride(
- CameraServiceProxyWrapper::getRotateAndCropOverride(
- basicClient->getPackageName(), basicClient->getCameraFacing()));
+ basicClient->setRotateAndCropOverride(
+ CameraServiceProxyWrapper::getRotateAndCropOverride(
+ basicClient->getPackageName(),
+ basicClient->getCameraFacing(),
+ multiuser_get_user_id(basicClient->getClientUid())));
}
}
}
@@ -2606,6 +2781,8 @@
Mutex::Autolock lock(mInjectionParametersLock);
mInjectionInternalCamId = String8(internalCamId);
mInjectionExternalCamId = String8(externalCamId);
+ mInjectionStatusListener->addListener(callback);
+ *cameraInjectionSession = new CameraInjectionSession(this);
status_t res = NO_ERROR;
auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
// If the client already exists, we can directly connect to the camera device through the
@@ -2613,8 +2790,14 @@
// (execute connectHelper()) before injecting the camera to the camera device.
if (clientDescriptor != nullptr) {
mInjectionInitPending = false;
- BasicClient* baseClientPtr = clientDescriptor->getValue().get();
- res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ sp<BasicClient> clientSp = clientDescriptor->getValue();
+ res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+ if(res != OK) {
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ mInjectionExternalCamId.string());
+ }
+ res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
if(res != OK) {
mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
}
@@ -2622,8 +2805,6 @@
mInjectionInitPending = true;
}
}
- mInjectionStatusListener->addListener(callback);
- *cameraInjectionSession = new CameraInjectionSession(this);
return binder::Status::ok();
}
@@ -5135,10 +5316,39 @@
return mode;
}
+status_t CameraService::checkIfInjectionCameraIsPresent(const String8& externalCamId,
+ sp<BasicClient> clientSp) {
+ std::unique_ptr<AutoConditionLock> lock =
+ AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+ status_t res = NO_ERROR;
+ if ((res = checkIfDeviceIsUsable(externalCamId)) != NO_ERROR) {
+ ALOGW("Device %s is not usable!", externalCamId.string());
+ mInjectionStatusListener->notifyInjectionError(
+ externalCamId, UNKNOWN_TRANSACTION);
+ clientSp->notifyError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ CaptureResultExtras());
+
+ // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+ // other clients from connecting in mServiceLockWrapper if held
+ mServiceLock.unlock();
+
+ // Clear caller identity temporarily so client disconnect PID checks work correctly
+ int64_t token = CameraThreadState::clearCallingIdentity();
+ clientSp->disconnect();
+ CameraThreadState::restoreCallingIdentity(token);
+
+ // Reacquire mServiceLock
+ mServiceLock.lock();
+ }
+
+ return res;
+}
+
void CameraService::clearInjectionParameters() {
{
Mutex::Autolock lock(mInjectionParametersLock);
- mInjectionInitPending = true;
+ mInjectionInitPending = false;
mInjectionInternalCamId = "";
}
mInjectionExternalCamId = "";
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index a3125c6..060f075 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -172,6 +172,12 @@
virtual binder::Status setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder);
+ virtual binder::Status turnOnTorchWithStrengthLevel(const String16& cameraId,
+ int32_t torchStrength, const sp<IBinder>& clientBinder);
+
+ virtual binder::Status getTorchStrengthLevel(const String16& cameraId,
+ int32_t* torchStrength);
+
virtual binder::Status notifySystemEvent(int32_t eventId,
const std::vector<int32_t>& args);
@@ -1232,6 +1238,8 @@
hardware::camera::common::V1_0::TorchModeStatus status,
SystemCameraKind systemCameraKind);
+ void broadcastTorchStrengthLevel(const String8& cameraId, int32_t newTorchStrengthLevel);
+
void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
// Regular online and offline devices must not be in conflict at camera service layer.
@@ -1296,15 +1304,22 @@
wp<CameraService> mParent;
};
+ // When injecting the camera, it will check whether the injecting camera status is unavailable.
+ // If it is, the disconnect function will be called to to prevent camera access on the device.
+ status_t checkIfInjectionCameraIsPresent(const String8& externalCamId,
+ sp<BasicClient> clientSp);
+
void clearInjectionParameters();
// This is the existing camera id being replaced.
String8 mInjectionInternalCamId;
// This is the external camera Id replacing the internalId.
String8 mInjectionExternalCamId;
- bool mInjectionInitPending = true;
+ bool mInjectionInitPending = false;
// Guard mInjectionInternalCamId and mInjectionInitPending.
Mutex mInjectionParametersLock;
+
+ void updateTorchUidMapLocked(const String16& cameraId, int uid);
};
} // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index eed2654..a38d7ae 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -78,7 +78,8 @@
}
// Find out buffer size for JPEG
- ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+ ssize_t maxJpegSize = device->getJpegBufferSize(device->infoPhysical(String8("")),
+ params.pictureWidth, params.pictureHeight);
if (maxJpegSize <= 0) {
ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ",
__FUNCTION__, mId, maxJpegSize);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 06a3d36..c454716 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -102,7 +102,8 @@
virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
/**
- * The physical camera device's static characteristics metadata buffer
+ * The physical camera device's static characteristics metadata buffer, or
+ * the logical camera's static characteristics if physical id is empty.
*/
virtual const CameraMetadata& infoPhysical(const String8& physicalId) const = 0;
@@ -307,7 +308,8 @@
* Get Jpeg buffer size for a given jpeg resolution.
* Negative values are error codes.
*/
- virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const = 0;
+ virtual ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const = 0;
/**
* Connect HAL notifications to a listener. Overwrites previous
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0cce2ca..d37d717 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,7 +20,7 @@
#include "CameraProviderManager.h"
-#include <android/hardware/camera/device/3.7/ICameraDevice.h>
+#include <android/hardware/camera/device/3.8/ICameraDevice.h>
#include <algorithm>
#include <chrono>
@@ -307,6 +307,50 @@
return OK;
}
+status_t CameraProviderManager::getTorchStrengthLevel(const std::string &id,
+ int32_t* torchStrength /*out*/) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->getTorchStrengthLevel(torchStrength);
+}
+
+status_t CameraProviderManager::turnOnTorchWithStrengthLevel(const std::string &id,
+ int32_t torchStrength) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->turnOnTorchWithStrengthLevel(torchStrength);
+}
+
+bool CameraProviderManager::shouldSkipTorchStrengthUpdate(const std::string &id,
+ int32_t torchStrength) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ if (deviceInfo->mTorchStrengthLevel == torchStrength) {
+ ALOGV("%s: Skipping torch strength level updates prev_level: %d, new_level: %d",
+ __FUNCTION__, deviceInfo->mTorchStrengthLevel, torchStrength);
+ return true;
+ }
+ return false;
+}
+
+int32_t CameraProviderManager::getTorchDefaultStrengthLevel(const std::string &id) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->mTorchDefaultStrengthLevel;
+}
+
bool CameraProviderManager::supportSetTorchMode(const std::string &id) const {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
for (auto& provider : mProviders) {
@@ -2385,6 +2429,22 @@
mHasFlashUnit = false;
}
+ camera_metadata_entry entry =
+ mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
+ if (entry.count == 1) {
+ mTorchDefaultStrengthLevel = entry.data.i32[0];
+ } else {
+ mTorchDefaultStrengthLevel = 0;
+ }
+
+ entry = mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL);
+ if (entry.count == 1) {
+ mTorchMaximumStrengthLevel = entry.data.i32[0];
+ } else {
+ mTorchMaximumStrengthLevel = 0;
+ }
+
+ mTorchStrengthLevel = 0;
queryPhysicalCameraIds();
// Get physical camera characteristics if applicable
@@ -2468,6 +2528,80 @@
return setTorchModeForDevice<InterfaceT>(enabled);
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::turnOnTorchWithStrengthLevel(
+ int32_t torchStrength) {
+ const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+ startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+ if (interface == nullptr) {
+ return DEAD_OBJECT;
+ }
+ sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+ auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+ if (castResult_3_8.isOk()) {
+ interface_3_8 = castResult_3_8;
+ }
+
+ if (interface_3_8 == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ Status s = interface_3_8->turnOnTorchWithStrengthLevel(torchStrength);
+ if (s == Status::OK) {
+ mTorchStrengthLevel = torchStrength;
+ }
+ return mapToStatusT(s);
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getTorchStrengthLevel(
+ int32_t *torchStrength) {
+ if (torchStrength == nullptr) {
+ return BAD_VALUE;
+ }
+ const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+ startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+ if (interface == nullptr) {
+ return DEAD_OBJECT;
+ }
+ auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+ sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+ if (castResult_3_8.isOk()) {
+ interface_3_8 = castResult_3_8;
+ }
+
+ if (interface_3_8 == nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ Status callStatus;
+ status_t res;
+ hardware::Return<void> ret = interface_3_8->getTorchStrengthLevel([&callStatus, &torchStrength]
+ (Status status, const int32_t& torchStrengthLevel) {
+ callStatus = status;
+ if (status == Status::OK) {
+ *torchStrength = torchStrengthLevel;
+ } });
+
+ if (ret.isOk()) {
+ switch (callStatus) {
+ case Status::OK:
+ // Expected case, do nothing.
+ res = OK;
+ break;
+ case Status::METHOD_NOT_SUPPORTED:
+ res = INVALID_OPERATION;
+ break;
+ default:
+ ALOGE("%s: Get torch strength level failed: %d", __FUNCTION__, callStatus);
+ res = UNKNOWN_ERROR;
+ }
+ } else {
+ ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+ res = UNKNOWN_ERROR;
+ }
+
+ return res;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
hardware::CameraInfo *info) const {
if (info == nullptr) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index f28d128..7d13941 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -258,6 +258,17 @@
bool supportSetTorchMode(const std::string &id) const;
/**
+ * Check if torch strength update should be skipped or not.
+ */
+ bool shouldSkipTorchStrengthUpdate(const std::string &id, int32_t torchStrength) const;
+
+ /**
+ * Return the default torch strength level if the torch strength control
+ * feature is supported.
+ */
+ int32_t getTorchDefaultStrengthLevel(const std::string &id) const;
+
+ /**
* Turn on or off the flashlight on a given camera device.
* May fail if the device does not support this API, is in active use, or if the device
* doesn't exist, etc.
@@ -265,6 +276,24 @@
status_t setTorchMode(const std::string &id, bool enabled);
/**
+ * Change the brightness level of the flash unit associated with the cameraId and
+ * set it to the value in torchStrength.
+ * If the torch is OFF and torchStrength > 0, the torch will be turned ON with the
+ * specified strength level. If the torch is ON, only the brightness level will be
+ * changed.
+ *
+ * This operation will fail if the device does not have a flash unit, has a flash unit
+ * but does not support this API, torchStrength is invalid or if the device doesn't
+ * exist etc.
+ */
+ status_t turnOnTorchWithStrengthLevel(const std::string &id, int32_t torchStrength);
+
+ /**
+ * Return the torch strength level of this camera device.
+ */
+ status_t getTorchStrengthLevel(const std::string &id, int32_t* torchStrength);
+
+ /**
* Setup vendor tags for all registered providers
*/
status_t setUpVendorTags();
@@ -475,10 +504,17 @@
hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
wp<ProviderInfo> mParentProvider;
+ // Torch strength default, maximum levels if the torch strength control
+ // feature is supported.
+ int32_t mTorchStrengthLevel;
+ int32_t mTorchMaximumStrengthLevel;
+ int32_t mTorchDefaultStrengthLevel;
bool hasFlashUnit() const { return mHasFlashUnit; }
bool supportNativeZoomRatio() const { return mSupportNativeZoomRatio; }
virtual status_t setTorchMode(bool enabled) = 0;
+ virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) = 0;
+ virtual status_t getTorchStrengthLevel(int32_t *torchStrength) = 0;
virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
virtual bool isAPI1Compatible() const = 0;
virtual status_t dumpState(int fd) = 0;
@@ -551,6 +587,9 @@
typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
virtual status_t setTorchMode(bool enabled) override;
+ virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) override;
+ virtual status_t getTorchStrengthLevel(int32_t *torchStrength) override;
+
virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
virtual bool isAPI1Compatible() const override;
virtual status_t dumpState(int fd) override;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 3742a17..2f571a6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -602,15 +602,16 @@
return usage;
}
-ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+ssize_t Camera3Device::getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const {
// Get max jpeg size (area-wise) for default sensor pixel mode
camera3::Size maxDefaultJpegResolution =
- SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+ SessionConfigurationUtils::getMaxJpegResolution(info,
/*isUltraHighResolutionSensor*/false);
// Get max jpeg size (area-wise) for max resolution sensor pixel mode / 0 if
// not ultra high res sensor
camera3::Size uhrMaxJpegResolution =
- SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+ SessionConfigurationUtils::getMaxJpegResolution(info,
/*isUltraHighResolution*/true);
if (maxDefaultJpegResolution.width == 0) {
ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
@@ -626,7 +627,7 @@
// Get max jpeg buffer size
ssize_t maxJpegBufferSize = 0;
- camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+ camera_metadata_ro_entry jpegBufMaxSize = info.find(ANDROID_JPEG_MAX_SIZE);
if (jpegBufMaxSize.count == 0) {
ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
mId.string());
@@ -656,9 +657,9 @@
return jpegBufferSize;
}
-ssize_t Camera3Device::getPointCloudBufferSize() const {
+ssize_t Camera3Device::getPointCloudBufferSize(const CameraMetadata &info) const {
const int FLOATS_PER_POINT=4;
- camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
+ camera_metadata_ro_entry maxPointCount = info.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
if (maxPointCount.count == 0) {
ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
__FUNCTION__, mId.string());
@@ -669,14 +670,14 @@
return maxBytesForPointCloud;
}
-ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height,
- bool maxResolution) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width,
+ int32_t height, bool maxResolution) const {
const int PER_CONFIGURATION_SIZE = 3;
const int WIDTH_OFFSET = 0;
const int HEIGHT_OFFSET = 1;
const int SIZE_OFFSET = 2;
camera_metadata_ro_entry rawOpaqueSizes =
- mDeviceInfo.find(
+ info.find(
camera3::SessionConfigurationUtils::getAppropriateModeTag(
ANDROID_SENSOR_OPAQUE_RAW_SIZE,
maxResolution));
@@ -1477,7 +1478,7 @@
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
if (dataSpace == HAL_DATASPACE_DEPTH) {
- blobBufferSize = getPointCloudBufferSize();
+ blobBufferSize = getPointCloudBufferSize(infoPhysical(physicalCameraId));
if (blobBufferSize <= 0) {
SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
return BAD_VALUE;
@@ -1485,7 +1486,7 @@
} else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
blobBufferSize = width * height;
} else {
- blobBufferSize = getJpegBufferSize(width, height);
+ blobBufferSize = getJpegBufferSize(infoPhysical(physicalCameraId), width, height);
if (blobBufferSize <= 0) {
SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
return BAD_VALUE;
@@ -1499,7 +1500,8 @@
bool maxResolution =
sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
sensorPixelModesUsed.end();
- ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height, maxResolution);
+ ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(infoPhysical(physicalCameraId), width,
+ height, maxResolution);
if (rawOpaqueBufferSize <= 0) {
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
@@ -2730,7 +2732,8 @@
// always occupy the initial entry.
if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
bufferSizes[k] = static_cast<uint32_t>(
- getJpegBufferSize(outputStream->width, outputStream->height));
+ getJpegBufferSize(infoPhysical(String8(outputStream->physical_camera_id)),
+ outputStream->width, outputStream->height));
} else if (outputStream->data_space ==
static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
bufferSizes[k] = outputStream->width * outputStream->height;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d08c41f..3f069f9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -196,9 +196,11 @@
status_t prepare(int maxCount, int streamId) override;
- ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
- ssize_t getPointCloudBufferSize() const;
- ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height, bool maxResolution) const;
+ ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const override;
+ ssize_t getPointCloudBufferSize(const CameraMetadata &info) const;
+ ssize_t getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width, int32_t height,
+ bool maxResolution) const;
// Methods called by subclasses
void notifyStatus(bool idle); // updates from StatusTracker
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 0d79b54..5e4f38a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -1265,6 +1265,14 @@
notify(states, &m);
}
+// The buffers requested through this call are not tied to any CaptureRequest in
+// particular. They may used by the hal for a particular frame's output buffer
+// or for its internal use as well. In the case that the hal does use any buffer
+// from the requested list here, for a particular frame's output buffer, the
+// buffer will be returned with the processCaptureResult call corresponding to
+// the frame. The other buffers will be returned through returnStreamBuffers.
+// The buffers returned via returnStreamBuffers will not have a valid
+// timestamp(0) and will be dropped by the bufferqueue.
void requestStreamBuffers(RequestBufferStates& states,
const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
index 8e619e1..cca3f2e 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
@@ -70,6 +70,11 @@
return binder::Status::ok();
}
+::android::binder::Status H2BCameraServiceListener::onTorchStrengthLevelChanged(
+ const ::android::String16&, int32_t) {
+ return binder::Status::ok();
+}
+
} // implementation
} // V2_0
} // common
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index 7148035..7ef413f 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -54,6 +54,8 @@
virtual ::android::binder::Status onTorchStatusChanged(
int32_t status, const ::android::String16& cameraId) override;
+ virtual ::android::binder::Status onTorchStrengthLevelChanged(
+ const ::android::String16& cameraId, int32_t newStrengthLevel) override;
virtual binder::Status onCameraAccessPrioritiesChanged() {
// TODO: no implementation yet.
return binder::Status::ok();
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index e46bf74..97d7bf4 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -466,6 +466,12 @@
// No op
return binder::Status::ok();
}
+
+ virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+ int32_t /*torchStrength*/) {
+ // No op
+ return binder::Status::ok();
+ }
};
class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 8d170f1..8699543 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -120,11 +120,12 @@
proxyBinder->pingForUserUpdate();
}
-int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing) {
+int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing,
+ int userId) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
if (proxyBinder == nullptr) return true;
int ret = 0;
- auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, &ret);
+ auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, userId, &ret);
if (!status.isOk()) {
ALOGE("%s: Failed during top activity orientation query: %s", __FUNCTION__,
status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index a51e568..f701e94 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -92,7 +92,7 @@
static void pingCameraServiceProxy();
// Return the current top activity rotate and crop override.
- static int getRotateAndCropOverride(String16 packageName, int lensFacing);
+ static int getRotateAndCropOverride(String16 packageName, int lensFacing, int userId);
};
} // android
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index c53b6f3..a7b045e 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -80,16 +80,20 @@
}
int64_t created_millis = -1;
+ // not currently sent from client.
if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
metrics_proto.set_created_millis(created_millis);
}
int64_t duration_millis = -1;
- if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+ double durationMs = 0.;
+ if (item->getDouble("android.media.audiorecord.durationMs", &durationMs)) {
+ duration_millis = (int64_t)durationMs;
metrics_proto.set_duration_millis(duration_millis);
}
int32_t count = -1;
+ // not currently sent from client. (see start count instead).
if (item->getInt32("android.media.audiorecord.n", &count)) {
metrics_proto.set_count(count);
}
@@ -129,7 +133,7 @@
}
int64_t start_count = -1;
- if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+ if (item->getInt64("android.media.audiorecord.startCount", &start_count)) {
metrics_proto.set_start_count(start_count);
}
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 707effd..67514e9 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -56,52 +56,47 @@
// flesh out the protobuf we'll hand off with our data
//
- // static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+ // Do not change this without changing AudioTrack.cpp collection.
+
// optional string streamType;
std::string stream_type;
if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
metrics_proto.set_stream_type(stream_type);
}
- // static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
// optional string contentType;
std::string content_type;
if (item->getString("android.media.audiotrack.type", &content_type)) {
metrics_proto.set_content_type(content_type);
}
- // static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
// optional string trackUsage;
std::string track_usage;
if (item->getString("android.media.audiotrack.usage", &track_usage)) {
metrics_proto.set_track_usage(track_usage);
}
- // static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
- // optional int32 samplerate;
+ // optional int32 sampleRate;
int32_t sample_rate = -1;
- if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+ if (item->getInt32("android.media.audiotrack.sampleRate", &sample_rate)) {
metrics_proto.set_sample_rate(sample_rate);
}
- // static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
// optional int64 channelMask;
int64_t channel_mask = -1;
- if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+ if (item->getInt64("android.media.audiotrack.channelMask", &channel_mask)) {
metrics_proto.set_channel_mask(channel_mask);
}
- // NB: These are not yet exposed as public Java API constants.
- // static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
- // optional int32 underrunframes;
+ // optional int32 underrunFrames;
int32_t underrun_frames = -1;
- if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+ if (item->getInt32("android.media.audiotrack.underrunFrames", &underrun_frames)) {
metrics_proto.set_underrun_frames(underrun_frames);
}
- // static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
- // optional int32 startupglitch;
+ // optional int32 glitch.startup;
int32_t startup_glitch = -1;
+ // Not currently sent from client.
if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
metrics_proto.set_startup_glitch(startup_glitch);
}
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index 7b76093..b738b57 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -1036,6 +1036,8 @@
media.streamId = static_cast<int32_t>(mediaEvent.streamId);
media.isPtsPresent = mediaEvent.isPtsPresent;
media.pts = static_cast<int64_t>(mediaEvent.pts);
+ media.isDtsPresent = false;
+ media.dts = static_cast<int64_t>(-1);
media.dataLength = static_cast<int64_t>(mediaEvent.dataLength);
media.offset = static_cast<int64_t>(mediaEvent.offset);
media.isSecureMemory = mediaEvent.isSecureMemory;
@@ -1078,7 +1080,7 @@
section.tableId = static_cast<int32_t>(sectionEvent.tableId);
section.version = static_cast<int32_t>(sectionEvent.version);
section.sectionNum = static_cast<int32_t>(sectionEvent.sectionNum);
- section.dataLength = static_cast<int32_t>(sectionEvent.dataLength);
+ section.dataLength = static_cast<int64_t>(sectionEvent.dataLength);
DemuxFilterEvent filterEvent;
filterEvent.set<DemuxFilterEvent::section>(move(section));
@@ -1186,6 +1188,7 @@
DemuxFilterDownloadEvent download;
download.itemId = static_cast<int32_t>(downloadEvent.itemId);
+ download.downloadId = -1;
download.itemFragmentIndex = static_cast<int32_t>(downloadEvent.itemFragmentIndex);
download.mpuSequenceNumber = static_cast<int32_t>(downloadEvent.mpuSequenceNumber);
download.lastItemFragmentIndex = static_cast<int32_t>(downloadEvent.lastItemFragmentIndex);