Merge "C2SoftGav1Dec: Update supported levels" into sc-dev
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1609c7b..541c21e 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -9224,10 +9224,10 @@
* respective color channel provided in
* ACAMERA_SENSOR_TEST_PATTERN_DATA.</p>
* <p>For example:</p>
- * <pre><code>android.control.testPatternData = [0, 0xFFFFFFFF, 0xFFFFFFFF, 0]
+ * <pre><code>ACAMERA_SENSOR_TEST_PATTERN_DATA = [0, 0xFFFFFFFF, 0xFFFFFFFF, 0]
* </code></pre>
* <p>All green pixels are 100% green. All red/blue pixels are black.</p>
- * <pre><code>android.control.testPatternData = [0xFFFFFFFF, 0, 0xFFFFFFFF, 0]
+ * <pre><code>ACAMERA_SENSOR_TEST_PATTERN_DATA = [0xFFFFFFFF, 0, 0xFFFFFFFF, 0]
* </code></pre>
* <p>All red pixels are 100% red. Only the odd green pixels
* are 100% green. All blue pixels are 100% black.</p>
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 9d9ed70..7caa457 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -262,6 +262,8 @@
kParamIndexTunneledMode, // struct
kParamIndexTunnelHandle, // int32[]
kParamIndexTunnelSystemTime, // int64
+ kParamIndexTunnelHoldRender, // bool
+ kParamIndexTunnelStartRender, // bool
// dmabuf allocator
kParamIndexStoreDmaBufUsage, // store, struct
@@ -2366,6 +2368,31 @@
C2PortTunnelSystemTime;
constexpr char C2_PARAMKEY_OUTPUT_RENDER_TIME[] = "output.render-time";
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * When a video frame is pushed to the decoder with this parameter set to true,
+ * the decoder must decode the frame, signal partial completion, and hold on the
+ * frame until C2StreamTunnelStartRender is set to true (which resets this
+ * flag). Flush will also result in the frames being returned back to the
+ * client (but not rendered).
+ */
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelHoldRender>
+ C2StreamTunnelHoldRender;
+constexpr char C2_PARAMKEY_TUNNEL_HOLD_RENDER[] = "output.tunnel-hold-render";
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * Upon receiving this flag, the decoder shall set C2StreamTunnelHoldRender to
+ * false, which shall cause any frames held for rendering to be immediately
+ * displayed, regardless of their timestamps.
+*/
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelStartRender>
+ C2StreamTunnelStartRender;
+constexpr char C2_PARAMKEY_TUNNEL_START_RENDER[] = "output.tunnel-start-render";
+
C2ENUM(C2PlatformConfig::encoding_quality_level_t, uint32_t,
NONE,
S_HANDHELD,
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index ce15a30..16398a4 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -673,6 +673,10 @@
mCodec->mCallback->onOutputBuffersChanged();
}
+ void onFirstTunnelFrameReady() override {
+ mCodec->mCallback->onFirstTunnelFrameReady();
+ }
+
private:
CCodec *mCodec;
};
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3c3b41d..f88408e 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -209,6 +209,7 @@
int32_t flags = 0;
int32_t tmp = 0;
bool eos = false;
+ bool tunnelFirstFrame = false;
if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
eos = true;
mInputMetEos = true;
@@ -217,6 +218,9 @@
if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
flags |= C2FrameData::FLAG_CODEC_CONFIG;
}
+ if (buffer->meta()->findInt32("tunnel-first-frame", &tmp) && tmp) {
+ tunnelFirstFrame = true;
+ }
ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
std::list<std::unique_ptr<C2Work>> items;
std::unique_ptr<C2Work> work(new C2Work);
@@ -288,6 +292,13 @@
// TODO: fill info's
work->input.configUpdate = std::move(mParamsToBeSet);
+ if (tunnelFirstFrame) {
+ C2StreamTunnelHoldRender::input tunnelHoldRender{
+ 0u /* stream */,
+ C2_TRUE /* value */
+ };
+ work->input.configUpdate.push_back(C2Param::Copy(tunnelHoldRender));
+ }
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
@@ -1724,6 +1735,15 @@
}
break;
}
+ case C2StreamTunnelHoldRender::CORE_INDEX: {
+ C2StreamTunnelHoldRender::output firstTunnelFrameHoldRender;
+ if (!(worklet->output.flags & C2FrameData::FLAG_INCOMPLETE)) break;
+ if (!firstTunnelFrameHoldRender.updateFrom(*param)) break;
+ if (firstTunnelFrameHoldRender.value != C2_TRUE) break;
+ ALOGV("[%s] onWorkDone: first tunnel frame ready", mName);
+ mCCodecCallback->onFirstTunnelFrameReady();
+ break;
+ }
default:
ALOGV("[%s] onWorkDone: unrecognized config update (%08X)",
mName, param->index());
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 45da003..5a2aca2 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -45,6 +45,7 @@
virtual void onError(status_t err, enum ActionCode actionCode) = 0;
virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
virtual void onOutputBuffersChanged() = 0;
+ virtual void onFirstTunnelFrameReady() = 0;
};
/**
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 27e87e6..2df0ba2 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -938,6 +938,14 @@
return value == 0 ? C2_FALSE : C2_TRUE;
}));
+ add(ConfigMapper("android._trigger-tunnel-peek", C2_PARAMKEY_TUNNEL_START_RENDER, "value")
+ .limitTo(D::PARAM & D::VIDEO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value = 0;
+ (void)v.get(&value);
+ return value == 0 ? C2_FALSE : C2_TRUE;
+ }));
+
/* still to do
constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4fd3a56..443e26c 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -1700,17 +1700,17 @@
return ERROR_MALFORMED;
}
- size_t header_start = 0;
- size_t header_lenth = 0;
+ long header_start = 0;
+ long header_length = 0;
for (header_start = 0; header_start < frame.len - 4; header_start++) {
if (ntohl(0x000001b3) == *(uint32_t*)((uint8_t*)tmpData.get() + header_start)) {
break;
}
}
bool isComplete_csd = false;
- for (header_lenth = 0; header_lenth < frame.len - 4 - header_start; header_lenth++) {
+ for (header_length = 0; header_length < frame.len - 4 - header_start; header_length++) {
if (ntohl(0x000001b8) == *(uint32_t*)((uint8_t*)tmpData.get()
- + header_start + header_lenth)) {
+ + header_start + header_length)) {
isComplete_csd = true;
break;
}
@@ -1720,7 +1720,7 @@
return ERROR_MALFORMED;
}
addESDSFromCodecPrivate(trackInfo->mMeta, false,
- (uint8_t*)(tmpData.get()) + header_start, header_lenth);
+ (uint8_t*)(tmpData.get()) + header_start, header_length);
return OK;
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 5bbabdf..248a39c 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -504,7 +504,14 @@
}
mCurrentTimeUs = seekTimeUs;
- mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
+ int64_t seekTimeUsTimesBitrate;
+ if (__builtin_mul_overflow(seekTimeUs, bitrate, &seekTimeUsTimesBitrate)) {
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+ if (__builtin_add_overflow(
+ mFirstFramePos, seekTimeUsTimesBitrate / 8000000, &mCurrentPos)) {
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
seekCBR = true;
} else {
mCurrentTimeUs = actualSeekTimeUs;
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 0d60120..acfac24 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -25,8 +25,7 @@
// TODO These defines should be moved to a central place in audio.
#define SAMPLES_PER_FRAME_MIN 1
-// TODO Remove 8 channel limitation.
-#define SAMPLES_PER_FRAME_MAX FCC_8
+#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
#define SAMPLE_RATE_HZ_MIN 8000
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 2135c54..e015592 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -43,8 +43,7 @@
// on the edge of being ridiculous.
// TODO These defines should be moved to a central place in audio.
#define SAMPLES_PER_FRAME_MIN 1
-// TODO Remove 8 channel limitation.
-#define SAMPLES_PER_FRAME_MAX FCC_8
+#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
#define SAMPLE_RATE_HZ_MIN 8000
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 4789ad2..88e752b 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -71,6 +71,25 @@
Mutex gSoundTriggerCaptureStateListenerLock;
sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
+// Binder for the AudioFlinger service that's passed to this client process from the system server.
+// This allows specific isolated processes to access the audio system. Currently used only for the
+// HotwordDetectionService.
+sp<IBinder> gAudioFlingerBinder = nullptr;
+
+void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
+ if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
+ ALOGE("setAudioFlingerBinder: received a binder of type %s",
+ String8(audioFlinger->getInterfaceDescriptor()).string());
+ return;
+ }
+ Mutex::Autolock _l(gLock);
+ if (gAudioFlinger != nullptr) {
+ ALOGW("setAudioFlingerBinder: ignoring; AudioFlinger connection already established.");
+ return;
+ }
+ gAudioFlingerBinder = audioFlinger;
+}
+
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger() {
sp<IAudioFlinger> af;
@@ -79,15 +98,19 @@
{
Mutex::Autolock _l(gLock);
if (gAudioFlinger == 0) {
- sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
- do {
- binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
- if (binder != 0)
- break;
- ALOGW("AudioFlinger not published, waiting...");
- usleep(500000); // 0.5 s
- } while (true);
+ if (gAudioFlingerBinder != nullptr) {
+ binder = gAudioFlingerBinder;
+ } else {
+ sp<IServiceManager> sm = defaultServiceManager();
+ do {
+ binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
+ if (binder != 0)
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while (true);
+ }
if (gAudioFlingerClient == NULL) {
gAudioFlingerClient = new AudioFlingerClient();
} else {
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 5e7def1..a9109c8 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -148,6 +148,11 @@
static void setRecordConfigCallback(record_config_callback);
static void setRoutingCallback(routing_callback cb);
+ // Sets the binder to use for accessing the AudioFlinger service. This enables the system server
+ // to grant specific isolated processes access to the audio system. Currently used only for the
+ // HotwordDetectionService.
+ static void setAudioFlingerBinder(const sp<IBinder>& audioFlinger);
+
// helper function to obtain AudioFlinger service handle
static const sp<IAudioFlinger> get_audio_flinger();
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
index a54e22f..f30eb54 100644
--- a/media/libaudioprocessing/AudioMixerBase.cpp
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "AudioMixer"
//#define LOG_NDEBUG 0
+#include <array>
#include <sstream>
#include <string.h>
@@ -1295,8 +1296,29 @@
// Needs to derive a compile time constant (constexpr). Could be targeted to go
// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
- (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
+constexpr int MIXTYPE_MONOVOL(int mixtype, int channels) {
+ if (channels <= FCC_2) {
+ return mixtype;
+ } else if (mixtype == MIXTYPE_MULTI) {
+ return MIXTYPE_MULTI_MONOVOL;
+ } else if (mixtype == MIXTYPE_MULTI_SAVEONLY) {
+ return MIXTYPE_MULTI_SAVEONLY_MONOVOL;
+ } else {
+ return mixtype;
+ }
+}
+
+// Helper to make a functional array from volumeRampMulti.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+ std::size_t ... Is>
+static constexpr auto makeVRMArray(std::index_sequence<Is...>)
+{
+ using F = void(*)(TO*, size_t, const TI*, TA*, TV*, const TV*, TAV*, TAV);
+ return std::array<F, sizeof...(Is)>{
+ { &volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ...}
+ };
+}
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
@@ -1308,40 +1330,26 @@
static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
{
- switch (channels) {
- case 1:
- volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 2:
- volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 3:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 4:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 5:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 6:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 7:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 8:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
+ static constexpr auto volumeRampMultiArray =
+ makeVRMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+ if (channels > 0 && channels <= volumeRampMultiArray.size()) {
+ volumeRampMultiArray[channels - 1](out, frameCount, in, aux, vol, volinc, vola, volainc);
+ } else {
+ ALOGE("%s: invalid channel count:%d", __func__, channels);
}
}
+// Helper to make a functional array from volumeMulti.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+ std::size_t ... Is>
+static constexpr auto makeVMArray(std::index_sequence<Is...>)
+{
+ using F = void(*)(TO*, size_t, const TI*, TA*, const TV*, TAV);
+ return std::array<F, sizeof...(Is)>{
+ { &volumeMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ... }
+ };
+}
+
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
@@ -1352,31 +1360,12 @@
static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
const TI* in, TA* aux, const TV *vol, TAV vola)
{
- switch (channels) {
- case 1:
- volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
- break;
- case 2:
- volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
- break;
- case 3:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
- break;
- case 4:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
- break;
- case 5:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
- break;
- case 6:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
- break;
- case 7:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
- break;
- case 8:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
- break;
+ static constexpr auto volumeMultiArray =
+ makeVMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+ if (channels > 0 && channels <= volumeMultiArray.size()) {
+ volumeMultiArray[channels - 1](out, frameCount, in, aux, vol, vola);
+ } else {
+ ALOGE("%s: invalid channel count:%d", __func__, channels);
}
}
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 8d374c9..cd47dc6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_MIXER_OPS_H
#define ANDROID_AUDIO_MIXER_OPS_H
+#include <system/audio.h>
+
namespace android {
// Hack to make static_assert work in a constexpr
@@ -231,7 +233,7 @@
typename TO, typename TI, typename TV,
typename F>
void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
- static_assert(NCHAN > 0 && NCHAN <= 8);
+ static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
|| MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
|| MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -291,6 +293,16 @@
// NCHAN == 8
proc(*out++, f(inp(), vol[0])); // side left
proc(*out++, f(inp(), vol[1])); // side right
+ if constexpr (NCHAN > FCC_8) {
+ // Mutes to zero extended surround channels.
+ // 7.1.4 has the correct behavior.
+ // 22.2 has the behavior that FLC and FRC will be mixed instead
+ // of SL and SR and LFE will be center, not left.
+ for (int i = 8; i < NCHAN; ++i) {
+ // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
+ proc(*out++, f(inp(), 0.f));
+ }
+ }
}
/*
diff --git a/media/libaudioprocessing/AudioResampler.cpp b/media/libaudioprocessing/AudioResampler.cpp
index c761b38..51673d7 100644
--- a/media/libaudioprocessing/AudioResampler.cpp
+++ b/media/libaudioprocessing/AudioResampler.cpp
@@ -268,7 +268,7 @@
mPhaseFraction(0),
mQuality(quality) {
- const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
+ const int maxChannels = quality < DYN_LOW_QUALITY ? FCC_2 : FCC_LIMIT;
if (inChannelCount < 1
|| inChannelCount > maxChannels) {
LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels",
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
index 1aacfd1..2292b19 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -545,64 +545,76 @@
// Note: A stride of 2 is achieved with non-SIMD processing.
int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2;
LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more");
- LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > 8,
- "Resampler channels(%d) must be between 1 to 8", mChannelCount);
+ LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > FCC_LIMIT,
+ "Resampler channels(%d) must be between 1 to %d", mChannelCount, FCC_LIMIT);
// stride 16 (falls back to stride 2 for machines that do not support NEON)
+
+
+// For now use a #define as a compiler generated function table requires renaming.
+#pragma push_macro("AUDIORESAMPLERDYN_CASE")
+#undef AUDIORESAMPLERDYN_CASE
+#define AUDIORESAMPLERDYN_CASE(CHANNEL, LOCKED) \
+ case CHANNEL: if constexpr (CHANNEL <= FCC_LIMIT) {\
+ mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<CHANNEL, LOCKED, 16>; \
+ } break
+
if (locked) {
switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>;
- break;
+ AUDIORESAMPLERDYN_CASE(1, true);
+ AUDIORESAMPLERDYN_CASE(2, true);
+ AUDIORESAMPLERDYN_CASE(3, true);
+ AUDIORESAMPLERDYN_CASE(4, true);
+ AUDIORESAMPLERDYN_CASE(5, true);
+ AUDIORESAMPLERDYN_CASE(6, true);
+ AUDIORESAMPLERDYN_CASE(7, true);
+ AUDIORESAMPLERDYN_CASE(8, true);
+ AUDIORESAMPLERDYN_CASE(9, true);
+ AUDIORESAMPLERDYN_CASE(10, true);
+ AUDIORESAMPLERDYN_CASE(11, true);
+ AUDIORESAMPLERDYN_CASE(12, true);
+ AUDIORESAMPLERDYN_CASE(13, true);
+ AUDIORESAMPLERDYN_CASE(14, true);
+ AUDIORESAMPLERDYN_CASE(15, true);
+ AUDIORESAMPLERDYN_CASE(16, true);
+ AUDIORESAMPLERDYN_CASE(17, true);
+ AUDIORESAMPLERDYN_CASE(18, true);
+ AUDIORESAMPLERDYN_CASE(19, true);
+ AUDIORESAMPLERDYN_CASE(20, true);
+ AUDIORESAMPLERDYN_CASE(21, true);
+ AUDIORESAMPLERDYN_CASE(22, true);
+ AUDIORESAMPLERDYN_CASE(23, true);
+ AUDIORESAMPLERDYN_CASE(24, true);
}
} else {
switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>;
- break;
+ AUDIORESAMPLERDYN_CASE(1, false);
+ AUDIORESAMPLERDYN_CASE(2, false);
+ AUDIORESAMPLERDYN_CASE(3, false);
+ AUDIORESAMPLERDYN_CASE(4, false);
+ AUDIORESAMPLERDYN_CASE(5, false);
+ AUDIORESAMPLERDYN_CASE(6, false);
+ AUDIORESAMPLERDYN_CASE(7, false);
+ AUDIORESAMPLERDYN_CASE(8, false);
+ AUDIORESAMPLERDYN_CASE(9, false);
+ AUDIORESAMPLERDYN_CASE(10, false);
+ AUDIORESAMPLERDYN_CASE(11, false);
+ AUDIORESAMPLERDYN_CASE(12, false);
+ AUDIORESAMPLERDYN_CASE(13, false);
+ AUDIORESAMPLERDYN_CASE(14, false);
+ AUDIORESAMPLERDYN_CASE(15, false);
+ AUDIORESAMPLERDYN_CASE(16, false);
+ AUDIORESAMPLERDYN_CASE(17, false);
+ AUDIORESAMPLERDYN_CASE(18, false);
+ AUDIORESAMPLERDYN_CASE(19, false);
+ AUDIORESAMPLERDYN_CASE(20, false);
+ AUDIORESAMPLERDYN_CASE(21, false);
+ AUDIORESAMPLERDYN_CASE(22, false);
+ AUDIORESAMPLERDYN_CASE(23, false);
+ AUDIORESAMPLERDYN_CASE(24, false);
}
}
+#pragma pop_macro("AUDIORESAMPLERDYN_CASE")
+
#ifdef DEBUG_RESAMPLER
printf("channels:%d %s stride:%d %s coef:%d shift:%d\n",
mChannelCount, locked ? "locked" : "interpolated",
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
index cf84b83..3419816 100644
--- a/media/libaudioprocessing/include/media/AudioMixerBase.h
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -45,8 +45,7 @@
{
public:
// Do not change these unless underlying code changes.
- // This mixer has a hard-coded upper limit of 8 channels for output.
- static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+ static constexpr uint32_t MAX_NUM_CHANNELS = FCC_LIMIT;
static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
static const uint16_t UNITY_GAIN_INT = 0x1000;
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index f838892..1551e33 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -157,7 +157,7 @@
if (pConfig->inputCfg.format != pConfig->outputCfg.format) return -EINVAL;
const uint32_t channelCount = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
#ifdef SUPPORT_MC
- if (channelCount < 1 || channelCount > FCC_8) return -EINVAL;
+ if (channelCount < 1 || channelCount > FCC_LIMIT) return -EINVAL;
#else
if (channelCount != FCC_2) return -EINVAL;
#endif
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index a8350ea..d597a4d 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -308,6 +308,17 @@
switch (uid) {
case AID_RADIO: // telephony subsystem, RIL
return false;
+ default:
+ // Some isolated processes can access the audio system; see
+ // AudioSystem::setAudioFlingerBinder (currently only the HotwordDetectionService). Instead
+ // of also allowing access to the MediaMetrics service, it's simpler to just disable it for
+ // now.
+ // TODO(b/190151205): Either allow the HotwordDetectionService to access MediaMetrics or
+ // make this disabling specific to that process.
+ if (uid >= AID_ISOLATED_START && uid <= AID_ISOLATED_END) {
+ return false;
+ }
+ break;
}
int enabled = property_get_int32(Item::EnabledProperty, -1);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8fa7463..1986272 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -976,6 +976,10 @@
return "BufferDecoded";
case TunnelPeekState::kBufferRendered:
return "BufferRendered";
+ case TunnelPeekState::kDisabledQueued:
+ return "DisabledQueued";
+ case TunnelPeekState::kEnabledQueued:
+ return "EnabledQueued";
default:
return default_string;
}
@@ -986,25 +990,39 @@
if (!msg->findInt32("tunnel-peek", &tunnelPeek)){
return;
}
+
+ TunnelPeekState previousState = mTunnelPeekState;
if(tunnelPeek == 0){
- if (mTunnelPeekState == TunnelPeekState::kEnabledNoBuffer) {
- mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kEnabledNoBuffer),
- asString(TunnelPeekState::kDisabledNoBuffer));
- return;
+ switch (mTunnelPeekState) {
+ case TunnelPeekState::kEnabledNoBuffer:
+ mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
+ break;
+ case TunnelPeekState::kEnabledQueued:
+ mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+ break;
+ default:
+ ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ return;
}
} else {
- if (mTunnelPeekState == TunnelPeekState::kDisabledNoBuffer) {
- mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kDisabledNoBuffer),
- asString(TunnelPeekState::kEnabledNoBuffer));
- return;
+ switch (mTunnelPeekState) {
+ case TunnelPeekState::kDisabledNoBuffer:
+ mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
+ break;
+ case TunnelPeekState::kDisabledQueued:
+ mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+ break;
+ case TunnelPeekState::kBufferDecoded:
+ msg->setInt32("android._trigger-tunnel-peek", 1);
+ mTunnelPeekState = TunnelPeekState::kBufferRendered;
+ break;
+ default:
+ ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ return;
}
}
- ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ ALOGV("TunnelPeekState: %s -> %s", asString(previousState), asString(mTunnelPeekState));
}
void MediaCodec::updatePlaybackDuration(const sp<AMessage> &msg) {
@@ -3294,25 +3312,32 @@
if (mState != STARTED) {
break;
}
+ TunnelPeekState previousState = mTunnelPeekState;
switch(mTunnelPeekState) {
case TunnelPeekState::kDisabledNoBuffer:
+ case TunnelPeekState::kDisabledQueued:
mTunnelPeekState = TunnelPeekState::kBufferDecoded;
+ ALOGV("First tunnel frame ready");
ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kDisabledNoBuffer),
- asString(TunnelPeekState::kBufferDecoded));
+ asString(previousState),
+ asString(mTunnelPeekState));
break;
case TunnelPeekState::kEnabledNoBuffer:
- mTunnelPeekState = TunnelPeekState::kBufferDecoded;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kEnabledNoBuffer),
- asString(TunnelPeekState::kBufferDecoded));
+ case TunnelPeekState::kEnabledQueued:
{
sp<AMessage> parameters = new AMessage();
parameters->setInt32("android._trigger-tunnel-peek", 1);
mCodec->signalSetParameters(parameters);
}
+ mTunnelPeekState = TunnelPeekState::kBufferRendered;
+ ALOGV("First tunnel frame ready");
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
break;
default:
+ ALOGV("Ignoring first tunnel frame ready, TunnelPeekState: %s",
+ asString(mTunnelPeekState));
break;
}
@@ -4777,6 +4802,28 @@
buffer->meta()->setInt32("csd", true);
}
+ if (mTunneled) {
+ TunnelPeekState previousState = mTunnelPeekState;
+ switch(mTunnelPeekState){
+ case TunnelPeekState::kEnabledNoBuffer:
+ buffer->meta()->setInt32("tunnel-first-frame", 1);
+ mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
+ break;
+ case TunnelPeekState::kDisabledNoBuffer:
+ buffer->meta()->setInt32("tunnel-first-frame", 1);
+ mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
+ break;
+ default:
+ break;
+ }
+ }
+
status_t err = OK;
if (hasCryptoOrDescrambler() && !c2Buffer && !memory) {
AString *errorDetailMsg;
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index a15a988..a32bc26 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -183,7 +183,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
- <Limit name="size" min="2x2" max="1920x1080" />
+ <Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-16384" />
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 0e6f0b3..d372140 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -377,15 +377,23 @@
// This type is used to track the tunnel mode video peek state machine:
//
// DisabledNoBuffer -> EnabledNoBuffer when tunnel-peek = true
+ // DisabledQueued -> EnabledQueued when tunnel-peek = true
+ // DisabledNoBuffer -> DisabledQueued when first frame queued
// EnabledNoBuffer -> DisabledNoBuffer when tunnel-peek = false
+ // EnabledQueued -> DisabledQueued when tunnel-peek = false
+ // EnabledNoBuffer -> EnabledQueued when first frame queued
// DisabledNoBuffer -> BufferDecoded when kWhatFirstTunnelFrameReady
+ // DisabledQueued -> BufferDecoded when kWhatFirstTunnelFrameReady
// EnabledNoBuffer -> BufferDecoded when kWhatFirstTunnelFrameReady
+ // EnabledQueued -> BufferDecoded when kWhatFirstTunnelFrameReady
// BufferDecoded -> BufferRendered when kWhatFrameRendered
// <all states> -> EnabledNoBuffer when flush
// <all states> -> EnabledNoBuffer when stop then configure then start
enum struct TunnelPeekState {
kDisabledNoBuffer,
kEnabledNoBuffer,
+ kDisabledQueued,
+ kEnabledQueued,
kBufferDecoded,
kBufferRendered,
};
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index bc413d1..9c7b863 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -215,14 +215,17 @@
}
bool captureHotwordAllowed(const AttributionSourceState& attributionSource) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
- uid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
// CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
bool ok = recordingAllowed(attributionSource);
if (ok) {
static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
- ok = PermissionCache::checkPermission(sCaptureHotwordAllowed, pid, uid);
+ // Use PermissionChecker, which includes some logic for allowing the isolated
+ // HotwordDetectionService to hold certain permissions.
+ permission::PermissionChecker permissionChecker;
+ ok = (permissionChecker.checkPermissionForPreflight(
+ sCaptureHotwordAllowed, attributionSource, String16(),
+ AppOpsManager::OP_NONE) != permission::PermissionChecker::PERMISSION_HARD_DENIED);
}
if (!ok) ALOGV("android.permission.CAPTURE_AUDIO_HOTWORD");
return ok;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index a89088a..54a6425 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2896,8 +2896,8 @@
audio_is_linear_pcm(config->format) &&
audio_is_linear_pcm(halconfig.format) &&
(halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
- (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_8) &&
- (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
+ (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_LIMIT) &&
+ (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_LIMIT)) {
// FIXME describe the change proposed by HAL (save old values so we can log them here)
ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
inStream.clear();
@@ -3996,7 +3996,7 @@
// if the move request is not received from audio policy manager, the effect must be
// re-registered with the new strategy and output
if (dstChain == 0) {
- dstChain = effect->callback()->chain().promote();
+ dstChain = effect->getCallback()->chain().promote();
if (dstChain == 0) {
ALOGW("moveEffectChain_l() cannot get chain from effect %p", effect.get());
status = NO_INIT;
@@ -4046,7 +4046,7 @@
goto Exit;
}
- dstChain = effect->callback()->chain().promote();
+ dstChain = effect->getCallback()->chain().promote();
if (dstChain == 0) {
thread->addEffect_l(effect);
status = INVALID_OPERATION;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index d75b13b..d3492d9 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -152,12 +152,12 @@
if (fromHandle) {
if (enabled) {
if (status != NO_ERROR) {
- mCallback->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
+ getCallback()->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
} else {
- mCallback->onEffectEnable(this);
+ getCallback()->onEffectEnable(this);
}
} else {
- mCallback->onEffectDisable(this);
+ getCallback()->onEffectDisable(this);
}
}
return status;
@@ -247,8 +247,9 @@
doRegister = true;
mPolicyRegistered = mHandles.size() > 0;
if (mPolicyRegistered) {
- io = mCallback->io();
- strategy = mCallback->strategy();
+ const auto callback = getCallback();
+ io = callback->io();
+ strategy = callback->strategy();
}
}
// enable effect when registered according to enable state requested by controlling handle
@@ -349,8 +350,9 @@
// unsafe method called when the effect parent thread has been destroyed
ssize_t AudioFlinger::EffectBase::disconnectHandle(EffectHandle *handle, bool unpinIfLast)
{
+ const auto callback = getCallback();
ALOGV("disconnect() %p handle %p", this, handle);
- if (mCallback->disconnectEffectHandle(handle, unpinIfLast)) {
+ if (callback->disconnectEffectHandle(handle, unpinIfLast)) {
return mHandles.size();
}
@@ -358,7 +360,7 @@
ssize_t numHandles = removeHandle_l(handle);
if ((numHandles == 0) && (!mPinned || unpinIfLast)) {
mLock.unlock();
- mCallback->updateOrphanEffectChains(this);
+ callback->updateOrphanEffectChains(this);
mLock.lock();
}
return numHandles;
@@ -377,7 +379,7 @@
}
void AudioFlinger::EffectBase::checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) {
- mCallback->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
+ getCallback()->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
}
static String8 effectFlagsToString(uint32_t flags) {
@@ -835,7 +837,7 @@
mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
// If an insert effect is idle and input buffer is different from output buffer,
// accumulate input onto output
- if (mCallback->activeTrackCnt() != 0) {
+ if (getCallback()->activeTrackCnt() != 0) {
// similar handling with data_bypass above.
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
accumulateInputToOutput();
@@ -860,6 +862,7 @@
status_t status;
uint32_t size;
audio_channel_mask_t channelMask;
+ sp<EffectCallbackInterface> callback;
if (mEffectInterface == 0) {
status = NO_INIT;
@@ -870,7 +873,8 @@
// TODO: handle configuration of input (record) SW effects above the HAL,
// similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
// in which case input channel masks should be used here.
- channelMask = mCallback->channelMask();
+ callback = getCallback();
+ channelMask = callback->channelMask();
mConfig.inputCfg.channels = channelMask;
mConfig.outputCfg.channels = channelMask;
@@ -899,7 +903,7 @@
#endif
}
if (isHapticGenerator()) {
- audio_channel_mask_t hapticChannelMask = mCallback->hapticChannelMask();
+ audio_channel_mask_t hapticChannelMask = callback->hapticChannelMask();
mConfig.inputCfg.channels |= hapticChannelMask;
mConfig.outputCfg.channels |= hapticChannelMask;
}
@@ -912,11 +916,11 @@
mConfig.outputCfg.format = EFFECT_BUFFER_FORMAT;
// Don't use sample rate for thread if effect isn't offloadable.
- if (mCallback->isOffloadOrDirect() && !isOffloaded()) {
+ if (callback->isOffloadOrDirect() && !isOffloaded()) {
mConfig.inputCfg.samplingRate = DEFAULT_OUTPUT_SAMPLE_RATE;
ALOGV("Overriding effect input as 48kHz");
} else {
- mConfig.inputCfg.samplingRate = mCallback->sampleRate();
+ mConfig.inputCfg.samplingRate = callback->sampleRate();
}
mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
mConfig.inputCfg.bufferProvider.cookie = NULL;
@@ -942,11 +946,11 @@
}
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.inputCfg.buffer.frameCount = mCallback->frameCount();
+ mConfig.inputCfg.buffer.frameCount = callback->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
ALOGV("configure() %p chain %p buffer %p framecount %zu",
- this, mCallback->chain().promote().get(),
+ this, callback->chain().promote().get(),
mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
@@ -962,7 +966,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
- mCallback->isOutput() &&
+ callback->isOutput() &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
// Older effects may require exact STEREO position mask.
@@ -1029,7 +1033,7 @@
size = sizeof(int);
*(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
- uint32_t latency = mCallback->latency();
+ uint32_t latency = callback->latency();
*((int32_t *)p->data + 1)= latency;
mEffectInterface->command(EFFECT_CMD_SET_PARAM,
@@ -1076,7 +1080,7 @@
{
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- (void)mCallback->addEffectToHal(mEffectInterface);
+ (void)getCallback()->addEffectToHal(mEffectInterface);
}
}
@@ -1089,7 +1093,7 @@
status = start_l();
}
if (status == NO_ERROR) {
- mCallback->resetVolume();
+ getCallback()->resetVolume();
}
return status;
}
@@ -1139,7 +1143,7 @@
// We have the EffectChain and EffectModule lock, permit a reentrant call to setVolume:
// resetVolume_l --> setVolume_l --> EffectModule::setVolume
mSetVolumeReentrantTid = gettid();
- mCallback->resetVolume();
+ getCallback()->resetVolume();
mSetVolumeReentrantTid = INVALID_PID;
}
@@ -1172,7 +1176,7 @@
{
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- mCallback->removeEffectFromHal(mEffectInterface);
+ getCallback()->removeEffectFromHal(mEffectInterface);
}
return NO_ERROR;
}
@@ -1288,7 +1292,7 @@
bool AudioFlinger::EffectModule::isOffloadedOrDirect() const
{
- return mCallback->isOffloadOrDirect();
+ return getCallback()->isOffloadOrDirect();
}
bool AudioFlinger::EffectModule::isVolumeControlEnabled() const
@@ -1332,7 +1336,7 @@
|| size > mInConversionBuffer->getSize())) {
mInConversionBuffer.clear();
ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
- (void)mCallback->allocateHalBuffer(size, &mInConversionBuffer);
+ (void)getCallback()->allocateHalBuffer(size, &mInConversionBuffer);
}
if (mInConversionBuffer != nullptr) {
mInConversionBuffer->setFrameCount(inFrameCount);
@@ -1376,7 +1380,7 @@
|| size > mOutConversionBuffer->getSize())) {
mOutConversionBuffer.clear();
ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
- (void)mCallback->allocateHalBuffer(size, &mOutConversionBuffer);
+ (void)getCallback()->allocateHalBuffer(size, &mOutConversionBuffer);
}
if (mOutConversionBuffer != nullptr) {
mOutConversionBuffer->setFrameCount(outFrameCount);
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 9da95bc..661881e 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -138,8 +138,9 @@
int32_t __unused,
std::vector<uint8_t>* __unused) { return NO_ERROR; };
+ // mCallback is atomic so this can be lock-free.
void setCallback(const sp<EffectCallbackInterface>& callback) { mCallback = callback; }
- sp<EffectCallbackInterface>& callback() { return mCallback; }
+ sp<EffectCallbackInterface> getCallback() const { return mCallback.load(); }
status_t addHandle(EffectHandle *handle);
ssize_t disconnectHandle(EffectHandle *handle, bool unpinIfLast);
@@ -170,7 +171,7 @@
DISALLOW_COPY_AND_ASSIGN(EffectBase);
mutable Mutex mLock; // mutex for process, commands and handles list protection
- sp<EffectCallbackInterface> mCallback; // parent effect chain
+ mediautils::atomic_sp<EffectCallbackInterface> mCallback; // parent effect chain
const int mId; // this instance unique ID
const audio_session_t mSessionId; // audio session ID
const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index d6d6e25..2963202 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -107,7 +107,7 @@
mSampleRate = Format_sampleRate(mFormat);
#if !LOG_NDEBUG
unsigned channelCount = Format_channelCount(mFormat);
- ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+ ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_LIMIT);
#endif
}
dumpState->mSampleRate = mSampleRate;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 13e2ced..88d4eaf 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -48,6 +48,15 @@
/*static*/ const FastMixerState FastMixer::sInitial;
+static audio_channel_mask_t getChannelMaskFromCount(size_t count) {
+ const audio_channel_mask_t mask = audio_channel_out_mask_from_count(count);
+ if (mask == AUDIO_CHANNEL_INVALID) {
+ // some counts have no positional masks. TODO: Update this to return index count?
+ return audio_channel_mask_for_index_assignment_from_count(count);
+ }
+ return mask;
+}
+
FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
: FastThread("cycle_ms", "load_us"),
// mFastTrackNames
@@ -79,7 +88,7 @@
mDummyDumpState = &mDummyFastMixerDumpState;
// TODO: Add channel mask to NBAIO_Format.
// We assume that the channel mask must be a valid positional channel mask.
- mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
unsigned i;
for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -238,7 +247,7 @@
LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
if (mSinkChannelMask == AUDIO_CHANNEL_NONE) {
- mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
}
mAudioChannelCount = mSinkChannelCount - audio_channel_count_from_out_mask(
mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 47b4b18..f62082e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1638,7 +1638,7 @@
detachAuxEffect_l(effect->id());
}
- sp<EffectChain> chain = effect->callback()->chain().promote();
+ sp<EffectChain> chain = effect->getCallback()->chain().promote();
if (chain != 0) {
// remove effect chain if removing last effect
if (chain->removeEffect_l(effect, release) == 0) {
@@ -8573,7 +8573,7 @@
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
audio_channel_mask_t mask = (audio_channel_mask_t) value;
if (!audio_is_input_channel(mask) ||
- audio_channel_count_from_in_mask(mask) > FCC_8) {
+ audio_channel_count_from_in_mask(mask) > FCC_LIMIT) {
status = BAD_VALUE;
} else {
channelMask = mask;
@@ -8610,7 +8610,7 @@
if (mInput->stream->getAudioProperties(&config) == OK &&
audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
- audio_channel_count_from_in_mask(config.channel_mask) <= FCC_8) {
+ audio_channel_count_from_in_mask(config.channel_mask) <= FCC_LIMIT) {
status = NO_ERROR;
}
}
@@ -8672,10 +8672,10 @@
mFormat = mHALFormat;
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
if (audio_is_linear_pcm(mFormat)) {
- LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
- mChannelCount, FCC_8);
+ LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
+ mChannelCount, FCC_LIMIT);
} else {
- // Can have more that FCC_8 channels in encoded streams.
+ // Can have more than FCC_LIMIT channels in encoded streams.
ALOGI("HAL format %#x is not linear pcm", mFormat);
}
result = mInput->stream->getFrameSize(&mFrameSize);
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 552919d..577f641 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -42,7 +42,7 @@
// For mixed output and inputs, the policy will use max mixer channel count.
// Do not limit channel count otherwise
-#define MAX_MIXER_CHANNEL_COUNT FCC_8
+#define MAX_MIXER_CHANNEL_COUNT FCC_LIMIT
/**
* Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 1c86051..81e803f 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -139,11 +139,24 @@
Collection &collection);
};
-using xmlCharUnique = std::unique_ptr<xmlChar, decltype(xmlFree)>;
+template <class T>
+constexpr void (*xmlDeleter)(T* t);
+template <>
+constexpr auto xmlDeleter<xmlDoc> = xmlFreeDoc;
+template <>
+constexpr auto xmlDeleter<xmlChar> = [](xmlChar *s) { xmlFree(s); };
+
+/** @return a unique_ptr with the correct deleter for the libxml2 object. */
+template <class T>
+constexpr auto make_xmlUnique(T *t) {
+ // Wrap deleter in lambda to enable empty base optimization
+ auto deleter = [](T *t) { xmlDeleter<T>(t); };
+ return std::unique_ptr<T, decltype(deleter)>{t, deleter};
+}
std::string getXmlAttribute(const xmlNode *cur, const char *attribute)
{
- xmlCharUnique charPtr(xmlGetProp(cur, reinterpret_cast<const xmlChar *>(attribute)), xmlFree);
+ auto charPtr = make_xmlUnique(xmlGetProp(cur, reinterpret_cast<const xmlChar *>(attribute)));
if (charPtr == NULL) {
return "";
}
@@ -441,7 +454,7 @@
for (const xmlNode *child = referenceName.empty() ?
root->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
- xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ auto pointXml = make_xmlUnique(xmlNodeListGetString(doc, child->xmlChildrenNode, 1));
if (pointXml == NULL) {
return BAD_VALUE;
}
@@ -471,14 +484,14 @@
for (const xmlNode *child = root->xmlChildrenNode; child != NULL; child = child->next) {
if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::name)) {
- xmlCharUnique nameXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ auto nameXml = make_xmlUnique(xmlNodeListGetString(doc, child->xmlChildrenNode, 1));
if (nameXml == nullptr) {
return BAD_VALUE;
}
name = reinterpret_cast<const char*>(nameXml.get());
}
if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMin)) {
- xmlCharUnique indexMinXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ auto indexMinXml = make_xmlUnique(xmlNodeListGetString(doc, child->xmlChildrenNode, 1));
if (indexMinXml == nullptr) {
return BAD_VALUE;
}
@@ -488,7 +501,7 @@
}
}
if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMax)) {
- xmlCharUnique indexMaxXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ auto indexMaxXml = make_xmlUnique(xmlNodeListGetString(doc, child->xmlChildrenNode, 1));
if (indexMaxXml == nullptr) {
return BAD_VALUE;
}
@@ -548,7 +561,7 @@
for (const xmlNode *child = referenceName.empty() ?
cur->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
if (!xmlStrcmp(child->name, (const xmlChar *)VolumeTraits::volumePointTag)) {
- xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ auto pointXml = make_xmlUnique(xmlNodeListGetString(doc, child->xmlChildrenNode, 1));
if (pointXml == NULL) {
return BAD_VALUE;
}
@@ -640,8 +653,7 @@
ParsingResult parse(const char* path) {
XmlErrorHandler errorHandler;
- xmlDocPtr doc;
- doc = xmlParseFile(path);
+ auto doc = make_xmlUnique(xmlParseFile(path));
if (doc == NULL) {
// It is OK not to find an engine config file at the default location
// as the caller will default to hardcoded default config
@@ -650,13 +662,12 @@
}
return {nullptr, 0};
}
- xmlNodePtr cur = xmlDocGetRootElement(doc);
+ xmlNodePtr cur = xmlDocGetRootElement(doc.get());
if (cur == NULL) {
ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
- xmlFreeDoc(doc);
return {nullptr, 0};
}
- if (xmlXIncludeProcess(doc) < 0) {
+ if (xmlXIncludeProcess(doc.get()) < 0) {
ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
return {nullptr, 0};
}
@@ -669,37 +680,35 @@
auto config = std::make_unique<Config>();
config->version = std::stof(version);
deserializeCollection<ProductStrategyTraits>(
- doc, cur, config->productStrategies, nbSkippedElements);
+ doc.get(), cur, config->productStrategies, nbSkippedElements);
deserializeCollection<CriterionTraits>(
- doc, cur, config->criteria, nbSkippedElements);
+ doc.get(), cur, config->criteria, nbSkippedElements);
deserializeCollection<CriterionTypeTraits>(
- doc, cur, config->criterionTypes, nbSkippedElements);
+ doc.get(), cur, config->criterionTypes, nbSkippedElements);
deserializeCollection<VolumeGroupTraits>(
- doc, cur, config->volumeGroups, nbSkippedElements);
+ doc.get(), cur, config->volumeGroups, nbSkippedElements);
return {std::move(config), nbSkippedElements};
}
android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
XmlErrorHandler errorHandler;
- xmlDocPtr doc;
- doc = xmlParseFile(path);
+ auto doc = make_xmlUnique(xmlParseFile(path));
if (doc == NULL) {
ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
return BAD_VALUE;
}
- xmlNodePtr cur = xmlDocGetRootElement(doc);
+ xmlNodePtr cur = xmlDocGetRootElement(doc.get());
if (cur == NULL) {
ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
- xmlFreeDoc(doc);
return BAD_VALUE;
}
- if (xmlXIncludeProcess(doc) < 0) {
+ if (xmlXIncludeProcess(doc.get()) < 0) {
ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
return BAD_VALUE;
}
size_t nbSkippedElements = 0;
- return deserializeLegacyVolumeCollection(doc, cur, volumeGroups, nbSkippedElements);
+ return deserializeLegacyVolumeCollection(doc.get(), cur, volumeGroups, nbSkippedElements);
}
android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index cd50e21..201273e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -786,7 +786,7 @@
allowCapture = true;
}
}
- setAppState_l(current->portId,
+ setAppState_l(current,
allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(currentUid)) :
APP_STATE_IDLE);
}
@@ -796,7 +796,7 @@
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!isVirtualSource(current->attributes.source)) {
- setAppState_l(current->portId, APP_STATE_IDLE);
+ setAppState_l(current, APP_STATE_IDLE);
}
}
}
@@ -830,17 +830,32 @@
return false;
}
-void AudioPolicyService::setAppState_l(audio_port_handle_t portId, app_state_t state)
+void AudioPolicyService::setAppState_l(sp<AudioRecordClient> client, app_state_t state)
{
AutoCallerClear acc;
if (mAudioPolicyManager) {
- mAudioPolicyManager->setAppState(portId, state);
+ mAudioPolicyManager->setAppState(client->portId, state);
}
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af) {
bool silenced = state == APP_STATE_IDLE;
- af->setRecordSilenced(portId, silenced);
+ if (client->silenced != silenced) {
+ if (client->active) {
+ if (silenced) {
+ finishRecording(client->attributionSource, client->attributes.source);
+ } else {
+ std::stringstream msg;
+ msg << "Audio recording un-silenced on session " << client->session;
+ if (!startRecording(client->attributionSource, String16(msg.str().c_str()),
+ client->attributes.source)) {
+ silenced = true;
+ }
+ }
+ }
+ af->setRecordSilenced(client->portId, silenced);
+ client->silenced = silenced;
+ }
}
}
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 48da40c..ac9c20f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -324,8 +324,10 @@
// Handles binder shell commands
virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
+ class AudioRecordClient;
+
// Sets whether the given UID records only silence
- virtual void setAppState_l(audio_port_handle_t portId, app_state_t state) REQUIRES(mLock);
+ virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mLock);
// Overrides the UID state as if it is idle
status_t handleSetUidState(Vector<String16>& args, int err);
@@ -826,13 +828,14 @@
AudioClient(attributes, io, attributionSource,
session, portId, deviceId), attributionSource(attributionSource),
startTimeNs(0), canCaptureOutput(canCaptureOutput),
- canCaptureHotword(canCaptureHotword) {}
+ canCaptureHotword(canCaptureHotword), silenced(false) {}
~AudioRecordClient() override = default;
const AttributionSourceState attributionSource; // attribution source of client
nsecs_t startTimeNs;
const bool canCaptureOutput;
const bool canCaptureHotword;
+ bool silenced;
};
// --- AudioPlaybackClient ---
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 83d2bc9..334ecc0 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -4051,25 +4051,13 @@
ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, cameraId.string());
return;
}
- bool supportsHAL3 = false;
- // supportsCameraApi also holds mInterfaceMutex, we can't call it in the
- // HIDL onStatusChanged wrapper call (we'll hold mStatusListenerLock and
- // mInterfaceMutex together, which can lead to deadlocks)
- binder::Status sRet =
- supportsCameraApi(String16(cameraId), hardware::ICameraService::API_VERSION_2,
- &supportsHAL3);
- if (!sRet.isOk()) {
- ALOGW("%s: Failed to determine if device supports HAL3 %s, supportsCameraApi call failed",
- __FUNCTION__, cameraId.string());
- return;
- }
// Collect the logical cameras without holding mStatusLock in updateStatus
// as that can lead to a deadlock(b/162192331).
auto logicalCameraIds = getLogicalCameras(cameraId);
// Update the status for this camera state, then send the onStatusChangedCallbacks to each
// of the listeners with both the mStatusLock and mStatusListenerLock held
- state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3,
+ state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind,
&logicalCameraIds]
(const String8& cameraId, StatusInternal status) {
@@ -4097,8 +4085,8 @@
bool isVendorListener = listener->isVendorListener();
if (shouldSkipStatusUpdates(deviceKind, isVendorListener,
listener->getListenerPid(), listener->getListenerUid()) ||
- (isVendorListener && !supportsHAL3)) {
- ALOGV("Skipping discovery callback for system-only camera/HAL1 device %s",
+ isVendorListener) {
+ ALOGV("Skipping discovery callback for system-only camera device %s",
cameraId.c_str());
continue;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d05a2e1..4b042f7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -353,9 +353,15 @@
camera_metadata_entry_t availableTestPatternModes = mDeviceInfo.find(
ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES);
for (size_t i = 0; i < availableTestPatternModes.count; i++) {
- if (availableTestPatternModes.data.i32[i] == ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR) {
+ if (availableTestPatternModes.data.i32[i] ==
+ ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR) {
mSupportCameraMute = true;
+ mSupportTestPatternSolidColor = true;
break;
+ } else if (availableTestPatternModes.data.i32[i] ==
+ ANDROID_SENSOR_TEST_PATTERN_MODE_BLACK) {
+ mSupportCameraMute = true;
+ mSupportTestPatternSolidColor = false;
}
}
@@ -4163,7 +4169,7 @@
mCurrentAfTriggerId(0),
mCurrentPreCaptureTriggerId(0),
mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
- mCameraMute(false),
+ mCameraMute(ANDROID_SENSOR_TEST_PATTERN_MODE_OFF),
mCameraMuteChanged(false),
mRepeatingLastFrameNumber(
hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
@@ -5265,11 +5271,11 @@
return OK;
}
-status_t Camera3Device::RequestThread::setCameraMute(bool enabled) {
+status_t Camera3Device::RequestThread::setCameraMute(int32_t muteMode) {
ATRACE_CALL();
Mutex::Autolock l(mTriggerMutex);
- if (enabled != mCameraMute) {
- mCameraMute = enabled;
+ if (muteMode != mCameraMute) {
+ mCameraMute = muteMode;
mCameraMuteChanged = true;
}
return OK;
@@ -5844,8 +5850,8 @@
request->mOriginalTestPatternData[3]
};
- if (mCameraMute) {
- testPatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR;
+ if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
+ testPatternMode = mCameraMute;
testPatternData[0] = 0;
testPatternData[1] = 0;
testPatternData[2] = 0;
@@ -6535,7 +6541,11 @@
if (mRequestThread == nullptr || !mSupportCameraMute) {
return INVALID_OPERATION;
}
- return mRequestThread->setCameraMute(enabled);
+ int32_t muteMode =
+ !enabled ? ANDROID_SENSOR_TEST_PATTERN_MODE_OFF :
+ mSupportTestPatternSolidColor ? ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR :
+ ANDROID_SENSOR_TEST_PATTERN_MODE_BLACK;
+ return mRequestThread->setCameraMute(muteMode);
}
status_t Camera3Device::injectCamera(const String8& injectedCamId,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index f962c78..b27f1a5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -918,7 +918,7 @@
status_t setRotateAndCropAutoBehavior(
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
- status_t setCameraMute(bool enabled);
+ status_t setCameraMute(int32_t muteMode);
status_t setHalInterface(sp<HalInterface> newHalInterface);
@@ -1069,7 +1069,7 @@
uint32_t mCurrentAfTriggerId;
uint32_t mCurrentPreCaptureTriggerId;
camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
- bool mCameraMute;
+ int32_t mCameraMute; // 0 = no mute, otherwise the TEST_PATTERN_MODE to use
bool mCameraMuteChanged;
int64_t mRepeatingLastFrameNumber;
@@ -1342,6 +1342,8 @@
// Whether the HAL supports camera muting via test pattern
bool mSupportCameraMute = false;
+ // Whether the HAL supports SOLID_COLOR or BLACK if mSupportCameraMute is true
+ bool mSupportTestPatternSolidColor = false;
// Injection camera related methods.
class Camera3DeviceInjectionMethods : public virtual RefBase {
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 77e1c40..5b4129a 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -445,90 +445,118 @@
TunerFrontendCapabilities caps;
switch (halInfo.type) {
case FrontendType::ANALOG: {
- TunerFrontendAnalogCapabilities analogCaps{
- .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
- .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
- };
- caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAnalogCapabilities analogCaps{
+ .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
+ .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
+ };
+ caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+ }
break;
}
case FrontendType::ATSC: {
- TunerFrontendAtscCapabilities atscCaps{
- .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
- };
- caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAtscCapabilities atscCaps{
+ .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
+ };
+ caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+ }
break;
}
case FrontendType::ATSC3: {
- TunerFrontendAtsc3Capabilities atsc3Caps{
- .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
- .timeInterleaveModeCap =
- (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
- .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
- .demodOutputFormatCap = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
- .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
- };
- caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAtsc3Capabilities atsc3Caps{
+ .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
+ .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
+ .timeInterleaveModeCap =
+ (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
+ .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
+ .demodOutputFormatCap
+ = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
+ .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
+ };
+ caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+ }
break;
}
case FrontendType::DVBC: {
- TunerFrontendCableCapabilities cableCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
- .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
- .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
- };
- caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendCableCapabilities cableCaps{
+ .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
+ .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
+ .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
+ };
+ caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+ }
break;
}
case FrontendType::DVBS: {
- TunerFrontendDvbsCapabilities dvbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
- .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
- .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
- };
- caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendDvbsCapabilities dvbsCaps{
+ .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
+ .codeRateCap = (int64_t)halInfo.frontendCaps.dvbsCaps().innerfecCap,
+ .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
+ };
+ caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+ }
break;
}
case FrontendType::DVBT: {
- TunerFrontendDvbtCapabilities dvbtCaps{
- .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
- .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
- .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
- .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
- .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
- .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
- };
- caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendDvbtCapabilities dvbtCaps{
+ .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
+ .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
+ .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
+ .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
+ .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
+ .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
+ .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+ };
+ caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+ }
break;
}
case FrontendType::ISDBS: {
- TunerFrontendIsdbsCapabilities isdbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbsCapabilities isdbsCaps{
+ .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+ }
break;
}
case FrontendType::ISDBS3: {
- TunerFrontendIsdbs3Capabilities isdbs3Caps{
- .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbs3Capabilities isdbs3Caps{
+ .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+ }
break;
}
case FrontendType::ISDBT: {
- TunerFrontendIsdbtCapabilities isdbtCaps{
- .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
- };
- caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbtCapabilities isdbtCaps{
+ .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
+ .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
+ .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
+ .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+ }
break;
}
default: