Merge "audio policy: fix voice volume update when BT SCO is active" into main
diff --git a/media/codec2/components/cmds/codec2.cpp b/media/codec2/components/cmds/codec2.cpp
index a17b04e..ca65aa2 100644
--- a/media/codec2/components/cmds/codec2.cpp
+++ b/media/codec2/components/cmds/codec2.cpp
@@ -46,7 +46,6 @@
#include <media/stagefright/Utils.h>
#include <gui/GLConsumer.h>
-#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
@@ -91,7 +90,7 @@
std::shared_ptr<Listener> mListener;
std::shared_ptr<C2Component> mComponent;
- sp<IProducerListener> mProducerListener;
+ sp<SurfaceListener> mSurfaceListener;
std::atomic_int mLinearPoolId;
@@ -138,7 +137,7 @@
SimplePlayer::SimplePlayer()
: mListener(new Listener(this)),
- mProducerListener(new StubProducerListener),
+ mSurfaceListener(new StubSurfaceListener),
mLinearPoolId(C2BlockPool::PLATFORM_START),
mComposerClient(new SurfaceComposerClient) {
CHECK_EQ(mComposerClient->initCheck(), (status_t)OK);
@@ -164,7 +163,7 @@
mSurface = mControl->getSurface();
CHECK(mSurface != nullptr);
- mSurface->connect(NATIVE_WINDOW_API_CPU, mProducerListener);
+ mSurface->connect(NATIVE_WINDOW_API_CPU, mSurfaceListener);
}
SimplePlayer::~SimplePlayer() {
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index ae66a58..0aae23c 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -2627,7 +2627,7 @@
mChannel->setInfoBuffer(std::make_shared<C2InfoBuffer>(info));
}
} else {
- ALOGE("Ignoring param key %s as buffer size %d is less than expected "
+ ALOGE("Ignoring param key %s as buffer size %zu is less than expected "
"buffer size %d",
PARAMETER_KEY_QP_OFFSET_MAP, mapSize, expectedMapSize);
}
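
The format-string change above is more than cosmetic: mapSize is a size_t, and on LP64 builds passing a 64-bit size_t where %d expects an int is undefined behavior and can garble the logged values. A standalone illustration of the corrected format (my example, not from the patch):

    #include <cstdio>
    #include <cstddef>

    int main() {
        size_t mapSize = 1u << 20;              // value logged with %zu
        int expectedMapSize = 2 * 1024 * 1024;  // value logged with %d
        // %zu is the printf length modifier defined for size_t; %d expects an int and
        // would mismatch the 64-bit size_t argument on LP64 targets.
        std::printf("buffer size %zu is less than expected buffer size %d\n",
                    mapSize, expectedMapSize);
        return 0;
    }
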
diff --git a/media/libaudioclient/tests/audioeffect_analyser.cpp b/media/libaudioclient/tests/audioeffect_analyser.cpp
index f4d37bc..199fb8b 100644
--- a/media/libaudioclient/tests/audioeffect_analyser.cpp
+++ b/media/libaudioclient/tests/audioeffect_analyser.cpp
@@ -62,6 +62,15 @@
constexpr int kNPointFFT = 16384;
constexpr float kBinWidth = (float)kSamplingFrequency / kNPointFFT;
+// frequency used to generate testing tone
+constexpr uint32_t kTestFrequency = 1400;
+
+// Tolerance for audio gain differences in dB: 0.1 dB corresponds to an amplitude ratio of
+// 10^(0.1/20) (around 1.0116), i.e. roughly a 1.16% difference in amplitude
+constexpr float kAudioGainDiffTolerancedB = .1f;
+
+const std::string kDataTempPath = "/data/local/tmp";
+
const char* gPackageName = "AudioEffectAnalyser";
static_assert(kPrimeDurationInSec + 2 * kNPointFFT / kSamplingFrequency < kCaptureDurationSec,
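
As a cross-check of the tolerance constant above: a gain difference of d dB corresponds to an amplitude ratio of 10^(d/20), so 0.1 dB is a factor of about 1.0116, i.e. roughly a 1.16% change in amplitude. A quick sanity check (plain math, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
        const float toleranceDb = 0.1f;  // kAudioGainDiffTolerancedB
        const float ratio = std::pow(10.0f, toleranceDb / 20.0f);
        std::printf("0.1 dB -> amplitude ratio %.4f (about %.2f%% change)\n",
                    ratio, (ratio - 1.0f) * 100.0f);  // prints ~1.0116 and ~1.16%
        return 0;
    }
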
@@ -177,21 +186,30 @@
return effect;
}
-void computeFilterGainsAtTones(float captureDuration, int nPointFft, std::vector<int>& binOffsets,
- float* inputMag, float* gaindB, const char* res,
- audio_session_t sessionId) {
+void computeFilterGainsAtTones(float captureDuration, int nPointFft, std::vector<int> binOffsets,
+ float* inputMag, float* gaindB, const std::string res,
+ audio_session_t sessionId, const std::string res2 = "",
+ audio_session_t sessionId2 = AUDIO_SESSION_NONE) {
int totalFrameCount = captureDuration * kSamplingFrequency;
auto output = pffft::AlignedVector<float>(totalFrameCount);
auto fftOutput = pffft::AlignedVector<float>(nPointFft);
- PlaybackEnv argsP;
- argsP.mRes = std::string{res};
+ PlaybackEnv argsP, argsP2;
+ argsP.mRes = res;
argsP.mSessionId = sessionId;
CaptureEnv argsR;
argsR.mCaptureDuration = captureDuration;
std::thread playbackThread(&PlaybackEnv::play, &argsP);
+ std::optional<std::thread> playbackThread2;
+ if (res2 != "") {
+ argsP2 = {.mSessionId = sessionId2, .mRes = res2};
+ playbackThread2 = std::thread(&PlaybackEnv::play, &argsP2);
+ }
std::thread captureThread(&CaptureEnv::capture, &argsR);
captureThread.join();
playbackThread.join();
+ if (playbackThread2 != std::nullopt) {
+ playbackThread2->join();
+ }
ASSERT_EQ(OK, argsR.mStatus) << argsR.mMsg;
ASSERT_EQ(OK, argsP.mStatus) << argsP.mMsg;
ASSERT_FALSE(argsR.mDumpFileName.empty()) << "recorded not written to file";
@@ -210,7 +228,11 @@
auto k = binOffsets[i];
auto outputMag = sqrt((fftOutput[k * 2] * fftOutput[k * 2]) +
(fftOutput[k * 2 + 1] * fftOutput[k * 2 + 1]));
- gaindB[i] = 20 * log10(outputMag / inputMag[i]);
+ if (inputMag == nullptr) {
+ gaindB[i] = 20 * log10(outputMag);
+ } else {
+ gaindB[i] = 20 * log10(outputMag / inputMag[i]);
+ }
}
}
@@ -282,7 +304,7 @@
inputMag[i] = sqrt((fftInput[k * 2] * fftInput[k * 2]) +
(fftInput[k * 2 + 1] * fftInput[k * 2 + 1]));
}
- TemporaryFile tf("/data/local/tmp");
+ TemporaryFile tf(kDataTempPath);
close(tf.release());
std::ofstream fout(tf.path, std::ios::out | std::ios::binary);
fout.write((char*)input.data(), input.size() * sizeof(input[0]));
@@ -386,7 +408,7 @@
inputMag[i] = sqrt((fftInput[k * 2] * fftInput[k * 2]) +
(fftInput[k * 2 + 1] * fftInput[k * 2 + 1]));
}
- TemporaryFile tf("/data/local/tmp");
+ TemporaryFile tf(kDataTempPath);
close(tf.release());
std::ofstream fout(tf.path, std::ios::out | std::ios::binary);
fout.write((char*)input.data(), input.size() * sizeof(input[0]));
@@ -396,7 +418,7 @@
memset(gainWithOutFilter, 0, sizeof(gainWithOutFilter));
ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(kCaptureDurationSec, kNPointFFT, binOffsets,
inputMag, gainWithOutFilter, tf.path,
- AUDIO_SESSION_OUTPUT_MIX));
+ AUDIO_SESSION_NONE));
float diffA = gainWithOutFilter[0] - gainWithOutFilter[1];
float prevGain = -100.f;
for (auto strength = 150; strength < 1000; strength += strengthSupported ? 150 : 1000) {
@@ -421,6 +443,56 @@
}
}
+// assert that a silent audio session with an effect attached does not override the output audio
+TEST(AudioEffectTest, SilentAudioEffectSessionNotOverrideOutput) {
+ audio_session_t sessionId =
+ (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ sp<AudioEffect> bassboost = createEffect(SL_IID_BASSBOOST, sessionId);
+ if ((bassboost->descriptor().flags & EFFECT_FLAG_HW_ACC_MASK) != 0) {
+ GTEST_SKIP() << "effect processed output inaccessible, skipping test";
+ }
+ ASSERT_EQ(OK, bassboost->initCheck());
+ ASSERT_EQ(NO_ERROR, bassboost->setEnabled(true));
+
+ const auto bin = roundToFreqCenteredToFftBin(kBinWidth, kTestFrequency);
+ const int binIndex = std::get<0 /* index */>(bin);
+ const int binFrequency = std::get<1 /* freq */>(bin);
+
+ const int totalFrameCount = kSamplingFrequency * kPlayBackDurationSec;
+ // input for effect module
+ auto silentAudio = pffft::AlignedVector<float>(totalFrameCount);
+ auto input = pffft::AlignedVector<float>(totalFrameCount);
+ generateMultiTone({binFrequency}, kSamplingFrequency, kPlayBackDurationSec, kDefAmplitude,
+ input.data(), totalFrameCount);
+ TemporaryFile tf(kDataTempPath);
+ close(tf.release());
+ std::ofstream fout(tf.path, std::ios::out | std::ios::binary);
+ fout.write((char*)input.data(), input.size() * sizeof(input[0]));
+ fout.close();
+
+ // play non-silent audio file on AUDIO_SESSION_NONE
+ float audioGain, audioPlusSilentEffectGain;
+ ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(kCaptureDurationSec, kNPointFFT, {binIndex},
+ nullptr, &audioGain, tf.path,
+ AUDIO_SESSION_NONE));
+ EXPECT_FALSE(std::isinf(audioGain)) << "output gain should not be -inf";
+
+ TemporaryFile silentFile(kDataTempPath);
+ close(silentFile.release());
+ std::ofstream fSilent(silentFile.path, std::ios::out | std::ios::binary);
+ fSilent.write((char*)silentAudio.data(), silentAudio.size() * sizeof(silentAudio[0]));
+ fSilent.close();
+    // play the non-silent audio file on AUDIO_SESSION_NONE and the silent audio on sessionId;
+    // expect the new output gain to be almost the same as in the previous playback
+ ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(
+ kCaptureDurationSec, kNPointFFT, {binIndex}, nullptr, &audioPlusSilentEffectGain,
+ tf.path, AUDIO_SESSION_NONE, silentFile.path, sessionId));
+ EXPECT_FALSE(std::isinf(audioPlusSilentEffectGain))
+ << "output might have been overwritten in effect accumulate mode";
+ EXPECT_NEAR(audioGain, audioPlusSilentEffectGain, kAudioGainDiffTolerancedB)
+            << "output gain should stay almost the same with one more silent audio stream";
+}
+
int main(int argc, char** argv) {
android::ProcessState::self()->startThreadPool();
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/libaudioclient/tests/audioeffect_tests.cpp b/media/libaudioclient/tests/audioeffect_tests.cpp
index 59d0c6a..791319e 100644
--- a/media/libaudioclient/tests/audioeffect_tests.cpp
+++ b/media/libaudioclient/tests/audioeffect_tests.cpp
@@ -556,8 +556,9 @@
ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
EXPECT_EQ(NO_ERROR, playback->create());
EXPECT_EQ(NO_ERROR, playback->start());
- EXPECT_TRUE(isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
- kDefaultOutputEffectPriority - 1, sessionId))
+ ASSERT_EQ(ALREADY_EXISTS,
+ isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+ kDefaultOutputEffectPriority - 1, sessionId))
<< "Effect should have been added. " << type;
EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
playback->stop();
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 740bf60..2753906 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -42,6 +42,8 @@
using ::aidl::android::hardware::audio::effect::Descriptor;
using ::aidl::android::hardware::audio::effect::IFactory;
using ::aidl::android::hardware::audio::effect::Processing;
+using ::aidl::android::media::audio::common::AudioDevice;
+using ::aidl::android::media::audio::common::AudioDeviceAddress;
using ::aidl::android::media::audio::common::AudioSource;
using ::aidl::android::media::audio::common::AudioStreamType;
using ::aidl::android::media::audio::common::AudioUuid;
@@ -281,7 +283,8 @@
auto getConfigProcessingWithAidlProcessing =
[&](const auto& aidlProcess, std::vector<effectsConfig::InputStream>& preprocess,
- std::vector<effectsConfig::OutputStream>& postprocess) {
+ std::vector<effectsConfig::OutputStream>& postprocess,
+ std::vector<effectsConfig::DeviceEffects>& deviceprocess) {
if (aidlProcess.type.getTag() == Processing::Type::streamType) {
AudioStreamType aidlType =
aidlProcess.type.template get<Processing::Type::streamType>();
@@ -313,6 +316,25 @@
effectsConfig::InputStream stream = {.type = type.value(),
.effects = std::move(effects)};
preprocess.emplace_back(stream);
+ } else if (aidlProcess.type.getTag() == Processing::Type::device) {
+ AudioDevice aidlDevice =
+ aidlProcess.type.template get<Processing::Type::device>();
+ std::vector<std::shared_ptr<const effectsConfig::Effect>> effects;
+ std::transform(aidlProcess.ids.begin(), aidlProcess.ids.end(),
+ std::back_inserter(effects), getConfigEffectWithDescriptor);
+ audio_devices_t type;
+ char address[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+ status_t status = ::aidl::android::aidl2legacy_AudioDevice_audio_device(
+ aidlDevice, &type, address);
+ if (status != NO_ERROR) {
+ ALOGE("%s device effect has invalid device type / address", __func__);
+ return;
+ }
+ effectsConfig::DeviceEffects device = {
+ {.type = type, .effects = std::move(effects)},
+ .address = address,
+ };
+ deviceprocess.emplace_back(device);
}
};
@@ -320,17 +342,21 @@
[&]() -> std::shared_ptr<const effectsConfig::Processings> {
std::vector<effectsConfig::InputStream> preprocess;
std::vector<effectsConfig::OutputStream> postprocess;
+ std::vector<effectsConfig::DeviceEffects> deviceprocess;
for (const auto& processing : mAidlProcessings) {
- getConfigProcessingWithAidlProcessing(processing, preprocess, postprocess);
+ getConfigProcessingWithAidlProcessing(processing, preprocess, postprocess,
+ deviceprocess);
}
- if (0 == preprocess.size() && 0 == postprocess.size()) {
+ if (0 == preprocess.size() && 0 == postprocess.size() &&
+ 0 == deviceprocess.size()) {
return nullptr;
}
return std::make_shared<const effectsConfig::Processings>(
effectsConfig::Processings({.preprocess = std::move(preprocess),
- .postprocess = std::move(postprocess)}));
+ .postprocess = std::move(postprocess),
+ .deviceprocess = std::move(deviceprocess)}));
}());
return processings;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 0401e82..3020ead 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -840,7 +840,7 @@
const sp<AMessage> mNotify;
};
-class OnBufferReleasedListener : public ::android::BnProducerListener{
+class OnBufferReleasedListener : public ::android::SurfaceListener{
private:
uint32_t mGeneration;
std::weak_ptr<BufferChannelBase> mBufferChannel;
@@ -864,6 +864,9 @@
notifyBufferReleased();
}
+ void onBuffersDiscarded([[maybe_unused]] const std::vector<sp<GraphicBuffer>>& buffers)
+ override { }
+
void onBufferDetached([[maybe_unused]] int slot) override {
notifyBufferReleased();
}
@@ -6722,7 +6725,7 @@
// to this surface after disconnect/connect, and those free frames would inherit the new
// generation number. Disconnecting after setting a unique generation prevents this.
nativeWindowDisconnect(surface.get(), "connectToSurface(reconnect)");
- sp<IProducerListener> listener =
+ sp<SurfaceListener> listener =
new OnBufferReleasedListener(*generation, mBufferChannel);
err = surfaceConnectWithListener(
surface, listener, "connectToSurface(reconnect-with-listener)");
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 714e312..74432a6 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -335,7 +335,7 @@
}
status_t surfaceConnectWithListener(
- const sp<Surface> &surface, sp<IProducerListener> listener, const char *reason) {
+ const sp<Surface> &surface, sp<SurfaceListener> listener, const char *reason) {
ALOGD("connecting to surface %p, reason %s", surface.get(), reason);
status_t err = surface->connect(NATIVE_WINDOW_API_MEDIA, listener);
diff --git a/media/libstagefright/include/media/stagefright/SurfaceUtils.h b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
index eccb413..882a5ab 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceUtils.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
@@ -27,7 +27,7 @@
namespace android {
struct HDRStaticInfo;
-class IProducerListener;
+class SurfaceListener;
/**
* Configures |nativeWindow| for given |width|x|height|, pixel |format|, |rotation| and |usage|.
@@ -45,7 +45,7 @@
status_t nativeWindowConnect(ANativeWindow *surface, const char *reason);
status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason);
status_t surfaceConnectWithListener(const sp<Surface> &surface,
- sp<IProducerListener> listener, const char *reason);
+ sp<SurfaceListener> listener, const char *reason);
/**
* Disable buffer dropping behavior of BufferQueue if target sdk of application
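
These stagefright changes (together with the camera changes further down) migrate from IProducerListener/BnProducerListener to SurfaceListener. A minimal no-op listener, sketched from the overrides that appear in this patch (the exact base-class contract in gui/Surface.h is assumed, not quoted):

    #include <vector>
    #include <gui/Surface.h>
    #include <ui/GraphicBuffer.h>
    #include <utils/StrongPointer.h>

    namespace android {

    // No-op listener; the callback set mirrors the overrides added throughout this patch.
    struct StubListener : public SurfaceListener {
        void onBufferReleased() override {}                  // a buffer came back from the consumer
        bool needsReleaseNotify() override { return true; }  // opt in to onBufferReleased delivery
        void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& /*buffers*/) override {}
        void onBufferDetached(int /*slot*/) override {}
    };

    }  // namespace android

    // Usage mirrors the updated call sites:
    //     sp<SurfaceListener> listener = new android::StubListener();
    //     surface->connect(NATIVE_WINDOW_API_MEDIA, listener);
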
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.cpp b/media/libstagefright/renderfright/gl/ProgramCache.cpp
index 350f0b7..ad6dd03 100644
--- a/media/libstagefright/renderfright/gl/ProgramCache.cpp
+++ b/media/libstagefright/renderfright/gl/ProgramCache.cpp
@@ -683,7 +683,7 @@
fs << "uniform mat4 inputTransformMatrix;";
fs << R"__SHADER__(
highp vec3 InputTransform(const highp vec3 color) {
- return clamp(vec3(inputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+ return vec3(inputTransformMatrix * vec4(color, 1.0));
}
)__SHADER__";
} else {
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index ca862b0..151ce7c 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -290,7 +290,7 @@
// Max file duration limit is set
if (mMaxFileDurationLimitUs != 0) {
if (bitRate > 0) {
- int64_t size2 = ((mMaxFileDurationLimitUs * bitRate * 6) / 1000 / 8000000);
+ int64_t size2 = ((mMaxFileDurationLimitUs / 1000) * bitRate * 6) / 8000000;
if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
// When both file size and duration limits are set,
// we use the smaller limit of the two.
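
The reordering above is an overflow guard: with long duration limits and high bitrates, the old expression mMaxFileDurationLimitUs * bitRate * 6 can exceed INT64_MAX before any division happens, which is signed-overflow UB. Dividing the microsecond duration by 1000 first keeps every intermediate value in range; the only behavioral difference is sub-millisecond truncation. Illustrative numbers (my own, not from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t maxFileDurationLimitUs = 86400000000LL;  // 24 hours in microseconds
        const int64_t bitRate = 40000000;                      // 40 Mbps

        // Old order: 86.4e9 * 4e7 * 6 ~= 2.07e19, past INT64_MAX (~9.22e18) -> overflow.
        // New order keeps every intermediate value comfortably in range:
        const int64_t size2 = ((maxFileDurationLimitUs / 1000) * bitRate * 6) / 8000000;
        std::printf("size2 = %lld\n", (long long)size2);  // prints 2592000000
        return 0;
    }
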
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index b92cd70..f7b9b33 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -99,21 +99,20 @@
* @return ioHandle if found, AUDIO_IO_HANDLE_NONE otherwise.
*/
audio_io_handle_t getIoForSession(audio_session_t sessionId,
- const effect_uuid_t *effectType = nullptr) const;
- bool hasOrphansForSession(audio_session_t sessionId) const;
- EffectDescriptorCollection getOrphanEffectsForSession(audio_session_t sessionId) const;
- void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
+ const effect_uuid_t* effectType = nullptr) const;
/**
- * @brief Checks if there is at least one orphan effect with given sessionId and effect type
- * uuid.
+     * @brief Checks if there is at least one orphan effect with the given sessionId and,
+     * optionally, the given effect type UUID.
* @param sessionId Session ID.
- * @param effectType Effect type UUID, the implementation will be same as hasOrphansForSession
- * if null.
+     * @param effectType Optional pointer to the effect type UUID to match; nullptr (the default)
+     *        matches any type.
* @return True if there is an orphan effect for given sessionId and type UUID, false otherwise.
*/
- bool hasOrphanEffectsForSessionAndType(audio_session_t sessionId,
- const effect_uuid_t* effectType) const;
+ bool hasOrphansForSession(audio_session_t sessionId,
+ const effect_uuid_t* effectType = nullptr) const;
+
+ EffectDescriptorCollection getOrphanEffectsForSession(audio_session_t sessionId) const;
+ void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
private:
status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
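
With the two orphan lookups folded into a single hasOrphansForSession that takes an optional type filter, callers pass the UUID only when they need it. Usage fragments (the filtered form is the one used in AudioPolicyManager.cpp below; the unfiltered form is preserved by the nullptr default):

    // any orphan effect on the session
    bool hasAny = mEffects.hasOrphansForSession(sessionId);

    // only orphan HapticGenerator effects, as AudioPolicyManager does below
    bool hasOrphanHaptic = mEffects.hasOrphansForSession(sessionId, FX_IID_HAPTICGENERATOR);
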
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 7971b61..090da6c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -210,27 +210,13 @@
}
}
-bool EffectDescriptorCollection::hasOrphansForSession(audio_session_t sessionId) const
-{
+bool EffectDescriptorCollection::hasOrphansForSession(audio_session_t sessionId,
+ const effect_uuid_t* effectType) const {
for (size_t i = 0; i < size(); ++i) {
sp<EffectDescriptor> effect = valueAt(i);
- if (effect->mSession == sessionId && effect->mIsOrphan) {
- return true;
- }
- }
- return false;
-}
-
-bool EffectDescriptorCollection::hasOrphanEffectsForSessionAndType(
- audio_session_t sessionId, const effect_uuid_t* effectType) const {
- if (effectType == nullptr) {
- return hasOrphansForSession(sessionId);
- }
-
- for (size_t i = 0; i < size(); ++i) {
- sp<EffectDescriptor> effect = valueAt(i);
- if (effect->mIsOrphan && effect->mSession == sessionId &&
- memcmp(&effect->mDesc.type, effectType, sizeof(effect_uuid_t)) == 0) {
+ if (effect->mSession == sessionId && effect->mIsOrphan &&
+ (effectType == nullptr ||
+ memcmp(&effect->mDesc.type, effectType, sizeof(effect_uuid_t)) == 0)) {
return true;
}
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e5fe2d2..739e201 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2186,8 +2186,7 @@
// matching criteria values in priority order for best matching output so far
std::vector<uint32_t> bestMatchCriteria(8, 0);
- const bool hasOrphanHaptic =
- mEffects.hasOrphanEffectsForSessionAndType(sessionId, FX_IID_HAPTICGENERATOR);
+ const bool hasOrphanHaptic = mEffects.hasOrphansForSession(sessionId, FX_IID_HAPTICGENERATOR);
const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(
channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
@@ -3508,6 +3507,11 @@
bool enabled,
audio_stream_type_t streamToDriveAbs)
{
+ if (!enabled) {
+ mAbsoluteVolumeDrivingStreams.erase(deviceType);
+ return NO_ERROR;
+ }
+
audio_attributes_t attributesToDriveAbs = mEngine->getAttributesForStreamType(streamToDriveAbs);
if (attributesToDriveAbs == AUDIO_ATTRIBUTES_INITIALIZER) {
ALOGW("%s: no attributes for stream %s, bailing out", __func__,
@@ -3515,12 +3519,7 @@
return BAD_VALUE;
}
- if (enabled) {
- mAbsoluteVolumeDrivingStreams[deviceType] = attributesToDriveAbs;
- } else {
- mAbsoluteVolumeDrivingStreams.erase(deviceType);
- }
-
+ mAbsoluteVolumeDrivingStreams[deviceType] = attributesToDriveAbs;
return NO_ERROR;
}
@@ -6410,7 +6409,7 @@
// means haptic use cases (either the client channelmask includes haptic bits, or created a
// HapticGenerator effect for this session) are not supported.
return clientHapticChannel == 0 &&
- !mEffects.hasOrphanEffectsForSessionAndType(sessionId, FX_IID_HAPTICGENERATOR);
+ !mEffects.hasOrphansForSession(sessionId, FX_IID_HAPTICGENERATOR);
}
}
@@ -8474,11 +8473,19 @@
bool& isBtScoVolSrc,
const char* caller) {
const VolumeSource callVolSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL, false);
- const VolumeSource btScoVolSrc = toVolumeSource(AUDIO_STREAM_BLUETOOTH_SCO, false);
+ isVoiceVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (callVolSrc == volumeSource);
+
const bool isScoRequested = isScoRequestedForComm();
const bool isHAUsed = isHearingAidUsedForComm();
- isVoiceVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (callVolSrc == volumeSource);
+ if (com_android_media_audio_replace_stream_bt_sco()) {
+ ALOGV("%s stream bt sco is replaced, no volume consistency check for calls", __func__);
+ isBtScoVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (callVolSrc == volumeSource) &&
+ (isScoRequested || isHAUsed);
+ return true;
+ }
+
+ const VolumeSource btScoVolSrc = toVolumeSource(AUDIO_STREAM_BLUETOOTH_SCO, false);
isBtScoVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (btScoVolSrc == volumeSource);
if ((callVolSrc != btScoVolSrc) &&
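
This is the hunk behind the commit title: isVoiceVolSrc is computed up front, and when the replace_stream_bt_sco flag is on, isBtScoVolSrc is derived from the voice-call source plus the SCO/hearing-aid state instead of from a separate AUDIO_STREAM_BLUETOOTH_SCO source, so the legacy consistency check between the two stream aliases no longer applies. A condensed restatement of the new branch, using the names from the diff:

    const VolumeSource callVolSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL, false);
    isVoiceVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (volumeSource == callVolSrc);

    if (com_android_media_audio_replace_stream_bt_sco()) {
        // The voice-call volume source also drives BT SCO volume whenever SCO or a
        // hearing aid is serving the call; there is no separate SCO alias to cross-check.
        isBtScoVolSrc = isVoiceVolSrc && (isScoRequested || isHAUsed);
        return true;
    }
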
diff --git a/services/audiopolicy/permission/NativePermissionController.cpp b/services/audiopolicy/permission/NativePermissionController.cpp
index 8659f2c..07bb7e2 100644
--- a/services/audiopolicy/permission/NativePermissionController.cpp
+++ b/services/audiopolicy/permission/NativePermissionController.cpp
@@ -129,10 +129,12 @@
BinderResult<bool> NativePermissionController::validateUidPackagePair(
uid_t uid, const std::string& packageName) const {
+ if (uid == AID_ROOT || uid == AID_SYSTEM) return true;
uid = uid % AID_USER_OFFSET;
const auto fixed_package_opt = getFixedPackageName(uid);
if (fixed_package_opt.has_value()) {
- return packageName == fixed_package_opt.value();
+ return (uid == AID_ROOT || uid == AID_SYSTEM) ? true :
+ packageName == fixed_package_opt.value();
}
std::lock_guard l{m_};
if (!is_package_populated_) {
@@ -148,6 +150,7 @@
BinderResult<bool> NativePermissionController::checkPermission(PermissionEnum perm,
uid_t uid) const {
+ if (uid == AID_ROOT || uid == AID_SYSTEM || uid == getuid()) return true;
std::lock_guard l{m_};
const auto& uids = permission_map_[static_cast<size_t>(perm)];
if (!uids.empty()) {
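
The two root/system checks in validateUidPackagePair are not redundant: the new early return catches the literal AID_ROOT/AID_SYSTEM uids before the modulo, while the check kept inside the fixed-package branch also matches the system uid of a secondary user after uid % AID_USER_OFFSET reduces it to an app id. A quick illustration (constants as defined in AOSP's android_filesystem_config.h):

    #include <cstdio>

    constexpr unsigned AID_ROOT = 0;
    constexpr unsigned AID_SYSTEM = 1000;
    constexpr unsigned AID_USER_OFFSET = 100000;  // uid range reserved per Android user

    int main() {
        const unsigned uid = 1001000;  // "system" running as secondary user 10
        std::printf("app id = %u (AID_SYSTEM = %u)\n", uid % AID_USER_OFFSET, AID_SYSTEM);
        return 0;
    }
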
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 768cd07..f414862 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -23,6 +23,8 @@
#include <android/content/AttributionSourceState.h>
#include <android_media_audiopolicy.h>
+#include <com_android_media_audio.h>
+#include <error/expected_utils.h>
#include <media/AidlConversion.h>
#include <media/AudioPolicy.h>
#include <media/AudioValidator.h>
@@ -44,13 +46,30 @@
if (!_tmp.isOk()) return _tmp; \
}
+#define CHECK_PERM(expr1, expr2) \
+ VALUE_OR_RETURN_STATUS(getPermissionProvider().checkPermission((expr1), (expr2)))
+
#define MAX_ITEMS_PER_LIST 1024
namespace android {
namespace audiopolicy_flags = android::media::audiopolicy;
using binder::Status;
using aidl_utils::binderStatusFromStatusT;
+using com::android::media::audio::audioserver_permissions;
using com::android::media::permission::NativePermissionController;
+using com::android::media::permission::PermissionEnum::ACCESS_ULTRASOUND;
+using com::android::media::permission::PermissionEnum::CALL_AUDIO_INTERCEPTION;
+using com::android::media::permission::PermissionEnum::CAPTURE_AUDIO_HOTWORD;
+using com::android::media::permission::PermissionEnum::CAPTURE_VOICE_COMMUNICATION_OUTPUT;
+using com::android::media::permission::PermissionEnum::CAPTURE_AUDIO_OUTPUT;
+using com::android::media::permission::PermissionEnum::CAPTURE_MEDIA_OUTPUT;
+using com::android::media::permission::PermissionEnum::CAPTURE_TUNER_AUDIO_INPUT;
+using com::android::media::permission::PermissionEnum::MODIFY_AUDIO_ROUTING;
+using com::android::media::permission::PermissionEnum::MODIFY_AUDIO_SETTINGS;
+using com::android::media::permission::PermissionEnum::MODIFY_DEFAULT_AUDIO_EFFECTS;
+using com::android::media::permission::PermissionEnum::MODIFY_PHONE_STATE;
+using com::android::media::permission::PermissionEnum::RECORD_AUDIO;
+using com::android::media::permission::PermissionEnum::WRITE_SECURE_SETTINGS;
using content::AttributionSourceState;
using media::audio::common::AudioConfig;
using media::audio::common::AudioConfigBase;
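
The CHECK_PERM macro is what lets every guarded entry point below collapse into the same audioserver_permissions() ? CHECK_PERM(...) : legacyCheck(...) ternary: it queries the permission provider and, if the query itself fails, returns that binder error from the enclosing method (behavior assumed from VALUE_OR_RETURN_STATUS in error/expected_utils.h). A sketch of the shape each call site reduces to; the method name here is hypothetical:

    // Hypothetical entry point illustrating the guard pattern used throughout this file.
    Status AudioPolicyService::exampleGuardedCall() {
        const uid_t uid = IPCThreadState::self()->getCallingUid();
        const bool allowed = audioserver_permissions()
                ? CHECK_PERM(MODIFY_AUDIO_SETTINGS, uid)  // new path: ask the permission provider
                : settingsAllowed();                      // legacy path: in-process check
        if (!allowed) {
            return binderStatusFromStatusT(PERMISSION_DENIED);
        }
        // ... actual work ...
        return Status::ok();
    }
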
@@ -86,31 +105,37 @@
!= std::end(mSupportedSystemUsages);
}
-status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+Status AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
return validateUsage(attr, getCallingAttributionSource());
}
-status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
+Status AudioPolicyService::validateUsage(const audio_attributes_t& attr,
const AttributionSourceState& attributionSource) {
if (isSystemUsage(attr.usage)) {
if (isSupportedSystemUsage(attr.usage)) {
if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
&& ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
- if (!callAudioInterceptionAllowed(attributionSource)) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(CALL_AUDIO_INTERCEPTION, attributionSource.uid)
+ : callAudioInterceptionAllowed(attributionSource))) {
ALOGE("%s: call audio interception not allowed for attribution source: %s",
__func__, attributionSource.toString().c_str());
- return PERMISSION_DENIED;
+ return Status::fromExceptionCode(Status::EX_SECURITY,
+ "Call audio interception not allowed");
}
- } else if (!modifyAudioRoutingAllowed(attributionSource)) {
+ } else if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, attributionSource.uid)
+ : modifyAudioRoutingAllowed(attributionSource))) {
ALOGE("%s: modify audio routing not allowed for attribution source: %s",
__func__, attributionSource.toString().c_str());
- return PERMISSION_DENIED;
+ return Status::fromExceptionCode(Status::EX_SECURITY,
+ "Modify audio routing not allowed");
}
} else {
- return BAD_VALUE;
+ return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
}
}
- return NO_ERROR;
+ return Status::ok();
}
@@ -137,7 +162,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
@@ -191,7 +218,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -215,7 +244,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (uint32_t(state) >= AUDIO_MODE_CNT) {
@@ -265,7 +296,9 @@
return binderStatusFromStatusT(NO_INIT);
}
- if (!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -355,7 +388,7 @@
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
+ RETURN_IF_BINDER_ERROR(validateUsage(attr, attributionSource));
ALOGV("%s()", __func__);
audio_utils::lock_guard _l(mMutex);
@@ -364,14 +397,22 @@
aidl2legacy_int32_t_uid_t(attributionSource.uid)))) {
attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
}
+ const bool bypassInterruptionAllowed = audioserver_permissions() ? (
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, attributionSource.uid) ||
+ CHECK_PERM(MODIFY_PHONE_STATE, attributionSource.uid) ||
+ CHECK_PERM(WRITE_SECURE_SETTINGS, attributionSource.uid))
+ : bypassInterruptionPolicyAllowed(attributionSource);
+
if (((attr.flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
- && !bypassInterruptionPolicyAllowed(attributionSource)) {
+ && !bypassInterruptionAllowed) {
attr.flags = static_cast<audio_flags_mask_t>(
attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
}
if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
- if (!accessUltrasoundAllowed(attributionSource)) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(ACCESS_ULTRASOUND, attributionSource.uid)
+ : accessUltrasoundAllowed(attributionSource))) {
ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
__func__, attributionSource.uid, attributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -400,18 +441,24 @@
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
- && !callAudioInterceptionAllowed(attributionSource)) {
+ && !(audioserver_permissions() ?
+ CHECK_PERM(CALL_AUDIO_INTERCEPTION, attributionSource.uid)
+ : callAudioInterceptionAllowed(attributionSource))) {
ALOGE("%s() permission denied: call redirection not allowed for uid %d",
__func__, attributionSource.uid);
result = PERMISSION_DENIED;
- } else if (!modifyPhoneStateAllowed(attributionSource)) {
+ } else if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_PHONE_STATE, attributionSource.uid)
+ : modifyPhoneStateAllowed(attributionSource))) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
__func__, attributionSource.uid);
result = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
- if (!modifyAudioRoutingAllowed(attributionSource)) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, attributionSource.uid)
+ : modifyAudioRoutingAllowed(attributionSource))) {
ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
__func__, attributionSource.uid);
result = PERMISSION_DENIED;
@@ -630,8 +677,7 @@
return binderStatusFromStatusT(BAD_VALUE);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
- attributionSource)));
+ RETURN_IF_BINDER_ERROR(validateUsage(attr, attributionSource));
uint32_t virtualDeviceId = kDefaultVirtualDeviceId;
@@ -644,7 +690,10 @@
// type is API_INPUT_MIX_EXT_POLICY_REROUTE and by AudioService if a media projection
// is used and input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK
// - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed()
- if (!(recordingAllowed(attributionSource, inputSource)
+ const auto isRecordingAllowed = audioserver_permissions() ?
+ CHECK_PERM(RECORD_AUDIO, attributionSource.uid) :
+ recordingAllowed(attributionSource, inputSource);
+ if (!(isRecordingAllowed
|| inputSource == AUDIO_SOURCE_FM_TUNER
|| inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
|| inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
@@ -653,8 +702,12 @@
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureOutput = captureAudioOutputAllowed(attributionSource);
- bool canInterceptCallAudio = callAudioInterceptionAllowed(attributionSource);
+ bool canCaptureOutput = audioserver_permissions() ?
+ CHECK_PERM(CAPTURE_AUDIO_OUTPUT, attributionSource.uid)
+ : captureAudioOutputAllowed(attributionSource);
+ bool canInterceptCallAudio = audioserver_permissions() ?
+ CHECK_PERM(CALL_AUDIO_INTERCEPTION, attributionSource.uid)
+ : callAudioInterceptionAllowed(attributionSource);
bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
|| inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
|| inputSource == AUDIO_SOURCE_VOICE_CALL;
@@ -668,11 +721,15 @@
}
if (inputSource == AUDIO_SOURCE_FM_TUNER
&& !canCaptureOutput
- && !captureTunerAudioInputAllowed(attributionSource)) {
+ && !(audioserver_permissions() ?
+ CHECK_PERM(CAPTURE_TUNER_AUDIO_INPUT, attributionSource.uid)
+ : captureTunerAudioInputAllowed(attributionSource))) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureHotword = captureHotwordAllowed(attributionSource);
+ bool canCaptureHotword = audioserver_permissions() ?
+ CHECK_PERM(CAPTURE_AUDIO_HOTWORD, attributionSource.uid)
+ : captureHotwordAllowed(attributionSource);
if ((inputSource == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -687,7 +744,9 @@
}
if (attr.source == AUDIO_SOURCE_ULTRASOUND) {
- if (!accessUltrasoundAllowed(attributionSource)) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(ACCESS_ULTRASOUND, attributionSource.uid)
+ : accessUltrasoundAllowed(attributionSource))) {
ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
__func__, attributionSource.uid, attributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
@@ -733,14 +792,29 @@
status = PERMISSION_DENIED;
}
break;
- case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
- if (!(modifyAudioRoutingAllowed(attributionSource)
+ case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE: {
+ bool modAudioRoutingAllowed;
+ if (audioserver_permissions()) {
+ auto result = getPermissionProvider().checkPermission(
+ MODIFY_AUDIO_ROUTING, attributionSource.uid);
+ if (!result.ok()) {
+ ALOGE("%s permission provider error: %s", __func__,
+ result.error().toString8().c_str());
+ status = aidl_utils::statusTFromBinderStatus(result.error());
+ break;
+ }
+ modAudioRoutingAllowed = result.value();
+ } else {
+ modAudioRoutingAllowed = modifyAudioRoutingAllowed(attributionSource);
+ }
+ if (!(modAudioRoutingAllowed
|| ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
&& canInterceptCallAudio))) {
ALOGE("%s permission denied for remote submix capture", __func__);
status = PERMISSION_DENIED;
}
break;
+ }
case AudioPolicyInterface::API_INPUT_INVALID:
default:
LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
@@ -1035,7 +1109,9 @@
if (mAudioPolicyManager == nullptr) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (uint32_t(streamToDriveAbs) >= AUDIO_STREAM_PUBLIC_CNT) {
@@ -1059,7 +1135,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
@@ -1083,7 +1161,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
@@ -1133,7 +1213,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
audio_utils::lock_guard _l(mMutex);
@@ -1439,7 +1521,9 @@
sp<AudioPolicyEffects>audioPolicyEffects;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(getAudioPolicyEffects(audioPolicyEffects)));
- if (!modifyDefaultAudioEffectsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_DEFAULT_AUDIO_EFFECTS, IPCThreadState::self()->getCallingUid())
+ : modifyDefaultAudioEffectsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(audioPolicyEffects->addSourceDefaultEffect(
@@ -1465,7 +1549,9 @@
sp<AudioPolicyEffects> audioPolicyEffects;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(getAudioPolicyEffects(audioPolicyEffects)));
- if (!modifyDefaultAudioEffectsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_DEFAULT_AUDIO_EFFECTS, IPCThreadState::self()->getCallingUid())
+ : modifyDefaultAudioEffectsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(audioPolicyEffects->addStreamDefaultEffect(
@@ -1480,7 +1566,9 @@
aidl2legacy_int32_t_audio_unique_id_t(idAidl));
sp<AudioPolicyEffects>audioPolicyEffects;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(getAudioPolicyEffects(audioPolicyEffects)));
- if (!modifyDefaultAudioEffectsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_DEFAULT_AUDIO_EFFECTS, IPCThreadState::self()->getCallingUid())
+ : modifyDefaultAudioEffectsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
return binderStatusFromStatusT(audioPolicyEffects->removeSourceDefaultEffect(id));
@@ -1492,7 +1580,9 @@
aidl2legacy_int32_t_audio_unique_id_t(idAidl));
sp<AudioPolicyEffects>audioPolicyEffects;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(getAudioPolicyEffects(audioPolicyEffects)));
- if (!modifyDefaultAudioEffectsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_DEFAULT_AUDIO_EFFECTS, IPCThreadState::self()->getCallingUid())
+ : modifyDefaultAudioEffectsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
return binderStatusFromStatusT(audioPolicyEffects->removeStreamDefaultEffect(id));
@@ -1510,7 +1600,9 @@
std::back_inserter(systemUsages), aidl2legacy_AudioUsage_audio_usage_t)));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -1569,7 +1661,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
+ RETURN_IF_BINDER_ERROR(validateUsage(attributes));
audio_utils::lock_guard _l(mMutex);
*_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1644,7 +1736,9 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(AudioValidator::validateAudioPatch(patch)));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1663,7 +1757,9 @@
audio_patch_handle_t handle = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_patch_handle_t(handleAidl));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1711,7 +1807,9 @@
binderStatusFromStatusT(AudioValidator::validateAudioPortConfig(config)));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1774,7 +1872,9 @@
// loopback|render only need a MediaProjection (checked in caller AudioService.java)
bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
return !is_mix_loopback_render(mix.mRouteFlags); });
- if (needModifyAudioRouting && !modifyAudioRoutingAllowed()) {
+ if (needModifyAudioRouting && !(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -1790,12 +1890,16 @@
const AttributionSourceState attributionSource = getCallingAttributionSource();
- if (needCaptureMediaOutput && !captureMediaOutputAllowed(attributionSource)) {
+ if (needCaptureMediaOutput && !(audioserver_permissions() ?
+ CHECK_PERM(CAPTURE_MEDIA_OUTPUT, attributionSource.uid)
+            : captureMediaOutputAllowed(attributionSource))) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (needCaptureVoiceCommunicationOutput &&
- !captureVoiceCommunicationOutputAllowed(attributionSource)) {
+ !(audioserver_permissions() ?
+ CHECK_PERM(CAPTURE_VOICE_COMMUNICATION_OUTPUT, attributionSource.uid)
+ : captureVoiceCommunicationOutputAllowed(attributionSource))) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -1852,7 +1956,9 @@
aidl2legacy_AudioDeviceTypeAddress));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1866,7 +1972,9 @@
uid_t uid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(uidAidl));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1885,7 +1993,9 @@
aidl2legacy_AudioDeviceTypeAddress));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1899,7 +2009,9 @@
int userId = VALUE_OR_RETURN_BINDER_STATUS(convertReinterpret<int>(userIdAidl));
audio_utils::lock_guard _l(mMutex);
- if(!modifyAudioRoutingAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_ROUTING, IPCThreadState::self()->getCallingUid())
+ : modifyAudioRoutingAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (mAudioPolicyManager == NULL) {
@@ -1927,7 +2039,7 @@
return binderStatusFromStatusT(NO_INIT);
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
+ RETURN_IF_BINDER_ERROR(validateUsage(attributes));
// startAudioSource should be created as the calling uid
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -1956,7 +2068,9 @@
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
- if (!settingsAllowed()) {
+ if (!(audioserver_permissions() ?
+ CHECK_PERM(MODIFY_AUDIO_SETTINGS, IPCThreadState::self()->getCallingUid())
+ : settingsAllowed())) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
audio_utils::lock_guard _l(mMutex);
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 699cacf..720ba84 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -452,8 +452,8 @@
app_state_t apmStatFromAmState(int amState);
bool isSupportedSystemUsage(audio_usage_t usage);
- status_t validateUsage(const audio_attributes_t& attr);
- status_t validateUsage(const audio_attributes_t& attr,
+ binder::Status validateUsage(const audio_attributes_t& attr);
+ binder::Status validateUsage(const audio_attributes_t& attr,
const AttributionSourceState& attributionSource);
void updateUidStates();
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 157f084..f7afeab 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1687,17 +1687,14 @@
}
Status CameraService::validateConnectLocked(const std::string& cameraId,
- const std::string& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid,
- /*out*/int& originalClientPid) const {
+ const std::string& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid) const {
#ifdef __BRILLO__
UNUSED(clientName8);
UNUSED(clientUid);
UNUSED(clientPid);
- UNUSED(originalClientPid);
#else
- Status allowed = validateClientPermissionsLocked(cameraId, clientName8, clientUid, clientPid,
- originalClientPid);
+ Status allowed = validateClientPermissionsLocked(cameraId, clientName8, clientUid, clientPid);
if (!allowed.isOk()) {
return allowed;
}
@@ -1735,8 +1732,7 @@
}
Status CameraService::validateClientPermissionsLocked(const std::string& cameraId,
- const std::string& clientName, int& clientUid, int& clientPid,
- /*out*/int& originalClientPid) const {
+ const std::string& clientName, int& clientUid, int& clientPid) const {
int callingPid = getCallingPid();
int callingUid = getCallingUid();
@@ -1819,12 +1815,10 @@
"is enabled", clientName.c_str(), clientPid, clientUid, cameraId.c_str());
}
+ userid_t clientUserId = multiuser_get_user_id(clientUid);
+
// Only use passed in clientPid to check permission. Use calling PID as the client PID that's
// connected to camera service directly.
- originalClientPid = clientPid;
- clientPid = callingPid;
-
- userid_t clientUserId = multiuser_get_user_id(clientUid);
// For non-system clients : Only allow clients who are being used by the current foreground
// device user, unless calling from our own process.
@@ -1845,11 +1839,11 @@
if (isHeadlessSystemUserMode()
&& (clientUserId == USER_SYSTEM)
&& !hasPermissionsForCameraHeadlessSystemUser(cameraId, callingPid, callingUid)) {
- ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
+ ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", callingPid, clientUid);
return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" as Headless System \
User without camera headless system user permission",
- clientName.c_str(), clientPid, clientUid, cameraId.c_str());
+ clientName.c_str(), callingPid, clientUid, cameraId.c_str());
}
}
@@ -2427,6 +2421,7 @@
std::string clientPackageName;
int packageUid = (clientUid == USE_CALLING_UID) ?
getCallingUid() : clientUid;
+ int callingPid = getCallingPid();
if (clientPackageNameMaybe.size() <= 0) {
// NDK calls don't come with package names, but we need one for various cases.
// Generally, there's a 1:1 mapping between UID and package name, but shared UIDs
@@ -2439,10 +2434,8 @@
clientPackageName = clientPackageNameMaybe;
}
- int originalClientPid = 0;
-
int packagePid = (clientPid == USE_CALLING_PID) ?
- getCallingPid() : clientPid;
+ callingPid : clientPid;
ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) and "
"Camera API version %d", packagePid, clientPackageName.c_str(), cameraId.c_str(),
static_cast<int>(effectiveApiLevel));
@@ -2468,7 +2461,7 @@
// Enforce client permissions and do basic validity checks
if (!(ret = validateConnectLocked(cameraId, clientPackageName,
- /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
+ /*inout*/clientUid, /*inout*/clientPid)).isOk()) {
return ret;
}
@@ -2485,7 +2478,7 @@
sp<BasicClient> clientTmp = nullptr;
std::shared_ptr<resource_policy::ClientDescriptor<std::string, sp<BasicClient>>> partial;
- if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
+ if ((err = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
IInterface::asBinder(cameraCb), clientPackageName, oomScoreOffset,
systemNativeClient, /*out*/&clientTmp, /*out*/&partial)) != NO_ERROR) {
switch (err) {
@@ -2531,9 +2524,11 @@
bool overrideForPerfClass = SessionConfigurationUtils::targetPerfClassPrimaryCamera(
mPerfClassPrimaryCameraIds, cameraId, targetSdkVersion);
+ // Only use passed in clientPid to check permission. Use calling PID as the client PID
+ // that's connected to camera service directly.
if(!(ret = makeClient(this, cameraCb, clientPackageName, systemNativeClient,
clientFeatureId, cameraId, api1CameraId, facing,
- orientation, clientPid, clientUid, getpid(),
+ orientation, callingPid, clientUid, getpid(),
deviceVersionAndTransport, effectiveApiLevel, overrideForPerfClass,
rotationOverride, forceSlowJpegMode, originalCameraId,
/*out*/&tmp)).isOk()) {
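
Net effect of dropping originalClientPid: validateConnectLocked no longer rewrites clientPid to the calling PID, so eviction handling keeps seeing the PID the caller supplied, and the calling PID is now handed to makeClient explicitly. Condensed from the hunks above:

    int callingPid = getCallingPid();
    // clientPid keeps the caller-supplied value (or the calling PID when USE_CALLING_PID)
    validateConnectLocked(cameraId, clientPackageName, /*inout*/ clientUid, /*inout*/ clientPid);
    handleEvictionsLocked(cameraId, clientPid, ...);        // evictions keyed on the supplied PID
    makeClient(..., callingPid, clientUid, getpid(), ...);  // client still bound to the direct caller
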
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 6b21e05..83412ca 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -947,17 +947,12 @@
void removeStates(const std::string& id);
// Check if we can connect, before we acquire the service lock.
- // The returned originalClientPid is the PID of the original process that wants to connect to
- // camera.
- // The returned clientPid is the PID of the client that directly connects to camera.
- // originalClientPid and clientPid are usually the same except when the application uses
- // mediaserver to connect to camera (using MediaRecorder to connect to camera). In that case,
- // clientPid is the PID of mediaserver and originalClientPid is the PID of the application.
+ // If clientPid/clientUid are USE_CALLING_PID/USE_CALLING_UID, they will be overwritten with
+ // the calling pid/uid.
binder::Status validateConnectLocked(const std::string& cameraId, const std::string& clientName,
- /*inout*/int& clientUid, /*inout*/int& clientPid, /*out*/int& originalClientPid) const;
+ /*inout*/int& clientUid, /*inout*/int& clientPid) const;
binder::Status validateClientPermissionsLocked(const std::string& cameraId,
- const std::string& clientName, /*inout*/int& clientUid, /*inout*/int& clientPid,
- /*out*/int& originalClientPid) const;
+ const std::string& clientName, /*inout*/int& clientUid, /*inout*/int& clientPid) const;
bool isCameraPrivacyEnabled(const String16& packageName,const std::string& cameraId,
int clientPid, int ClientUid);
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 1b7fc6e..fa569ce 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -23,7 +23,7 @@
#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
#include <camera/CameraMetadata.h>
#include <camera/camera2/OutputConfiguration.h>
-#include <gui/IProducerListener.h>
+#include <gui/Surface.h>
#include "common/CameraDeviceBase.h"
#include "device3/Camera3StreamInterface.h"
@@ -96,9 +96,12 @@
const CameraMetadata& settings) override;
protected:
- struct ProducerListener : public BnProducerListener {
- // ProducerListener impementation
+ struct StreamSurfaceListener : public SurfaceListener {
+ // StreamSurfaceListener implementation
void onBufferReleased() override { /*No impl. for now*/ };
+ bool needsReleaseNotify() override { return true; };
+ void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& /*buffers*/) override {};
+ void onBufferDetached(int /*slot*/) override {};
};
status_t registerCompositeStreamListener(int32_t streamId);
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 8b41d00..81eb7d1 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -48,7 +48,7 @@
mBlobHeight(0),
mDepthBufferAcquired(false),
mBlobBufferAcquired(false),
- mProducerListener(new ProducerListener()),
+ mStreamSurfaceListener(new StreamSurfaceListener()),
mMaxJpegBufferSize(-1),
mUHRMaxJpegBufferSize(-1),
mIsLogicalCamera(false) {
@@ -690,7 +690,7 @@
return NO_INIT;
}
- auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mStreamSurfaceListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mBlobStreamId);
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index 3f8f6a2..75deef7 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -130,12 +130,12 @@
static const auto kDepthMapDataSpace = HAL_DATASPACE_DEPTH;
static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
- int mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
- size_t mBlobWidth, mBlobHeight;
- sp<CpuConsumer> mBlobConsumer, mDepthConsumer;
- bool mDepthBufferAcquired, mBlobBufferAcquired;
- sp<Surface> mDepthSurface, mBlobSurface, mOutputSurface;
- sp<ProducerListener> mProducerListener;
+ int mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
+ size_t mBlobWidth, mBlobHeight;
+ sp<CpuConsumer> mBlobConsumer, mDepthConsumer;
+ bool mDepthBufferAcquired, mBlobBufferAcquired;
+ sp<Surface> mDepthSurface, mBlobSurface, mOutputSurface;
+ sp<StreamSurfaceListener> mStreamSurfaceListener;
ssize_t mMaxJpegBufferSize;
ssize_t mUHRMaxJpegBufferSize;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index ea4f6fd..2f05d4d 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -68,7 +68,7 @@
mMainImageStreamId(-1),
mMainImageSurfaceId(-1),
mYuvBufferAcquired(false),
- mProducerListener(new ProducerListener()),
+ mStreamSurfaceListener(new StreamSurfaceListener()),
mDequeuedOutputBufferCnt(0),
mCodecOutputCounter(0),
mQuality(-1),
@@ -513,7 +513,7 @@
return NO_INIT;
}
- auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mStreamSurfaceListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mMainImageStreamId);
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 9cfd78b..ba10e05 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -19,7 +19,6 @@
#include <queue>
-#include <gui/IProducerListener.h>
#include <gui/CpuConsumer.h>
#include <media/hardware/VideoAPI.h>
@@ -234,10 +233,10 @@
bool mYuvBufferAcquired; // Only applicable to HEVC codec
std::queue<int64_t> mMainImageFrameNumbers;
- static const int32_t kMaxOutputSurfaceProducerCount = 1;
- sp<Surface> mOutputSurface;
- sp<ProducerListener> mProducerListener;
- int32_t mDequeuedOutputBufferCnt;
+ static const int32_t kMaxOutputSurfaceProducerCount = 1;
+ sp<Surface> mOutputSurface;
+ sp<StreamSurfaceListener> mStreamSurfaceListener;
+ int32_t mDequeuedOutputBufferCnt;
// Map from frame number to JPEG setting of orientation+quality
struct HeicSettings {
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
index dadefcc..1646d5e 100644
--- a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
@@ -54,7 +54,7 @@
mOutputColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
mOutputStreamUseCase(0),
mFirstRequestLatency(-1),
- mProducerListener(new ProducerListener()),
+ mStreamSurfaceListener(new StreamSurfaceListener()),
mMaxJpegBufferSize(-1),
mUHRMaxJpegBufferSize(-1),
mStaticInfo(device->info()) {
@@ -653,7 +653,7 @@
return NO_INIT;
}
- auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mStreamSurfaceListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mP010StreamId);
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.h b/services/camera/libcameraservice/api2/JpegRCompositeStream.h
index 6669739..d3ab19c 100644
--- a/services/camera/libcameraservice/api2/JpegRCompositeStream.h
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.h
@@ -128,7 +128,8 @@
int32_t mOutputColorSpace;
int64_t mOutputStreamUseCase;
nsecs_t mFirstRequestLatency;
- sp<ProducerListener> mProducerListener;
+
+ sp<StreamSurfaceListener> mStreamSurfaceListener;
ssize_t mMaxJpegBufferSize;
ssize_t mUHRMaxJpegBufferSize;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 8a93ed8..f8b78c1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -193,6 +193,7 @@
virtual void onBufferReleased();
virtual bool needsReleaseNotify() { return mNeedsReleaseNotify; }
virtual void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& buffers);
+ virtual void onBufferDetached(int /*slot*/) override {};
private:
wp<Camera3OutputStream> mParent;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 1feb4a0..43f12fb 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -22,7 +22,7 @@
#include <camera/CameraMetadata.h>
#include <gui/IConsumerListener.h>
-#include <gui/IProducerListener.h>
+#include <gui/Surface.h>
#include <gui/BufferItemConsumer.h>
#include <utils/Condition.h>
@@ -159,7 +159,7 @@
// the IProducerListener::onBufferReleased callback is associated with. We
// create one of these per output BufferQueue, and then pass the producer
// into onBufferReleasedByOutput above.
- class OutputListener : public BnProducerListener,
+ class OutputListener : public SurfaceListener,
public IBinder::DeathRecipient {
public:
OutputListener(wp<Camera3StreamSplitter> splitter,
@@ -168,6 +168,9 @@
// From IProducerListener
void onBufferReleased() override;
+ bool needsReleaseNotify() override { return true; };
+ void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& /*buffers*/) override {};
+ void onBufferDetached(int /*slot*/) override {}
// From IBinder::DeathRecipient
void binderDied(const wp<IBinder>& who) override;
diff --git a/services/camera/virtualcamera/util/JpegUtil.h b/services/camera/virtualcamera/util/JpegUtil.h
index 184dd56..0a8df90 100644
--- a/services/camera/virtualcamera/util/JpegUtil.h
+++ b/services/camera/virtualcamera/util/JpegUtil.h
@@ -18,6 +18,7 @@
#define ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
#include <optional>
+#include <vector>
#include "android/hardware_buffer.h"
#include "util/Util.h"