Merge "Camera: remove the camera device version check during statusCallback" into sc-dev
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index d6642f3..2bccd87 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -161,7 +161,7 @@
}
std::vector<int32_t> sensorPixelModesUsed;
- if ((err = parcel->readParcelableVector(&sensorPixelModesUsed)) != OK) {
+ if ((err = parcel->readInt32Vector(&sensorPixelModesUsed)) != OK) {
ALOGE("%s: Failed to read sensor pixel mode(s) from parcel", __FUNCTION__);
return err;
}
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 03a8dc9..803c4a4 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -16,9 +16,9 @@
libstagefright_foundation libjpeg libui libgui libcutils liblog \
libhidlbase libdatasource libaudioclient \
android.hardware.media.omx@1.0 \
- media_permission-aidl-cpp
+ framework-permission-aidl-cpp
-LOCAL_STATIC_LIBRARIES := media_permission-aidl-cpp
+LOCAL_STATIC_LIBRARIES := framework-permission-aidl-cpp
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -52,7 +52,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright libmedia liblog libutils libbinder \
libstagefright_foundation libdatasource libaudioclient \
- media_permission-aidl-cpp
+ framework-permission-aidl-cpp
LOCAL_C_INCLUDES:= \
frameworks/av/camera/include \
@@ -90,7 +90,7 @@
frameworks/av/media/libstagefright \
frameworks/native/include/media/openmax \
frameworks/native/include/media/hardware \
- media_permission-aidl-cpp
+ framework-permission-aidl-cpp
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -119,7 +119,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright libmedia liblog libutils libbinder \
libstagefright_foundation libaudioclient \
- media_permission-aidl-cpp
+ framework-permission-aidl-cpp
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index c86a611..4b41ff8 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -24,7 +24,7 @@
#include <utils/String16.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <binder/ProcessState.h>
#include <media/mediarecorder.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -39,7 +39,7 @@
using namespace android;
-using media::permission::Identity;
+using content::AttributionSourceState;
static void usage(const char* name)
{
@@ -113,10 +113,10 @@
audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
attr.source = AUDIO_SOURCE_MIC;
- // TODO b/182392769: use identity util
+ // TODO b/182392769: use attribution source util
source = new AudioSource(
&attr,
- Identity(),
+ AttributionSourceState(),
sampleRate,
channels);
} else {
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 9d9ed70..7caa457 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -262,6 +262,8 @@
kParamIndexTunneledMode, // struct
kParamIndexTunnelHandle, // int32[]
kParamIndexTunnelSystemTime, // int64
+ kParamIndexTunnelHoldRender, // bool
+ kParamIndexTunnelStartRender, // bool
// dmabuf allocator
kParamIndexStoreDmaBufUsage, // store, struct
@@ -2366,6 +2368,31 @@
C2PortTunnelSystemTime;
constexpr char C2_PARAMKEY_OUTPUT_RENDER_TIME[] = "output.render-time";
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * When a video frame is pushed to the decoder with this parameter set to true,
+ * the decoder must decode the frame, signal partial completion, and hold on to
+ * the frame until C2StreamTunnelStartRender is set to true (which resets this
+ * flag). A flush will also result in the held frames being returned to the
+ * client (but not rendered).
+ */
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelHoldRender>
+ C2StreamTunnelHoldRender;
+constexpr char C2_PARAMKEY_TUNNEL_HOLD_RENDER[] = "output.tunnel-hold-render";
+
+/**
+ * Tunneled mode video peek signaling flag.
+ *
+ * Upon receiving this flag, the decoder shall set C2StreamTunnelHoldRender to
+ * false, which shall cause any frames held for rendering to be immediately
+ * displayed, regardless of their timestamps.
+ */
+typedef C2StreamParam<C2Info, C2EasyBoolValue, kParamIndexTunnelStartRender>
+ C2StreamTunnelStartRender;
+constexpr char C2_PARAMKEY_TUNNEL_START_RENDER[] = "output.tunnel-start-render";
+
C2ENUM(C2PlatformConfig::encoding_quality_level_t, uint32_t,
NONE,
S_HANDHELD,
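A minimal sketch (not part of the patch) of how the two parameters above are intended to interact; the queued work item and the component interface handle comp (a C2ComponentInterface) are assumed:

    // Ask the decoder to decode but hold the first tunneled frame; attached to the queued work.
    C2StreamTunnelHoldRender::input holdRender{0u /* stream */, C2_TRUE /* value */};
    work->input.configUpdate.push_back(C2Param::Copy(holdRender));

    // Later, when the frame should actually be displayed: setting start-render resets the
    // hold-render flag and releases any held frames regardless of their timestamps.
    C2StreamTunnelStartRender::input startRender{0u /* stream */, C2_TRUE /* value */};
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    comp->config_vb({&startRender}, C2_MAY_BLOCK, &failures);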
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index ce15a30..16398a4 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -673,6 +673,10 @@
mCodec->mCallback->onOutputBuffersChanged();
}
+ void onFirstTunnelFrameReady() override {
+ mCodec->mCallback->onFirstTunnelFrameReady();
+ }
+
private:
CCodec *mCodec;
};
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3c3b41d..f88408e 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -209,6 +209,7 @@
int32_t flags = 0;
int32_t tmp = 0;
bool eos = false;
+ bool tunnelFirstFrame = false;
if (buffer->meta()->findInt32("eos", &tmp) && tmp) {
eos = true;
mInputMetEos = true;
@@ -217,6 +218,9 @@
if (buffer->meta()->findInt32("csd", &tmp) && tmp) {
flags |= C2FrameData::FLAG_CODEC_CONFIG;
}
+ if (buffer->meta()->findInt32("tunnel-first-frame", &tmp) && tmp) {
+ tunnelFirstFrame = true;
+ }
ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
std::list<std::unique_ptr<C2Work>> items;
std::unique_ptr<C2Work> work(new C2Work);
@@ -288,6 +292,13 @@
// TODO: fill info's
work->input.configUpdate = std::move(mParamsToBeSet);
+ if (tunnelFirstFrame) {
+ C2StreamTunnelHoldRender::input tunnelHoldRender{
+ 0u /* stream */,
+ C2_TRUE /* value */
+ };
+ work->input.configUpdate.push_back(C2Param::Copy(tunnelHoldRender));
+ }
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
@@ -1724,6 +1735,15 @@
}
break;
}
+ case C2StreamTunnelHoldRender::CORE_INDEX: {
+ C2StreamTunnelHoldRender::output firstTunnelFrameHoldRender;
+ if (!(worklet->output.flags & C2FrameData::FLAG_INCOMPLETE)) break;
+ if (!firstTunnelFrameHoldRender.updateFrom(*param)) break;
+ if (firstTunnelFrameHoldRender.value != C2_TRUE) break;
+ ALOGV("[%s] onWorkDone: first tunnel frame ready", mName);
+ mCCodecCallback->onFirstTunnelFrameReady();
+ break;
+ }
default:
ALOGV("[%s] onWorkDone: unrecognized config update (%08X)",
mName, param->index());
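For reference, the "tunnel-first-frame" entry parsed above is a plain integer flag on the input buffer's metadata; hypothetical caller-side code (not part of this patch), given an input buffer of type sp<MediaCodecBuffer>, would set it before queueing:

    // Mark the first tunneled input so the decoder holds it after decoding.
    buffer->meta()->setInt32("tunnel-first-frame", 1);
    // queueInputBufferInternal() then turns this into the C2StreamTunnelHoldRender
    // config update shown in the hunk above.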
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 45da003..5a2aca2 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -45,6 +45,7 @@
virtual void onError(status_t err, enum ActionCode actionCode) = 0;
virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
virtual void onOutputBuffersChanged() = 0;
+ virtual void onFirstTunnelFrameReady() = 0;
};
/**
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 27e87e6..2df0ba2 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -938,6 +938,14 @@
return value == 0 ? C2_FALSE : C2_TRUE;
}));
+ add(ConfigMapper("android._trigger-tunnel-peek", C2_PARAMKEY_TUNNEL_START_RENDER, "value")
+ .limitTo(D::PARAM & D::VIDEO & D::DECODER)
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value = 0;
+ (void)v.get(&value);
+ return value == 0 ? C2_FALSE : C2_TRUE;
+ }));
+
/* still to do
constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
diff --git a/media/codecs/m4v_h263/dec/test/Android.bp b/media/codecs/m4v_h263/dec/test/Android.bp
index 6eed66f..d8de569 100644
--- a/media/codecs/m4v_h263/dec/test/Android.bp
+++ b/media/codecs/m4v_h263/dec/test/Android.bp
@@ -47,6 +47,10 @@
},
},
+    // This unit test also runs within the mainline tests (MTS),
+    // so it must be compatible back to Android Q/10 (SDK 29).
+ min_sdk_version: "29",
+
srcs: [
"Mpeg4H263DecoderTest.cpp",
],
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4fd3a56..443e26c 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -1700,17 +1700,17 @@
return ERROR_MALFORMED;
}
- size_t header_start = 0;
- size_t header_lenth = 0;
+ long header_start = 0;
+ long header_length = 0;
for (header_start = 0; header_start < frame.len - 4; header_start++) {
if (ntohl(0x000001b3) == *(uint32_t*)((uint8_t*)tmpData.get() + header_start)) {
break;
}
}
bool isComplete_csd = false;
- for (header_lenth = 0; header_lenth < frame.len - 4 - header_start; header_lenth++) {
+ for (header_length = 0; header_length < frame.len - 4 - header_start; header_length++) {
if (ntohl(0x000001b8) == *(uint32_t*)((uint8_t*)tmpData.get()
- + header_start + header_lenth)) {
+ + header_start + header_length)) {
isComplete_csd = true;
break;
}
@@ -1720,7 +1720,7 @@
return ERROR_MALFORMED;
}
addESDSFromCodecPrivate(trackInfo->mMeta, false,
- (uint8_t*)(tmpData.get()) + header_start, header_lenth);
+ (uint8_t*)(tmpData.get()) + header_start, header_length);
return OK;
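The switch from size_t to long matters because frame.len is a signed long (libwebm): with an unsigned loop index, a frame shorter than a start code makes the loop bound wrap around. A standalone sketch of the pitfall (not part of the patch):

    long len = 2;     // frame shorter than the 4-byte start code
    size_t i = 0;
    // In the comparison i < len - 4, the negative right-hand side (-2) is converted
    // to a huge unsigned value, so the comparison is true and the scan reads out of
    // bounds. With both operands signed, 0 < -2 is false and the loop never runs.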
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 5bbabdf..248a39c 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -504,7 +504,14 @@
}
mCurrentTimeUs = seekTimeUs;
- mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
+ int64_t seekTimeUsTimesBitrate;
+ if (__builtin_mul_overflow(seekTimeUs, bitrate, &seekTimeUsTimesBitrate)) {
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+ if (__builtin_add_overflow(
+ mFirstFramePos, seekTimeUsTimesBitrate / 8000000, &mCurrentPos)) {
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
seekCBR = true;
} else {
mCurrentTimeUs = actualSeekTimeUs;
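The overflow checks replace the old expression mFirstFramePos + seekTimeUs * bitrate / 8000000, where seekTimeUs is in microseconds and bitrate is in bits per second. As a rough worked example: with bitrate = 320000, the intermediate product passes INT64_MAX (about 9.22e18) once seekTimeUs exceeds roughly 2.9e13 us, i.e. a claimed position past about 11 months, which a corrupt duration or seek request can produce; the __builtin_*_overflow intrinsics turn that case into AMEDIA_ERROR_UNSUPPORTED instead of signed-overflow undefined behavior.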
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index fe2d98e..33a5c7f 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -52,6 +52,7 @@
"libcutils",
"libutils",
"libbinder",
+ "libpermission",
],
sanitize: {
@@ -86,7 +87,7 @@
export_header_lib_headers: ["libaaudio_headers"],
export_shared_lib_headers: [
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
shared_libs: [
@@ -99,15 +100,11 @@
"libcutils",
"libutils",
"libbinder",
+ "framework-permission-aidl-cpp",
"aaudio-aidl-cpp",
- "media_permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
- static_libs: [
- "media_permission-aidl-cpp",
- ],
-
cflags: [
"-Wno-unused-parameter",
"-Wall",
@@ -177,7 +174,7 @@
imports: [
"audio_common-aidl",
"shared-file-region-aidl",
- "media_permission-aidl",
+ "framework-permission-aidl"
],
backend:
{
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 5e0a4bb..8d90034 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -31,7 +31,7 @@
AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
mConfiguration(std::move(parcelable.params)),
- mIdentity(parcelable.identity),
+ mAttributionSource(parcelable.attributionSource),
mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
mInService(parcelable.inService) {
}
@@ -39,7 +39,7 @@
StreamRequest AAudioStreamRequest::parcelable() const {
StreamRequest result;
result.params = std::move(mConfiguration).parcelable();
- result.identity = mIdentity;
+ result.attributionSource = mAttributionSource;
result.sharingModeMatchRequired = mSharingModeMatchRequired;
result.inService = mInService;
return result;
@@ -50,7 +50,7 @@
}
void AAudioStreamRequest::dump() const {
- ALOGD("mIdentity = %s", mIdentity.toString().c_str());
+ ALOGD("mAttributionSource = %s", mAttributionSource.toString().c_str());
ALOGD("mSharingModeMatchRequired = %d", mSharingModeMatchRequired);
ALOGD("mInService = %d", mInService);
mConfiguration.dump();
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 02341c8..cc43a48 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -23,10 +23,12 @@
#include <aaudio/StreamRequest.h>
#include "binding/AAudioStreamConfiguration.h"
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
namespace aaudio {
+using android::content::AttributionSourceState;
+
class AAudioStreamRequest {
public:
AAudioStreamRequest() = default;
@@ -34,12 +36,12 @@
// Construct based on a parcelable representation.
explicit AAudioStreamRequest(const StreamRequest& parcelable);
- const android::media::permission::Identity &getIdentity() const {
- return mIdentity;
+ const AttributionSourceState &getAttributionSource() const {
+ return mAttributionSource;
}
- void setIdentity(const android::media::permission::Identity &identity) {
- mIdentity = identity;
+ void setAttributionSource(const AttributionSourceState &attributionSource) {
+ mAttributionSource = attributionSource;
}
bool isSharingModeMatchRequired() const {
@@ -75,7 +77,7 @@
private:
AAudioStreamConfiguration mConfiguration;
- android::media::permission::Identity mIdentity;
+ AttributionSourceState mAttributionSource;
bool mSharingModeMatchRequired = false;
bool mInService = false; // Stream opened by AAudioservice
};
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
index 12802e6..53787a0 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
@@ -17,11 +17,11 @@
package aaudio;
import aaudio.StreamParameters;
-import android.media.permission.Identity;
+import android.content.AttributionSourceState;
parcelable StreamRequest {
- StreamParameters params;
- Identity identity;
- boolean sharingModeMatchRequired; // = false;
- boolean inService; // = false; // Stream opened by AAudioservice
+ StreamParameters params;
+ AttributionSourceState attributionSource;
+ boolean sharingModeMatchRequired; // = false;
+ boolean inService; // = false; // Stream opened by AAudioservice
}
\ No newline at end of file
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 0a19d17..ebc9f2b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -166,6 +166,10 @@
? &mDataWriteCounter
: descriptor->writeCounterAddress;
+ // Clear buffer to avoid an initial glitch on some devices.
+ size_t bufferSizeBytes = descriptor->capacityInFrames * descriptor->bytesPerFrame;
+ memset(descriptor->dataAddress, 0, bufferSizeBytes);
+
mDataQueue = std::make_unique<FifoBufferIndirect>(
descriptor->bytesPerFrame,
descriptor->capacityInFrames,
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index d8b27c3..cf2abe8 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -51,7 +51,7 @@
using android::Mutex;
using android::WrappingBuffer;
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
using namespace aaudio;
@@ -108,15 +108,16 @@
// Request FLOAT for the shared mixer or the device.
request.getConfiguration().setFormat(AUDIO_FORMAT_PCM_FLOAT);
- // TODO b/182392769: use identity util
- Identity identity;
- identity.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
- identity.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
- identity.packageName = builder.getOpPackageName();
- identity.attributionTag = builder.getAttributionTag();
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.packageName = builder.getOpPackageName();
+ attributionSource.attributionTag = builder.getAttributionTag();
+ attributionSource.token = sp<android::BBinder>::make();
// Build the request to send to the server.
- request.setIdentity(identity);
+ request.setAttributionSource(attributionSource);
request.setSharingModeMatchRequired(isSharingModeMatchRequired());
request.setInService(isInService());
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 0d60120..acfac24 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -25,8 +25,7 @@
// TODO These defines should be moved to a central place in audio.
#define SAMPLES_PER_FRAME_MIN 1
-// TODO Remove 8 channel limitation.
-#define SAMPLES_PER_FRAME_MAX FCC_8
+#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
#define SAMPLE_RATE_HZ_MIN 8000
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 2135c54..e015592 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -43,8 +43,7 @@
// on the edge of being ridiculous.
// TODO These defines should be moved to a central place in audio.
#define SAMPLES_PER_FRAME_MIN 1
-// TODO Remove 8 channel limitation.
-#define SAMPLES_PER_FRAME_MAX FCC_8
+#define SAMPLES_PER_FRAME_MAX FCC_LIMIT
#define SAMPLE_RATE_HZ_MIN 8000
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index eca5392..dc66742 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -32,7 +32,7 @@
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
using namespace android;
using namespace aaudio;
@@ -157,12 +157,13 @@
.tags = ""
};
- // TODO b/182392769: use identity util
- Identity identity;
- identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
- identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
- identity.packageName = builder.getOpPackageName();
- identity.attributionTag = builder.getAttributionTag();
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.packageName = builder.getOpPackageName();
+ attributionSource.attributionTag = builder.getAttributionTag();
+ attributionSource.token = sp<BBinder>::make();
// ----------- open the AudioRecord ---------------------
// Might retry, but never more than once.
@@ -170,7 +171,7 @@
const audio_format_t requestedInternalFormat = getDeviceFormat();
mAudioRecord = new AudioRecord(
- identity
+ attributionSource
);
mAudioRecord->set(
AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 7d0a197..692651d 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -25,7 +25,7 @@
#include "AAudioLegacy.h"
#include "legacy/AudioStreamLegacy.h"
#include "utility/FixedBlockWriter.h"
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
namespace aaudio {
@@ -87,7 +87,7 @@
FixedBlockWriter mFixedBlockWriter;
// TODO add 64-bit position reporting to AudioRecord and use it.
- android::media::permission::Identity mIdentity;
+ android::content::AttributionSourceState mAttributionSource;
// Only one type of conversion buffer is used.
std::unique_ptr<float[]> mFormatConversionBufferFloat;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 04a9dec..1d412c0 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -33,7 +33,7 @@
using namespace android;
using namespace aaudio;
-using media::permission::Identity;
+using android::content::AttributionSourceState;
// Arbitrary and somewhat generous number of bursts.
#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY 8
@@ -151,7 +151,7 @@
};
mAudioTrack = new AudioTrack();
- // TODO b/182392769: use identity util
+ // TODO b/182392769: use attribution source util
mAudioTrack->set(
AUDIO_STREAM_DEFAULT, // ignored because we pass attributes below
getSampleRate(),
@@ -167,7 +167,7 @@
sessionId,
streamTransferType,
NULL, // DEFAULT audio_offload_info_t
- Identity(), // DEFAULT uid and pid
+ AttributionSourceState(), // DEFAULT uid and pid
&attributes,
// WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
// headphones a few times.
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index c77aeeb..321e7f9 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -1182,7 +1182,7 @@
const media::AudioClient& aidl) {
AudioClient legacy;
legacy.clientTid = VALUE_OR_RETURN(aidl2legacy_int32_t_pid_t(aidl.clientTid));
- legacy.identity = aidl.identity;
+ legacy.attributionSource = aidl.attributionSource;
return legacy;
}
@@ -1190,7 +1190,7 @@
const AudioClient& legacy) {
media::AudioClient aidl;
aidl.clientTid = VALUE_OR_RETURN(legacy2aidl_pid_t_int32_t(legacy.clientTid));
- aidl.identity = legacy.identity;
+ aidl.attributionSource = legacy.attributionSource;
return aidl;
}
@@ -2323,4 +2323,28 @@
return unexpected(BAD_VALUE);
}
+ConversionResult<TrackSecondaryOutputInfoPair>
+aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair(
+ const media::TrackSecondaryOutputInfo& aidl) {
+ TrackSecondaryOutputInfoPair trackSecondaryOutputInfoPair;
+ trackSecondaryOutputInfoPair.first =
+ VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
+ trackSecondaryOutputInfoPair.second =
+ VALUE_OR_RETURN(convertContainer<std::vector<audio_port_handle_t>>(
+ aidl.secondaryOutputIds, aidl2legacy_int32_t_audio_io_handle_t));
+ return trackSecondaryOutputInfoPair;
+}
+
+ConversionResult<media::TrackSecondaryOutputInfo>
+legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo(
+ const TrackSecondaryOutputInfoPair& legacy) {
+ media::TrackSecondaryOutputInfo trackSecondaryOutputInfo;
+ trackSecondaryOutputInfo.portId =
+ VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.first));
+ trackSecondaryOutputInfo.secondaryOutputIds =
+ VALUE_OR_RETURN(convertContainer<std::vector<int32_t>>(
+ legacy.second, legacy2aidl_audio_io_handle_t_int32_t));
+ return trackSecondaryOutputInfo;
+}
+
} // namespace android
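A minimal round-trip sketch for the new pair conversions (illustrative handle values, not part of the patch):

    TrackSecondaryOutputInfoPair legacy{/* portId */ 1, {/* io handles */ 10, 11}};
    media::TrackSecondaryOutputInfo aidl = VALUE_OR_FATAL(
            legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo(legacy));
    TrackSecondaryOutputInfoPair roundTrip = VALUE_OR_FATAL(
            aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair(aidl));
    // roundTrip.first == 1, roundTrip.second == {10, 11}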
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 19d68a0..9c307ff 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -132,12 +132,12 @@
"libshmemcompat",
"libutils",
"libvibrator",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libbinder",
],
@@ -164,7 +164,6 @@
// for memory heap analysis
"libc_malloc_debug_backtrace",
"shared-file-region-aidl-cpp",
- "media_permission-aidl-cpp",
],
cflags: [
"-Wall",
@@ -232,7 +231,7 @@
"libshmemcompat",
"libutils",
"shared-file-region-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
"audioclient-types-aidl-cpp",
@@ -351,10 +350,11 @@
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
+ "aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
"audio_common-aidl",
- "media_permission-aidl",
+ "framework-permission-aidl",
],
backend: {
cpp: {
@@ -436,7 +436,7 @@
"av-types-aidl",
"effect-aidl",
"shared-file-region-aidl",
- "media_permission-aidl",
+ "framework-permission-aidl",
],
double_loadable: true,
backend: {
@@ -461,7 +461,6 @@
"aidl/android/media/GetOutputForAttrResponse.aidl",
"aidl/android/media/Int.aidl",
"aidl/android/media/RecordClientInfo.aidl",
-
"aidl/android/media/IAudioPolicyService.aidl",
"aidl/android/media/IAudioPolicyServiceClient.aidl",
],
@@ -470,8 +469,9 @@
"audioclient-types-aidl",
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
- "media_permission-aidl",
+ "framework-permission-aidl",
],
+
double_loadable: true,
backend: {
cpp: {
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index d5047b1..6ad5483 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -42,7 +42,6 @@
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
using media::IAudioPolicyService;
-using media::permission::Identity;
namespace {
@@ -58,8 +57,8 @@
// ---------------------------------------------------------------------------
-AudioEffect::AudioEffect(const Identity& identity)
- : mClientIdentity(identity)
+AudioEffect::AudioEffect(const android::content::AttributionSourceState& attributionSource)
+ : mClientAttributionSource(attributionSource)
{
}
@@ -108,12 +107,12 @@
mDescriptor.type = *(type != NULL ? type : EFFECT_UUID_NULL);
mDescriptor.uuid = *(uuid != NULL ? uuid : EFFECT_UUID_NULL);
- // TODO b/182392769: use identity util
+ // TODO b/182392769: use attribution source util
mIEffectClient = new EffectClient(this);
pid_t pid = IPCThreadState::self()->getCallingPid();
- mClientIdentity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
+ mClientAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
pid_t uid = IPCThreadState::self()->getCallingUid();
- mClientIdentity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
+ mClientAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(uid));
media::CreateEffectRequest request;
request.desc = VALUE_OR_RETURN_STATUS(
@@ -123,7 +122,7 @@
request.output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(io));
request.sessionId = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(mSessionId));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
- request.identity = mClientIdentity;
+ request.attributionSource = mClientAttributionSource;
request.probe = probe;
media::CreateEffectResponse response;
@@ -178,7 +177,7 @@
IInterface::asBinder(iEffect)->linkToDeath(mIEffectClient);
ALOGV("set() %p OK effect: %s id: %d status %d enabled %d pid %d", this, mDescriptor.name, mId,
- mStatus, mEnabled, mClientIdentity.pid);
+ mStatus, mEnabled, mClientAttributionSource.pid);
if (!audio_is_global_session(mSessionId)) {
AudioSystem::acquireAudioSessionId(mSessionId, pid, uid);
@@ -223,7 +222,7 @@
if (!mProbe && (mStatus == NO_ERROR || mStatus == ALREADY_EXISTS)) {
if (!audio_is_global_session(mSessionId)) {
AudioSystem::releaseAudioSessionId(mSessionId,
- VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientIdentity.pid)));
+ VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid)));
}
if (mIEffect != NULL) {
mIEffect->disconnect();
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 1a4bde9..a1d3bdb 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -39,7 +39,7 @@
namespace android {
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
using aidl_utils::statusTFromBinderStatus;
// ---------------------------------------------------------------------------
@@ -126,11 +126,11 @@
return NO_ERROR;
}
-AudioRecord::AudioRecord(const Identity &client)
- : mActive(false), mStatus(NO_INIT), mClientIdentity(client), mSessionId(AUDIO_SESSION_ALLOCATE),
- mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
+AudioRecord::AudioRecord(const AttributionSourceState &client)
+ : mActive(false), mStatus(NO_INIT), mClientAttributionSource(client),
+ mSessionId(AUDIO_SESSION_ALLOCATE), mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+ mPreviousSchedulingGroup(SP_DEFAULT), mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE), mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
mSelectedMicFieldDimension(MIC_FIELD_DIMENSION_DEFAULT)
{
}
@@ -140,7 +140,7 @@
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- const Identity& client,
+ const AttributionSourceState& client,
size_t frameCount,
callback_t cbf,
void* user,
@@ -154,14 +154,14 @@
float microphoneFieldDimension)
: mActive(false),
mStatus(NO_INIT),
- mClientIdentity(client),
+ mClientAttributionSource(client),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mProxy(NULL)
{
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientIdentity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientIdentity.pid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
(void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
@@ -191,7 +191,7 @@
IPCThreadState::self()->flushCommands();
ALOGV("%s(%d): releasing session id %d",
__func__, mPortId, mSessionId);
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientIdentity.pid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
AudioSystem::releaseAudioSessionId(mSessionId, pid);
}
}
@@ -243,11 +243,11 @@
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
- "notificationFrames %u, sessionId %d, transferType %d, flags %#x, identity %s"
+ "notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
"uid %d, pid %d",
__func__,
inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
- sessionId, transferType, flags, mClientIdentity.toString().c_str(), uid, pid);
+ sessionId, transferType, flags, mClientAttributionSource.toString().c_str(), uid, pid);
// TODO b/182392553: refactor or remove
pid_t callingPid = IPCThreadState::self()->getCallingPid();
@@ -256,13 +256,13 @@
if (pid == -1 || (callingPid != myPid)) {
adjPid = callingPid;
}
- mClientIdentity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(adjPid));
+ mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(adjPid));
uid_t adjUid = uid;
if (uid == -1 || (callingPid != myPid)) {
adjUid = IPCThreadState::self()->getCallingUid();
}
- mClientIdentity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(adjUid));
+ mClientAttributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(adjUid));
mTracker.reset(new RecordingActivityTracker());
@@ -801,7 +801,7 @@
input.config.sample_rate = mSampleRate;
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
- input.clientInfo.identity = mClientIdentity;
+ input.clientInfo.attributionSource = mClientAttributionSource;
input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (mAudioRecordThread != 0) {
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 0bc592d..88e752b 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -52,7 +52,7 @@
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
using media::IAudioPolicyService;
-using media::permission::Identity;
+using android::content::AttributionSourceState;
// client singleton for AudioFlinger binder interface
Mutex AudioSystem::gLock;
@@ -71,6 +71,25 @@
Mutex gSoundTriggerCaptureStateListenerLock;
sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
+// Binder for the AudioFlinger service that's passed to this client process from the system server.
+// This allows specific isolated processes to access the audio system. Currently used only for the
+// HotwordDetectionService.
+sp<IBinder> gAudioFlingerBinder = nullptr;
+
+void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
+ if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
+ ALOGE("setAudioFlingerBinder: received a binder of type %s",
+ String8(audioFlinger->getInterfaceDescriptor()).string());
+ return;
+ }
+ Mutex::Autolock _l(gLock);
+ if (gAudioFlinger != nullptr) {
+ ALOGW("setAudioFlingerBinder: ignoring; AudioFlinger connection already established.");
+ return;
+ }
+ gAudioFlingerBinder = audioFlinger;
+}
+
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger() {
sp<IAudioFlinger> af;
@@ -79,15 +98,19 @@
{
Mutex::Autolock _l(gLock);
if (gAudioFlinger == 0) {
- sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
- do {
- binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
- if (binder != 0)
- break;
- ALOGW("AudioFlinger not published, waiting...");
- usleep(500000); // 0.5 s
- } while (true);
+ if (gAudioFlingerBinder != nullptr) {
+ binder = gAudioFlingerBinder;
+ } else {
+ sp<IServiceManager> sm = defaultServiceManager();
+ do {
+ binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
+ if (binder != 0)
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while (true);
+ }
if (gAudioFlingerClient == NULL) {
gAudioFlingerClient = new AudioFlingerClient();
} else {
@@ -941,7 +964,7 @@
audio_io_handle_t* output,
audio_session_t session,
audio_stream_type_t* stream,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_t* config,
audio_output_flags_t flags,
audio_port_handle_t* selectedDeviceId,
@@ -984,7 +1007,7 @@
media::GetOutputForAttrResponse responseAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
- aps->getOutputForAttr(attrAidl, sessionAidl, identity, configAidl, flagsAidl,
+ aps->getOutputForAttr(attrAidl, sessionAidl, attributionSource, configAidl, flagsAidl,
selectedDeviceIdAidl, &responseAidl)));
*output = VALUE_OR_RETURN_STATUS(
@@ -1038,7 +1061,7 @@
audio_io_handle_t* input,
audio_unique_id_t riid,
audio_session_t session,
- const Identity &identity,
+ const AttributionSourceState &attributionSource,
const audio_config_base_t* config,
audio_input_flags_t flags,
audio_port_handle_t* selectedDeviceId,
@@ -1077,7 +1100,7 @@
media::GetInputForAttrResponse response;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
- aps->getInputForAttr(attrAidl, inputAidl, riidAidl, sessionAidl, identity,
+ aps->getInputForAttr(attrAidl, inputAidl, riidAidl, sessionAidl, attributionSource,
configAidl, flagsAidl, selectedDeviceIdAidl, &response)));
*input = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.input));
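A sketch of the intended call pattern for the new AudioSystem::setAudioFlingerBinder() added earlier in this file (how the binder reaches the isolated process, e.g. the HotwordDetectionService case named in the comment, is assumed and outside this patch):

    // In the isolated client process, before any other AudioSystem call:
    void onAudioFlingerBinderReceived(const sp<IBinder>& audioFlingerBinder) {
        AudioSystem::setAudioFlingerBinder(audioFlingerBinder);
        // Subsequent get_audio_flinger() calls use this binder instead of querying servicemanager.
    }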
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 1bc3baa..6765bdb 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -48,7 +48,7 @@
// ---------------------------------------------------------------------------
using media::VolumeShaper;
-using media::permission::Identity;
+using android::content::AttributionSourceState;
// TODO: Move to a separate .h
@@ -225,11 +225,11 @@
return NO_ERROR;
}
-AudioTrack::AudioTrack() : AudioTrack(Identity())
+AudioTrack::AudioTrack() : AudioTrack(AttributionSourceState())
{
}
-AudioTrack::AudioTrack(const Identity& identity)
+AudioTrack::AudioTrack(const AttributionSourceState& attributionSource)
: mStatus(NO_INIT),
mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -237,7 +237,7 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mClientIdentity(identity),
+ mClientAttributionSource(attributionSource),
mAudioTrackCallback(new AudioTrackCallback())
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
@@ -259,7 +259,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed,
@@ -275,8 +275,8 @@
(void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
- 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
- offloadInfo, identity, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+ 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+ attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}
AudioTrack::AudioTrack(
@@ -292,7 +292,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed)
@@ -309,7 +309,7 @@
(void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
- identity, pAttributes, doNotReconnect, maxRequiredSpeed);
+ attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
}
AudioTrack::~AudioTrack()
@@ -335,7 +335,7 @@
mCblkMemory.clear();
mSharedBuffer.clear();
IPCThreadState::self()->flushCommands();
- pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientIdentity.pid));
+ pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
__func__, mPortId,
mSessionId, IPCThreadState::self()->getCallingPid(), clientPid);
@@ -381,7 +381,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed,
@@ -391,15 +391,15 @@
uint32_t channelCount;
pid_t callingPid;
pid_t myPid;
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
__func__,
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
- sessionId, transferType, identity.uid, identity.pid);
+ sessionId, transferType, attributionSource.uid, attributionSource.pid);
mThreadCanCallJava = threadCanCallJava;
mSelectedDeviceId = selectedDeviceId;
@@ -596,18 +596,15 @@
}
mNotificationFramesAct = 0;
// TODO b/182392553: refactor or remove
+ mClientAttributionSource = AttributionSourceState(attributionSource);
callingPid = IPCThreadState::self()->getCallingPid();
myPid = getpid();
if (uid == -1 || (callingPid != myPid)) {
- mClientIdentity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
+ mClientAttributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
IPCThreadState::self()->getCallingUid()));
- } else {
- mClientIdentity.uid = identity.uid;
}
if (pid == (pid_t)-1 || (callingPid != myPid)) {
- mClientIdentity.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
- } else {
- mClientIdentity.pid = identity.pid;
+ mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
}
mAuxEffectId = 0;
mOrigFlags = mFlags = flags;
@@ -692,13 +689,14 @@
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId)
{
- Identity identity;
- identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
- identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
+ AttributionSourceState attributionSource;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
+ attributionSource.token = sp<BBinder>::make();
return set(streamType, sampleRate, format,
static_cast<audio_channel_mask_t>(channelMask),
frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
- threadCanCallJava, sessionId, transferType, offloadInfo, identity,
+ threadCanCallJava, sessionId, transferType, offloadInfo, attributionSource,
pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}
@@ -1700,7 +1698,7 @@
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.config.offload_info = mOffloadInfoCopy;
- input.clientInfo.identity = mClientIdentity;
+ input.clientInfo.attributionSource = mClientAttributionSource;
input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
// It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 389b73f..0564cdf 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -743,6 +743,16 @@
return statusTFromBinderStatus(mDelegate->setVibratorInfos(vibratorInfos));
}
+status_t AudioFlingerClientAdapter::updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) {
+ std::vector<media::TrackSecondaryOutputInfo> trackSecondaryOutputInfos =
+ VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<media::TrackSecondaryOutputInfo>>(
+ trackSecondaryOutputs,
+ legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo));
+ return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
@@ -1199,4 +1209,13 @@
return Status::fromStatusT(mDelegate->setVibratorInfos(vibratorInfos));
}
+Status AudioFlingerServerAdapter::updateSecondaryOutputs(
+ const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) {
+ TrackSecondaryOutputsMap trackSecondaryOutputs =
+ VALUE_OR_RETURN_BINDER(convertContainer<TrackSecondaryOutputsMap>(
+ trackSecondaryOutputInfos,
+ aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair));
+ return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
+}
+
} // namespace android
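On the caller side (hypothetical audio-policy-side code, not part of the patch; af is an assumed sp<IAudioFlinger>, and TrackSecondaryOutputsMap comes from the AudioCommonTypes.h hunk below), the new entry point would be used roughly like this:

    TrackSecondaryOutputsMap outputs;
    outputs.emplace(/* portId */ 1, std::vector<audio_io_handle_t>{10, 11});
    status_t status = af->updateSecondaryOutputs(outputs);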
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 451c4b1..e5e8496 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -27,7 +27,7 @@
namespace android {
-using media::permission::Identity;
+using android::content::AttributionSourceState;
// Descriptors for all available tones (See ToneGenerator::ToneDescriptor class declaration for details)
const ToneGenerator::ToneDescriptor ToneGenerator::sToneDescriptors[] = {
@@ -1260,10 +1260,11 @@
////////////////////////////////////////////////////////////////////////////////
bool ToneGenerator::initAudioTrack() {
// Open audio track in mono, PCM 16bit, default sampling rate.
- // TODO b/182392769: use identity util
- Identity identity = Identity();
- identity.packageName = mOpPackageName;
- mpAudioTrack = new AudioTrack(identity);
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource = AttributionSourceState();
+ attributionSource.packageName = mOpPackageName;
+ attributionSource.token = sp<BBinder>::make();
+ mpAudioTrack = new AudioTrack(attributionSource);
ALOGV("AudioTrack(%p) created", mpAudioTrack.get());
audio_attributes_t attr;
@@ -1289,7 +1290,7 @@
AUDIO_SESSION_ALLOCATE,
AudioTrack::TRANSFER_CALLBACK,
nullptr,
- identity,
+ attributionSource,
&attr);
// Set caller name so it can be logged in destructor.
// MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_TONEGENERATOR
diff --git a/media/libaudioclient/aidl/android/media/AudioClient.aidl b/media/libaudioclient/aidl/android/media/AudioClient.aidl
index aa4d8f5..e98fed3 100644
--- a/media/libaudioclient/aidl/android/media/AudioClient.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioClient.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.permission.Identity;
+import android.content.AttributionSourceState;
/**
* {@hide}
@@ -24,5 +24,5 @@
parcelable AudioClient {
/** Interpreted as pid_t. */
int clientTid;
- Identity identity;
+ AttributionSourceState attributionSource;
}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 5737fcd..2d274f4 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -19,7 +19,7 @@
import android.media.AudioDevice;
import android.media.EffectDescriptor;
import android.media.IEffectClient;
-import android.media.permission.Identity;
+import android.content.AttributionSourceState;
/**
* Input arguments of the createEffect() method.
@@ -35,6 +35,6 @@
/** Interpreted as audio_session_t. */
int sessionId;
AudioDevice device;
- Identity identity;
+ AttributionSourceState attributionSource;
boolean probe;
}
diff --git a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
index 5b26d22..7e3c240 100644
--- a/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateRecordRequest.aidl
@@ -19,7 +19,6 @@
import android.media.AudioAttributesInternal;
import android.media.AudioClient;
import android.media.AudioConfigBase;
-import android.media.permission.Identity;
/**
* CreateRecordRequest contains all input arguments sent by AudioRecord to AudioFlinger
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index abbced5..d2cae6d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -40,6 +40,7 @@
import android.media.IAudioTrack;
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
+import android.media.TrackSecondaryOutputInfo;
import android.media.audio.common.AudioFormat;
/**
@@ -207,4 +208,9 @@
// Set vibrators' information.
// The value will be used to initialize HapticGenerator.
void setVibratorInfos(in AudioVibratorInfo[] vibratorInfos);
+
+ // Update secondary outputs.
+ // This usually happens when there is a dynamic policy registered.
+ void updateSecondaryOutputs(
+ in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index f8924f3..65bcd82 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.content.AttributionSourceState;
+
import android.media.audio.common.AudioFormat;
import android.media.AudioAttributesEx;
@@ -48,7 +50,6 @@
import android.media.IAudioPolicyServiceClient;
import android.media.ICaptureStateListener;
import android.media.Int;
-import android.media.permission.Identity;
import android.media.SoundTriggerSession;
/**
@@ -81,7 +82,7 @@
GetOutputForAttrResponse getOutputForAttr(in AudioAttributesInternal attr,
int /* audio_session_t */ session,
- in Identity identity,
+ in AttributionSourceState attributionSource,
in AudioConfig config,
int /* Bitmask, indexed by AudioOutputFlags */ flags,
int /* audio_port_handle_t */ selectedDeviceId);
@@ -96,7 +97,7 @@
int /* audio_io_handle_t */ input,
int /* audio_unique_id_t */ riid,
int /* audio_session_t */ session,
- in Identity identity,
+ in AttributionSourceState attributionSource,
in AudioConfigBase config,
int /* Bitmask, indexed by AudioInputFlags */ flags,
int /* audio_port_handle_t */ selectedDeviceId);
diff --git a/media/libaudioclient/aidl/android/media/TrackSecondaryOutputInfo.aidl b/media/libaudioclient/aidl/android/media/TrackSecondaryOutputInfo.aidl
new file mode 100644
index 0000000..113328e
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/TrackSecondaryOutputInfo.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * This parcelable contains the port handle of a track and the handles of all
+ * secondary outputs of that track.
+ * @hide
+ */
+parcelable TrackSecondaryOutputInfo {
+ int portId; // audio_port_handle_t
+ int[] secondaryOutputIds; // audio_io_handle_t[]
+}
\ No newline at end of file
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index 21e25b9..b290aa8 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -65,7 +65,7 @@
"libutils",
"libxml2",
"mediametricsservice-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
header_libs: [
"libaudiofoundation_headers",
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index 1b75917..d03c6fa 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -23,7 +23,7 @@
*/
#include <android_audio_policy_configuration_V7_0-enums.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
#include <media/AidlConversion.h>
@@ -48,7 +48,7 @@
using namespace ::android::audio::policy::configuration::V7_0;
}
-using media::permission::Identity;
+using android::content::AttributionSourceState;
constexpr audio_unique_id_use_t kUniqueIds[] = {
AUDIO_UNIQUE_ID_USE_UNSPECIFIED, AUDIO_UNIQUE_ID_USE_SESSION, AUDIO_UNIQUE_ID_USE_MODULE,
@@ -225,15 +225,16 @@
attributes.usage = usage;
sp<AudioTrack> track = new AudioTrack();
- // TODO b/182392769: use identity util
- Identity i;
- i.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
- i.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
nullptr, notificationFrames, sharedBuffer, false, sessionId,
((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
: AudioTrack::TRANSFER_DEFAULT,
- offload ? &offloadInfo : nullptr, i, &attributes, false, 1.0f,
+ offload ? &offloadInfo : nullptr, attributionSource, &attributes, false, 1.0f,
AUDIO_PORT_HANDLE_NONE);
status_t status = track->initCheck();
@@ -308,10 +309,11 @@
attributes.source = inputSource;
- // TODO b/182392769: use identity util
- Identity i;
- i.packageName = std::string(mFdp.ConsumeRandomLengthString().c_str());
- sp<AudioRecord> record = new AudioRecord(i);
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = std::string(mFdp.ConsumeRandomLengthString().c_str());
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioRecord> record = new AudioRecord(attributionSource);
record->set(AUDIO_SOURCE_DEFAULT, sampleRate, format, channelMask, frameCount, nullptr, nullptr,
notificationFrames, false, sessionId,
fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT, flags,
@@ -418,9 +420,9 @@
request.output = io;
request.sessionId = sessionId;
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
- // TODO b/182392769: use identity util
- request.identity.packageName = opPackageName;
- request.identity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(getpid()));
+ // TODO b/182392769: use attribution source util
+ request.attributionSource.packageName = opPackageName;
+ request.attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(getpid()));
request.probe = false;
media::CreateEffectResponse response{};
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 1dd9d60..4ec69c7 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -50,6 +50,7 @@
#include <android/media/AudioUniqueIdUse.h>
#include <android/media/EffectDescriptor.h>
#include <android/media/ExtraAudioDescriptor.h>
+#include <android/media/TrackSecondaryOutputInfo.h>
#include <android/media/SharedFileRegion.h>
#include <binder/IMemory.h>
@@ -407,6 +408,13 @@
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
const audio_encapsulation_type_t & legacy);
+using TrackSecondaryOutputInfoPair = std::pair<audio_port_handle_t, std::vector<audio_io_handle_t>>;
+ConversionResult<TrackSecondaryOutputInfoPair>
+aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair(
+ const media::TrackSecondaryOutputInfo& aidl);
+ConversionResult<media::TrackSecondaryOutputInfo>
+legacy2aidl_TrackSecondaryOutputInfoPair_TrackSecondaryOutputInfo(
+ const TrackSecondaryOutputInfoPair& legacy);
} // namespace android
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 295fd4f..3be8ce2 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -19,7 +19,7 @@
#define ANDROID_AUDIO_CLIENT_H
#include <sys/types.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
namespace android {
@@ -29,7 +29,7 @@
clientTid(-1) {}
pid_t clientTid;
- android::media::permission::Identity identity;
+ android::content::AttributionSourceState attributionSource;
};
}; // namespace android
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 8e446ea..5dfe5fc 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -29,6 +29,8 @@
using AttributesVector = std::vector<audio_attributes_t>;
using StreamTypeVector = std::vector<audio_stream_type_t>;
+using TrackSecondaryOutputsMap = std::map<audio_port_handle_t, std::vector<audio_io_handle_t>>;
+
constexpr bool operator==(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
{
return lhs.usage == rhs.usage && lhs.content_type == rhs.content_type &&
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 974ce62..3c19ec1 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -23,7 +23,7 @@
#include <media/IAudioFlinger.h>
#include <media/AudioSystem.h>
#include <system/audio_effect.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <utils/RefBase.h>
#include <utils/Errors.h>
@@ -337,9 +337,9 @@
*
* Parameters:
*
- * client: Identity for app-op checks
+ * client: Attribution source for app-op checks
*/
- explicit AudioEffect(const media::permission::Identity& client);
+ explicit AudioEffect(const android::content::AttributionSourceState& client);
/* Terminates the AudioEffect and unregisters it from AudioFlinger.
* The effect engine is also destroyed if this AudioEffect was the last controlling
@@ -531,7 +531,7 @@
static const uint32_t kMaxPreProcessing = 10;
protected:
- media::permission::Identity mClientIdentity; // Identity used for app op checks.
+ android::content::AttributionSourceState mClientAttributionSource; // source for app op checks.
bool mEnabled = false; // enable state
audio_session_t mSessionId = AUDIO_SESSION_OUTPUT_MIX; // audio session ID
int32_t mPriority = 0; // priority for effect control
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 9965e25..326919a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -32,7 +32,7 @@
#include <utils/threads.h>
#include "android/media/IAudioRecord.h"
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
namespace android {
@@ -149,9 +149,9 @@
*
* Parameters:
*
- * clientIdentity: The identity of the owner of the record
+ * client: The attribution source of the owner of the record
*/
- AudioRecord(const media::permission::Identity& clientIdentity);
+ AudioRecord(const android::content::AttributionSourceState& client);
/* Creates an AudioRecord object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
@@ -164,7 +164,7 @@
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
* channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
- * client: The identity of the owner of the record
+ * client: The attribution source of the owner of the record
* frameCount: Minimum size of track PCM buffer in frames. This defines the
* application's contribution to the
* latency of the track. The actual size selected by the AudioRecord could
@@ -187,7 +187,7 @@
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- const media::permission::Identity& clientIdentity,
+ const android::content::AttributionSourceState& client,
size_t frameCount = 0,
callback_t cbf = NULL,
void* user = NULL,
@@ -696,7 +696,7 @@
status_t mStatus;
- media::permission::Identity mClientIdentity; // The identity of the owner of this record
+ android::content::AttributionSourceState mClientAttributionSource; // Owner's attribution source
size_t mFrameCount; // corresponds to current IAudioRecord, value is
// reported back by AudioFlinger to the client
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 4c99dbd..a9109c8 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -22,7 +22,7 @@
#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerClient.h>
#include <android/media/BnAudioPolicyServiceClient.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <media/AidlConversionUtil.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
@@ -38,6 +38,8 @@
#include <utils/Mutex.h>
#include <vector>
+using android::content::AttributionSourceState;
+
namespace android {
struct record_client_info {
@@ -146,6 +148,11 @@
static void setRecordConfigCallback(record_config_callback);
static void setRoutingCallback(routing_callback cb);
+ // Sets the binder to use for accessing the AudioFlinger service. This enables the system server
+ // to grant specific isolated processes access to the audio system. Currently used only for the
+ // HotwordDetectionService.
+ static void setAudioFlingerBinder(const sp<IBinder>& audioFlinger);
+
// helper function to obtain AudioFlinger service handle
static const sp<IAudioFlinger> get_audio_flinger();
@@ -264,7 +271,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -280,7 +287,7 @@
audio_io_handle_t *input,
audio_unique_id_t riid,
audio_session_t session,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
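setAudioFlingerBinder() above is the hook that lets the system server point an isolated process (today only the HotwordDetectionService) at a specific AudioFlinger binder. A minimal sketch of the intended call pattern, assuming the process has already been handed the binder — illustrative only, not taken from this patch:

    // Install the binder before any other AudioSystem/AudioRecord call in this process.
    void installAudioFlinger(const sp<IBinder>& audioFlingerBinder) {
        AudioSystem::setAudioFlingerBinder(audioFlingerBinder);
        // After this, AudioSystem::get_audio_flinger() resolves to the provided binder
        // instead of looking the service up in servicemanager.
    }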
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index c293343..f61eef2 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -26,7 +26,7 @@
#include <media/Modulo.h>
#include <media/VolumeShaper.h>
#include <utils/threads.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <string>
@@ -36,6 +36,8 @@
namespace android {
+using content::AttributionSourceState;
+
// ----------------------------------------------------------------------------
struct audio_track_cblk_t;
@@ -182,7 +184,7 @@
*/
AudioTrack();
- AudioTrack(const media::permission::Identity& identity);
+ AudioTrack(const AttributionSourceState& attributionSourceState);
/* Creates an AudioTrack object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
@@ -230,7 +232,8 @@
* transferType: How data is transferred to AudioTrack.
* offloadInfo: If not NULL, provides offload parameters for
* AudioSystem::getOutputForAttr().
- * identity: The identity of the app which initiallly requested this AudioTrack.
+ * attributionSource: The attribution source of the app which initially requested this
+ * AudioTrack.
* Includes the UID and PID for power management tracking, or -1 for
* current user/process ID, plus the package name.
* pAttributes: If not NULL, supersedes streamType for use case selection.
@@ -259,8 +262,8 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- const media::permission::Identity& identity =
- media::permission::Identity(),
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
@@ -290,8 +293,8 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- const media::permission::Identity& identity =
- media::permission::Identity(),
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f);
@@ -338,8 +341,8 @@
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
- const media::permission::Identity& identity =
- media::permission::Identity(),
+ const AttributionSourceState& attributionSource =
+ AttributionSourceState(),
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
@@ -1347,7 +1350,7 @@
sp<DeathNotifier> mDeathNotifier;
uint32_t mSequence; // incremented for each new IAudioTrack attempt
- media::permission::Identity mClientIdentity;
+ AttributionSourceState mClientAttributionSource;
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
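With the constructors above, callers supply an AttributionSourceState instead of an Identity. A minimal construction sketch, mirroring the pattern used elsewhere in this change (package name plus a token, uid/pid left for the service to fill in); the package name is hypothetical:

    AttributionSourceState attributionSource;
    attributionSource.packageName = std::string("com.example.player");  // hypothetical package
    attributionSource.token = sp<BBinder>::make();
    // uid/pid stay at their defaults (-1) so the current user/process IDs are used.
    sp<AudioTrack> track = new AudioTrack(attributionSource);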
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 3a04569..327b37e 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -26,19 +26,21 @@
#include <binder/IInterface.h>
#include <media/AidlConversion.h>
#include <media/AudioClient.h>
+#include <media/AudioCommonTypes.h>
#include <media/DeviceDescriptorBase.h>
#include <system/audio.h>
#include <system/audio_effect.h>
#include <system/audio_policy.h>
#include <utils/String8.h>
#include <media/MicrophoneInfo.h>
+#include <map>
#include <string>
#include <vector>
#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerService.h>
#include <android/media/BpAudioFlingerService.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include "android/media/CreateEffectRequest.h"
#include "android/media/CreateEffectResponse.h"
#include "android/media/CreateRecordRequest.h"
@@ -55,6 +57,7 @@
#include "android/media/OpenInputResponse.h"
#include "android/media/OpenOutputRequest.h"
#include "android/media/OpenOutputResponse.h"
+#include "android/media/TrackSecondaryOutputInfo.h"
namespace android {
@@ -128,7 +131,6 @@
audio_attributes_t attr;
audio_config_base_t config;
AudioClient clientInfo;
- media::permission::Identity identity;
audio_unique_id_t riid;
int32_t maxSharedAudioHistoryMs;
@@ -338,6 +340,9 @@
// The values will be used to initialize HapticGenerator.
virtual status_t setVibratorInfos(
const std::vector<media::AudioVibratorInfo>& vibratorInfos) = 0;
+
+ virtual status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
};
/**
@@ -430,6 +435,8 @@
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
+ status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -513,6 +520,7 @@
SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
+ UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
};
/**
@@ -619,6 +627,8 @@
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
+ Status updateSecondaryOutputs(
+ const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
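updateSecondaryOutputs() is the new IAudioFlinger entry point that carries the TrackSecondaryOutputsMap to the service (as a vector of TrackSecondaryOutputInfo on the AIDL side). A caller-side sketch with made-up port and output handles — not taken from the patch:

    TrackSecondaryOutputsMap secondaryOutputs;
    // Hypothetical handles: route the track with port handle 42 to outputs 13 and 14.
    secondaryOutputs[42] = {13, 14};
    const sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af != nullptr) {
        const status_t status = af->updateSecondaryOutputs(secondaryOutputs);
        ALOGW_IF(status != OK, "updateSecondaryOutputs failed: %d", status);
    }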
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 3c1da4d..def7ca6 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -37,6 +37,7 @@
"libmediametrics_headers",
],
shared_libs: [
+ "framework-permission-aidl-cpp",
"libaudioclient",
"libbinder",
"libcutils",
@@ -59,6 +60,7 @@
"libbinder",
"libcutils",
"libutils",
+ "framework-permission-aidl-cpp",
],
data: ["record_test_input_*.txt"],
}
diff --git a/media/libaudioclient/tests/test_create_audiorecord.cpp b/media/libaudioclient/tests/test_create_audiorecord.cpp
index 57676c1..1cbcb71 100644
--- a/media/libaudioclient/tests/test_create_audiorecord.cpp
+++ b/media/libaudioclient/tests/test_create_audiorecord.cpp
@@ -19,7 +19,7 @@
#include <string.h>
#include <unistd.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryDealer.h>
#include <binder/MemoryHeapBase.h>
@@ -33,7 +33,7 @@
namespace android {
-using media::permission::Identity;
+using android::content::AttributionSourceState;
int testRecord(FILE *inputFile, int outputFileFd)
{
@@ -41,16 +41,17 @@
uint32_t testCount = 0;
Vector<String16> args;
int ret = 0;
- // TODO b/182392769: use identity util
- Identity identity;
- identity.packageName = PACKAGE_NAME;
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = std::string(PACKAGE_NAME);
+ attributionSource.token = sp<BBinder>::make();
if (inputFile == nullptr) {
sp<AudioRecord> record = new AudioRecord(AUDIO_SOURCE_DEFAULT,
0 /* sampleRate */,
AUDIO_FORMAT_DEFAULT,
AUDIO_CHANNEL_IN_MONO,
- identity);
+ attributionSource);
if (record == 0 || record->initCheck() != NO_ERROR) {
write(outputFileFd, "Error creating AudioRecord\n",
sizeof("Error creating AudioRecord\n"));
@@ -96,7 +97,7 @@
memset(&attributes, 0, sizeof(attributes));
attributes.source = inputSource;
- sp<AudioRecord> record = new AudioRecord(identity);
+ sp<AudioRecord> record = new AudioRecord(attributionSource);
record->set(AUDIO_SOURCE_DEFAULT,
sampleRate,
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
index a54e22f..f30eb54 100644
--- a/media/libaudioprocessing/AudioMixerBase.cpp
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "AudioMixer"
//#define LOG_NDEBUG 0
+#include <array>
#include <sstream>
#include <string.h>
@@ -1295,8 +1296,29 @@
// Needs to derive a compile time constant (constexpr). Could be targeted to go
// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
- (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
+constexpr int MIXTYPE_MONOVOL(int mixtype, int channels) {
+ if (channels <= FCC_2) {
+ return mixtype;
+ } else if (mixtype == MIXTYPE_MULTI) {
+ return MIXTYPE_MULTI_MONOVOL;
+ } else if (mixtype == MIXTYPE_MULTI_SAVEONLY) {
+ return MIXTYPE_MULTI_SAVEONLY_MONOVOL;
+ } else {
+ return mixtype;
+ }
+}
+
+// Helper to build a compile-time array of volumeRampMulti function pointers.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+ std::size_t ... Is>
+static constexpr auto makeVRMArray(std::index_sequence<Is...>)
+{
+ using F = void(*)(TO*, size_t, const TI*, TA*, TV*, const TV*, TAV*, TAV);
+ return std::array<F, sizeof...(Is)>{
+ { &volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ...}
+ };
+}
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
@@ -1308,40 +1330,26 @@
static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
{
- switch (channels) {
- case 1:
- volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 2:
- volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 3:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 4:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 5:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 6:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 7:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 8:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
+ static constexpr auto volumeRampMultiArray =
+ makeVRMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+ if (channels > 0 && channels <= volumeRampMultiArray.size()) {
+ volumeRampMultiArray[channels - 1](out, frameCount, in, aux, vol, volinc, vola, volainc);
+ } else {
+ ALOGE("%s: invalid channel count:%d", __func__, channels);
}
}
+// Helper to build a compile-time array of volumeMulti function pointers.
+template <int MIXTYPE, typename TO, typename TI, typename TV, typename TA, typename TAV,
+ std::size_t ... Is>
+static constexpr auto makeVMArray(std::index_sequence<Is...>)
+{
+ using F = void(*)(TO*, size_t, const TI*, TA*, const TV*, TAV);
+ return std::array<F, sizeof...(Is)>{
+ { &volumeMulti<MIXTYPE_MONOVOL(MIXTYPE, Is + 1), Is + 1, TO, TI, TV, TA, TAV> ... }
+ };
+}
+
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
@@ -1352,31 +1360,12 @@
static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
const TI* in, TA* aux, const TV *vol, TAV vola)
{
- switch (channels) {
- case 1:
- volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
- break;
- case 2:
- volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
- break;
- case 3:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
- break;
- case 4:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
- break;
- case 5:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
- break;
- case 6:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
- break;
- case 7:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
- break;
- case 8:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
- break;
+ static constexpr auto volumeMultiArray =
+ makeVMArray<MIXTYPE, TO, TI, TV, TA, TAV>(std::make_index_sequence<FCC_LIMIT>());
+ if (channels > 0 && channels <= volumeMultiArray.size()) {
+ volumeMultiArray[channels - 1](out, frameCount, in, aux, vol, vola);
+ } else {
+ ALOGE("%s: invalid channel count:%d", __func__, channels);
}
}
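The switch-to-array rewrite above relies on std::index_sequence to stamp out one template instantiation per channel count at compile time, then indexes the resulting table at run time. A self-contained sketch of the same technique, independent of the mixer types (names are illustrative):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    template <std::size_t CHANNELS>
    void process(float* out, std::size_t frames) {
        // Real code would be specialized per channel count; here we just report it.
        std::printf("processing %zu frames with %zu channels\n", frames, CHANNELS);
        (void)out;
    }

    template <std::size_t... Is>
    constexpr auto makeTable(std::index_sequence<Is...>) {
        using F = void (*)(float*, std::size_t);
        return std::array<F, sizeof...(Is)>{{ &process<Is + 1>... }};
    }

    void dispatch(uint32_t channels, float* out, std::size_t frames) {
        static constexpr auto kTable = makeTable(std::make_index_sequence<24>());
        if (channels > 0 && channels <= kTable.size()) {
            kTable[channels - 1](out, frames);  // channel count resolved at run time
        }
    }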
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 8d374c9..cd47dc6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_MIXER_OPS_H
#define ANDROID_AUDIO_MIXER_OPS_H
+#include <system/audio.h>
+
namespace android {
// Hack to make static_assert work in a constexpr
@@ -231,7 +233,7 @@
typename TO, typename TI, typename TV,
typename F>
void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
- static_assert(NCHAN > 0 && NCHAN <= 8);
+ static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
|| MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
|| MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -291,6 +293,16 @@
// NCHAN == 8
proc(*out++, f(inp(), vol[0])); // side left
proc(*out++, f(inp(), vol[1])); // side right
+ if constexpr (NCHAN > FCC_8) {
+ // Mutes to zero extended surround channels.
+ // 7.1.4 has the correct behavior.
+ // 22.2 has the behavior that FLC and FRC will be mixed instead
+ // of SL and SR and LFE will be center, not left.
+ for (int i = 8; i < NCHAN; ++i) {
+ // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
+ proc(*out++, f(inp(), 0.f));
+ }
+ }
}
/*
diff --git a/media/libaudioprocessing/AudioResampler.cpp b/media/libaudioprocessing/AudioResampler.cpp
index c761b38..51673d7 100644
--- a/media/libaudioprocessing/AudioResampler.cpp
+++ b/media/libaudioprocessing/AudioResampler.cpp
@@ -268,7 +268,7 @@
mPhaseFraction(0),
mQuality(quality) {
- const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
+ const int maxChannels = quality < DYN_LOW_QUALITY ? FCC_2 : FCC_LIMIT;
if (inChannelCount < 1
|| inChannelCount > maxChannels) {
LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels",
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
index 1aacfd1..2292b19 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -545,64 +545,76 @@
// Note: A stride of 2 is achieved with non-SIMD processing.
int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2;
LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more");
- LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > 8,
- "Resampler channels(%d) must be between 1 to 8", mChannelCount);
+ LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > FCC_LIMIT,
+ "Resampler channels(%d) must be between 1 to %d", mChannelCount, FCC_LIMIT);
// stride 16 (falls back to stride 2 for machines that do not support NEON)
+
+
+// For now use a #define, since a compiler-generated function table would require renaming.
+#pragma push_macro("AUDIORESAMPLERDYN_CASE")
+#undef AUDIORESAMPLERDYN_CASE
+#define AUDIORESAMPLERDYN_CASE(CHANNEL, LOCKED) \
+ case CHANNEL: if constexpr (CHANNEL <= FCC_LIMIT) {\
+ mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<CHANNEL, LOCKED, 16>; \
+ } break
+
if (locked) {
switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>;
- break;
+ AUDIORESAMPLERDYN_CASE(1, true);
+ AUDIORESAMPLERDYN_CASE(2, true);
+ AUDIORESAMPLERDYN_CASE(3, true);
+ AUDIORESAMPLERDYN_CASE(4, true);
+ AUDIORESAMPLERDYN_CASE(5, true);
+ AUDIORESAMPLERDYN_CASE(6, true);
+ AUDIORESAMPLERDYN_CASE(7, true);
+ AUDIORESAMPLERDYN_CASE(8, true);
+ AUDIORESAMPLERDYN_CASE(9, true);
+ AUDIORESAMPLERDYN_CASE(10, true);
+ AUDIORESAMPLERDYN_CASE(11, true);
+ AUDIORESAMPLERDYN_CASE(12, true);
+ AUDIORESAMPLERDYN_CASE(13, true);
+ AUDIORESAMPLERDYN_CASE(14, true);
+ AUDIORESAMPLERDYN_CASE(15, true);
+ AUDIORESAMPLERDYN_CASE(16, true);
+ AUDIORESAMPLERDYN_CASE(17, true);
+ AUDIORESAMPLERDYN_CASE(18, true);
+ AUDIORESAMPLERDYN_CASE(19, true);
+ AUDIORESAMPLERDYN_CASE(20, true);
+ AUDIORESAMPLERDYN_CASE(21, true);
+ AUDIORESAMPLERDYN_CASE(22, true);
+ AUDIORESAMPLERDYN_CASE(23, true);
+ AUDIORESAMPLERDYN_CASE(24, true);
}
} else {
switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>;
- break;
+ AUDIORESAMPLERDYN_CASE(1, false);
+ AUDIORESAMPLERDYN_CASE(2, false);
+ AUDIORESAMPLERDYN_CASE(3, false);
+ AUDIORESAMPLERDYN_CASE(4, false);
+ AUDIORESAMPLERDYN_CASE(5, false);
+ AUDIORESAMPLERDYN_CASE(6, false);
+ AUDIORESAMPLERDYN_CASE(7, false);
+ AUDIORESAMPLERDYN_CASE(8, false);
+ AUDIORESAMPLERDYN_CASE(9, false);
+ AUDIORESAMPLERDYN_CASE(10, false);
+ AUDIORESAMPLERDYN_CASE(11, false);
+ AUDIORESAMPLERDYN_CASE(12, false);
+ AUDIORESAMPLERDYN_CASE(13, false);
+ AUDIORESAMPLERDYN_CASE(14, false);
+ AUDIORESAMPLERDYN_CASE(15, false);
+ AUDIORESAMPLERDYN_CASE(16, false);
+ AUDIORESAMPLERDYN_CASE(17, false);
+ AUDIORESAMPLERDYN_CASE(18, false);
+ AUDIORESAMPLERDYN_CASE(19, false);
+ AUDIORESAMPLERDYN_CASE(20, false);
+ AUDIORESAMPLERDYN_CASE(21, false);
+ AUDIORESAMPLERDYN_CASE(22, false);
+ AUDIORESAMPLERDYN_CASE(23, false);
+ AUDIORESAMPLERDYN_CASE(24, false);
}
}
+#pragma pop_macro("AUDIORESAMPLERDYN_CASE")
+
#ifdef DEBUG_RESAMPLER
printf("channels:%d %s stride:%d %s coef:%d shift:%d\n",
mChannelCount, locked ? "locked" : "interpolated",
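The AUDIORESAMPLERDYN_CASE macro above generates one switch case per channel count and guards each template instantiation with if constexpr, so counts beyond FCC_LIMIT compile away. A stripped-down sketch of the same pattern (all names here are illustrative, not from the patch):

    #include <cstddef>
    #include <cstdint>

    constexpr int kMaxChannels = 12;  // stand-in for FCC_LIMIT

    template <int CHANNELS>
    void resampleImpl(float* out, std::size_t frames) {
        (void)out; (void)frames;  // per-channel-count resampler body elided
    }

    using ResampleFn = void (*)(float*, std::size_t);

    ResampleFn pickResampler(uint32_t channelCount) {
        ResampleFn fn = nullptr;
    #pragma push_macro("RESAMPLE_CASE")
    #undef RESAMPLE_CASE
    #define RESAMPLE_CASE(CHANNEL) \
        case CHANNEL: if constexpr (CHANNEL <= kMaxChannels) { \
            fn = &resampleImpl<CHANNEL>; } break
        switch (channelCount) {
            RESAMPLE_CASE(1);
            RESAMPLE_CASE(2);
            RESAMPLE_CASE(3);
            // ... one case per supported channel count, up to the compile-time limit
        }
    #pragma pop_macro("RESAMPLE_CASE")
        return fn;
    }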
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
index cf84b83..3419816 100644
--- a/media/libaudioprocessing/include/media/AudioMixerBase.h
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -45,8 +45,7 @@
{
public:
// Do not change these unless underlying code changes.
- // This mixer has a hard-coded upper limit of 8 channels for output.
- static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+ static constexpr uint32_t MAX_NUM_CHANNELS = FCC_LIMIT;
static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
static const uint16_t UNITY_GAIN_INT = 0x1000;
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index f838892..1551e33 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -157,7 +157,7 @@
if (pConfig->inputCfg.format != pConfig->outputCfg.format) return -EINVAL;
const uint32_t channelCount = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
#ifdef SUPPORT_MC
- if (channelCount < 1 || channelCount > FCC_8) return -EINVAL;
+ if (channelCount < 1 || channelCount > FCC_LIMIT) return -EINVAL;
#else
if (channelCount != FCC_2) return -EINVAL;
#endif
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index e471c7b..e98d7d8 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -365,7 +365,7 @@
"libaudioclient",
"libmedia_codeclist",
"libmedia_omx",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
@@ -374,17 +374,17 @@
"libandroidicu",
//"libsonivox",
"libmedia_omx",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
static_libs: [
"resourcemanager_aidl_interface-ndk_platform",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_static_lib_headers: [
"resourcemanager_aidl_interface-ndk_platform",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_include_dirs: [
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index 0f189ee..07c0ac5 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -35,7 +35,7 @@
namespace android {
-using media::permission::Identity;
+using android::content::AttributionSourceState;
enum {
CREATE = IBinder::FIRST_CALL_TRANSACTION,
@@ -65,22 +65,22 @@
virtual sp<IMediaPlayer> create(
const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId,
- const Identity& identity) {
+ const AttributionSourceState& attributionSource) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(client));
data.writeInt32(audioSessionId);
- data.writeParcelable(identity);
+ data.writeParcelable(attributionSource);
remote()->transact(CREATE, data, &reply);
return interface_cast<IMediaPlayer>(reply.readStrongBinder());
}
- virtual sp<IMediaRecorder> createMediaRecorder(const Identity& identity)
+ virtual sp<IMediaRecorder> createMediaRecorder(const AttributionSourceState& attributionSource)
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
- data.writeParcelable(identity);
+ data.writeParcelable(attributionSource);
remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}
@@ -131,23 +131,23 @@
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
audio_session_t audioSessionId = (audio_session_t) data.readInt32();
- Identity identity;
- status_t status = data.readParcelable(&identity);
+ AttributionSourceState attributionSource;
+ status_t status = data.readParcelable(&attributionSource);
if (status != NO_ERROR) {
return status;
}
- sp<IMediaPlayer> player = create(client, audioSessionId, identity);
+ sp<IMediaPlayer> player = create(client, audioSessionId, attributionSource);
reply->writeStrongBinder(IInterface::asBinder(player));
return NO_ERROR;
} break;
case CREATE_MEDIA_RECORDER: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
- Identity identity;
- status_t status = data.readParcelable(&identity);
+ AttributionSourceState attributionSource;
+ status_t status = data.readParcelable(&attributionSource);
if (status != NO_ERROR) {
return status;
}
- sp<IMediaRecorder> recorder = createMediaRecorder(identity);
+ sp<IMediaRecorder> recorder = createMediaRecorder(attributionSource);
reply->writeStrongBinder(IInterface::asBinder(recorder));
return NO_ERROR;
} break;
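Both transactions above move the AttributionSourceState across Binder as a parcelable using writeParcelable()/readParcelable(). A minimal local round-trip sketch (the package name is hypothetical, and the real transaction also writes the interface token and other arguments first):

    AttributionSourceState source;
    source.packageName = std::string("com.example.app");  // hypothetical package
    Parcel data;
    data.writeParcelable(source);
    data.setDataPosition(0);
    AttributionSourceState restored;
    const status_t err = data.readParcelable(&restored);
    // err == NO_ERROR on success; restored now mirrors source.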
diff --git a/media/libmedia/include/media/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
index 243e9c7..6070673 100644
--- a/media/libmedia/include/media/IMediaPlayerService.h
+++ b/media/libmedia/include/media/IMediaPlayerService.h
@@ -27,7 +27,7 @@
#include <media/IMediaPlayerClient.h>
#include <media/IMediaMetadataRetriever.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <string>
@@ -48,12 +48,12 @@
DECLARE_META_INTERFACE(MediaPlayerService);
virtual sp<IMediaRecorder> createMediaRecorder(
- const android::media::permission::Identity &identity) = 0;
+ const android::content::AttributionSourceState &attributionSource) = 0;
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE,
- const android::media::permission::Identity& identity =
- android::media::permission::Identity()) = 0;
+ const android::content::AttributionSourceState &attributionSource =
+ android::content::AttributionSourceState()) = 0;
virtual sp<IMediaCodecList> getCodecList() const = 0;
// Connects to a remote display.
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index b5325ce..2b7818d 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -21,7 +21,7 @@
#include <media/AudioSystem.h>
#include <media/MicrophoneInfo.h>
#include <media/mediarecorder.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <system/audio.h>
@@ -34,8 +34,8 @@
struct PersistentSurface;
struct MediaRecorderBase {
- explicit MediaRecorderBase(const media::permission::Identity &client)
- : mClient(client) {}
+ explicit MediaRecorderBase(const android::content::AttributionSourceState &attributionSource)
+ : mAttributionSource(attributionSource) {}
virtual ~MediaRecorderBase() {}
virtual status_t init() = 0;
@@ -84,7 +84,7 @@
protected:
- media::permission::Identity mClient;
+ android::content::AttributionSourceState mAttributionSource;
private:
MediaRecorderBase(const MediaRecorderBase &);
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index fbba398..de4c7db 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -29,7 +29,7 @@
#include <media/IMediaPlayer.h>
#include <media/IMediaDeathNotifier.h>
#include <media/IStreamSource.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <utils/KeyedVector.h>
#include <utils/String8.h>
@@ -212,8 +212,8 @@
public virtual IMediaDeathNotifier
{
public:
- explicit MediaPlayer(const android::media::permission::Identity& mIdentity =
- android::media::permission::Identity());
+ explicit MediaPlayer(const android::content::AttributionSourceState& mAttributionSource =
+ android::content::AttributionSourceState());
~MediaPlayer();
void died();
void disconnect();
@@ -317,7 +317,7 @@
float mSendLevel;
struct sockaddr_in mRetransmitEndpoint;
bool mRetransmitEndpointValid;
- const android::media::permission::Identity mIdentity;
+ const android::content::AttributionSourceState mAttributionSource;
};
}; // namespace android
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 96a3293..d54ff32 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -25,7 +25,7 @@
#include <media/IMediaRecorderClient.h>
#include <media/IMediaDeathNotifier.h>
#include <media/MicrophoneInfo.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
namespace android {
@@ -227,7 +227,7 @@
public virtual IMediaDeathNotifier
{
public:
- explicit MediaRecorder(const media::permission::Identity& identity);
+ explicit MediaRecorder(const android::content::AttributionSourceState& attributionSource);
~MediaRecorder();
void died();
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 7504787..1c9b9e4 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -40,9 +40,10 @@
namespace android {
using media::VolumeShaper;
-using media::permission::Identity;
+using content::AttributionSourceState;
-MediaPlayer::MediaPlayer(const Identity& identity) : mIdentity(identity)
+MediaPlayer::MediaPlayer(const AttributionSourceState& attributionSource)
+ : mAttributionSource(attributionSource)
{
ALOGV("constructor");
mListener = NULL;
@@ -153,7 +154,7 @@
if (url != NULL) {
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mIdentity));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mAttributionSource));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(httpService, url, headers))) {
player.clear();
@@ -170,7 +171,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mIdentity));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mAttributionSource));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(fd, offset, length))) {
player.clear();
@@ -186,7 +187,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mIdentity));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mAttributionSource));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(source))) {
player.clear();
@@ -202,7 +203,7 @@
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != 0) {
- sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mIdentity));
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mAttributionSource));
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(rtpParams))) {
player.clear();
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index da2b190..cf12c36 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -33,7 +33,7 @@
namespace android {
-using media::permission::Identity;
+using content::AttributionSourceState;
status_t MediaRecorder::setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy)
@@ -760,13 +760,14 @@
return INVALID_OPERATION;
}
-MediaRecorder::MediaRecorder(const Identity &identity) : mSurfaceMediaSource(NULL)
+MediaRecorder::MediaRecorder(const AttributionSourceState &attributionSource)
+ : mSurfaceMediaSource(NULL)
{
ALOGV("constructor");
const sp<IMediaPlayerService> service(getMediaPlayerService());
if (service != NULL) {
- mMediaRecorder = service->createMediaRecorder(identity);
+ mMediaRecorder = service->createMediaRecorder(attributionSource);
}
if (mMediaRecorder != NULL) {
mCurrentState = MEDIA_RECORDER_IDLE;
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index a8350ea..d597a4d 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -308,6 +308,17 @@
switch (uid) {
case AID_RADIO: // telephony subsystem, RIL
return false;
+ default:
+ // Some isolated processes can access the audio system; see
+ // AudioSystem::setAudioFlingerBinder (currently only the HotwordDetectionService). Instead
+ // of also allowing access to the MediaMetrics service, it's simpler to just disable it for
+ // now.
+ // TODO(b/190151205): Either allow the HotwordDetectionService to access MediaMetrics or
+ // make this disabling specific to that process.
+ if (uid >= AID_ISOLATED_START && uid <= AID_ISOLATED_END) {
+ return false;
+ }
+ break;
}
int enabled = property_get_int32(Item::EnabledProperty, -1);
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 287317d..f55678d 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -35,7 +35,7 @@
"android.hardware.media.c2@1.0",
"android.hardware.media.omx@1.0",
"av-types-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
"libbase",
"libactivitymanager_aidl",
@@ -76,12 +76,12 @@
"libstagefright_nuplayer",
"libstagefright_rtsp",
"libstagefright_timedtext",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
"libmedia",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
include_dirs: [
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index dc4aea5..d278a01 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -95,7 +95,7 @@
using android::NOT_ENOUGH_DATA;
using android::Parcel;
using android::media::VolumeShaper;
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
// Max number of entries in the filter.
const int kMaxFilterSize = 64; // I pulled that out of thin air.
@@ -455,21 +455,22 @@
ALOGV("MediaPlayerService destroyed");
}
-sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(const Identity& identity)
+sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(
+ const AttributionSourceState& attributionSource)
{
- // TODO b/182392769: use identity util
- Identity verifiedIdentity = identity;
- verifiedIdentity.uid = VALUE_OR_FATAL(
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState verifiedAttributionSource = attributionSource;
+ verifiedAttributionSource.uid = VALUE_OR_FATAL(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
- verifiedIdentity.pid = VALUE_OR_FATAL(
+ verifiedAttributionSource.pid = VALUE_OR_FATAL(
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
sp<MediaRecorderClient> recorder =
- new MediaRecorderClient(this, verifiedIdentity);
+ new MediaRecorderClient(this, verifiedAttributionSource);
wp<MediaRecorderClient> w = recorder;
Mutex::Autolock lock(mLock);
mMediaRecorderClients.add(w);
ALOGV("Create new media recorder client from pid %s",
- verifiedIdentity.toString().c_str());
+ verifiedAttributionSource.toString().c_str());
return recorder;
}
@@ -489,21 +490,21 @@
}
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
- audio_session_t audioSessionId, const Identity& identity)
+ audio_session_t audioSessionId, const AttributionSourceState& attributionSource)
{
int32_t connId = android_atomic_inc(&mNextConnId);
- // TODO b/182392769: use identity util
- Identity verifiedIdentity = identity;
- verifiedIdentity.pid = VALUE_OR_FATAL(
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState verifiedAttributionSource = attributionSource;
+ verifiedAttributionSource.pid = VALUE_OR_FATAL(
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
- verifiedIdentity.uid = VALUE_OR_FATAL(
+ verifiedAttributionSource.uid = VALUE_OR_FATAL(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
sp<Client> c = new Client(
- this, verifiedIdentity, connId, client, audioSessionId);
+ this, verifiedAttributionSource, connId, client, audioSessionId);
ALOGV("Create new client(%d) from %s, ", connId,
- verifiedIdentity.toString().c_str());
+ verifiedAttributionSource.toString().c_str());
wp<Client> w = c;
{
@@ -556,8 +557,8 @@
char buffer[SIZE];
String8 result;
result.append(" Client\n");
- snprintf(buffer, 255, " Identity(%s), connId(%d), status(%d), looping(%s)\n",
- mIdentity.toString().c_str(), mConnId, mStatus, mLoop?"true": "false");
+ snprintf(buffer, 255, " AttributionSource(%s), connId(%d), status(%d), looping(%s)\n",
+ mAttributionSource.toString().c_str(), mConnId, mStatus, mLoop?"true": "false");
result.append(buffer);
sp<MediaPlayerBase> p;
@@ -621,7 +622,8 @@
for (int i = 0, n = mMediaRecorderClients.size(); i < n; ++i) {
sp<MediaRecorderClient> c = mMediaRecorderClients[i].promote();
if (c != 0) {
- snprintf(buffer, 255, " MediaRecorderClient pid(%d)\n", c->mIdentity.pid);
+ snprintf(buffer, 255, " MediaRecorderClient pid(%d)\n",
+ c->mAttributionSource.pid);
result.append(buffer);
write(fd, result.string(), result.size());
result = "\n";
@@ -744,10 +746,10 @@
}
MediaPlayerService::Client::Client(
- const sp<MediaPlayerService>& service, const Identity& identity,
+ const sp<MediaPlayerService>& service, const AttributionSourceState& attributionSource,
int32_t connId, const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId)
- : mIdentity(identity)
+ : mAttributionSource(attributionSource)
{
ALOGV("Client(%d) constructor", connId);
mConnId = connId;
@@ -768,7 +770,8 @@
MediaPlayerService::Client::~Client()
{
- ALOGV("Client(%d) destructor identity = %s", mConnId, mIdentity.toString().c_str());
+ ALOGV("Client(%d) destructor AttributionSource = %s", mConnId,
+ mAttributionSource.toString().c_str());
mAudioOutput.clear();
wp<Client> client(this);
disconnect();
@@ -781,7 +784,8 @@
void MediaPlayerService::Client::disconnect()
{
- ALOGV("disconnect(%d) from identity %s", mConnId, mIdentity.toString().c_str());
+ ALOGV("disconnect(%d) from AttributionSource %s", mConnId,
+ mAttributionSource.toString().c_str());
// grab local reference and clear main reference to prevent future
// access to object
sp<MediaPlayerBase> p;
@@ -822,11 +826,11 @@
}
if (p == NULL) {
p = MediaPlayerFactory::createPlayer(playerType, mListener,
- VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mIdentity.pid)));
+ VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAttributionSource.pid)));
}
if (p != NULL) {
- p->setUID(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mIdentity.uid)));
+ p->setUID(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid)));
}
return p;
@@ -934,7 +938,7 @@
mAudioDeviceUpdatedListener = new AudioDeviceUpdatedNotifier(p);
if (!p->hardwareOutput()) {
- mAudioOutput = new AudioOutput(mAudioSessionId, mIdentity,
+ mAudioOutput = new AudioOutput(mAudioSessionId, mAttributionSource,
mAudioAttributes, mAudioDeviceUpdatedListener);
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
@@ -1784,8 +1788,9 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, const Identity& identity,
- const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
+MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId,
+ const AttributionSourceState& attributionSource, const audio_attributes_t* attr,
+ const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
: mCallback(NULL),
mCallbackCookie(NULL),
mCallbackData(NULL),
@@ -1797,7 +1802,7 @@
mMsecsPerFrame(0),
mFrameSize(0),
mSessionId(sessionId),
- mIdentity(identity),
+ mAttributionSource(attributionSource),
mSendLevel(0.0),
mAuxEffectId(0),
mFlags(AUDIO_OUTPUT_FLAG_NONE),
@@ -2193,7 +2198,7 @@
mSessionId,
AudioTrack::TRANSFER_CALLBACK,
offloadInfo,
- mIdentity,
+ mAttributionSource,
mAttributes,
doNotReconnect,
1.0f, // default value for maxRequiredSpeed
@@ -2220,7 +2225,7 @@
mSessionId,
AudioTrack::TRANSFER_DEFAULT,
NULL, // offload info
- mIdentity,
+ mAttributionSource,
mAttributes,
doNotReconnect,
targetSpeed,
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 35a65d3..98091be 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -33,12 +33,14 @@
#include <media/MediaPlayerInterface.h>
#include <media/Metadata.h>
#include <media/stagefright/foundation/ABase.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <system/audio.h>
namespace android {
+using content::AttributionSourceState;
+
class AudioTrack;
struct AVSyncSettings;
class DeathNotifier;
@@ -80,7 +82,7 @@
public:
AudioOutput(
audio_session_t sessionId,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_attributes_t * attr,
const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
virtual ~AudioOutput();
@@ -169,7 +171,7 @@
float mMsecsPerFrame;
size_t mFrameSize;
audio_session_t mSessionId;
- media::permission::Identity mIdentity;
+ AttributionSourceState mAttributionSource;
float mSendLevel;
int mAuxEffectId;
audio_output_flags_t mFlags;
@@ -231,13 +233,13 @@
static void instantiate();
// IMediaPlayerService interface
- virtual sp<IMediaRecorder> createMediaRecorder(const media::permission::Identity &identity);
+ virtual sp<IMediaRecorder> createMediaRecorder(const AttributionSourceState &attributionSource);
void removeMediaRecorderClient(const wp<MediaRecorderClient>& client);
virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId,
- const media::permission::Identity& identity);
+ const AttributionSourceState& attributionSource);
virtual sp<IMediaCodecList> getCodecList() const;
@@ -380,7 +382,7 @@
void notify(int msg, int ext1, int ext2, const Parcel *obj);
pid_t pid() const {
- return VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mIdentity.pid));
+ return VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAttributionSource.pid));
}
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -411,7 +413,7 @@
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
int32_t connId,
const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId);
@@ -458,7 +460,7 @@
sp<MediaPlayerService> mService;
sp<IMediaPlayerClient> mClient;
sp<AudioOutput> mAudioOutput;
- const media::permission::Identity mIdentity;
+ const AttributionSourceState mAttributionSource;
status_t mStatus;
bool mLoop;
int32_t mConnId;
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index daa923e..a914006 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -126,8 +126,9 @@
}
if ((as == AUDIO_SOURCE_FM_TUNER
- && !(captureAudioOutputAllowed(mIdentity) || captureTunerAudioInputAllowed(mIdentity)))
- || !recordingAllowed(mIdentity, (audio_source_t)as)) {
+ && !(captureAudioOutputAllowed(mAttributionSource)
+ || captureTunerAudioInputAllowed(mAttributionSource)))
+ || !recordingAllowed(mAttributionSource, (audio_source_t)as)) {
return PERMISSION_DENIED;
}
Mutex::Autolock lock(mLock);
@@ -377,12 +378,12 @@
}
MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service,
- const Identity& identity)
+ const AttributionSourceState& attributionSource)
{
ALOGV("Client constructor");
- // identity already validated in createMediaRecorder
- mIdentity = identity;
- mRecorder = new StagefrightRecorder(identity);
+ // attribution source already validated in createMediaRecorder
+ mAttributionSource = attributionSource;
+ mRecorder = new StagefrightRecorder(attributionSource);
mMediaPlayerService = service;
}
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 24c6ee1..dcb9f82 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -22,7 +22,7 @@
#include <media/AudioSystem.h>
#include <media/IMediaRecorder.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <vector>
@@ -94,13 +94,13 @@
MediaRecorderClient(
const sp<MediaPlayerService>& service,
- const media::permission::Identity& identity);
+ const content::AttributionSourceState& attributionSource);
virtual ~MediaRecorderClient();
std::vector<DeathNotifier> mDeathNotifiers;
sp<AudioDeviceUpdatedNotifier> mAudioDeviceUpdatedNotifier;
- media::permission::Identity mIdentity;
+ content::AttributionSourceState mAttributionSource;
mutable Mutex mLock;
MediaRecorderBase *mRecorder;
sp<MediaPlayerService> mMediaPlayerService;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index ce642f3..bffd7b3 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -116,8 +116,8 @@
}
-StagefrightRecorder::StagefrightRecorder(const Identity& clientIdentity)
- : MediaRecorderBase(clientIdentity),
+StagefrightRecorder::StagefrightRecorder(const AttributionSourceState& client)
+ : MediaRecorderBase(client),
mWriter(NULL),
mOutputFd(-1),
mAudioSource((audio_source_t)AUDIO_SOURCE_CNT), // initialize with invalid value
@@ -159,7 +159,7 @@
// we run as part of the media player service; what we really want to
// know is the app which requested the recording.
- mMetricsItem->setUid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClient.uid)));
+ mMetricsItem->setUid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid)));
mMetricsItem->setCString(kRecorderLogSessionId, mLogSessionId.c_str());
@@ -1144,7 +1144,8 @@
status_t StagefrightRecorder::setClientName(const String16& clientName) {
- mClient.packageName = VALUE_OR_RETURN_STATUS(legacy2aidl_String16_string(clientName));
+ mAttributionSource.packageName = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_String16_string(clientName));
return OK;
}
@@ -1355,7 +1356,7 @@
sp<AudioSource> audioSource =
new AudioSource(
&attr,
- mClient,
+ mAttributionSource,
sourceSampleRate,
mAudioChannels,
mSampleRate,
@@ -1880,10 +1881,10 @@
Size videoSize;
videoSize.width = mVideoWidth;
videoSize.height = mVideoHeight;
- uid_t uid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(mClient.uid));
- pid_t pid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(mClient.pid));
+ uid_t uid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
+ pid_t pid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(mAttributionSource.pid));
String16 clientName = VALUE_OR_RETURN_STATUS(
- aidl2legacy_string_view_String16(mClient.packageName.value_or("")));
+ aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
if (mCaptureFpsEnable) {
if (!(mCaptureFps > 0.)) {
ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 59a080e..d6de47f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -26,12 +26,12 @@
#include <system/audio.h>
#include <media/hardware/MetadataBufferType.h>
-#include <android/media/permission/Identity.h>
-
-using namespace android::media::permission;
+#include <android/content/AttributionSourceState.h>
namespace android {
+using content::AttributionSourceState;
+
class Camera;
class ICameraRecordingProxy;
class CameraSource;
@@ -45,7 +45,7 @@
struct ALooper;
struct StagefrightRecorder : public MediaRecorderBase {
- explicit StagefrightRecorder(const Identity& clientIdentity);
+ explicit StagefrightRecorder(const AttributionSourceState& attributionSource);
virtual ~StagefrightRecorder();
virtual status_t init();
virtual status_t setLogSessionId(const String8 &id);
diff --git a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
index 5b16911..92236ea 100644
--- a/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
+++ b/media/libmediaplayerservice/tests/stagefrightRecorder/Android.bp
@@ -73,7 +73,7 @@
"libstagefright",
"libstagefright_foundation",
"libutils",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
diff --git a/media/libmediaplayerservice/tests/stagefrightRecorder/StagefrightRecorderTest.cpp b/media/libmediaplayerservice/tests/stagefrightRecorder/StagefrightRecorderTest.cpp
index 6dea53d..162c187 100644
--- a/media/libmediaplayerservice/tests/stagefrightRecorder/StagefrightRecorderTest.cpp
+++ b/media/libmediaplayerservice/tests/stagefrightRecorder/StagefrightRecorderTest.cpp
@@ -59,10 +59,11 @@
}
void SetUp() override {
- // TODO b/182392769: use identity util
- Identity identity;
- identity.packageName = std::string(LOG_TAG);
- mStfRecorder = new StagefrightRecorder(identity);
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = std::string(LOG_TAG);
+ attributionSource.token = sp<BBinder>::make();
+ mStfRecorder = new StagefrightRecorder(attributionSource);
ASSERT_NE(mStfRecorder, nullptr) << "Failed to create the instance of recorder";
mOutputAudioFp = fopen(OUTPUT_FILE_NAME_AUDIO, "wb");
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index d6e36b9..a052a70 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -338,7 +338,7 @@
"android.hardware.cas.native@1.0",
"android.hardware.drm@1.0",
"android.hardware.media.omx@1.0",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
@@ -352,7 +352,7 @@
"libogg",
"libwebm",
"libstagefright_id3",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libmediandk_format",
"libmedia_ndkformatpriv",
],
@@ -370,7 +370,7 @@
"libhidlmemory",
"libmedia",
"android.hidl.allocator@1.0",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
export_include_dirs: [
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 89fe56f..b6acdc8 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -34,7 +34,7 @@
namespace android {
-using android::media::permission::Identity;
+using content::AttributionSourceState;
static void AudioRecordCallbackFunction(int event, void *user, void *info) {
AudioSource *source = (AudioSource *) user;
@@ -54,13 +54,13 @@
}
AudioSource::AudioSource(
- const audio_attributes_t *attr, const Identity& identity,
+ const audio_attributes_t *attr, const AttributionSourceState& attributionSource,
uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate,
audio_port_handle_t selectedDeviceId,
audio_microphone_direction_t selectedMicDirection,
float selectedMicFieldDimension)
{
- set(attr, identity, sampleRate, channelCount, outSampleRate, selectedDeviceId,
+ set(attr, attributionSource, sampleRate, channelCount, outSampleRate, selectedDeviceId,
selectedMicDirection, selectedMicFieldDimension);
}
@@ -71,17 +71,18 @@
audio_microphone_direction_t selectedMicDirection,
float selectedMicFieldDimension)
{
- // TODO b/182392769: use identity util
- Identity identity;
- identity.packageName = VALUE_OR_FATAL(legacy2aidl_String16_string(opPackageName));
- identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
- identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
- set(attr, identity, sampleRate, channelCount, outSampleRate, selectedDeviceId,
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = VALUE_OR_FATAL(legacy2aidl_String16_string(opPackageName));
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(uid));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(pid));
+ attributionSource.token = sp<BBinder>::make();
+ set(attr, attributionSource, sampleRate, channelCount, outSampleRate, selectedDeviceId,
selectedMicDirection, selectedMicFieldDimension);
}
void AudioSource::set(
- const audio_attributes_t *attr, const Identity& identity,
+ const audio_attributes_t *attr, const AttributionSourceState& attributionSource,
uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate,
audio_port_handle_t selectedDeviceId,
audio_microphone_direction_t selectedMicDirection,
@@ -126,7 +127,7 @@
mRecord = new AudioRecord(
AUDIO_SOURCE_DEFAULT, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
audio_channel_in_mask_from_count(channelCount),
- identity,
+ attributionSource,
(size_t) (bufCount * frameCount),
AudioRecordCallbackFunction,
this,
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8fa7463..1986272 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -976,6 +976,10 @@
return "BufferDecoded";
case TunnelPeekState::kBufferRendered:
return "BufferRendered";
+ case TunnelPeekState::kDisabledQueued:
+ return "DisabledQueued";
+ case TunnelPeekState::kEnabledQueued:
+ return "EnabledQueued";
default:
return default_string;
}
@@ -986,25 +990,39 @@
if (!msg->findInt32("tunnel-peek", &tunnelPeek)){
return;
}
+
+ TunnelPeekState previousState = mTunnelPeekState;
if(tunnelPeek == 0){
- if (mTunnelPeekState == TunnelPeekState::kEnabledNoBuffer) {
- mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kEnabledNoBuffer),
- asString(TunnelPeekState::kDisabledNoBuffer));
- return;
+ switch (mTunnelPeekState) {
+ case TunnelPeekState::kEnabledNoBuffer:
+ mTunnelPeekState = TunnelPeekState::kDisabledNoBuffer;
+ break;
+ case TunnelPeekState::kEnabledQueued:
+ mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+ break;
+ default:
+ ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ return;
}
} else {
- if (mTunnelPeekState == TunnelPeekState::kDisabledNoBuffer) {
- mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kDisabledNoBuffer),
- asString(TunnelPeekState::kEnabledNoBuffer));
- return;
+ switch (mTunnelPeekState) {
+ case TunnelPeekState::kDisabledNoBuffer:
+ mTunnelPeekState = TunnelPeekState::kEnabledNoBuffer;
+ break;
+ case TunnelPeekState::kDisabledQueued:
+ mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+ break;
+ case TunnelPeekState::kBufferDecoded:
+ msg->setInt32("android._trigger-tunnel-peek", 1);
+ mTunnelPeekState = TunnelPeekState::kBufferRendered;
+ break;
+ default:
+ ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ return;
}
}
- ALOGV("Ignoring tunnel-peek=%d for %s", tunnelPeek, asString(mTunnelPeekState));
+ ALOGV("TunnelPeekState: %s -> %s", asString(previousState), asString(mTunnelPeekState));
}
void MediaCodec::updatePlaybackDuration(const sp<AMessage> &msg) {
@@ -3294,25 +3312,32 @@
if (mState != STARTED) {
break;
}
+ TunnelPeekState previousState = mTunnelPeekState;
switch(mTunnelPeekState) {
case TunnelPeekState::kDisabledNoBuffer:
+ case TunnelPeekState::kDisabledQueued:
mTunnelPeekState = TunnelPeekState::kBufferDecoded;
+ ALOGV("First tunnel frame ready");
ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kDisabledNoBuffer),
- asString(TunnelPeekState::kBufferDecoded));
+ asString(previousState),
+ asString(mTunnelPeekState));
break;
case TunnelPeekState::kEnabledNoBuffer:
- mTunnelPeekState = TunnelPeekState::kBufferDecoded;
- ALOGV("TunnelPeekState: %s -> %s",
- asString(TunnelPeekState::kEnabledNoBuffer),
- asString(TunnelPeekState::kBufferDecoded));
+ case TunnelPeekState::kEnabledQueued:
{
sp<AMessage> parameters = new AMessage();
parameters->setInt32("android._trigger-tunnel-peek", 1);
mCodec->signalSetParameters(parameters);
}
+ mTunnelPeekState = TunnelPeekState::kBufferRendered;
+ ALOGV("First tunnel frame ready");
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
break;
default:
+ ALOGV("Ignoring first tunnel frame ready, TunnelPeekState: %s",
+ asString(mTunnelPeekState));
break;
}
@@ -4777,6 +4802,28 @@
buffer->meta()->setInt32("csd", true);
}
+ if (mTunneled) {
+ TunnelPeekState previousState = mTunnelPeekState;
+ switch(mTunnelPeekState){
+ case TunnelPeekState::kEnabledNoBuffer:
+ buffer->meta()->setInt32("tunnel-first-frame", 1);
+ mTunnelPeekState = TunnelPeekState::kEnabledQueued;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
+ break;
+ case TunnelPeekState::kDisabledNoBuffer:
+ buffer->meta()->setInt32("tunnel-first-frame", 1);
+ mTunnelPeekState = TunnelPeekState::kDisabledQueued;
+ ALOGV("TunnelPeekState: %s -> %s",
+ asString(previousState),
+ asString(mTunnelPeekState));
+ break;
+ default:
+ break;
+ }
+ }
+
status_t err = OK;
if (hasCryptoOrDescrambler() && !c2Buffer && !memory) {
AString *errorDetailMsg;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index d1dcdb5..43d50f1 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -31,6 +31,8 @@
namespace android {
+using content::AttributionSourceState;
+
class AudioRecord;
struct AudioSource : public MediaSource, public MediaBufferObserver {
@@ -38,7 +40,7 @@
// _not_ a bitmask of audio_channels_t constants.
AudioSource(
const audio_attributes_t *attr,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
uint32_t sampleRate,
uint32_t channels,
uint32_t outSampleRate = 0,
@@ -145,7 +147,7 @@
void set(
const audio_attributes_t *attr,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
uint32_t sampleRate,
uint32_t channels,
uint32_t outSampleRate = 0,
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 0e6f0b3..d372140 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -377,15 +377,23 @@
// This type is used to track the tunnel mode video peek state machine:
//
// DisabledNoBuffer -> EnabledNoBuffer when tunnel-peek = true
+ // DisabledQueued -> EnabledQueued when tunnel-peek = true
+ // DisabledNoBuffer -> DisabledQueued when first frame queued
// EnabledNoBuffer -> DisabledNoBuffer when tunnel-peek = false
+ // EnabledQueued -> DisabledQueued when tunnel-peek = false
+ // EnabledNoBuffer -> EnabledQueued when first frame queued
// DisabledNoBuffer -> BufferDecoded when kWhatFirstTunnelFrameReady
+ // DisabledQueued -> BufferDecoded when kWhatFirstTunnelFrameReady
// EnabledNoBuffer -> BufferDecoded when kWhatFirstTunnelFrameReady
+ // EnabledQueued -> BufferDecoded when kWhatFirstTunnelFrameReady
// BufferDecoded -> BufferRendered when kWhatFrameRendered
// <all states> -> EnabledNoBuffer when flush
// <all states> -> EnabledNoBuffer when stop then configure then start
enum struct TunnelPeekState {
kDisabledNoBuffer,
kEnabledNoBuffer,
+ kDisabledQueued,
+ kEnabledQueued,
kBufferDecoded,
kBufferRendered,
};
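
The TunnelPeekState comment above documents the peek state machine; as a quick cross-check, the tunnel-peek true/false transitions it lists can be written as a pure function. This is an illustrative sketch only, not code from the patch, and applyTunnelPeekSetting is a hypothetical name; the frame-ready and flush transitions are handled by the MediaCodec.cpp hunks earlier in this change.

    // Illustrative sketch, not part of this patch: only the tunnel-peek = true/false
    // transitions from the state-machine comment, written as a pure function.
    enum struct TunnelPeekState {
        kDisabledNoBuffer,
        kEnabledNoBuffer,
        kDisabledQueued,
        kEnabledQueued,
        kBufferDecoded,
        kBufferRendered,
    };

    TunnelPeekState applyTunnelPeekSetting(TunnelPeekState state, bool enabled) {
        if (enabled) {
            switch (state) {
                case TunnelPeekState::kDisabledNoBuffer: return TunnelPeekState::kEnabledNoBuffer;
                case TunnelPeekState::kDisabledQueued:   return TunnelPeekState::kEnabledQueued;
                default:                                 return state;  // no transition documented
            }
        }
        switch (state) {
            case TunnelPeekState::kEnabledNoBuffer: return TunnelPeekState::kDisabledNoBuffer;
            case TunnelPeekState::kEnabledQueued:   return TunnelPeekState::kDisabledQueued;
            default:                                return state;  // no transition documented
        }
    }
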
diff --git a/media/libstagefright/tests/fuzzers/WriterFuzzer.cpp b/media/libstagefright/tests/fuzzers/WriterFuzzer.cpp
index 969c6e1..97d1160 100644
--- a/media/libstagefright/tests/fuzzers/WriterFuzzer.cpp
+++ b/media/libstagefright/tests/fuzzers/WriterFuzzer.cpp
@@ -17,7 +17,7 @@
// dylan.katz@leviathansecurity.com
#include <android-base/file.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <ctype.h>
#include <media/mediarecorder.h>
#include <media/stagefright/MPEG4Writer.h>
@@ -40,7 +40,7 @@
namespace android {
-using media::permission::Identity;
+using android::content::AttributionSourceState;
std::string getFourCC(FuzzedDataProvider *fdp) {
std::string fourCC = fdp->ConsumeRandomLengthString(4);
@@ -166,11 +166,12 @@
StandardWriters writerType = dataProvider.ConsumeEnum<StandardWriters>();
sp<MediaWriter> writer = createWriter(tf.fd, writerType, fileMeta);
- Identity i;
- i.packageName = dataProvider.ConsumeRandomLengthString(kMaxPackageNameLen);
- i.uid = dataProvider.ConsumeIntegral<int32_t>();
- i.pid = dataProvider.ConsumeIntegral<int32_t>();
- sp<MediaRecorder> mr = new MediaRecorder(i);
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = dataProvider.ConsumeRandomLengthString(kMaxPackageNameLen);
+ attributionSource.uid = dataProvider.ConsumeIntegral<int32_t>();
+ attributionSource.pid = dataProvider.ConsumeIntegral<int32_t>();
+ attributionSource.token = sp<BBinder>::make();
+ sp<MediaRecorder> mr = new MediaRecorder(attributionSource);
writer->setListener(mr);
uint8_t baseOpLen = operations.size();
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 3ceacfe..32a22ba 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -40,7 +40,7 @@
"libstagefright_foundation",
"libutils",
"liblog",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
header_libs: [
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 9e48c1f..bfe73d5 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -46,12 +46,11 @@
"libbinder",
"libcutils",
"liblog",
- "libpermission",
"libutils",
"libhidlbase",
+ "libpermission",
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
- "media_permission-aidl-cpp",
],
export_static_lib_headers: [
"libbatterystats_aidl",
@@ -71,7 +70,7 @@
],
export_shared_lib_headers: [
- "media_permission-aidl-cpp"
+ "libpermission",
],
include_dirs: [
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 214a174..9c7b863 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -25,7 +25,7 @@
#include <system/audio-hal-enums.h>
#include <media/AidlConversion.h>
#include <media/AidlConversionUtil.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <iterator>
#include <algorithm>
@@ -40,7 +40,7 @@
namespace android {
-using media::permission::Identity;
+using content::AttributionSourceState;
static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
@@ -81,92 +81,101 @@
}
}
-static bool checkRecordingInternal(const Identity& identity, const String16& msg,
- bool start, audio_source_t source) {
+std::optional<AttributionSourceState> resolveAttributionSource(
+ const AttributionSourceState& callerAttributionSource) {
+ AttributionSourceState nextAttributionSource = callerAttributionSource;
+
+ if (!nextAttributionSource.packageName.has_value()) {
+ nextAttributionSource = AttributionSourceState(nextAttributionSource);
+ PermissionController permissionController;
+ const uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(nextAttributionSource.uid));
+ nextAttributionSource.packageName = VALUE_OR_FATAL(legacy2aidl_String16_string(
+ resolveCallingPackage(permissionController, VALUE_OR_FATAL(
+ aidl2legacy_string_view_String16(nextAttributionSource.packageName.value_or(""))),
+ uid)));
+ if (!nextAttributionSource.packageName.has_value()) {
+ return std::nullopt;
+ }
+ }
+
+ AttributionSourceState myAttributionSource;
+ myAttributionSource.uid = VALUE_OR_FATAL(android::legacy2aidl_uid_t_int32_t(getuid()));
+ myAttributionSource.pid = VALUE_OR_FATAL(android::legacy2aidl_pid_t_int32_t(getpid()));
+ myAttributionSource.token = sp<BBinder>::make();
+ myAttributionSource.next.push_back(nextAttributionSource);
+
+ return std::optional<AttributionSourceState>{myAttributionSource};
+}
+
+static bool checkRecordingInternal(const AttributionSourceState& attributionSource,
+ const String16& msg, bool start, audio_source_t source) {
// Okay to not track in app ops as audio server or media server is us and if
// device is rooted security model is considered compromised.
// system_server loses its RECORD_AUDIO permission when a secondary
// user is active, but it is a core system service so let it through.
// TODO(b/141210120): UserManager.DISALLOW_RECORD_AUDIO should not affect system user 0
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
if (isAudioServerOrMediaServerOrSystemServerOrRootUid(uid)) return true;
// We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
- // may open a record track on behalf of a client. Note that pid may be a tid.
+ // may open a record track on behalf of a client. Note that pid may be a tid.
// IMPORTANT: DON'T USE PermissionCache - RUNTIME PERMISSIONS CHANGE.
- PermissionController permissionController;
- const bool ok = permissionController.checkPermission(sAndroidPermissionRecordAudio,
- identity.pid, identity.uid);
- if (!ok) {
- ALOGE("Request requires %s", String8(sAndroidPermissionRecordAudio).c_str());
+ const std::optional<AttributionSourceState> resolvedAttributionSource =
+ resolveAttributionSource(attributionSource);
+ if (!resolvedAttributionSource.has_value()) {
return false;
}
- String16 resolvedOpPackageName = resolveCallingPackage(
- permissionController, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
- identity.packageName.value_or(""))), uid);
- if (resolvedOpPackageName.size() == 0) {
- return false;
- }
+ const int32_t attributedOpCode = getOpForSource(source);
-
- AppOpsManager appOps;
- const int32_t op = getOpForSource(source);
+ permission::PermissionChecker permissionChecker;
+ bool permitted = false;
if (start) {
- if (int32_t mode = appOps.startOpNoThrow(op, identity.uid,
- resolvedOpPackageName, /*startIfModeDefault*/ false,
- VALUE_OR_FATAL(aidl2legacy_optional_string_view_optional_String16(
- identity.attributionTag)), msg) == AppOpsManager::MODE_ERRORED) {
- ALOGE("Request start for \"%s\" (uid %d) denied by app op: %d, mode: %d",
- String8(resolvedOpPackageName).c_str(), identity.uid, op, mode);
- return false;
- }
+ permitted = (permissionChecker.checkPermissionForStartDataDeliveryFromDatasource(
+ sAndroidPermissionRecordAudio, resolvedAttributionSource.value(), msg,
+ attributedOpCode) != permission::PermissionChecker::PERMISSION_HARD_DENIED);
} else {
- if (int32_t mode = appOps.checkOp(op, uid,
- resolvedOpPackageName) == AppOpsManager::MODE_ERRORED) {
- ALOGE("Request check for \"%s\" (uid %d) denied by app op: %d, mode: %d",
- String8(resolvedOpPackageName).c_str(), identity.uid, op, mode);
- return false;
- }
+ permitted = (permissionChecker.checkPermissionForPreflightFromDatasource(
+ sAndroidPermissionRecordAudio, resolvedAttributionSource.value(), msg,
+ attributedOpCode) != permission::PermissionChecker::PERMISSION_HARD_DENIED);
}
- return true;
+ return permitted;
}
-bool recordingAllowed(const Identity& identity, audio_source_t source) {
- return checkRecordingInternal(identity, String16(), /*start*/ false, source);
+bool recordingAllowed(const AttributionSourceState& attributionSource, audio_source_t source) {
+ return checkRecordingInternal(attributionSource, String16(), /*start*/ false, source);
}
-bool startRecording(const Identity& identity, const String16& msg, audio_source_t source) {
- return checkRecordingInternal(identity, msg, /*start*/ true, source);
+bool startRecording(const AttributionSourceState& attributionSource, const String16& msg,
+ audio_source_t source) {
+ return checkRecordingInternal(attributionSource, msg, /*start*/ true, source);
}
-void finishRecording(const Identity& identity, audio_source_t source) {
+void finishRecording(const AttributionSourceState& attributionSource, audio_source_t source) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- if (isAudioServerOrRootUid(uid)) return;
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ if (isAudioServerOrMediaServerOrSystemServerOrRootUid(uid)) return;
- PermissionController permissionController;
- String16 resolvedOpPackageName = resolveCallingPackage(
- permissionController,
- VALUE_OR_FATAL(aidl2legacy_string_view_String16(identity.packageName.value_or(""))),
- VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid)));
- if (resolvedOpPackageName.size() == 0) {
+ // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
+ // may open a record track on behalf of a client. Note that pid may be a tid.
+ // IMPORTANT: DON'T USE PermissionCache - RUNTIME PERMISSIONS CHANGE.
+ const std::optional<AttributionSourceState> resolvedAttributionSource =
+ resolveAttributionSource(attributionSource);
+ if (!resolvedAttributionSource.has_value()) {
return;
}
- AppOpsManager appOps;
-
- const int32_t op = getOpForSource(source);
- appOps.finishOp(op, identity.uid, resolvedOpPackageName,
- VALUE_OR_FATAL(aidl2legacy_optional_string_view_optional_String16(
- identity.attributionTag)));
+ const int32_t attributedOpCode = getOpForSource(source);
+ permission::PermissionChecker permissionChecker;
+ permissionChecker.finishDataDeliveryFromDatasource(attributedOpCode,
+ resolvedAttributionSource.value());
}
-bool captureAudioOutputAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool captureAudioOutputAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
@@ -174,9 +183,9 @@
return ok;
}
-bool captureMediaOutputAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool captureMediaOutputAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureMediaOutput("android.permission.CAPTURE_MEDIA_OUTPUT");
bool ok = PermissionCache::checkPermission(sCaptureMediaOutput, pid, uid);
@@ -184,9 +193,9 @@
return ok;
}
-bool captureTunerAudioInputAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool captureTunerAudioInputAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureTunerAudioInput("android.permission.CAPTURE_TUNER_AUDIO_INPUT");
bool ok = PermissionCache::checkPermission(sCaptureTunerAudioInput, pid, uid);
@@ -194,9 +203,9 @@
return ok;
}
-bool captureVoiceCommunicationOutputAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- uid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool captureVoiceCommunicationOutputAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerOrRootUid(uid)) return true;
static const String16 sCaptureVoiceCommOutput(
"android.permission.CAPTURE_VOICE_COMMUNICATION_OUTPUT");
@@ -205,15 +214,18 @@
return ok;
}
-bool captureHotwordAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- uid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool captureHotwordAllowed(const AttributionSourceState& attributionSource) {
// CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
- bool ok = recordingAllowed(identity);
+ bool ok = recordingAllowed(attributionSource);
if (ok) {
static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
- ok = PermissionCache::checkPermission(sCaptureHotwordAllowed, pid, uid);
+ // Use PermissionChecker, which includes some logic for allowing the isolated
+ // HotwordDetectionService to hold certain permissions.
+ permission::PermissionChecker permissionChecker;
+ ok = (permissionChecker.checkPermissionForPreflight(
+ sCaptureHotwordAllowed, attributionSource, String16(),
+ AppOpsManager::OP_NONE) != permission::PermissionChecker::PERMISSION_HARD_DENIED);
}
if (!ok) ALOGV("android.permission.CAPTURE_AUDIO_HOTWORD");
return ok;
@@ -230,12 +242,12 @@
}
bool modifyAudioRoutingAllowed() {
- return modifyAudioRoutingAllowed(getCallingIdentity());
+ return modifyAudioRoutingAllowed(getCallingAttributionSource());
}
-bool modifyAudioRoutingAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool modifyAudioRoutingAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerUid(IPCThreadState::self()->getCallingUid())) return true;
// IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
bool ok = PermissionCache::checkPermission(sModifyAudioRouting, pid, uid);
@@ -245,12 +257,12 @@
}
bool modifyDefaultAudioEffectsAllowed() {
- return modifyDefaultAudioEffectsAllowed(getCallingIdentity());
+ return modifyDefaultAudioEffectsAllowed(getCallingAttributionSource());
}
-bool modifyDefaultAudioEffectsAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool modifyDefaultAudioEffectsAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
if (isAudioServerUid(IPCThreadState::self()->getCallingUid())) return true;
static const String16 sModifyDefaultAudioEffectsAllowed(
@@ -271,18 +283,18 @@
return ok;
}
-bool modifyPhoneStateAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
bool ok = PermissionCache::checkPermission(sModifyPhoneState, pid, uid);
ALOGE_IF(!ok, "Request requires %s", String8(sModifyPhoneState).c_str());
return ok;
}
// privileged behavior needed by Dialer, Settings, SetupWizard and CellBroadcastReceiver
-bool bypassInterruptionPolicyAllowed(const Identity& identity) {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(identity.pid));
+bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource) {
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+ pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
static const String16 sWriteSecureSettings("android.permission.WRITE_SECURE_SETTINGS");
bool ok = PermissionCache::checkPermission(sModifyPhoneState, pid, uid)
|| PermissionCache::checkPermission(sWriteSecureSettings, pid, uid)
@@ -292,11 +304,14 @@
return ok;
}
-Identity getCallingIdentity() {
- Identity identity = Identity();
- identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
- identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
- return identity;
+AttributionSourceState getCallingAttributionSource() {
+ AttributionSourceState attributionSource = AttributionSourceState();
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
+ IPCThreadState::self()->getCallingPid()));
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
+ IPCThreadState::self()->getCallingUid()));
+ attributionSource.token = sp<BBinder>::make();
+ return attributionSource;
}
void purgePermissionCache() {
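
For context on how the reworked ServiceUtilities entry points are meant to be driven, here is a minimal caller-side sketch. It is illustrative only and not part of the patch: captureWithAttribution is a hypothetical function, and the only assumptions are the AttributionSourceState fields (uid, pid, packageName, token) and the startRecording()/finishRecording() signatures declared in ServiceUtilities.h below.

    // Illustrative sketch, not part of this patch: a caller bracketing a capture
    // with the AttributionSourceState-based helpers.
    #include <string>

    #include <android/content/AttributionSourceState.h>
    #include <binder/Binder.h>
    #include <mediautils/ServiceUtilities.h>
    #include <utils/String16.h>

    using android::content::AttributionSourceState;

    bool captureWithAttribution(int32_t uid, int32_t pid, const std::string& package) {
        AttributionSourceState attributionSource;
        attributionSource.uid = uid;
        attributionSource.pid = pid;
        attributionSource.packageName = package;
        attributionSource.token = android::sp<android::BBinder>::make();

        if (!android::startRecording(attributionSource, android::String16("example"),
                                     AUDIO_SOURCE_MIC)) {
            return false;  // hard denial from PermissionChecker
        }
        // ... capture audio ...
        android::finishRecording(attributionSource, AUDIO_SOURCE_MIC);
        return true;
    }
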
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index b245834..c1698dc 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -16,7 +16,8 @@
"liblog",
"libmediautils",
"libutils",
- "media_permission-aidl-cpp",
+ "libbinder",
+ "framework-permission-aidl-cpp",
],
cflags: [
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 2f9e780..6e52512 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,16 +17,16 @@
#include <fcntl.h>
#include <functional>
-#include <type_traits>
+#include <type_traits>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include "fuzzer/FuzzedDataProvider.h"
#include "mediautils/ServiceUtilities.h"
static constexpr int kMaxOperations = 50;
static constexpr int kMaxStringLen = 256;
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
const std::vector<std::function<void(FuzzedDataProvider*, android::MediaPackageManager)>>
operations = {
@@ -54,10 +54,11 @@
std::string packageNameStr = data_provider.ConsumeRandomLengthString(kMaxStringLen);
std::string msgStr = data_provider.ConsumeRandomLengthString(kMaxStringLen);
android::String16 msgStr16(packageNameStr.c_str());
- Identity identity;
- identity.packageName = packageNameStr;
- identity.uid = uid;
- identity.pid = pid;
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageNameStr;
+ attributionSource.uid = uid;
+ attributionSource.pid = pid;
+ attributionSource.token = android::sp<android::BBinder>::make();
// There is not state here, and order is not significant,
// so we can simply call all of the target functions
@@ -65,14 +66,14 @@
android::isAudioServerUid(uid);
android::isAudioServerOrSystemServerUid(uid);
android::isAudioServerOrMediaServerUid(uid);
- android::recordingAllowed(identity);
- android::startRecording(identity, msgStr16, source);
- android::finishRecording(identity, source);
- android::captureAudioOutputAllowed(identity);
- android::captureMediaOutputAllowed(identity);
- android::captureHotwordAllowed(identity);
- android::modifyPhoneStateAllowed(identity);
- android::bypassInterruptionPolicyAllowed(identity);
+ android::recordingAllowed(attributionSource);
+ android::startRecording(attributionSource, msgStr16, source);
+ android::finishRecording(attributionSource, source);
+ android::captureAudioOutputAllowed(attributionSource);
+ android::captureMediaOutputAllowed(attributionSource);
+ android::captureHotwordAllowed(attributionSource);
+ android::modifyPhoneStateAllowed(attributionSource);
+ android::bypassInterruptionPolicyAllowed(attributionSource);
android::settingsAllowed();
android::modifyAudioRoutingAllowed();
android::modifyDefaultAudioEffectsAllowed();
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index e7132b8..734313c 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -25,7 +25,9 @@
#include <cutils/multiuser.h>
#include <private/android_filesystem_config.h>
#include <system/audio-hal-enums.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
+#include <binder/PermissionController.h>
+#include <android/permission/PermissionChecker.h>
#include <map>
#include <optional>
@@ -35,6 +37,8 @@
namespace android {
+using content::AttributionSourceState;
+
// Audio permission utilities
// Used for calls that should originate from system services.
@@ -80,28 +84,30 @@
}
}
-bool recordingAllowed(const media::permission::Identity& identity,
+bool recordingAllowed(const AttributionSourceState& attributionSource,
audio_source_t source = AUDIO_SOURCE_DEFAULT);
-bool startRecording(const media::permission::Identity& identity,
+bool startRecording(const AttributionSourceState& attributionSource,
const String16& msg, audio_source_t source);
-void finishRecording(const media::permission::Identity& identity, audio_source_t source);
-bool captureAudioOutputAllowed(const media::permission::Identity& identity);
-bool captureMediaOutputAllowed(const media::permission::Identity& identity);
-bool captureTunerAudioInputAllowed(const media::permission::Identity& identity);
-bool captureVoiceCommunicationOutputAllowed(const media::permission::Identity& identity);
-bool captureHotwordAllowed(const media::permission::Identity& identity);
+void finishRecording(const AttributionSourceState& attributionSource, audio_source_t source);
+std::optional<AttributionSourceState> resolveAttributionSource(
+ const AttributionSourceState& callerAttributionSource);
+bool captureAudioOutputAllowed(const AttributionSourceState& attributionSource);
+bool captureMediaOutputAllowed(const AttributionSourceState& attributionSource);
+bool captureTunerAudioInputAllowed(const AttributionSourceState& attributionSource);
+bool captureVoiceCommunicationOutputAllowed(const AttributionSourceState& attributionSource);
+bool captureHotwordAllowed(const AttributionSourceState& attributionSource);
bool settingsAllowed();
bool modifyAudioRoutingAllowed();
-bool modifyAudioRoutingAllowed(const media::permission::Identity& identity);
+bool modifyAudioRoutingAllowed(const AttributionSourceState& attributionSource);
bool modifyDefaultAudioEffectsAllowed();
-bool modifyDefaultAudioEffectsAllowed(const media::permission::Identity& identity);
+bool modifyDefaultAudioEffectsAllowed(const AttributionSourceState& attributionSource);
bool dumpAllowed();
-bool modifyPhoneStateAllowed(const media::permission::Identity& identity);
-bool bypassInterruptionPolicyAllowed(const media::permission::Identity& identity);
+bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
+bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
void purgePermissionCache();
int32_t getOpForSource(audio_source_t source);
-media::permission::Identity getCallingIdentity();
+AttributionSourceState getCallingAttributionSource();
status_t checkIMemory(const sp<IMemory>& iMemory);
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index a7d47fb..b91f302 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -81,12 +81,12 @@
"libmedia_helper",
"libshmemcompat",
"libvibrator",
- "media_permission-aidl-cpp",
],
static_libs: [
"libcpustats",
"libsndfile",
+ "libpermission",
],
header_libs: [
@@ -97,7 +97,6 @@
export_shared_lib_headers: [
"libpermission",
- "media_permission-aidl-cpp",
],
cflags: [
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 3562b00..54a6425 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -103,7 +103,7 @@
namespace android {
using media::IEffectClient;
-using media::permission::Identity;
+using android::content::AttributionSourceState;
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
static const char kHardwareLockedString[] = "Hardware lock is taken\n";
@@ -163,31 +163,32 @@
}
};
-// TODO b/182392769: use identity util
+// TODO b/182392769: use attribution source util
/* static */
-media::permission::Identity AudioFlinger::checkIdentityPackage(
- const media::permission::Identity& identity) {
+AttributionSourceState AudioFlinger::checkAttributionSourcePackage(
+ const AttributionSourceState& attributionSource) {
Vector<String16> packages;
- PermissionController{}.getPackagesForUid(identity.uid, packages);
+ PermissionController{}.getPackagesForUid(attributionSource.uid, packages);
- Identity checkedIdentity = identity;
- if (!identity.packageName.has_value() || identity.packageName.value().size() == 0) {
+ AttributionSourceState checkedAttributionSource = attributionSource;
+ if (!attributionSource.packageName.has_value()
+ || attributionSource.packageName.value().size() == 0) {
if (!packages.isEmpty()) {
- checkedIdentity.packageName =
+ checkedAttributionSource.packageName =
std::move(legacy2aidl_String16_string(packages[0]).value());
}
} else {
String16 opPackageLegacy = VALUE_OR_FATAL(
- aidl2legacy_string_view_String16(identity.packageName.value_or("")));
+ aidl2legacy_string_view_String16(attributionSource.packageName.value_or("")));
if (std::find_if(packages.begin(), packages.end(),
[&opPackageLegacy](const auto& package) {
return opPackageLegacy == package; }) == packages.end()) {
ALOGW("The package name(%s) provided does not correspond to the uid %d",
- identity.packageName.value_or("").c_str(), identity.uid);
- checkedIdentity.packageName = std::optional<std::string>();
+ attributionSource.packageName.value_or("").c_str(), attributionSource.uid);
+ checkedAttributionSource.packageName = std::optional<std::string>();
}
}
- return checkedIdentity;
+ return checkedAttributionSource;
}
// ----------------------------------------------------------------------------
@@ -236,11 +237,12 @@
timespec ts{};
clock_gettime(CLOCK_MONOTONIC, &ts);
// zero ID has a special meaning, so start allocation at least at AUDIO_UNIQUE_ID_USE_MAX
- uint32_t sessionBase = (uint32_t)std::max((long)1, ts.tv_sec);
+ uint32_t movingBase = (uint32_t)std::max((long)1, ts.tv_sec);
// unsigned instead of audio_unique_id_use_t, because ++ operator is unavailable for enum
for (unsigned use = AUDIO_UNIQUE_ID_USE_UNSPECIFIED; use < AUDIO_UNIQUE_ID_USE_MAX; use++) {
mNextUniqueIds[use] =
- ((use == AUDIO_UNIQUE_ID_USE_SESSION) ? sessionBase : 1) * AUDIO_UNIQUE_ID_USE_MAX;
+ ((use == AUDIO_UNIQUE_ID_USE_SESSION || use == AUDIO_UNIQUE_ID_USE_CLIENT) ?
+ movingBase : 1) * AUDIO_UNIQUE_ID_USE_MAX;
}
#if 1
@@ -312,6 +314,27 @@
return NO_ERROR;
}
+status_t AudioFlinger::updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) {
+ Mutex::Autolock _l(mLock);
+ for (const auto& [trackId, secondaryOutputs] : trackSecondaryOutputs) {
+ size_t i = 0;
+ for (; i < mPlaybackThreads.size(); ++i) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ Mutex::Autolock _tl(thread->mLock);
+ sp<PlaybackThread::Track> track = thread->getTrackById_l(trackId);
+ if (track != nullptr) {
+ ALOGD("%s trackId: %u", __func__, trackId);
+ updateSecondaryOutputsForTrack_l(track.get(), thread, secondaryOutputs);
+ break;
+ }
+ }
+ ALOGW_IF(i >= mPlaybackThreads.size(),
+ "%s cannot find track with id %u", __func__, trackId);
+ }
+ return NO_ERROR;
+}
+
// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
if (mAudioVibratorInfos.empty()) {
@@ -409,7 +432,7 @@
ret = AudioSystem::getOutputForAttr(&localAttr, &io,
actualSessionId,
- &streamType, client.identity,
+ &streamType, client.attributionSource,
&fullConfig,
(audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
AUDIO_OUTPUT_FLAG_DIRECT),
@@ -420,7 +443,7 @@
ret = AudioSystem::getInputForAttr(&localAttr, &io,
RECORD_RIID_INVALID,
actualSessionId,
- client.identity,
+ client.attributionSource,
config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ, deviceId, &portId);
}
@@ -826,21 +849,21 @@
// TODO b/182392553: refactor or make clearer
pid_t clientPid =
- VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(input.clientInfo.identity.pid));
+ VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(input.clientInfo.attributionSource.pid));
bool updatePid = (clientPid == (pid_t)-1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid =
- VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(input.clientInfo.identity.uid));
+ VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(input.clientInfo.attributionSource.uid));
audio_io_handle_t effectThreadId = AUDIO_IO_HANDLE_NONE;
std::vector<int> effectIds;
audio_attributes_t localAttr = input.attr;
- Identity adjIdentity = input.clientInfo.identity;
+ AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
- adjIdentity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
clientUid = callingUid;
updatePid = true;
}
@@ -850,7 +873,7 @@
"%s uid %d pid %d tried to pass itself off as pid %d",
__func__, callingUid, callingPid, clientPid);
clientPid = callingPid;
- adjIdentity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
+ adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
}
audio_session_t sessionId = input.sessionId;
@@ -865,7 +888,7 @@
output.outputId = AUDIO_IO_HANDLE_NONE;
output.selectedDeviceId = input.selectedDeviceId;
lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType,
- adjIdentity, &input.config, input.flags,
+ adjAttributionSource, &input.config, input.flags,
&output.selectedDeviceId, &portId, &secondaryOutputs);
if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
@@ -930,7 +953,7 @@
&output.frameCount, &output.notificationFrameCount,
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
- callingPid, adjIdentity, input.clientInfo.clientTid,
+ callingPid, adjAttributionSource, input.clientInfo.clientTid,
&lStatus, portId, input.audioTrackCallback);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
@@ -944,88 +967,7 @@
// Connect secondary outputs. Failure on a secondary output must not imped the primary
// Any secondary output setup failure will lead to a desync between the AP and AF until
// the track is destroyed.
- TeePatches teePatches;
- for (audio_io_handle_t secondaryOutput : secondaryOutputs) {
- PlaybackThread *secondaryThread = checkPlaybackThread_l(secondaryOutput);
- if (secondaryThread == NULL) {
- ALOGE("no playback thread found for secondary output %d", output.outputId);
- continue;
- }
-
- size_t sourceFrameCount = thread->frameCount() * output.sampleRate
- / thread->sampleRate();
- size_t sinkFrameCount = secondaryThread->frameCount() * output.sampleRate
- / secondaryThread->sampleRate();
- // If the secondary output has just been opened, the first secondaryThread write
- // will not block as it will fill the empty startup buffer of the HAL,
- // so a second sink buffer needs to be ready for the immediate next blocking write.
- // Additionally, have a margin of one main thread buffer as the scheduling jitter
- // can reorder the writes (eg if thread A&B have the same write intervale,
- // the scheduler could schedule AB...BA)
- size_t frameCountToBeReady = 2 * sinkFrameCount + sourceFrameCount;
- // Total secondary output buffer must be at least as the read frames plus
- // the margin of a few buffers on both sides in case the
- // threads scheduling has some jitter.
- // That value should not impact latency as the secondary track is started before
- // its buffer is full, see frameCountToBeReady.
- size_t frameCount = frameCountToBeReady + 2 * (sourceFrameCount + sinkFrameCount);
- // The frameCount should also not be smaller than the secondary thread min frame
- // count
- size_t minFrameCount = AudioSystem::calculateMinFrameCount(
- [&] { Mutex::Autolock _l(secondaryThread->mLock);
- return secondaryThread->latency_l(); }(),
- secondaryThread->mNormalFrameCount,
- secondaryThread->mSampleRate,
- output.sampleRate,
- input.speed);
- frameCount = std::max(frameCount, minFrameCount);
-
- using namespace std::chrono_literals;
- auto inChannelMask = audio_channel_mask_out_to_in(input.config.channel_mask);
-            sp<RecordThread::PatchRecord> patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
- output.sampleRate,
- inChannelMask,
- input.config.format,
- frameCount,
- NULL /* buffer */,
- (size_t)0 /* bufferSize */,
- AUDIO_INPUT_FLAG_DIRECT,
- 0ns /* timeout */);
- status_t status = patchRecord->initCheck();
- if (status != NO_ERROR) {
- ALOGE("Secondary output patchRecord init failed: %d", status);
- continue;
- }
-
- // TODO: We could check compatibility of the secondaryThread with the PatchTrack
- // for fast usage: thread has fast mixer, sample rate matches, etc.;
- // for now, we exclude fast tracks by removing the Fast flag.
- const audio_output_flags_t outputFlags =
- (audio_output_flags_t)(output.flags & ~AUDIO_OUTPUT_FLAG_FAST);
-            sp<PlaybackThread::PatchTrack> patchTrack = new PlaybackThread::PatchTrack(secondaryThread,
- streamType,
- output.sampleRate,
- input.config.channel_mask,
- input.config.format,
- frameCount,
- patchRecord->buffer(),
- patchRecord->bufferSize(),
- outputFlags,
- 0ns /* timeout */,
- frameCountToBeReady);
- status = patchTrack->initCheck();
- if (status != NO_ERROR) {
- ALOGE("Secondary output patchTrack init failed: %d", status);
- continue;
- }
- teePatches.push_back({patchRecord, patchTrack});
- secondaryThread->addPatchTrack(patchTrack);
- // In case the downstream patchTrack on the secondaryThread temporarily outlives
- // our created track, ensure the corresponding patchRecord is still alive.
- patchTrack->setPeerProxy(patchRecord, true /* holdReference */);
- patchRecord->setPeerProxy(patchTrack, false /* holdReference */);
- }
- track->setTeePatches(std::move(teePatches));
+ updateSecondaryOutputsForTrack_l(track.get(), thread, secondaryOutputs);
}
// move effect chain to this output thread if an effect on same session was waiting
@@ -2090,24 +2032,26 @@
output.inputId = AUDIO_IO_HANDLE_NONE;
// TODO b/182392553: refactor or clean up
- Identity adjIdentity = input.clientInfo.identity;
- bool updatePid = (adjIdentity.pid == -1);
+ AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
+ bool updatePid = (adjAttributionSource.pid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(adjIdentity.uid));
+ const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(
+ adjAttributionSource.uid));
if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(currentUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, currentUid);
- adjIdentity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
updatePid = true;
}
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
- const pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(adjIdentity.pid));
+ const pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(
+ adjAttributionSource.pid));
if (updatePid) {
ALOGW_IF(currentPid != (pid_t)-1 && currentPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
__func__, callingUid, callingPid, currentPid);
- adjIdentity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
+ adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
}
// we don't yet support anything other than linear PCM
@@ -2135,7 +2079,7 @@
output.selectedDeviceId = input.selectedDeviceId;
output.flags = input.flags;
- client = registerPid(VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(adjIdentity.pid)));
+ client = registerPid(VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid)));
// Not a conventional loop, but a retry loop for at most two iterations total.
// Try first maybe with FAST flag then try again without FAST flag if that fails.
@@ -2155,7 +2099,7 @@
input.riid,
sessionId,
// FIXME compare to AudioTrack
- adjIdentity,
+ adjAttributionSource,
&input.config,
output.flags, &output.selectedDeviceId, &portId);
if (lStatus != NO_ERROR) {
@@ -2182,7 +2126,7 @@
input.config.format, input.config.channel_mask,
&output.frameCount, sessionId,
&output.notificationFrameCount,
- callingPid, adjIdentity, &output.flags,
+ callingPid, adjAttributionSource, &output.flags,
input.clientInfo.clientTid,
&lStatus, portId, input.maxSharedAudioHistoryMs);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
@@ -2952,8 +2896,8 @@
audio_is_linear_pcm(config->format) &&
audio_is_linear_pcm(halconfig.format) &&
(halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
- (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_8) &&
- (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
+ (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_LIMIT) &&
+ (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_LIMIT)) {
// FIXME describe the change proposed by HAL (save old values so we can log them here)
ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
inStream.clear();
@@ -3441,6 +3385,94 @@
return nullptr;
}
+void AudioFlinger::updateSecondaryOutputsForTrack_l(
+ PlaybackThread::Track* track,
+ PlaybackThread* thread,
+ const std::vector<audio_io_handle_t> &secondaryOutputs) const {
+ TeePatches teePatches;
+ for (audio_io_handle_t secondaryOutput : secondaryOutputs) {
+ PlaybackThread *secondaryThread = checkPlaybackThread_l(secondaryOutput);
+ if (secondaryThread == nullptr) {
+ ALOGE("no playback thread found for secondary output %d", thread->id());
+ continue;
+ }
+
+ size_t sourceFrameCount = thread->frameCount() * track->sampleRate()
+ / thread->sampleRate();
+ size_t sinkFrameCount = secondaryThread->frameCount() * track->sampleRate()
+ / secondaryThread->sampleRate();
+ // If the secondary output has just been opened, the first secondaryThread write
+ // will not block as it will fill the empty startup buffer of the HAL,
+ // so a second sink buffer needs to be ready for the immediate next blocking write.
+ // Additionally, have a margin of one main thread buffer as the scheduling jitter
+        // can reorder the writes (e.g. if threads A and B have the same write interval,
+ // the scheduler could schedule AB...BA)
+ size_t frameCountToBeReady = 2 * sinkFrameCount + sourceFrameCount;
+        // The total secondary output buffer must be at least as large as the read frames plus
+        // a margin of a few buffers on both sides in case the
+        // thread scheduling has some jitter.
+ // That value should not impact latency as the secondary track is started before
+ // its buffer is full, see frameCountToBeReady.
+ size_t frameCount = frameCountToBeReady + 2 * (sourceFrameCount + sinkFrameCount);
+ // The frameCount should also not be smaller than the secondary thread min frame
+ // count
+ size_t minFrameCount = AudioSystem::calculateMinFrameCount(
+ [&] { Mutex::Autolock _l(secondaryThread->mLock);
+ return secondaryThread->latency_l(); }(),
+ secondaryThread->mNormalFrameCount,
+ secondaryThread->mSampleRate,
+ track->sampleRate(),
+ track->getSpeed());
+ frameCount = std::max(frameCount, minFrameCount);
+
+ using namespace std::chrono_literals;
+ auto inChannelMask = audio_channel_mask_out_to_in(track->channelMask());
+        sp<RecordThread::PatchRecord> patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
+ track->sampleRate(),
+ inChannelMask,
+ track->format(),
+ frameCount,
+ nullptr /* buffer */,
+ (size_t)0 /* bufferSize */,
+ AUDIO_INPUT_FLAG_DIRECT,
+ 0ns /* timeout */);
+ status_t status = patchRecord->initCheck();
+ if (status != NO_ERROR) {
+ ALOGE("Secondary output patchRecord init failed: %d", status);
+ continue;
+ }
+
+ // TODO: We could check compatibility of the secondaryThread with the PatchTrack
+ // for fast usage: thread has fast mixer, sample rate matches, etc.;
+ // for now, we exclude fast tracks by removing the Fast flag.
+ const audio_output_flags_t outputFlags =
+ (audio_output_flags_t)(track->getOutputFlags() & ~AUDIO_OUTPUT_FLAG_FAST);
+        sp<PlaybackThread::PatchTrack> patchTrack = new PlaybackThread::PatchTrack(secondaryThread,
+ track->streamType(),
+ track->sampleRate(),
+ track->channelMask(),
+ track->format(),
+ frameCount,
+ patchRecord->buffer(),
+ patchRecord->bufferSize(),
+ outputFlags,
+ 0ns /* timeout */,
+ frameCountToBeReady);
+ status = patchTrack->initCheck();
+ if (status != NO_ERROR) {
+ ALOGE("Secondary output patchTrack init failed: %d", status);
+ continue;
+ }
+ teePatches.push_back({patchRecord, patchTrack});
+ secondaryThread->addPatchTrack(patchTrack);
+ // In case the downstream patchTrack on the secondaryThread temporarily outlives
+ // our created track, ensure the corresponding patchRecord is still alive.
+ patchTrack->setPeerProxy(patchRecord, true /* holdReference */);
+ patchRecord->setPeerProxy(patchTrack, false /* holdReference */);
+ }
+ track->setTeePatches(std::move(teePatches));
+}
+
sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
audio_session_t triggerSession,
audio_session_t listenerSession,
@@ -3577,7 +3609,7 @@
const int32_t priority = request.priority;
const AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioDeviceTypeAddress(request.device));
- Identity adjIdentity = request.identity;
+ AttributionSourceState adjAttributionSource = request.attributionSource;
const audio_session_t sessionId = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_session_t(request.sessionId));
audio_io_handle_t io = VALUE_OR_RETURN_STATUS(
@@ -3595,19 +3627,20 @@
// TODO b/182392553: refactor or make clearer
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- adjIdentity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
- pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(adjIdentity.pid));
+ adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid));
if (currentPid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
ALOGW_IF(currentPid != -1 && currentPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
__func__, callingUid, callingPid, currentPid);
- adjIdentity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
+ adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
currentPid = callingPid;
}
ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
- adjIdentity.pid, effectClient.get(), priority, sessionId, io, mEffectsFactoryHal.get());
+ adjAttributionSource.pid, effectClient.get(), priority, sessionId, io,
+ mEffectsFactoryHal.get());
if (mEffectsFactoryHal == 0) {
ALOGE("%s: no effects factory hal", __func__);
@@ -3635,7 +3668,7 @@
goto Exit;
}
} else if (sessionId == AUDIO_SESSION_DEVICE) {
- if (!modifyDefaultAudioEffectsAllowed(adjIdentity)) {
+ if (!modifyDefaultAudioEffectsAllowed(adjAttributionSource)) {
ALOGE("%s: device effect permission denied for uid %d", __func__, callingUid);
lStatus = PERMISSION_DENIED;
goto Exit;
@@ -3680,7 +3713,7 @@
// check recording permission for visualizer
if ((memcmp(&descOut.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) &&
// TODO: Do we need to start/stop op - i.e. is there recording being performed?
- !recordingAllowed(adjIdentity)) {
+ !recordingAllowed(adjAttributionSource)) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -3963,7 +3996,7 @@
// if the move request is not received from audio policy manager, the effect must be
// re-registered with the new strategy and output
if (dstChain == 0) {
- dstChain = effect->callback()->chain().promote();
+ dstChain = effect->getCallback()->chain().promote();
if (dstChain == 0) {
ALOGW("moveEffectChain_l() cannot get chain from effect %p", effect.get());
status = NO_INIT;
@@ -4013,7 +4046,7 @@
goto Exit;
}
- dstChain = effect->callback()->chain().promote();
+ dstChain = effect->getCallback()->chain().promote();
if (dstChain == 0) {
thread->addEffect_l(effect);
status = INVALID_OPERATION;
@@ -4170,7 +4203,8 @@
case TransactionCode::SET_LOW_RAM_DEVICE:
case TransactionCode::SYSTEM_READY:
case TransactionCode::SET_AUDIO_HAL_PIDS:
- case TransactionCode::SET_VIBRATOR_INFOS: {
+ case TransactionCode::SET_VIBRATOR_INFOS:
+ case TransactionCode::UPDATE_SECONDARY_OUTPUTS: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
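
The buffer sizing in updateSecondaryOutputsForTrack_l above (moved from createTrack) is easier to sanity-check with concrete numbers. The figures below are assumptions for illustration, not values from the patch: a primary thread with 960-frame buffers at 48 kHz, a secondary thread with 480-frame buffers at 48 kHz, and a 48 kHz track at normal speed.

    // Illustrative numbers only, not from the patch.
    size_t sourceFrameCount = 960 * 48000 / 48000;       // = 960  (primary thread buffer, in track frames)
    size_t sinkFrameCount   = 480 * 48000 / 48000;        // = 480  (secondary thread buffer, in track frames)
    size_t frameCountToBeReady = 2 * 480 + 960;            // = 1920: two sink buffers plus one source buffer
    size_t frameCount = 1920 + 2 * (960 + 480);            // = 4800, before the minFrameCount clamp

The result is then raised to at least AudioSystem::calculateMinFrameCount() for the secondary thread, so the tee buffer never falls below what the sink requires.
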
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 4b03d10..fff61f8 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -37,6 +37,8 @@
#include <android/media/IAudioFlingerClient.h>
#include <android/media/IAudioTrackCallback.h>
#include <android/os/BnExternalVibrationController.h>
+#include <android/content/AttributionSourceState.h>
+
#include <android-base/macros.h>
#include <cutils/atomic.h>
@@ -124,13 +126,15 @@
#define INCLUDING_FROM_AUDIOFLINGER_H
+using android::content::AttributionSourceState;
+
class AudioFlinger : public AudioFlingerServerAdapter::Delegate
{
public:
static void instantiate() ANDROID_API;
- static media::permission::Identity checkIdentityPackage(
- const media::permission::Identity& identity);
+ static AttributionSourceState checkAttributionSourcePackage(
+ const AttributionSourceState& attributionSource);
status_t dump(int fd, const Vector<String16>& args) override;
@@ -272,6 +276,9 @@
virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+ virtual status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs);
+
status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
const std::function<status_t()>& delegate) override;
@@ -775,6 +782,11 @@
ThreadBase *hapticPlaybackThread_l() const;
+ void updateSecondaryOutputsForTrack_l(
+ PlaybackThread::Track* track,
+ PlaybackThread* thread,
+ const std::vector<audio_io_handle_t>& secondaryOutputs) const;
+
void removeClient_l(pid_t pid);
void removeNotificationClient(pid_t pid);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index d75b13b..d3492d9 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -152,12 +152,12 @@
if (fromHandle) {
if (enabled) {
if (status != NO_ERROR) {
- mCallback->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
+ getCallback()->checkSuspendOnEffectEnabled(this, false, false /*threadLocked*/);
} else {
- mCallback->onEffectEnable(this);
+ getCallback()->onEffectEnable(this);
}
} else {
- mCallback->onEffectDisable(this);
+ getCallback()->onEffectDisable(this);
}
}
return status;
@@ -247,8 +247,9 @@
doRegister = true;
mPolicyRegistered = mHandles.size() > 0;
if (mPolicyRegistered) {
- io = mCallback->io();
- strategy = mCallback->strategy();
+ const auto callback = getCallback();
+ io = callback->io();
+ strategy = callback->strategy();
}
}
// enable effect when registered according to enable state requested by controlling handle
@@ -349,8 +350,9 @@
// unsafe method called when the effect parent thread has been destroyed
ssize_t AudioFlinger::EffectBase::disconnectHandle(EffectHandle *handle, bool unpinIfLast)
{
+ const auto callback = getCallback();
ALOGV("disconnect() %p handle %p", this, handle);
- if (mCallback->disconnectEffectHandle(handle, unpinIfLast)) {
+ if (callback->disconnectEffectHandle(handle, unpinIfLast)) {
return mHandles.size();
}
@@ -358,7 +360,7 @@
ssize_t numHandles = removeHandle_l(handle);
if ((numHandles == 0) && (!mPinned || unpinIfLast)) {
mLock.unlock();
- mCallback->updateOrphanEffectChains(this);
+ callback->updateOrphanEffectChains(this);
mLock.lock();
}
return numHandles;
@@ -377,7 +379,7 @@
}
void AudioFlinger::EffectBase::checkSuspendOnEffectEnabled(bool enabled, bool threadLocked) {
- mCallback->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
+ getCallback()->checkSuspendOnEffectEnabled(this, enabled, threadLocked);
}
static String8 effectFlagsToString(uint32_t flags) {
@@ -835,7 +837,7 @@
mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
// If an insert effect is idle and input buffer is different from output buffer,
// accumulate input onto output
- if (mCallback->activeTrackCnt() != 0) {
+ if (getCallback()->activeTrackCnt() != 0) {
// similar handling with data_bypass above.
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
accumulateInputToOutput();
@@ -860,6 +862,7 @@
status_t status;
uint32_t size;
audio_channel_mask_t channelMask;
+ sp<EffectCallbackInterface> callback;
if (mEffectInterface == 0) {
status = NO_INIT;
@@ -870,7 +873,8 @@
// TODO: handle configuration of input (record) SW effects above the HAL,
// similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
// in which case input channel masks should be used here.
- channelMask = mCallback->channelMask();
+ callback = getCallback();
+ channelMask = callback->channelMask();
mConfig.inputCfg.channels = channelMask;
mConfig.outputCfg.channels = channelMask;
@@ -899,7 +903,7 @@
#endif
}
if (isHapticGenerator()) {
- audio_channel_mask_t hapticChannelMask = mCallback->hapticChannelMask();
+ audio_channel_mask_t hapticChannelMask = callback->hapticChannelMask();
mConfig.inputCfg.channels |= hapticChannelMask;
mConfig.outputCfg.channels |= hapticChannelMask;
}
@@ -912,11 +916,11 @@
mConfig.outputCfg.format = EFFECT_BUFFER_FORMAT;
// Don't use sample rate for thread if effect isn't offloadable.
- if (mCallback->isOffloadOrDirect() && !isOffloaded()) {
+ if (callback->isOffloadOrDirect() && !isOffloaded()) {
mConfig.inputCfg.samplingRate = DEFAULT_OUTPUT_SAMPLE_RATE;
ALOGV("Overriding effect input as 48kHz");
} else {
- mConfig.inputCfg.samplingRate = mCallback->sampleRate();
+ mConfig.inputCfg.samplingRate = callback->sampleRate();
}
mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
mConfig.inputCfg.bufferProvider.cookie = NULL;
@@ -942,11 +946,11 @@
}
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.inputCfg.buffer.frameCount = mCallback->frameCount();
+ mConfig.inputCfg.buffer.frameCount = callback->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
ALOGV("configure() %p chain %p buffer %p framecount %zu",
- this, mCallback->chain().promote().get(),
+ this, callback->chain().promote().get(),
mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
@@ -962,7 +966,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
- mCallback->isOutput() &&
+ callback->isOutput() &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
// Older effects may require exact STEREO position mask.
@@ -1029,7 +1033,7 @@
size = sizeof(int);
*(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
- uint32_t latency = mCallback->latency();
+ uint32_t latency = callback->latency();
*((int32_t *)p->data + 1)= latency;
mEffectInterface->command(EFFECT_CMD_SET_PARAM,
@@ -1076,7 +1080,7 @@
{
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- (void)mCallback->addEffectToHal(mEffectInterface);
+ (void)getCallback()->addEffectToHal(mEffectInterface);
}
}
@@ -1089,7 +1093,7 @@
status = start_l();
}
if (status == NO_ERROR) {
- mCallback->resetVolume();
+ getCallback()->resetVolume();
}
return status;
}
@@ -1139,7 +1143,7 @@
// We have the EffectChain and EffectModule lock, permit a reentrant call to setVolume:
// resetVolume_l --> setVolume_l --> EffectModule::setVolume
mSetVolumeReentrantTid = gettid();
- mCallback->resetVolume();
+ getCallback()->resetVolume();
mSetVolumeReentrantTid = INVALID_PID;
}
@@ -1172,7 +1176,7 @@
{
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- mCallback->removeEffectFromHal(mEffectInterface);
+ getCallback()->removeEffectFromHal(mEffectInterface);
}
return NO_ERROR;
}
@@ -1288,7 +1292,7 @@
bool AudioFlinger::EffectModule::isOffloadedOrDirect() const
{
- return mCallback->isOffloadOrDirect();
+ return getCallback()->isOffloadOrDirect();
}
bool AudioFlinger::EffectModule::isVolumeControlEnabled() const
@@ -1332,7 +1336,7 @@
|| size > mInConversionBuffer->getSize())) {
mInConversionBuffer.clear();
ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
- (void)mCallback->allocateHalBuffer(size, &mInConversionBuffer);
+ (void)getCallback()->allocateHalBuffer(size, &mInConversionBuffer);
}
if (mInConversionBuffer != nullptr) {
mInConversionBuffer->setFrameCount(inFrameCount);
@@ -1376,7 +1380,7 @@
|| size > mOutConversionBuffer->getSize())) {
mOutConversionBuffer.clear();
ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
- (void)mCallback->allocateHalBuffer(size, &mOutConversionBuffer);
+ (void)getCallback()->allocateHalBuffer(size, &mOutConversionBuffer);
}
if (mOutConversionBuffer != nullptr) {
mOutConversionBuffer->setFrameCount(outFrameCount);
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 9da95bc..661881e 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -138,8 +138,9 @@
int32_t __unused,
std::vector<uint8_t>* __unused) { return NO_ERROR; };
+ // mCallback is atomic so this can be lock-free.
void setCallback(const sp<EffectCallbackInterface>& callback) { mCallback = callback; }
- sp<EffectCallbackInterface>& callback() { return mCallback; }
+ sp<EffectCallbackInterface> getCallback() const { return mCallback.load(); }
status_t addHandle(EffectHandle *handle);
ssize_t disconnectHandle(EffectHandle *handle, bool unpinIfLast);
@@ -170,7 +171,7 @@
DISALLOW_COPY_AND_ASSIGN(EffectBase);
mutable Mutex mLock; // mutex for process, commands and handles list protection
- sp<EffectCallbackInterface> mCallback; // parent effect chain
+ mediautils::atomic_sp<EffectCallbackInterface> mCallback; // parent effect chain
const int mId; // this instance unique ID
const audio_session_t mSessionId; // audio session ID
const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
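
Note on the Effects.h/.cpp change above: EffectBase::mCallback becomes a mediautils::atomic_sp and each call path now takes a single getCallback() snapshot and reuses it instead of dereferencing the member repeatedly. A minimal standalone sketch of that snapshot pattern, using std::atomic_load/std::atomic_store on a std::shared_ptr as a stand-in for the platform atomic_sp (illustrative only, not the platform type):

    #include <atomic>
    #include <memory>

    struct Callback {
        int sampleRate() const { return 48000; }
    };

    class Effect {
    public:
        void setCallback(std::shared_ptr<Callback> cb) {
            std::atomic_store(&mCallback, std::move(cb));   // lock-free publish
        }
        std::shared_ptr<Callback> getCallback() const {
            return std::atomic_load(&mCallback);            // lock-free snapshot
        }
        int framesPerMillisecond() const {
            const auto callback = getCallback();  // read once, then reuse the snapshot
            return callback ? callback->sampleRate() / 1000 : 0;
        }
    private:
        std::shared_ptr<Callback> mCallback;
    };
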
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index d6d6e25..2963202 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -107,7 +107,7 @@
mSampleRate = Format_sampleRate(mFormat);
#if !LOG_NDEBUG
unsigned channelCount = Format_channelCount(mFormat);
- ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+ ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_LIMIT);
#endif
}
dumpState->mSampleRate = mSampleRate;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 13e2ced..88d4eaf 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -48,6 +48,15 @@
/*static*/ const FastMixerState FastMixer::sInitial;
+static audio_channel_mask_t getChannelMaskFromCount(size_t count) {
+ const audio_channel_mask_t mask = audio_channel_out_mask_from_count(count);
+ if (mask == AUDIO_CHANNEL_INVALID) {
+ // some counts have no positional masks. TODO: Update this to return index count?
+ return audio_channel_mask_for_index_assignment_from_count(count);
+ }
+ return mask;
+}
+
FastMixer::FastMixer(audio_io_handle_t parentIoHandle)
: FastThread("cycle_ms", "load_us"),
// mFastTrackNames
@@ -79,7 +88,7 @@
mDummyDumpState = &mDummyFastMixerDumpState;
// TODO: Add channel mask to NBAIO_Format.
// We assume that the channel mask must be a valid positional channel mask.
- mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
unsigned i;
for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -238,7 +247,7 @@
LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
if (mSinkChannelMask == AUDIO_CHANNEL_NONE) {
- mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
}
mAudioChannelCount = mSinkChannelCount - audio_channel_count_from_out_mask(
mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
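
The FastMixer hunks above matter because the FCC_8 to FCC_LIMIT bump lets sink channel counts exceed the set of defined positional layouts; getChannelMaskFromCount() therefore falls back to an index-assignment mask instead of producing an invalid mask. A hedged sketch of the same fallback for reference, assuming the platform <system/audio.h> helpers named in the hunk:

    #include <system/audio.h>

    // Prefer a positional mask; fall back to index assignment when no positional
    // layout is defined for this channel count (mirrors the helper added above).
    static audio_channel_mask_t maskForSinkChannelCount(size_t count) {
        const audio_channel_mask_t positional = audio_channel_out_mask_from_count(count);
        if (positional != AUDIO_CHANNEL_INVALID) {
            return positional;
        }
        return audio_channel_mask_for_index_assignment_from_count(count);
    }
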
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index ba868d7..eb640bb 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -29,7 +29,7 @@
audio_channel_mask_t channelMask,
audio_session_t sessionId,
bool isOut,
- const media::permission::Identity& identity,
+ const android::content::AttributionSourceState& attributionSource,
pid_t creatorPid,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual ~MmapTrack();
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 0af4c7b..51a41af 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -15,8 +15,6 @@
** limitations under the License.
*/
-#include <android/media/permission/Identity.h>
-
#ifndef INCLUDING_FROM_AUDIOFLINGER_H
#error This header file should only be included from AudioFlinger.h
#endif
@@ -28,12 +26,12 @@
bool hasOpPlayAudio() const;
static sp<OpPlayAudioMonitor> createIfNeeded(
- const android::media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_attributes_t& attr, int id,
audio_stream_type_t streamType);
private:
- OpPlayAudioMonitor(const android::media::permission::Identity& identity,
+ OpPlayAudioMonitor(const AttributionSourceState& attributionSource,
audio_usage_t usage, int id);
void onFirstRef() override;
static void getPackagesForUid(uid_t uid, Vector<String16>& packages);
@@ -54,7 +52,7 @@
void checkPlayAudioForUsage();
std::atomic_bool mHasOpPlayAudio;
- const android::media::permission::Identity mIdentity;
+ const AttributionSourceState mAttributionSource;
const int32_t mUsage; // on purpose not audio_usage_t because always checked in appOps as int32_t
const int mId; // for logging purposes only
};
@@ -75,13 +73,14 @@
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
pid_t creatorPid,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
/** default behaviour is to start when there are as many frames
* ready as possible (aka. Buffer is full). */
- size_t frameCountToBeReady = SIZE_MAX);
+ size_t frameCountToBeReady = SIZE_MAX,
+ float speed = 1.0f);
virtual ~Track();
virtual status_t initCheck() const;
@@ -183,6 +182,9 @@
mAudioTrackServerProxy->getUnderrunFrames());
}
}
+
+ audio_output_flags_t getOutputFlags() const { return mFlags; }
+ float getSpeed() const { return mSpeed; }
protected:
// for numerous
friend class PlaybackThread;
@@ -311,6 +313,7 @@
bool mPauseHwPending = false; // direct/offload track request for thread pause
audio_output_flags_t mFlags;
TeePatches mTeePatches;
+ const float mSpeed;
}; // end of Track
@@ -329,7 +332,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- const android::media::permission::Identity& identity);
+ const AttributionSourceState& attributionSource);
virtual ~OutputTrack();
virtual status_t start(AudioSystem::sync_event_t event =
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index a1c2de7..88aa7cb 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -15,6 +15,8 @@
** limitations under the License.
*/
+#include <android/content/AttributionSourceState.h>
+
#ifndef INCLUDING_FROM_AUDIOFLINGER_H
#error This header file should only be included from AudioFlinger.h
#endif
@@ -26,11 +28,12 @@
bool hasOp() const;
int32_t getOp() const { return mAppOp; }
- static sp<OpRecordAudioMonitor> createIfNeeded
- (const media::permission::Identity& identity, const audio_attributes_t& attr);
+ static sp<OpRecordAudioMonitor> createIfNeeded(const AttributionSourceState& attributionSource,
+ const audio_attributes_t& attr);
private:
- OpRecordAudioMonitor(const media::permission::Identity& identity, int32_t appOp);
+ OpRecordAudioMonitor(const AttributionSourceState& attributionSource, int32_t appOp);
+
void onFirstRef() override;
AppOpsManager mAppOpsManager;
@@ -50,7 +53,7 @@
void checkOp();
std::atomic_bool mHasOp;
- const media::permission::Identity mIdentity;
+ const AttributionSourceState mAttributionSource;
const int32_t mAppOp;
};
@@ -68,7 +71,7 @@
size_t bufferSize,
audio_session_t sessionId,
pid_t creatorPid,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_input_flags_t flags,
track_type type,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d878611..f62082e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -118,7 +118,7 @@
namespace android {
using media::IEffectClient;
-using media::permission::Identity;
+using content::AttributionSourceState;
// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
@@ -1638,7 +1638,7 @@
detachAuxEffect_l(effect->id());
}
- sp<EffectChain> chain = effect->callback()->chain().promote();
+ sp<EffectChain> chain = effect->getCallback()->chain().promote();
if (chain != 0) {
// remove effect chain if removing last effect
if (chain->removeEffect_l(effect, release) == 0) {
@@ -2129,7 +2129,7 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t creatorPid,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
pid_t tid,
status_t *status,
audio_port_handle_t portId,
@@ -2424,8 +2424,8 @@
track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
- sessionId, creatorPid, identity, trackFlags, TrackBase::TYPE_DEFAULT,
- portId, SIZE_MAX /*frameCountToBeReady*/);
+ sessionId, creatorPid, attributionSource, trackFlags,
+ TrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/, speed);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
@@ -3321,6 +3321,17 @@
invalidateTracks_l(streamType);
}
+// getTrackById_l must be called while holding the thread lock

+AudioFlinger::PlaybackThread::Track* AudioFlinger::PlaybackThread::getTrackById_l(
+ audio_port_handle_t trackPortId) {
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ if (mTracks[i]->portId() == trackPortId) {
+ return mTracks[i].get();
+ }
+ }
+ return nullptr;
+}
+
status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
{
audio_session_t session = chain->sessionId();
@@ -6875,19 +6886,20 @@
// from different OutputTracks and their associated MixerThreads (e.g. one may
// nearly empty and the other may be dropping data).
- // TODO b/182392769: use identity util, move to server edge
- Identity identity = Identity();
- identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
+ // TODO b/182392769: use attribution source util, move to server edge
+ AttributionSourceState attributionSource = AttributionSourceState();
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
IPCThreadState::self()->getCallingUid()));
- identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
IPCThreadState::self()->getCallingPid()));
+ attributionSource.token = sp<BBinder>::make();
sp<OutputTrack> outputTrack = new OutputTrack(thread,
this,
mSampleRate,
mFormat,
mChannelMask,
frameCount,
- identity);
+ attributionSource);
status_t status = outputTrack != 0 ? outputTrack->initCheck() : (status_t) NO_MEMORY;
if (status != NO_ERROR) {
ALOGE("addOutputTrack() initCheck failed %d", status);
@@ -7811,7 +7823,7 @@
audio_session_t sessionId,
size_t *pNotificationFrameCount,
pid_t creatorPid,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_input_flags_t *flags,
pid_t tid,
status_t *status,
@@ -7825,7 +7837,8 @@
audio_input_flags_t inputFlags = mInput->flags;
audio_input_flags_t requestedFlags = *flags;
uint32_t sampleRate;
- Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
+ AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ attributionSource);
lStatus = initCheck();
if (lStatus != NO_ERROR) {
@@ -7840,7 +7853,7 @@
}
if (maxSharedAudioHistoryMs != 0) {
- if (!captureHotwordAllowed(checkedIdentity)) {
+ if (!captureHotwordAllowed(checkedAttributionSource)) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -7962,16 +7975,17 @@
Mutex::Autolock _l(mLock);
int32_t startFrames = -1;
if (!mSharedAudioPackageName.empty()
- && mSharedAudioPackageName == checkedIdentity.packageName
+ && mSharedAudioPackageName == checkedAttributionSource.packageName
&& mSharedAudioSessionId == sessionId
- && captureHotwordAllowed(checkedIdentity)) {
+ && captureHotwordAllowed(checkedAttributionSource)) {
startFrames = mSharedAudioStartFrames;
}
track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
- checkedIdentity, *flags, TrackBase::TYPE_DEFAULT, portId, startFrames);
+ checkedAttributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
+ startFrames);
lStatus = track->initCheck();
if (lStatus != NO_ERROR) {
@@ -8559,7 +8573,7 @@
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
audio_channel_mask_t mask = (audio_channel_mask_t) value;
if (!audio_is_input_channel(mask) ||
- audio_channel_count_from_in_mask(mask) > FCC_8) {
+ audio_channel_count_from_in_mask(mask) > FCC_LIMIT) {
status = BAD_VALUE;
} else {
channelMask = mask;
@@ -8596,7 +8610,7 @@
if (mInput->stream->getAudioProperties(&config) == OK &&
audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
- audio_channel_count_from_in_mask(config.channel_mask) <= FCC_8) {
+ audio_channel_count_from_in_mask(config.channel_mask) <= FCC_LIMIT) {
status = NO_ERROR;
}
}
@@ -8658,10 +8672,10 @@
mFormat = mHALFormat;
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
if (audio_is_linear_pcm(mFormat)) {
- LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d",
- mChannelCount, FCC_8);
+ LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
+ mChannelCount, FCC_LIMIT);
} else {
- // Can have more that FCC_8 channels in encoded streams.
+ // Can have more than FCC_LIMIT channels in encoded streams.
ALOGI("HAL format %#x is not linear pcm", mFormat);
}
result = mInput->stream->getFrameSize(&mFrameSize);
@@ -9163,7 +9177,7 @@
audio_port_handle_t *handle)
{
ALOGV("%s clientUid %d mStandby %d mPortId %d *handle %d", __FUNCTION__,
- client.identity.uid, mStandby, mPortId, *handle);
+ client.attributionSource.uid, mStandby, mPortId, *handle);
if (mHalStream == 0) {
return NO_INIT;
}
@@ -9195,7 +9209,7 @@
ret = AudioSystem::getOutputForAttr(&mAttr, &io,
mSessionId,
&stream,
- client.identity,
+ client.attributionSource,
&config,
flags,
&deviceId,
@@ -9212,7 +9226,7 @@
ret = AudioSystem::getInputForAttr(&mAttr, &io,
RECORD_RIID_INVALID,
mSessionId,
- client.identity,
+ client.attributionSource,
&config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ,
&deviceId,
@@ -9252,7 +9266,8 @@
// Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
sp<MmapTrack> track = new MmapTrack(this, attr == nullptr ? mAttr : *attr, mSampleRate, mFormat,
- mChannelMask, mSessionId, isOutput(), client.identity,
+ mChannelMask, mSessionId, isOutput(),
+ client.attributionSource,
IPCThreadState::self()->getCallingPid(), portId);
if (isOutput()) {
@@ -9260,7 +9275,7 @@
mHalVolFloat = -1.0f;
} else if (!track->isSilenced_l()) {
for (const sp<MmapTrack> &t : mActiveTracks) {
- if (t->isSilenced_l() && t->uid() != client.identity.uid)
+ if (t->isSilenced_l() && t->uid() != client.attributionSource.uid)
t->invalidate();
}
}
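
Several hunks above construct an AttributionSourceState inline: caller uid/pid via the legacy2aidl_* converters, plus a fresh BBinder as the attribution token. A minimal sketch of that construction, assuming only the uid/pid/token fields of the AIDL-generated android::content::AttributionSourceState that the hunks themselves use; the plain casts stand in for the VALUE_OR_FATAL(legacy2aidl_*) conversions used in the real code:

    #include <sys/types.h>
    #include <android/content/AttributionSourceState.h>
    #include <binder/Binder.h>

    using android::content::AttributionSourceState;

    static AttributionSourceState makeCallerAttribution(uid_t uid, pid_t pid) {
        AttributionSourceState source;
        source.uid = static_cast<int32_t>(uid);   // AIDL carries uid/pid as int32_t
        source.pid = static_cast<int32_t>(pid);
        source.token = android::sp<android::BBinder>::make();  // attribution chain token
        return source;
    }
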
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 17acb16..eee1f2b 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -918,7 +918,7 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t creatorPid,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
pid_t tid,
status_t *status /*non-NULL*/,
audio_port_handle_t portId,
@@ -1013,6 +1013,8 @@
mDownStreamPatch = *patch;
}
+ PlaybackThread::Track* getTrackById_l(audio_port_handle_t trackId);
+
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1696,7 +1698,7 @@
audio_session_t sessionId,
size_t *pNotificationFrameCount,
pid_t creatorPid,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_input_flags_t *flags,
pid_t tid,
status_t *status /*non-NULL*/,
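
Threads.h above declares getTrackById_l(), implemented in the Threads.cpp hunk as a linear scan of mTracks by port id; the _l suffix means the caller must already hold the thread lock. A generic sketch of that lock-then-call contract, using plain std::mutex rather than the platform Mutex (illustrative only):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Track { int portId = 0; };

    class PlaybackThreadSketch {
    public:
        // "_l" convention: caller must already hold mLock.
        Track* getTrackById_l(int portId) {
            for (const auto& t : mTracks) {
                if (t->portId == portId) return t.get();
            }
            return nullptr;
        }
        void touchTrack(int portId) {
            std::lock_guard<std::mutex> lock(mLock);  // take the lock, then use the _l helper
            if (Track* track = getTrackById_l(portId)) {
                (void)track;  // safe to modify while the lock is held
            }
        }
    private:
        std::mutex mLock;
        std::vector<std::unique_ptr<Track>> mTracks;
    };
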
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 8be7c86..57ff0d7 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -65,7 +65,7 @@
using ::android::aidl_utils::binderStatusFromStatusT;
using binder::Status;
-using media::permission::Identity;
+using content::AttributionSourceState;
using media::VolumeShaper;
// ----------------------------------------------------------------------------
// TrackBase
@@ -238,12 +238,13 @@
}
}
-// TODO b/182392769: use identity util
-static Identity audioServerIdentity(pid_t pid) {
- Identity i{};
- i.uid = AID_AUDIOSERVER;
- i.pid = pid;
- return i;
+// TODO b/182392769: use attribution source util
+static AttributionSourceState audioServerAttributionSource(pid_t pid) {
+ AttributionSourceState attributionSource{};
+ attributionSource.uid = AID_AUDIOSERVER;
+ attributionSource.pid = pid;
+ attributionSource.token = sp<BBinder>::make();
+ return attributionSource;
}
status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
@@ -498,11 +499,11 @@
// static
sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
- const Identity& identity, const audio_attributes_t& attr, int id,
+ const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
audio_stream_type_t streamType)
{
Vector <String16> packages;
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
getPackagesForUid(uid, packages);
if (isServiceUid(uid)) {
if (packages.isEmpty()) {
@@ -525,13 +526,15 @@
return nullptr;
}
- Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
- return new OpPlayAudioMonitor(checkedIdentity, attr.usage, id);
+ AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ attributionSource);
+ return new OpPlayAudioMonitor(checkedAttributionSource, attr.usage, id);
}
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
- const Identity& identity, audio_usage_t usage, int id)
- : mHasOpPlayAudio(true), mIdentity(identity), mUsage((int32_t) usage), mId(id)
+ const AttributionSourceState& attributionSource, audio_usage_t usage, int id)
+ : mHasOpPlayAudio(true), mAttributionSource(attributionSource), mUsage((int32_t) usage),
+ mId(id)
{
}
@@ -546,10 +549,11 @@
void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
{
checkPlayAudioForUsage();
- if (mIdentity.packageName.has_value()) {
+ if (mAttributionSource.packageName.has_value()) {
mOpCallback = new PlayAudioOpCallback(this);
mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO,
- VALUE_OR_FATAL(aidl2legacy_string_view_String16(mIdentity.packageName.value_or("")))
+ VALUE_OR_FATAL(aidl2legacy_string_view_String16(
+ mAttributionSource.packageName.value_or("")))
, mOpCallback);
}
}
@@ -563,12 +567,12 @@
// - not called from PlayAudioOpCallback because the callback is not installed in this case
void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
{
- if (!mIdentity.packageName.has_value()) {
+ if (!mAttributionSource.packageName.has_value()) {
mHasOpPlayAudio.store(false);
} else {
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mIdentity.uid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAttributionSource.uid));
String16 packageName = VALUE_OR_FATAL(
- aidl2legacy_string_view_String16(mIdentity.packageName.value_or("")));
+ aidl2legacy_string_view_String16(mAttributionSource.packageName.value_or("")));
bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
mUsage, uid, packageName) == AppOpsManager::MODE_ALLOWED;
ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
@@ -620,11 +624,12 @@
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
pid_t creatorPid,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId,
- size_t frameCountToBeReady)
+ size_t frameCountToBeReady,
+ float speed)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
@@ -633,7 +638,7 @@
(sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, creatorPid,
- VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid)), true /*isOut*/,
+ VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
type,
portId,
@@ -648,7 +653,7 @@
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
mVolumeHandler(new media::VolumeHandler(sampleRate)),
- mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(identity, attr, id(),
+ mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(attributionSource, attr, id(),
streamType)),
// mSinkTimestamp
mFastIndex(-1),
@@ -658,7 +663,8 @@
mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
- mFlags(flags)
+ mFlags(flags),
+ mSpeed(speed)
{
// client == 0 implies sharedBuffer == 0
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -670,7 +676,7 @@
return;
}
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid));
+ uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
ALOGE("%s(%d): no more tracks available", __func__, mId);
releaseCblk(); // this makes the track invalid.
@@ -716,8 +722,8 @@
// HapticGenerator effect, which will generate haptic data, on the track. In that case,
// external vibration is always created for all tracks attached to haptic playback thread.
mAudioVibrationController = new AudioVibrationController(this);
- std::string packageName = identity.packageName.has_value() ?
- identity.packageName.value() : "";
+ std::string packageName = attributionSource.packageName.has_value() ?
+ attributionSource.packageName.value() : "";
mExternalVibration = new os::ExternalVibration(
mUid, packageName, mAttr, mAudioVibrationController);
}
@@ -1404,6 +1410,10 @@
void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
mTeePatches = std::move(teePatches);
+ if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
+ mState == TrackBase::STOPPING_1) {
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
+ }
}
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
@@ -1834,12 +1844,12 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- const Identity& identity)
+ const AttributionSourceState& attributionSource)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
audio_attributes_t{} /* currently unused for output track */,
sampleRate, format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
- AUDIO_SESSION_NONE, getpid(), identity, AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
TYPE_OUTPUT),
mActive(false), mSourceThread(sourceThread)
{
@@ -2069,7 +2079,7 @@
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
- AUDIO_SESSION_NONE, getpid(), audioServerIdentity(getpid()), flags,
+ AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
*playbackThread, timeout)
@@ -2207,32 +2217,33 @@
// static
sp<AudioFlinger::RecordThread::OpRecordAudioMonitor>
AudioFlinger::RecordThread::OpRecordAudioMonitor::createIfNeeded(
- const Identity& identity, const audio_attributes_t& attr)
+ const AttributionSourceState& attributionSource, const audio_attributes_t& attr)
{
- if (isServiceUid(identity.uid)) {
+ if (isServiceUid(attributionSource.uid)) {
ALOGV("not silencing record for service %s",
- identity.toString().c_str());
+ attributionSource.toString().c_str());
return nullptr;
}
// Capturing from FM TUNER output is not controlled by an app op
// because it does not affect users privacy as does capturing from an actual microphone.
if (attr.source == AUDIO_SOURCE_FM_TUNER) {
- ALOGV("not muting FM TUNER capture for uid %d", identity.uid);
+ ALOGV("not muting FM TUNER capture for uid %d", attributionSource.uid);
return nullptr;
}
- Identity checkedIdentity = AudioFlinger::checkIdentityPackage(identity);
- if (!checkedIdentity.packageName.has_value()
- || checkedIdentity.packageName.value().size() == 0) {
+ AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ attributionSource);
+ if (!checkedAttributionSource.packageName.has_value()
+ || checkedAttributionSource.packageName.value().size() == 0) {
return nullptr;
}
- return new OpRecordAudioMonitor(checkedIdentity, getOpForSource(attr.source));
+ return new OpRecordAudioMonitor(checkedAttributionSource, getOpForSource(attr.source));
}
AudioFlinger::RecordThread::OpRecordAudioMonitor::OpRecordAudioMonitor(
- const Identity& identity, int32_t appOp)
- : mHasOp(true), mIdentity(identity), mAppOp(appOp)
+ const AttributionSourceState& attributionSource, int32_t appOp)
+ : mHasOp(true), mAttributionSource(attributionSource), mAppOp(appOp)
{
}
@@ -2248,9 +2259,11 @@
{
checkOp();
mOpCallback = new RecordAudioOpCallback(this);
- ALOGV("start watching op %d for %s", mAppOp, mIdentity.toString().c_str());
- mAppOpsManager.startWatchingMode(mAppOp,
- VALUE_OR_FATAL(aidl2legacy_string_view_String16(mIdentity.packageName.value_or(""))),
+ ALOGV("start watching op %d for %s", mAppOp, mAttributionSource.toString().c_str());
+ // TODO: We need to always watch AppOpsManager::OP_RECORD_AUDIO too
+ // since it controls the mic permission for legacy apps.
+ mAppOpsManager.startWatchingMode(mAppOp, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
+ mAttributionSource.packageName.value_or(""))),
mOpCallback);
}
@@ -2266,17 +2279,17 @@
// - not called from RecordAudioOpCallback because the callback is not installed in this case
void AudioFlinger::RecordThread::OpRecordAudioMonitor::checkOp()
{
-
+ // TODO: We need to always check AppOpsManager::OP_RECORD_AUDIO too
+ // since it controls the mic permission for legacy apps.
const int32_t mode = mAppOpsManager.checkOp(mAppOp,
- mIdentity.uid, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
- mIdentity.packageName.value_or(""))));
- const bool hasIt = (mode == AppOpsManager::MODE_ALLOWED);
+ mAttributionSource.uid, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
+ mAttributionSource.packageName.value_or(""))));
+ const bool hasIt = (mode == AppOpsManager::MODE_ALLOWED);
// verbose logging only log when appOp changed
ALOGI_IF(hasIt != mHasOp.load(),
"App op %d missing, %ssilencing record %s",
- mAppOp, hasIt ? "un" : "", mIdentity.toString().c_str());
+ mAppOp, hasIt ? "un" : "", mAttributionSource.toString().c_str());
mHasOp.store(hasIt);
-
}
AudioFlinger::RecordThread::OpRecordAudioMonitor::RecordAudioOpCallback::RecordAudioOpCallback(
@@ -2376,7 +2389,7 @@
size_t bufferSize,
audio_session_t sessionId,
pid_t creatorPid,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
audio_input_flags_t flags,
track_type type,
audio_port_handle_t portId,
@@ -2384,7 +2397,7 @@
: TrackBase(thread, client, attr, sampleRate, format,
channelMask, frameCount, buffer, bufferSize, sessionId,
creatorPid,
- VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid)),
+ VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
false /*isOut*/,
(type == TYPE_DEFAULT) ?
((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
@@ -2397,7 +2410,7 @@
mRecordBufferConverter(NULL),
mFlags(flags),
mSilenced(false),
- mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(identity, attr)),
+ mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(attributionSource, attr)),
mStartFrames(startFrames)
{
if (mCblk == NULL) {
@@ -2706,10 +2719,11 @@
return PERMISSION_DENIED;
}
- Identity identity{};
- identity.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
- identity.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
- if (!captureHotwordAllowed(identity)) {
+ AttributionSourceState attributionSource{};
+ attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
+ attributionSource.token = sp<BBinder>::make();
+ if (!captureHotwordAllowed(attributionSource)) {
return PERMISSION_DENIED;
}
@@ -2745,7 +2759,7 @@
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
- audioServerIdentity(getpid()), flags, TYPE_PATCH),
+ audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
*recordThread, timeout)
{
@@ -3022,19 +3036,19 @@
audio_channel_mask_t channelMask,
audio_session_t sessionId,
bool isOut,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
pid_t creatorPid,
audio_port_handle_t portId)
: TrackBase(thread, NULL, attr, sampleRate, format,
channelMask, (size_t)0 /* frameCount */,
nullptr /* buffer */, (size_t)0 /* bufferSize */,
sessionId, creatorPid,
- VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.uid)),
+ VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
isOut,
ALLOC_NONE,
TYPE_DEFAULT, portId,
std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
- mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(identity.pid))),
+ mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
mSilenced(false), mSilencedNotified(false)
{
// Once this item is logged by the server, the client can add properties.
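
Both OpPlayAudioMonitor and OpRecordAudioMonitor above follow the same shape: resolve the package from the attribution source, check the op once, then register a callback so later app-op changes flip an atomic flag that the audio path reads lock-free. A generic sketch of that check-then-watch pattern, with a stand-in AppOps interface rather than the platform AppOpsManager (names here are illustrative only):

    #include <atomic>
    #include <functional>
    #include <string>

    // Stand-in for the platform app-ops service.
    struct AppOps {
        bool isAllowed(int /*op*/, int /*uid*/, const std::string& /*pkg*/) { return true; }
        void watch(int /*op*/, const std::string& /*pkg*/, std::function<void()> /*onChanged*/) {}
    };

    class OpMonitorSketch {
    public:
        OpMonitorSketch(AppOps& ops, int op, int uid, std::string pkg)
            : mOps(ops), mOp(op), mUid(uid), mPkg(std::move(pkg)) {
            check();                                       // seed the flag
            mOps.watch(mOp, mPkg, [this] { check(); });    // re-check on every op change
        }
        bool hasOp() const { return mHasOp.load(); }       // lock-free read from the audio path
    private:
        void check() { mHasOp.store(mOps.isAllowed(mOp, mUid, mPkg)); }
        AppOps& mOps;
        const int mOp;
        const int mUid;
        const std::string mPkg;
        std::atomic_bool mHasOp{true};
    };
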
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 5f052a5..2e49e71 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -17,15 +17,18 @@
#ifndef ANDROID_AUDIOPOLICY_INTERFACE_H
#define ANDROID_AUDIOPOLICY_INTERFACE_H
+#include <media/AudioCommonTypes.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioSystem.h>
#include <media/AudioPolicy.h>
#include <media/DeviceDescriptorBase.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <utils/String8.h>
namespace android {
+using content::AttributionSourceState;
+
// ----------------------------------------------------------------------------
// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces
@@ -123,7 +126,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
@@ -142,7 +145,7 @@
audio_io_handle_t *input,
audio_unique_id_t riid,
audio_session_t session,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -453,6 +456,9 @@
virtual void setSoundTriggerCaptureState(bool active) = 0;
virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
+
+ virtual status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
};
// These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 552919d..577f641 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -42,7 +42,7 @@
// For mixed output and inputs, the policy will use max mixer channel count.
// Do not limit channel count otherwise
-#define MAX_MIXER_CHANNEL_COUNT FCC_8
+#define MAX_MIXER_CHANNEL_COUNT FCC_LIMIT
/**
* Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 59876c6..74b3405 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -109,6 +109,9 @@
const std::vector<wp<SwAudioOutputDescriptor>>& getSecondaryOutputs() const {
return mSecondaryOutputs;
};
+ void setSecondaryOutputs(std::vector<wp<SwAudioOutputDescriptor>>&& secondaryOutputs) {
+ mSecondaryOutputs = std::move(secondaryOutputs);
+ }
VolumeSource volumeSource() const { return mVolumeSource; }
const sp<AudioPolicyMix> getPrimaryMix() const {
return mPrimaryMix.promote();
@@ -143,7 +146,7 @@
const product_strategy_t mStrategy;
const VolumeSource mVolumeSource;
const audio_output_flags_t mFlags;
- const std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
+ std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
const wp<AudioPolicyMix> mPrimaryMix;
/**
* required for duplicating thread, prevent from removing active client from an output
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index 38bdedc..faf15d6 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -50,7 +50,7 @@
"libbinder",
"libaudiopolicy",
"libaudiopolicymanagerdefault",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
static_libs: [
"android.hardware.audio.common@7.0-enums",
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 1177b95..7000cd9 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -26,7 +26,7 @@
#include <Serializer.h>
#include <android-base/file.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <libxml/parser.h>
#include <libxml/xinclude.h>
#include <media/AudioPolicy.h>
@@ -47,7 +47,7 @@
using namespace ::android::audio::policy::configuration::V7_0;
}
-using media::permission::Identity;
+using content::AttributionSourceState;
static const std::vector<audio_format_t> kAudioFormats = [] {
std::vector<audio_format_t> result;
@@ -249,11 +249,12 @@
*portId = AUDIO_PORT_HANDLE_NONE;
AudioPolicyInterface::output_type_t outputType;
- // TODO b/182392769: use identity util
- Identity i;
- i.uid = 0;
- if (mManager->getOutputForAttr(&attr, output, AUDIO_SESSION_NONE, &stream, i, &config,
- &flags, selectedDeviceId, portId, {}, &outputType) != OK) {
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource;
+ attributionSource.uid = 0;
+ attributionSource.token = sp<BBinder>::make();
+ if (mManager->getOutputForAttr(&attr, output, AUDIO_SESSION_NONE, &stream, attributionSource,
+ &config, &flags, selectedDeviceId, portId, {}, &outputType) != OK) {
return false;
}
if (*output == AUDIO_IO_HANDLE_NONE || *portId == AUDIO_PORT_HANDLE_NONE) {
@@ -276,10 +277,11 @@
*portId = AUDIO_PORT_HANDLE_NONE;
AudioPolicyInterface::input_type_t inputType;
- Identity i;
- i.uid = 0;
- if (mManager->getInputForAttr(&attr, &input, riid, AUDIO_SESSION_NONE, i, &config,
- flags, selectedDeviceId, &inputType, portId) != OK) {
+ AttributionSourceState attributionSource;
+ attributionSource.uid = 0;
+ attributionSource.token = sp<BBinder>::make();
+ if (mManager->getInputForAttr(&attr, &input, riid, AUDIO_SESSION_NONE, attributionSource,
+ &config, flags, selectedDeviceId, &inputType, portId) != OK) {
return false;
}
if (*portId == AUDIO_PORT_HANDLE_NONE || input == AUDIO_IO_HANDLE_NONE) {
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
index b111db4..0165dc8 100644
--- a/services/audiopolicy/managerdefault/Android.bp
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -34,7 +34,7 @@
// a dependency on it in the device makefile. There will be no build time
// conflict with libaudiopolicyenginedefault.
"libaudiopolicyenginedefault",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 7185435..2e866ff 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -31,6 +31,7 @@
#include <algorithm>
#include <inttypes.h>
+#include <map>
#include <math.h>
#include <set>
#include <unordered_set>
@@ -52,7 +53,7 @@
namespace android {
-using media::permission::Identity;
+using content::AttributionSourceState;
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
@@ -1132,7 +1133,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
@@ -1145,7 +1146,7 @@
return INVALID_OPERATION;
}
const uid_t uid = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_uid_t(identity.uid));
+ aidl2legacy_int32_t_uid_t(attributionSource.uid));
const audio_port_handle_t requestedPortId = *selectedDeviceId;
audio_attributes_t resultAttr;
bool isRequestedDeviceForExclusiveUse = false;
@@ -2114,7 +2115,7 @@
audio_io_handle_t *input,
audio_unique_id_t riid,
audio_session_t session,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -2133,7 +2134,7 @@
sp<AudioInputDescriptor> inputDesc;
sp<RecordClientDescriptor> clientDesc;
audio_port_handle_t requestedDeviceId = *selectedDeviceId;
- uid_t uid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(identity.uid));
+ uid_t uid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(attributionSource.uid));
bool isSoundTrigger;
// The supplied portId must be AUDIO_PORT_HANDLE_NONE
@@ -5694,6 +5695,7 @@
void AudioPolicyManager::checkSecondaryOutputs() {
std::set<audio_stream_type_t> streamsToInvalidate;
+ TrackSecondaryOutputsMap trackSecondaryOutputs;
for (size_t i = 0; i < mOutputs.size(); i++) {
const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
@@ -5710,16 +5712,28 @@
}
}
- if (status != OK ||
- !std::equal(client->getSecondaryOutputs().begin(),
- client->getSecondaryOutputs().end(),
- secondaryDescs.begin(), secondaryDescs.end())) {
+ if (status != OK) {
streamsToInvalidate.insert(client->stream());
+ } else if (!std::equal(
+ client->getSecondaryOutputs().begin(),
+ client->getSecondaryOutputs().end(),
+ secondaryDescs.begin(), secondaryDescs.end())) {
+ std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+ std::vector<audio_io_handle_t> secondaryOutputIds;
+ for (const auto& secondaryDesc : secondaryDescs) {
+ secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+ weakSecondaryDescs.push_back(secondaryDesc);
+ }
+ trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+ client->setSecondaryOutputs(std::move(weakSecondaryDescs));
}
}
}
+ if (!trackSecondaryOutputs.empty()) {
+ mpClientInterface->updateSecondaryOutputs(trackSecondaryOutputs);
+ }
for (audio_stream_type_t stream : streamsToInvalidate) {
- ALOGD("%s Invalidate stream %d due to secondary output change", __func__, stream);
+ ALOGD("%s Invalidate stream %d due to fail getting output for attr", __func__, stream);
mpClientInterface->invalidateStream(stream);
}
}
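
The checkSecondaryOutputs() rewrite above separates the two cases: a failed getOutputForAttr still invalidates the stream, while a changed but valid set of secondary outputs is now pushed to AudioFlinger through updateSecondaryOutputs() instead of forcing a stream invalidate. A short sketch of the map that crosses that interface; the alias below is an assumption (TrackSecondaryOutputsMap is not defined in this diff) chosen to match how the map is populated above:

    #include <map>
    #include <vector>

    // Assumed shape: track port handle -> secondary output io handles.
    using TrackSecondaryOutputsMapSketch = std::map<int /*audio_port_handle_t*/,
                                                    std::vector<int /*audio_io_handle_t*/>>;

    TrackSecondaryOutputsMapSketch buildUpdate(int trackPortId, std::vector<int> secondaryOutputs) {
        TrackSecondaryOutputsMapSketch update;
        update.emplace(trackPortId, std::move(secondaryOutputs));
        return update;  // handed to AudioFlinger::updateSecondaryOutputs() by the policy client
    }
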
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index f5dd20c..98f96d1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -52,6 +52,8 @@
namespace android {
+using content::AttributionSourceState;
+
// ----------------------------------------------------------------------------
// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
@@ -116,7 +118,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
@@ -130,7 +132,7 @@
audio_io_handle_t *input,
audio_unique_id_t riid,
audio_session_t session,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 14be671..0273d29 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -45,12 +45,12 @@
"audiopolicy-aidl-cpp",
"audiopolicy-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
static_libs: [
"libaudiopolicycomponents",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
header_libs: [
@@ -70,6 +70,6 @@
export_shared_lib_headers: [
"libactivitymanager_aidl",
"libsensorprivacy",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
}
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 77b5200..cd53073 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -291,4 +291,14 @@
return af->getAudioPort(port);
}
+status_t AudioPolicyService::AudioPolicyClient::updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ ALOGW("%s: could not get AudioFlinger", __func__);
+ return PERMISSION_DENIED;
+ }
+ return af->updateSecondaryOutputs(trackSecondaryOutputs);
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 8426a77..3f01de9 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -35,7 +35,7 @@
namespace android {
-using media::permission::Identity;
+using content::AttributionSourceState;
// ----------------------------------------------------------------------------
// AudioPolicyEffects Implementation
@@ -123,9 +123,10 @@
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
EffectDesc *effect = effects[i];
- Identity identity;
- identity.packageName = "android";
- sp<AudioEffect> fx = new AudioEffect(identity);
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = "android";
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> fx = new AudioEffect(attributionSource);
fx->set(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
status_t status = fx->initCheck();
if (status != NO_ERROR && status != ALREADY_EXISTS) {
@@ -274,9 +275,10 @@
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
EffectDesc *effect = effects[i];
- Identity identity;
- identity.packageName = "android";
- sp<AudioEffect> fx = new AudioEffect(identity);
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = "android";
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> fx = new AudioEffect(attributionSource);
fx->set(NULL, &effect->mUuid, 0, 0, 0, audioSession, output);
status_t status = fx->initCheck();
if (status != NO_ERROR && status != ALREADY_EXISTS) {
@@ -976,9 +978,10 @@
for (const auto& deviceEffectsIter : mDeviceEffects) {
const auto& deviceEffects = deviceEffectsIter.second;
for (const auto& effectDesc : deviceEffects->mEffectDescriptors->mEffects) {
- Identity identity;
- identity.packageName = "android";
- sp<AudioEffect> fx = new AudioEffect(identity);
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = "android";
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> fx = new AudioEffect(attributionSource);
fx->set(EFFECT_UUID_NULL, &effectDesc->mUuid, 0, nullptr,
nullptr, AUDIO_SESSION_DEVICE, AUDIO_IO_HANDLE_NONE,
AudioDeviceTypeAddr{deviceEffects->getDeviceType(),
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index b9c715e..3298f6b 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -25,7 +25,7 @@
#include <media/MediaMetricsItem.h>
#include <media/PolicyAidlConversion.h>
#include <utils/Log.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#define VALUE_OR_RETURN_BINDER_STATUS(x) \
({ auto _tmp = (x); \
@@ -43,7 +43,7 @@
namespace android {
using binder::Status;
using aidl_utils::binderStatusFromStatusT;
-using media::permission::Identity;
+using content::AttributionSourceState;
const std::vector<audio_usage_t>& SYSTEM_USAGES = {
AUDIO_USAGE_CALL_ASSISTANT,
@@ -64,15 +64,16 @@
}
status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
- return validateUsage(usage, getCallingIdentity());
+ return validateUsage(usage, getCallingAttributionSource());
}
-status_t AudioPolicyService::validateUsage(audio_usage_t usage, const Identity& identity) {
+status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+ const AttributionSourceState& attributionSource) {
if (isSystemUsage(usage)) {
if (isSupportedSystemUsage(usage)) {
- if (!modifyAudioRoutingAllowed(identity)) {
+ if (!modifyAudioRoutingAllowed(attributionSource)) {
ALOGE(("permission denied: modify audio routing not allowed "
- "for identity %s"), identity.toString().c_str());
+ "for attributionSource %s"), attributionSource.toString().c_str());
return PERMISSION_DENIED;
}
} else {
@@ -279,7 +280,7 @@
Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
int32_t sessionAidl,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const media::AudioConfig& configAidl,
int32_t flagsAidl,
int32_t selectedDeviceIdAidl,
@@ -307,28 +308,28 @@
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, identity)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
ALOGV("%s()", __func__);
Mutex::Autolock _l(mLock);
// TODO b/182392553: refactor or remove
- Identity adjIdentity = identity;
+ AttributionSourceState adjAttributionSource = attributionSource;
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isAudioServerOrMediaServerUid(callingUid) || identity.uid == -1) {
+ if (!isAudioServerOrMediaServerUid(callingUid) || attributionSource.uid == -1) {
int32_t callingUidAidl = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_uid_t_int32_t(callingUid));
- ALOGW_IF(identity.uid != -1 && identity.uid != callingUidAidl,
+ ALOGW_IF(attributionSource.uid != -1 && attributionSource.uid != callingUidAidl,
"%s uid %d tried to pass itself off as %d", __func__,
- callingUidAidl, identity.uid);
- adjIdentity.uid = callingUidAidl;
+ callingUidAidl, attributionSource.uid);
+ adjAttributionSource.uid = callingUidAidl;
}
if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_uid_t(adjIdentity.uid)))) {
+ aidl2legacy_int32_t_uid_t(adjAttributionSource.uid)))) {
attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
}
if (((attr.flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
- && !bypassInterruptionPolicyAllowed(identity)) {
+ && !bypassInterruptionPolicyAllowed(adjAttributionSource)) {
attr.flags = static_cast<audio_flags_mask_t>(
attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
}
@@ -336,7 +337,7 @@
AudioPolicyInterface::output_type_t outputType;
status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session,
&stream,
- adjIdentity,
+ adjAttributionSource,
&config,
&flags, &selectedDeviceId, &portId,
&secondaryOutputs,
@@ -349,16 +350,16 @@
case AudioPolicyInterface::API_OUTPUT_LEGACY:
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
- if (!modifyPhoneStateAllowed(adjIdentity)) {
+ if (!modifyPhoneStateAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
- __func__, adjIdentity.uid);
+ __func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
- if (!modifyAudioRoutingAllowed(adjIdentity)) {
+ if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
- __func__, adjIdentity.uid);
+ __func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
}
break;
@@ -371,7 +372,7 @@
if (result == NO_ERROR) {
sp<AudioPlaybackClient> client =
- new AudioPlaybackClient(attr, output, adjIdentity, session,
+ new AudioPlaybackClient(attr, output, adjAttributionSource, session,
portId, selectedDeviceId, stream);
mAudioPlaybackClients.add(portId, client);
@@ -508,7 +509,7 @@
int32_t inputAidl,
int32_t riidAidl,
int32_t sessionAidl,
- const Identity& identity,
+ const AttributionSourceState& attributionSource,
const media::AudioConfigBase& configAidl,
int32_t flagsAidl,
int32_t selectedDeviceIdAidl,
@@ -551,42 +552,46 @@
return binderStatusFromStatusT(BAD_VALUE);
}
- // Make sure identity represents the current caller
- Identity adjIdentity = identity;
+ // Make sure attribution source represents the current caller
+ AttributionSourceState adjAttributionSource = attributionSource;
// TODO b/182392553: refactor or remove
- bool updatePid = (identity.pid == -1);
+ bool updatePid = (attributionSource.pid == -1);
const uid_t callingUid =IPCThreadState::self()->getCallingUid();
- const uid_t currentUid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(identity.uid));
+ const uid_t currentUid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(
+ attributionSource.uid));
if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(currentUid != (uid_t)-1 && currentUid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid,
currentUid);
- adjIdentity.uid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ adjAttributionSource.uid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_uid_t_int32_t(
+ callingUid));
updatePid = true;
}
if (updatePid) {
const int32_t callingPid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_pid_t_int32_t(
IPCThreadState::self()->getCallingPid()));
- ALOGW_IF(identity.pid != -1 && identity.pid != callingPid,
+ ALOGW_IF(attributionSource.pid != -1 && attributionSource.pid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, adjIdentity.uid, callingPid, identity.pid);
- adjIdentity.pid = callingPid;
+ __func__, adjAttributionSource.uid, callingPid, attributionSource.pid);
+ adjAttributionSource.pid = callingPid;
}
- RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, adjIdentity)));
+ RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+ adjAttributionSource)));
// check calling permissions.
// Capturing from FM_TUNER source is controlled by captureTunerAudioInputAllowed() and
    // captureAudioOutputAllowed() (deprecated) as this does not affect users' privacy
// as does capturing from an actual microphone.
- if (!(recordingAllowed(adjIdentity, attr.source) || attr.source == AUDIO_SOURCE_FM_TUNER)) {
+ if (!(recordingAllowed(adjAttributionSource, attr.source)
+ || attr.source == AUDIO_SOURCE_FM_TUNER)) {
ALOGE("%s permission denied: recording not allowed for %s",
- __func__, adjIdentity.toString().c_str());
+ __func__, adjAttributionSource.toString().c_str());
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureOutput = captureAudioOutputAllowed(adjIdentity);
+ bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK ||
inputSource == AUDIO_SOURCE_VOICE_DOWNLINK ||
inputSource == AUDIO_SOURCE_VOICE_CALL ||
@@ -596,12 +601,12 @@
}
if (inputSource == AUDIO_SOURCE_FM_TUNER
- && !captureTunerAudioInputAllowed(adjIdentity)
+ && !captureTunerAudioInputAllowed(adjAttributionSource)
&& !canCaptureOutput) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureHotword = captureHotwordAllowed(adjIdentity);
+ bool canCaptureHotword = captureHotwordAllowed(adjAttributionSource);
if ((inputSource == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -609,7 +614,7 @@
if (((flags & AUDIO_INPUT_FLAG_HW_HOTWORD) != 0)
&& !canCaptureHotword) {
ALOGE("%s: permission denied: hotword mode not allowed"
- " for uid %d pid %d", __func__, adjIdentity.uid, adjIdentity.pid);
+ " for uid %d pid %d", __func__, adjAttributionSource.uid, adjAttributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -623,7 +628,7 @@
AutoCallerClear acc;
// the audio_in_acoustics_t parameter is ignored by get_input()
status = mAudioPolicyManager->getInputForAttr(&attr, &input, riid, session,
- adjIdentity, &config,
+ adjAttributionSource, &config,
flags, &selectedDeviceId,
&inputType, &portId);
@@ -647,7 +652,7 @@
}
break;
case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
- if (!modifyAudioRoutingAllowed(adjIdentity)) {
+ if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
status = PERMISSION_DENIED;
}
@@ -668,7 +673,7 @@
}
sp<AudioRecordClient> client = new AudioRecordClient(attr, input, session, portId,
- selectedDeviceId, adjIdentity,
+ selectedDeviceId, adjAttributionSource,
canCaptureOutput, canCaptureHotword);
mAudioRecordClients.add(portId, client);
}
@@ -723,11 +728,11 @@
msg << "Audio recording on session " << client->session;
// check calling permissions
- if (!(startRecording(client->identity, String16(msg.str().c_str()),
+ if (!(startRecording(client->attributionSource, String16(msg.str().c_str()),
client->attributes.source)
|| client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
- ALOGE("%s permission denied: recording not allowed for identity %s",
- __func__, client->identity.toString().c_str());
+ ALOGE("%s permission denied: recording not allowed for attribution source %s",
+ __func__, client->attributionSource.toString().c_str());
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -771,13 +776,13 @@
item->setCString(kAudioPolicyRqstSrc,
toString(client->attributes.source).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
- if (client->identity.packageName.has_value() &&
- client->identity.packageName.value().size() != 0) {
+ if (client->attributionSource.packageName.has_value() &&
+ client->attributionSource.packageName.value().size() != 0) {
item->setCString(kAudioPolicyRqstPkg,
- client->identity.packageName.value().c_str());
+ client->attributionSource.packageName.value().c_str());
} else {
item->setCString(kAudioPolicyRqstPkg,
- std::to_string(client->identity.uid).c_str());
+ std::to_string(client->attributionSource.uid).c_str());
}
item->setCString(
kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
@@ -793,13 +798,13 @@
item->setCString(kAudioPolicyActiveSrc,
toString(other->attributes.source).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
- if (other->identity.packageName.has_value() &&
- other->identity.packageName.value().size() != 0) {
+ if (other->attributionSource.packageName.has_value() &&
+ other->attributionSource.packageName.value().size() != 0) {
item->setCString(kAudioPolicyActivePkg,
- other->identity.packageName.value().c_str());
+ other->attributionSource.packageName.value().c_str());
} else {
item->setCString(kAudioPolicyRqstPkg, std::to_string(
- other->identity.uid).c_str());
+ other->attributionSource.uid).c_str());
}
item->setCString(kAudioPolicyActiveDevice,
getDeviceTypeStrForPortId(other->deviceId).c_str());
@@ -815,7 +820,7 @@
client->active = false;
client->startTimeNs = 0;
updateUidStates_l();
- finishRecording(client->identity, client->attributes.source);
+ finishRecording(client->attributionSource, client->attributes.source);
}
return binderStatusFromStatusT(status);
@@ -844,7 +849,7 @@
updateUidStates_l();
// finish the recording app op
- finishRecording(client->identity, client->attributes.source);
+ finishRecording(client->attributionSource, client->attributes.source);
AutoCallerClear acc;
return binderStatusFromStatusT(mAudioPolicyManager->stopInput(portId));
}
@@ -1641,15 +1646,15 @@
bool needCaptureMediaOutput = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
return mix.mAllowPrivilegedMediaPlaybackCapture; });
- const Identity identity = getCallingIdentity();
+ const AttributionSourceState attributionSource = getCallingAttributionSource();
- if (needCaptureMediaOutput && !captureMediaOutputAllowed(identity)) {
+ if (needCaptureMediaOutput && !captureMediaOutputAllowed(attributionSource)) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (needCaptureVoiceCommunicationOutput &&
- !captureVoiceCommunicationOutputAllowed(identity)) {
+ !captureVoiceCommunicationOutputAllowed(attributionSource)) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
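
For context on the pattern repeated in the audio policy hunks above: a caller-supplied AttributionSourceState is not trusted as-is, so the service overwrites its uid/pid with the binder calling identity unless the caller is audioserver/mediaserver (a uid of -1 also forces the override). A minimal standalone sketch of that guard, using a simplified struct and a hypothetical isTrustedCaller() in place of isAudioServerOrMediaServerUid():

    #include <sys/types.h>
    #include <cstdint>

    // Simplified stand-in for the AIDL parcelable used in the hunks above.
    struct AttributionSourceState {
        int32_t uid = -1;
        int32_t pid = -1;
    };

    // Hypothetical trust check; the real service uses isAudioServerOrMediaServerUid().
    static bool isTrustedCaller(uid_t callingUid) {
        return callingUid == 1041;  // AID_AUDIOSERVER, for illustration only
    }

    // Untrusted callers may only attribute a request to themselves, so their
    // caller-supplied uid/pid are replaced with the binder calling identity.
    AttributionSourceState adjustForCaller(const AttributionSourceState& requested,
                                           uid_t callingUid, pid_t callingPid) {
        AttributionSourceState adjusted = requested;
        if (!isTrustedCaller(callingUid) || requested.uid == -1) {
            adjusted.uid = static_cast<int32_t>(callingUid);
        }
        if (!isTrustedCaller(callingUid) || requested.pid == -1) {
            adjusted.pid = static_cast<int32_t>(callingPid);
        }
        return adjusted;
    }
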
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index fb38e3d..201273e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -594,7 +594,8 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- uid_t currentUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(current->identity.uid));
+ uid_t currentUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
+ current->attributionSource.uid));
if (!current->active) {
continue;
}
@@ -641,7 +642,7 @@
|| (isInCommunication && currentUid == mPhoneStateOwnerUid)) {
if (!isInCommunication || latestSensitiveActiveOrComm == nullptr
|| VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
- latestSensitiveActiveOrComm->identity.uid))
+ latestSensitiveActiveOrComm->attributionSource.uid))
!= mPhoneStateOwnerUid) {
latestSensitiveActiveOrComm = current;
latestSensitiveStartNs = current->startTimeNs;
@@ -676,7 +677,7 @@
// if audio mode is IN_COMMUNICATION, favor audio mode owner over an app with
// foreground UI in case both are capturing with privacy sensitive flag.
uid_t latestActiveUid = VALUE_OR_FATAL(
- aidl2legacy_int32_t_uid_t(latestSensitiveActiveOrComm->identity.uid));
+ aidl2legacy_int32_t_uid_t(latestSensitiveActiveOrComm->attributionSource.uid));
if (isInCommunication && latestActiveUid == mPhoneStateOwnerUid) {
topSensitiveActive = latestSensitiveActiveOrComm;
topSensitiveStartNs = latestSensitiveStartNs;
@@ -696,20 +697,20 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
uid_t currentUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
- current->identity.uid));
+ current->attributionSource.uid));
if (!current->active) {
continue;
}
audio_source_t source = current->attributes.source;
bool isTopOrLatestActive = topActive == nullptr ? false :
- current->identity.uid == topActive->identity.uid;
+ current->attributionSource.uid == topActive->attributionSource.uid;
bool isTopOrLatestSensitive = topSensitiveActive == nullptr ? false :
- current->identity.uid == topSensitiveActive->identity.uid;
+ current->attributionSource.uid == topSensitiveActive->attributionSource.uid;
auto canCaptureIfInCallOrCommunication = [&](const auto &recordClient) REQUIRES(mLock) {
uid_t recordUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(
- recordClient->identity.uid));
+ recordClient->attributionSource.uid));
bool canCaptureCall = recordClient->canCaptureOutput;
bool canCaptureCommunication = recordClient->canCaptureOutput
|| !isPhoneStateOwnerActive
@@ -785,7 +786,7 @@
allowCapture = true;
}
}
- setAppState_l(current->portId,
+ setAppState_l(current,
allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(currentUid)) :
APP_STATE_IDLE);
}
@@ -795,7 +796,7 @@
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!isVirtualSource(current->attributes.source)) {
- setAppState_l(current->portId, APP_STATE_IDLE);
+ setAppState_l(current, APP_STATE_IDLE);
}
}
}
@@ -829,17 +830,32 @@
return false;
}
-void AudioPolicyService::setAppState_l(audio_port_handle_t portId, app_state_t state)
+void AudioPolicyService::setAppState_l(sp<AudioRecordClient> client, app_state_t state)
{
AutoCallerClear acc;
if (mAudioPolicyManager) {
- mAudioPolicyManager->setAppState(portId, state);
+ mAudioPolicyManager->setAppState(client->portId, state);
}
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af) {
bool silenced = state == APP_STATE_IDLE;
- af->setRecordSilenced(portId, silenced);
+ if (client->silenced != silenced) {
+ if (client->active) {
+ if (silenced) {
+ finishRecording(client->attributionSource, client->attributes.source);
+ } else {
+ std::stringstream msg;
+ msg << "Audio recording un-silenced on session " << client->session;
+ if (!startRecording(client->attributionSource, String16(msg.str().c_str()),
+ client->attributes.source)) {
+ silenced = true;
+ }
+ }
+ }
+ af->setRecordSilenced(client->portId, silenced);
+ client->silenced = silenced;
+ }
}
}
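
The setAppState_l() change above passes the whole AudioRecordClient so the service can track a per-client silenced flag and release the recording app op while a capture is silenced, re-acquiring it on un-silence. A condensed sketch of that transition, with startOp()/finishOp() standing in for startRecording()/finishRecording():

    #include <cstdint>

    struct RecordClient {
        int32_t portId;
        bool active;
        bool silenced;
    };

    // Placeholders for the real app-op and AudioFlinger helpers.
    static bool startOp(const RecordClient&) { return true; }
    static void finishOp(const RecordClient&) {}
    static void setRecordSilenced(int32_t /*portId*/, bool /*silenced*/) {}

    void applySilenceState(RecordClient& client, bool shouldSilence) {
        if (client.silenced == shouldSilence) {
            return;                         // no transition, nothing to do
        }
        if (client.active) {
            if (shouldSilence) {
                finishOp(client);           // stop charging the recording op
            } else if (!startOp(client)) {
                shouldSilence = true;       // re-acquire failed: stay silenced
            }
        }
        setRecordSilenced(client.portId, shouldSilence);
        client.silenced = shouldSilence;
    }
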
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 00d9670..ac9c20f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2009 The Android Open Source Project
*
@@ -38,12 +39,14 @@
#include "CaptureStateNotifier.h"
#include <AudioPolicyInterface.h>
#include <android/hardware/BnSensorPrivacyListener.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <unordered_map>
namespace android {
+using content::AttributionSourceState;
+
// ----------------------------------------------------------------------------
class AudioPolicyService :
@@ -81,7 +84,7 @@
media::AudioPolicyForcedConfig* _aidl_return) override;
binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
- const media::permission::Identity &identity,
+ const AttributionSourceState &attributionSource,
const media::AudioConfig& config,
int32_t flags, int32_t selectedDeviceId,
media::GetOutputForAttrResponse* _aidl_return) override;
@@ -90,7 +93,7 @@
binder::Status releaseOutput(int32_t portId) override;
binder::Status getInputForAttr(const media::AudioAttributesInternal& attr, int32_t input,
int32_t riid, int32_t session,
- const media::permission::Identity &identity,
+ const AttributionSourceState &attributionSource,
const media::AudioConfigBase& config, int32_t flags,
int32_t selectedDeviceId,
media::GetInputForAttrResponse* _aidl_return) override;
@@ -321,8 +324,10 @@
// Handles binder shell commands
virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
+ class AudioRecordClient;
+
// Sets whether the given UID records only silence
- virtual void setAppState_l(audio_port_handle_t portId, app_state_t state) REQUIRES(mLock);
+ virtual void setAppState_l(sp<AudioRecordClient> client, app_state_t state) REQUIRES(mLock);
// Overrides the UID state as if it is idle
status_t handleSetUidState(Vector<String16>& args, int err);
@@ -344,7 +349,7 @@
bool isSupportedSystemUsage(audio_usage_t usage);
status_t validateUsage(audio_usage_t usage);
- status_t validateUsage(audio_usage_t usage, const media::permission::Identity& identity);
+ status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
void updateUidStates();
void updateUidStates_l() REQUIRES(mLock);
@@ -735,6 +740,9 @@
status_t getAudioPort(struct audio_port_v7 *port) override;
+ status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
+
private:
AudioPolicyService *mAudioPolicyService;
};
@@ -788,17 +796,18 @@
public:
AudioClient(const audio_attributes_t attributes,
const audio_io_handle_t io,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
const audio_session_t session, audio_port_handle_t portId,
const audio_port_handle_t deviceId) :
- attributes(attributes), io(io), identity(identity),
- session(session), portId(portId), deviceId(deviceId), active(false) {}
+ attributes(attributes), io(io), attributionSource(
+ attributionSource), session(session), portId(portId),
+ deviceId(deviceId), active(false) {}
~AudioClient() override = default;
const audio_attributes_t attributes; // source, flags ...
const audio_io_handle_t io; // audio HAL stream IO handle
- const media::permission::Identity& identity; //client identity
+        const AttributionSourceState& attributionSource; // client attribution source
const audio_session_t session; // audio session ID
const audio_port_handle_t portId;
const audio_port_handle_t deviceId; // selected input device port ID
@@ -814,17 +823,19 @@
const audio_io_handle_t io,
const audio_session_t session, audio_port_handle_t portId,
const audio_port_handle_t deviceId,
- const media::permission::Identity& identity,
+ const AttributionSourceState& attributionSource,
bool canCaptureOutput, bool canCaptureHotword) :
- AudioClient(attributes, io, identity,
- session, portId, deviceId), identity(identity), startTimeNs(0),
- canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
+ AudioClient(attributes, io, attributionSource,
+ session, portId, deviceId), attributionSource(attributionSource),
+ startTimeNs(0), canCaptureOutput(canCaptureOutput),
+ canCaptureHotword(canCaptureHotword), silenced(false) {}
~AudioRecordClient() override = default;
- const media::permission::Identity identity; // identity of client
+ const AttributionSourceState attributionSource; // attribution source of client
nsecs_t startTimeNs;
const bool canCaptureOutput;
const bool canCaptureHotword;
+ bool silenced;
};
// --- AudioPlaybackClient ---
@@ -833,10 +844,10 @@
class AudioPlaybackClient : public AudioClient {
public:
AudioPlaybackClient(const audio_attributes_t attributes,
- const audio_io_handle_t io, media::permission::Identity identity,
+ const audio_io_handle_t io, AttributionSourceState attributionSource,
const audio_session_t session, audio_port_handle_t portId,
audio_port_handle_t deviceId, audio_stream_type_t stream) :
- AudioClient(attributes, io, identity, session, portId,
+ AudioClient(attributes, io, attributionSource, session, portId,
deviceId), stream(stream) {}
~AudioPlaybackClient() override = default;
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index f480210..b296fb0 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -25,7 +25,8 @@
"libmedia_helper",
"libutils",
"libxml2",
- "media_permission-aidl-cpp",
+ "libpermission",
+ "libbinder",
],
static_libs: [
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index e2d7d17..f7b0565 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -134,6 +134,11 @@
size_t getRoutingUpdatedCounter() const {
return mRoutingUpdatedUpdateCount; }
+ status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs __unused) override {
+ return NO_ERROR;
+ }
+
private:
audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index d289e15..1384864 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -91,6 +91,10 @@
status_t getAudioPort(struct audio_port_v7 *port __unused) override {
return INVALID_OPERATION;
};
+ status_t updateSecondaryOutputs(
+ const TrackSecondaryOutputsMap& trackSecondaryOutputs __unused) override {
+ return NO_INIT;
+ }
};
} // namespace android
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 8f12ecf..a16ab7d 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -25,7 +25,7 @@
#define LOG_TAG "APM_Test"
#include <Serializer.h>
#include <android-base/file.h>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <media/AudioPolicy.h>
#include <media/PatchBuilder.h>
#include <media/RecordingActivityTracker.h>
@@ -40,7 +40,7 @@
using namespace android;
using testing::UnorderedElementsAre;
-using media::permission::Identity;
+using android::content::AttributionSourceState;
TEST(AudioPolicyManagerTestInit, EngineFailure) {
AudioPolicyTestClient client;
@@ -216,11 +216,12 @@
if (!portId) portId = &localPortId;
*portId = AUDIO_PORT_HANDLE_NONE;
AudioPolicyInterface::output_type_t outputType;
- // TODO b/182392769: use identity util
- Identity i = Identity();
- i.uid = 0;
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource = AttributionSourceState();
+ attributionSource.uid = 0;
+ attributionSource.token = sp<BBinder>::make();
ASSERT_EQ(OK, mManager->getOutputForAttr(
- &attr, output, AUDIO_SESSION_NONE, &stream, i, &config, &flags,
+ &attr, output, AUDIO_SESSION_NONE, &stream, attributionSource, &config, &flags,
selectedDeviceId, portId, {}, &outputType));
ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
ASSERT_NE(AUDIO_IO_HANDLE_NONE, *output);
@@ -244,11 +245,12 @@
if (!portId) portId = &localPortId;
*portId = AUDIO_PORT_HANDLE_NONE;
AudioPolicyInterface::input_type_t inputType;
- // TODO b/182392769: use identity util
- Identity i = Identity();
- i.uid = 0;
+ // TODO b/182392769: use attribution source util
+ AttributionSourceState attributionSource = AttributionSourceState();
+ attributionSource.uid = 0;
+ attributionSource.token = sp<BBinder>::make();
ASSERT_EQ(OK, mManager->getInputForAttr(
- &attr, &input, riid, AUDIO_SESSION_NONE, i, &config, flags,
+ &attr, &input, riid, AUDIO_SESSION_NONE, attributionSource, &config, flags,
selectedDeviceId, &inputType, portId));
ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
}
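
The test fixtures above now attach a binder token to the AttributionSourceState handed to getOutputForAttr()/getInputForAttr(). A minimal helper mirroring that setup, assuming the AIDL-generated header and libbinder are available:

    #include <android/content/AttributionSourceState.h>
    #include <binder/Binder.h>

    using android::sp;
    using android::BBinder;
    using android::content::AttributionSourceState;

    // Build an attribution source suitable for in-process tests: attribute the
    // request to uid 0 and attach a fresh binder token, as the tests above do.
    AttributionSourceState makeTestAttributionSource() {
        AttributionSourceState source;
        source.uid = 0;
        source.pid = -1;                    // let the service fill in the calling pid
        source.token = sp<BBinder>::make();
        return source;
    }

The result is passed straight into the policy manager calls, exactly as in the hunks above.
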
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 6dcf440..5afdfb9 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -669,15 +669,22 @@
const std::vector<int32_t> &sensorPixelModesUsed, int format, int width, int height,
const CameraMetadata &staticInfo, bool flexibleConsumer,
std::unordered_set<int32_t> *overriddenSensorPixelModesUsed) {
+
+ const std::unordered_set<int32_t> &sensorPixelModesUsedSet =
+ convertToSet(sensorPixelModesUsed);
if (!isUltraHighResolutionSensor(staticInfo)) {
+ if (sensorPixelModesUsedSet.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
+ sensorPixelModesUsedSet.end()) {
+ // invalid value for non ultra high res sensors
+ return BAD_VALUE;
+ }
overriddenSensorPixelModesUsed->clear();
overriddenSensorPixelModesUsed->insert(ANDROID_SENSOR_PIXEL_MODE_DEFAULT);
return OK;
}
StreamConfigurationPair streamConfigurationPair = getStreamConfigurationPair(staticInfo);
- const std::unordered_set<int32_t> &sensorPixelModesUsedSet =
- convertToSet(sensorPixelModesUsed);
+
bool isInDefaultStreamConfigurationMap =
inStreamConfigurationMap(format, width, height,
streamConfigurationPair.mDefaultStreamConfigurationMap);
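
The SessionConfigurationUtils change above builds the requested pixel-mode set before the ultra-high-resolution check, so ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION can be rejected outright on sensors that cannot provide it rather than silently overridden. A reduced sketch of that early-exit shape (types and constants simplified; the real helper also consults the stream configuration maps):

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    enum : int32_t {
        SENSOR_PIXEL_MODE_DEFAULT = 0,              // simplified stand-ins for the
        SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION = 1,   // ANDROID_SENSOR_PIXEL_MODE_* values
    };

    // Returns false (BAD_VALUE in the real code) when maximum-resolution mode is
    // requested on a sensor that is not ultra high resolution.
    bool checkAndOverrideSensorPixelModes(const std::vector<int32_t>& requestedModes,
                                          bool isUltraHighResSensor,
                                          std::unordered_set<int32_t>* overriddenModes) {
        std::unordered_set<int32_t> requested(requestedModes.begin(), requestedModes.end());
        if (!isUltraHighResSensor) {
            if (requested.count(SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION)) {
                return false;                       // invalid for this sensor
            }
            overriddenModes->clear();
            overriddenModes->insert(SENSOR_PIXEL_MODE_DEFAULT);
            return true;
        }
        // Ultra-high-res path omitted: the real helper checks both the default and
        // maximum-resolution stream configuration maps before deciding.
        *overriddenModes = requested;
        return true;
    }
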
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index 6edad7c..1b312b5 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -57,8 +57,8 @@
// string kRecorderLogSessionId = "android.media.mediarecorder.log-session-id";
std::string log_session_id;
- if (item->getString("android.media.mediarecorder.log_session_id", &log_session_id)) {
- metrics_proto.set_log_session_id(std::move(log_session_id));
+ if (item->getString("android.media.mediarecorder.log-session-id", &log_session_id)) {
+ metrics_proto.set_log_session_id(log_session_id);
}
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
std::string audio_mime;
@@ -214,8 +214,7 @@
<< " video_bitrate:" << video_bitrate
<< " iframe_interval:" << iframe_interval
- // TODO Recorder - add log_session_id
- // << " log_session_id:" << log_session_id
+ << " log_session_id:" << log_session_id
<< " }";
statsdLog->log(android::util::MEDIAMETRICS_RECORDER_REPORTED, log.str());
return true;
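
The statsd_recorder fix above switches the lookup to the hyphenated key that the recorder item actually carries (see the kRecorderLogSessionId comment), so the underscore spelling presumably never matched and the session id stayed empty. A trivial illustration of why the key must match exactly, with the mediametrics item reduced to a map:

    #include <map>
    #include <string>

    // Simplified stand-in for mediametrics::Item::getString(): exact-key lookup.
    bool getString(const std::map<std::string, std::string>& item,
                   const std::string& key, std::string* out) {
        auto it = item.find(key);
        if (it == item.end()) return false;         // "log_session_id" would miss here
        *out = it->second;
        return true;                                // "log-session-id" matches
    }
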
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 3224cfc..20e4cc5 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -184,7 +184,7 @@
// An app can avoid having this happen by closing their streams when
// the app is paused.
pid_t pid = VALUE_OR_FATAL(
- aidl2legacy_int32_t_pid_t(request.getIdentity().pid));
+ aidl2legacy_int32_t_pid_t(request.getAttributionSource().pid));
AAudioClientTracker::getInstance().setExclusiveEnabled(pid, false);
endpointToSteal = endpoint; // return it to caller
}
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 0b69bf6..40a664e 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -22,7 +22,7 @@
#include <iostream>
#include <sstream>
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#include <aaudio/AAudio.h>
#include <media/AidlConversion.h>
#include <mediautils/ServiceUtilities.h>
@@ -47,18 +47,18 @@
std::move(_tmp.value()); })
using android::AAudioService;
-using android::media::permission::Identity;
+using android::content::AttributionSourceState;
using binder::Status;
android::AAudioService::AAudioService()
: BnAAudioService(),
mAdapter(this) {
// TODO consider using geteuid()
- // TODO b/182392769: use identity util
- mAudioClient.identity.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
- mAudioClient.identity.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
- mAudioClient.identity.packageName = std::nullopt;
- mAudioClient.identity.attributionTag = std::nullopt;
+ // TODO b/182392769: use attribution source util
+ mAudioClient.attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ mAudioClient.attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ mAudioClient.attributionSource.packageName = std::nullopt;
+ mAudioClient.attributionSource.attributionTag = std::nullopt;
AAudioClientTracker::getInstance().setAAudioService(this);
}
@@ -115,13 +115,14 @@
aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
// Enforce limit on client processes.
- Identity callingIdentity = request.getIdentity();
+ AttributionSourceState attributionSource = request.getAttributionSource();
pid_t pid = IPCThreadState::self()->getCallingPid();
- callingIdentity.pid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
+ attributionSource.pid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
legacy2aidl_pid_t_int32_t(pid));
- callingIdentity.uid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
+ attributionSource.uid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
- if (callingIdentity.pid != mAudioClient.identity.pid) {
+ attributionSource.token = sp<BBinder>::make();
+ if (attributionSource.pid != mAudioClient.attributionSource.pid) {
int32_t count = AAudioClientTracker::getInstance().getStreamCount(pid);
if (count >= MAX_STREAMS_PER_PROCESS) {
ALOGE("openStream(): exceeded max streams per process %d >= %d",
@@ -280,8 +281,8 @@
}
bool AAudioService::isCallerInService() {
- pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAudioClient.identity.pid));
- uid_t clientUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAudioClient.identity.uid));
+ pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAudioClient.attributionSource.pid));
+ uid_t clientUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
return clientPid == IPCThreadState::self()->getCallingPid() &&
clientUid == IPCThreadState::self()->getCallingUid();
}
@@ -307,7 +308,7 @@
const uid_t callingUserId = IPCThreadState::self()->getCallingUid();
const uid_t ownerUserId = serviceStream->getOwnerUserId();
const uid_t clientUid = VALUE_OR_FATAL(
- aidl2legacy_int32_t_uid_t(mAudioClient.identity.uid));
+ aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
bool callerOwnsIt = callingUserId == ownerUserId;
bool serverCalling = callingUserId == clientUid;
bool serverOwnsIt = ownerUserId == clientUid;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index b4efd1a..117218a 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -71,11 +71,11 @@
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAUDIO_OK;
copyFrom(request.getConstantConfiguration());
- mMmapClient.identity = request.getIdentity();
- // TODO b/182392769: use identity util
- mMmapClient.identity.uid = VALUE_OR_FATAL(
+ mMmapClient.attributionSource = request.getAttributionSource();
+ // TODO b/182392769: use attribution source util
+ mMmapClient.attributionSource.uid = VALUE_OR_FATAL(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
- mMmapClient.identity.pid = VALUE_OR_FATAL(
+ mMmapClient.attributionSource.pid = VALUE_OR_FATAL(
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
audio_format_t audioFormat = getFormat();
@@ -165,8 +165,8 @@
this, // callback
mMmapStream,
&mPortHandle);
- ALOGD("%s() mMapClient.identity = %s => portHandle = %d\n",
- __func__, mMmapClient.identity.toString().c_str(), mPortHandle);
+ ALOGD("%s() mMapClient.attributionSource = %s => portHandle = %d\n",
+ __func__, mMmapClient.attributionSource.toString().c_str(), mPortHandle);
if (status != OK) {
// This can happen if the resource is busy or the config does
// not match the hardware.
@@ -216,7 +216,7 @@
// Exclusive mode can only be used by the service because the FD cannot be shared.
int32_t audioServiceUid =
VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
- if ((mMmapClient.identity.uid != audioServiceUid) &&
+ if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
ALOGW("%s() - exclusive FD cannot be used by client", __func__);
result = AAUDIO_ERROR_UNAVAILABLE;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index dbacd75..34ddd4d 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -39,7 +39,7 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
-using media::permission::Identity;
+using content::AttributionSourceState;
/**
* Base class for streams in the service.
@@ -50,7 +50,7 @@
: mTimestampThread("AATime")
, mAtomicStreamTimestamp()
, mAudioService(audioService) {
- mMmapClient.identity = Identity();
+ mMmapClient.attributionSource = AttributionSourceState();
}
AAudioServiceStreamBase::~AAudioServiceStreamBase() {
@@ -81,7 +81,7 @@
result << " 0x" << std::setfill('0') << std::setw(8) << std::hex << mHandle
<< std::dec << std::setfill(' ') ;
- result << std::setw(6) << mMmapClient.identity.uid;
+ result << std::setw(6) << mMmapClient.attributionSource.uid;
result << std::setw(7) << mClientHandle;
result << std::setw(4) << (isRunning() ? "yes" : " no");
result << std::setw(6) << getState();
@@ -127,11 +127,11 @@
AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
aaudio_result_t result = AAUDIO_OK;
- mMmapClient.identity = request.getIdentity();
- // TODO b/182392769: use identity util
- mMmapClient.identity.uid = VALUE_OR_FATAL(
+ mMmapClient.attributionSource = request.getAttributionSource();
+ // TODO b/182392769: use attribution source util
+ mMmapClient.attributionSource.uid = VALUE_OR_FATAL(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
- mMmapClient.identity.pid = VALUE_OR_FATAL(
+ mMmapClient.attributionSource.pid = VALUE_OR_FATAL(
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
// Limit scope of lock to avoid recursive lock in close().
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index c42df0f..976996d 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -160,11 +160,13 @@
}
uid_t getOwnerUserId() const {
- return VALUE_OR_FATAL(android::aidl2legacy_int32_t_uid_t(mMmapClient.identity.uid));
+ return VALUE_OR_FATAL(android::aidl2legacy_int32_t_uid_t(
+ mMmapClient.attributionSource.uid));
}
pid_t getOwnerProcessId() const {
- return VALUE_OR_FATAL(android::aidl2legacy_int32_t_pid_t(mMmapClient.identity.pid));
+ return VALUE_OR_FATAL(android::aidl2legacy_int32_t_pid_t(
+ mMmapClient.attributionSource.pid));
}
aaudio_handle_t getHandle() const {
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index a419dd5..4c58040 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -66,13 +66,13 @@
"liblog",
"libutils",
"aaudio-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
export_shared_lib_headers: [
"libaaudio_internal",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
],
header_libs: [
diff --git a/services/oboeservice/fuzzer/Android.bp b/services/oboeservice/fuzzer/Android.bp
index f4e8a81..605ac01 100644
--- a/services/oboeservice/fuzzer/Android.bp
+++ b/services/oboeservice/fuzzer/Android.bp
@@ -46,7 +46,7 @@
"liblog",
"libutils",
"aaudio-aidl-cpp",
- "media_permission-aidl-cpp",
+ "framework-permission-aidl-cpp",
"libaudioclient_aidl_conversion",
],
static_libs: [
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 8e508d3..4bc661c 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -23,7 +23,7 @@
#include <AAudioService.h>
#include <aaudio/AAudio.h>
#include "aaudio/BnAAudioClient.h"
-#include <android/media/permission/Identity.h>
+#include <android/content/AttributionSourceState.h>
#define UNUSED_PARAM __attribute__((unused))
@@ -295,11 +295,12 @@
? fdp.ConsumeIntegral<int32_t>()
: kAAudioFormats[fdp.ConsumeIntegralInRange<int32_t>(0, kNumAAudioFormats - 1)]));
- // TODO b/182392769: use identity util
- media::permission::Identity identity;
- identity.uid = getuid();
- identity.pid = getpid();
- request.setIdentity(identity);
+ // TODO b/182392769: use attribution source util
+ android::content::AttributionSourceState attributionSource;
+ attributionSource.uid = getuid();
+ attributionSource.pid = getpid();
+ attributionSource.token = sp<BBinder>::make();
+ request.setAttributionSource(attributionSource);
request.setInService(fdp.ConsumeBool());
request.getConfiguration().setDeviceId(fdp.ConsumeIntegral<int32_t>());
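
The TunerService.cpp diff below wraps every frontendCaps accessor in a getDiscriminator() check, since HIDL safe_union accessors typically abort when the requested member is not the one currently set. A generic sketch of that guard pattern with a simplified stand-in union:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for a HIDL safe_union: accessors assert on the wrong member.
    struct Caps {
        enum class Discriminator { analogCaps, atscCaps };
        Discriminator getDiscriminator() const { return mDisc; }
        int32_t analogCaps() const { assert(mDisc == Discriminator::analogCaps); return mAnalog; }
        int32_t atscCaps() const  { assert(mDisc == Discriminator::atscCaps);  return mAtsc; }

        Discriminator mDisc = Discriminator::analogCaps;
        int32_t mAnalog = 0;
        int32_t mAtsc = 0;
    };

    // Only read the analog member when the union actually holds it; otherwise skip
    // instead of aborting, mirroring the per-type checks added below.
    bool readAnalogCap(const Caps& caps, int32_t* out) {
        if (caps.getDiscriminator() != Caps::Discriminator::analogCaps) {
            return false;
        }
        *out = caps.analogCaps();
        return true;
    }
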
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index 77e1c40..5b4129a 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -445,90 +445,118 @@
TunerFrontendCapabilities caps;
switch (halInfo.type) {
case FrontendType::ANALOG: {
- TunerFrontendAnalogCapabilities analogCaps{
- .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
- .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
- };
- caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::analogCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAnalogCapabilities analogCaps{
+ .typeCap = (int)halInfo.frontendCaps.analogCaps().typeCap,
+ .sifStandardCap = (int)halInfo.frontendCaps.analogCaps().sifStandardCap,
+ };
+ caps.set<TunerFrontendCapabilities::analogCaps>(analogCaps);
+ }
break;
}
case FrontendType::ATSC: {
- TunerFrontendAtscCapabilities atscCaps{
- .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
- };
- caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atscCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAtscCapabilities atscCaps{
+ .modulationCap = (int)halInfo.frontendCaps.atscCaps().modulationCap,
+ };
+ caps.set<TunerFrontendCapabilities::atscCaps>(atscCaps);
+ }
break;
}
case FrontendType::ATSC3: {
- TunerFrontendAtsc3Capabilities atsc3Caps{
- .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
- .timeInterleaveModeCap =
- (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
- .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
- .demodOutputFormatCap = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
- .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
- };
- caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::atsc3Caps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendAtsc3Capabilities atsc3Caps{
+ .bandwidthCap = (int)halInfo.frontendCaps.atsc3Caps().bandwidthCap,
+ .modulationCap = (int)halInfo.frontendCaps.atsc3Caps().modulationCap,
+ .timeInterleaveModeCap =
+ (int)halInfo.frontendCaps.atsc3Caps().timeInterleaveModeCap,
+ .codeRateCap = (int)halInfo.frontendCaps.atsc3Caps().codeRateCap,
+ .demodOutputFormatCap
+ = (int)halInfo.frontendCaps.atsc3Caps().demodOutputFormatCap,
+ .fecCap = (int)halInfo.frontendCaps.atsc3Caps().fecCap,
+ };
+ caps.set<TunerFrontendCapabilities::atsc3Caps>(atsc3Caps);
+ }
break;
}
case FrontendType::DVBC: {
- TunerFrontendCableCapabilities cableCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
- .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
- .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
- };
- caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbcCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendCableCapabilities cableCaps{
+ .modulationCap = (int)halInfo.frontendCaps.dvbcCaps().modulationCap,
+ .codeRateCap = (int64_t)halInfo.frontendCaps.dvbcCaps().fecCap,
+ .annexCap = (int)halInfo.frontendCaps.dvbcCaps().annexCap,
+ };
+ caps.set<TunerFrontendCapabilities::cableCaps>(cableCaps);
+ }
break;
}
case FrontendType::DVBS: {
- TunerFrontendDvbsCapabilities dvbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
- .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
- .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
- };
- caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbsCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendDvbsCapabilities dvbsCaps{
+ .modulationCap = (int)halInfo.frontendCaps.dvbsCaps().modulationCap,
+ .codeRateCap = (long)halInfo.frontendCaps.dvbsCaps().innerfecCap,
+ .standard = (int)halInfo.frontendCaps.dvbsCaps().standard,
+ };
+ caps.set<TunerFrontendCapabilities::dvbsCaps>(dvbsCaps);
+ }
break;
}
case FrontendType::DVBT: {
- TunerFrontendDvbtCapabilities dvbtCaps{
- .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
- .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
- .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
- .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
- .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
- .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
- };
- caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::dvbtCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendDvbtCapabilities dvbtCaps{
+ .transmissionModeCap = (int)halInfo.frontendCaps.dvbtCaps().transmissionModeCap,
+ .bandwidthCap = (int)halInfo.frontendCaps.dvbtCaps().bandwidthCap,
+ .constellationCap = (int)halInfo.frontendCaps.dvbtCaps().constellationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.dvbtCaps().coderateCap,
+ .hierarchyCap = (int)halInfo.frontendCaps.dvbtCaps().hierarchyCap,
+ .guardIntervalCap = (int)halInfo.frontendCaps.dvbtCaps().guardIntervalCap,
+ .isT2Supported = (bool)halInfo.frontendCaps.dvbtCaps().isT2Supported,
+ .isMisoSupported = (bool)halInfo.frontendCaps.dvbtCaps().isMisoSupported,
+ };
+ caps.set<TunerFrontendCapabilities::dvbtCaps>(dvbtCaps);
+ }
break;
}
case FrontendType::ISDBS: {
- TunerFrontendIsdbsCapabilities isdbsCaps{
- .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbsCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbsCapabilities isdbsCaps{
+ .modulationCap = (int)halInfo.frontendCaps.isdbsCaps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbsCaps().coderateCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbsCaps>(isdbsCaps);
+ }
break;
}
case FrontendType::ISDBS3: {
- TunerFrontendIsdbs3Capabilities isdbs3Caps{
- .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
- };
- caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbs3Caps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbs3Capabilities isdbs3Caps{
+ .modulationCap = (int)halInfo.frontendCaps.isdbs3Caps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbs3Caps().coderateCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbs3Caps>(isdbs3Caps);
+ }
break;
}
case FrontendType::ISDBT: {
- TunerFrontendIsdbtCapabilities isdbtCaps{
- .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
- .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
- .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
- .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
- .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
- };
- caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+ if (FrontendInfo::FrontendCapabilities::hidl_discriminator::isdbtCaps
+ == halInfo.frontendCaps.getDiscriminator()) {
+ TunerFrontendIsdbtCapabilities isdbtCaps{
+ .modeCap = (int)halInfo.frontendCaps.isdbtCaps().modeCap,
+ .bandwidthCap = (int)halInfo.frontendCaps.isdbtCaps().bandwidthCap,
+ .modulationCap = (int)halInfo.frontendCaps.isdbtCaps().modulationCap,
+ .codeRateCap = (int)halInfo.frontendCaps.isdbtCaps().coderateCap,
+ .guardIntervalCap = (int)halInfo.frontendCaps.isdbtCaps().guardIntervalCap,
+ };
+ caps.set<TunerFrontendCapabilities::isdbtCaps>(isdbtCaps);
+ }
break;
}
default: