Merge "Spatializer: Cumulative headtracking fixes" into tm-qpr-dev
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index ddf797c..100c0cd 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -46,7 +46,6 @@
mFirstIFrameProvided(false),
mLastCvo(-1),
mLastIFrameProvidedAtMs(0),
- mLastRtpTimeJitterDataUs(0),
mWidth(0),
mHeight(0) {
}
@@ -123,20 +122,11 @@
}
sp<ABuffer> buffer = *queue->begin();
+ uint32_t seqNum = (uint32_t)buffer->int32Data();
buffer->meta()->setObject("source", source);
- /**
- * RFC3550 calculates the interarrival jitter time for 'ALL packets'.
- * But that is not useful as an ingredient of buffering time.
- * Instead, we calculates the time only for all 'NAL units'.
- */
int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
int64_t nowTimeUs = ALooper::GetNowUs();
- if (rtpTime != mLastRtpTimeJitterDataUs) {
- source->putBaseJitterData(rtpTime, nowTimeUs);
- mLastRtpTimeJitterDataUs = rtpTime;
- }
- source->putInterArrivalJitterData(rtpTime, nowTimeUs);
const int64_t startTimeMs = source->mSysAnchorTime / 1000;
const int64_t nowTimeMs = nowTimeUs / 1000;
@@ -168,7 +158,7 @@
const int32_t dynamicJbTimeMs = std::min(dynamicJitterTimeMs, 150);
const int64_t dynamicJbTimeRtp = MsToRtp(dynamicJbTimeMs, clockRate);
/* Fundamental jitter time */
- const int32_t jitterTimeMs = baseJbTimeMs;
+ const int32_t jitterTimeMs = baseJbTimeMs + dynamicJbTimeMs;
const int64_t jitterTimeRtp = MsToRtp(jitterTimeMs, clockRate);
// Till (T), this assembler waits unconditionally to collect current NAL unit
@@ -177,7 +167,7 @@
bool isExpired = (diffTimeRtp >= 0); // It's expired if T is passed away
// From (T), this assembler tries to complete the NAL till (T + try)
- int32_t tryJbTimeMs = baseJitterTimeMs / 2 + dynamicJbTimeMs;
+ int32_t tryJbTimeMs = dynamicJbTimeMs;
int64_t tryJbTimeRtp = MsToRtp(tryJbTimeMs, clockRate);
bool isFirstLineBroken = (diffTimeRtp > tryJbTimeRtp);
@@ -208,10 +198,10 @@
String8 info;
info.appendFormat("RTP diff from exp =%lld \t MS diff from stamp = %lld\t\t"
"Seq# %d \t ExpSeq# %d \t"
- "JitterMs %d + (%d + %d * %.3f)",
+ "JitterMs [%d + (~%d~)] + %d * %.3f",
(long long)diffTimeRtp, (long long)totalDiffTimeMs,
- buffer->int32Data(), mNextExpectedSeqNo,
- jitterTimeMs, tryJbTimeMs, dynamicJbTimeMs, JITTER_MULTIPLE);
+ seqNum, mNextExpectedSeqNo,
+ baseJbTimeMs, dynamicJbTimeMs, tryJbTimeMs, JITTER_MULTIPLE);
if (isSecondLineBroken) {
ALOGE("%s", info.string());
printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
@@ -223,6 +213,9 @@
}
if (mNextExpectedSeqNoValid) {
+ if (mNextExpectedSeqNo > seqNum) {
+ ALOGE("Reversed exp seq# %d \t current head %d", mNextExpectedSeqNo, seqNum);
+ }
mNextExpectedSeqNo = pickStartSeq(queue, firstRTPTime, playedTimeRtp, jitterTimeRtp);
int32_t cntRemove = deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
@@ -241,10 +234,10 @@
if (!mNextExpectedSeqNoValid) {
mNextExpectedSeqNoValid = true;
- mNextExpectedSeqNo = (uint32_t)buffer->int32Data();
- } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) {
- ALOGV("Not the sequence number I expected");
-
+ mNextExpectedSeqNo = seqNum;
+ } else if (seqNum != mNextExpectedSeqNo) {
+ ALOGV("Not the sequence number(%d) I expected. Actual seq# is %d",
+ mNextExpectedSeqNo, seqNum);
return WRONG_SEQUENCE_NUMBER;
}
@@ -332,6 +325,11 @@
}
bool AAVCAssembler::dropFramesUntilIframe(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ ALOGE("b/230630526 buffer->size() == 0");
+ android_errorWriteLog(0x534e4554, "230630526");
+ return false;
+ }
const uint8_t *data = buffer->data();
unsigned nalType = data[0] & 0x1f;
if (!mFirstIFrameProvided && nalType < 0x5) {
@@ -624,8 +622,7 @@
int32_t firstSeqNo = buffer->int32Data();
// This only works for FU-A type & non-start sequence
- int32_t nalType = buffer->size() >= 1 ? buffer->data()[0] & 0x1f : -1;
- if (nalType != 28 || (buffer->size() >= 2 && buffer->data()[1] & 0x80)) {
+ if (buffer->size() < 2 || (buffer->data()[0] & 0x1f) != 28 || buffer->data()[1] & 0x80) {
return firstSeqNo;
}
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index bb42d1f..7b5c24a 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -53,7 +53,6 @@
mFirstIFrameProvided(false),
mLastCvo(-1),
mLastIFrameProvidedAtMs(0),
- mLastRtpTimeJitterDataUs(0),
mWidth(0),
mHeight(0) {
@@ -133,20 +132,11 @@
}
sp<ABuffer> buffer = *queue->begin();
+ uint32_t seqNum = (uint32_t)buffer->int32Data();
buffer->meta()->setObject("source", source);
- /**
- * RFC3550 calculates the interarrival jitter time for 'ALL packets'.
- * But that is not useful as an ingredient of buffering time.
- * Instead, we calculates the time only for all 'NAL units'.
- */
int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
int64_t nowTimeUs = ALooper::GetNowUs();
- if (rtpTime != mLastRtpTimeJitterDataUs) {
- source->putBaseJitterData(rtpTime, nowTimeUs);
- mLastRtpTimeJitterDataUs = rtpTime;
- }
- source->putInterArrivalJitterData(rtpTime, nowTimeUs);
const int64_t startTimeMs = source->mSysAnchorTime / 1000;
const int64_t nowTimeMs = nowTimeUs / 1000;
@@ -178,7 +168,7 @@
const int32_t dynamicJbTimeMs = std::min(dynamicJitterTimeMs, 150);
const int64_t dynamicJbTimeRtp = MsToRtp(dynamicJbTimeMs, clockRate);
/* Fundamental jitter time */
- const int32_t jitterTimeMs = baseJbTimeMs;
+ const int32_t jitterTimeMs = baseJbTimeMs + dynamicJbTimeMs;
const int64_t jitterTimeRtp = MsToRtp(jitterTimeMs, clockRate);
// Till (T), this assembler waits unconditionally to collect current NAL unit
@@ -187,7 +177,7 @@
bool isExpired = (diffTimeRtp >= 0); // It's expired if T is passed away
// From (T), this assembler tries to complete the NAL till (T + try)
- int32_t tryJbTimeMs = baseJitterTimeMs / 2 + dynamicJbTimeMs;
+ int32_t tryJbTimeMs = dynamicJbTimeMs;
int64_t tryJbTimeRtp = MsToRtp(tryJbTimeMs, clockRate);
bool isFirstLineBroken = (diffTimeRtp > tryJbTimeRtp);
@@ -218,10 +208,10 @@
String8 info;
info.appendFormat("RTP diff from exp =%lld \t MS diff from stamp = %lld\t\t"
"Seq# %d \t ExpSeq# %d \t"
- "JitterMs %d + (%d + %d * %.3f)",
+ "JitterMs [%d + (~%d~)] + %d * %.3f",
(long long)diffTimeRtp, (long long)totalDiffTimeMs,
- buffer->int32Data(), mNextExpectedSeqNo,
- jitterTimeMs, tryJbTimeMs, dynamicJbTimeMs, JITTER_MULTIPLE);
+ seqNum, mNextExpectedSeqNo,
+ baseJbTimeMs, dynamicJbTimeMs, tryJbTimeMs, JITTER_MULTIPLE);
if (isSecondLineBroken) {
ALOGE("%s", info.string());
printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
@@ -251,10 +241,10 @@
if (!mNextExpectedSeqNoValid) {
mNextExpectedSeqNoValid = true;
- mNextExpectedSeqNo = (uint32_t)buffer->int32Data();
- } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) {
- ALOGV("Not the sequence number I expected");
-
+ mNextExpectedSeqNo = seqNum;
+ } else if (seqNum != mNextExpectedSeqNo) {
+ ALOGV("Not the sequence number(%d) I expected. Actual seq# is %d",
+ mNextExpectedSeqNo, seqNum);
return WRONG_SEQUENCE_NUMBER;
}
@@ -629,13 +619,13 @@
int32_t AHEVCAssembler::pickStartSeq(const Queue *queue,
uint32_t first, int64_t play, int64_t jit) {
+ CHECK(!queue->empty());
// pick the first sequence number has the start bit.
sp<ABuffer> buffer = *(queue->begin());
int32_t firstSeqNo = buffer->int32Data();
// This only works for FU-A type & non-start sequence
- unsigned nalType = buffer->data()[0] & 0x1f;
- if (nalType != 28 || buffer->data()[2] & 0x80) {
+ if (buffer->size() < 3 || (buffer->data()[0] & 0x1f) != 28 || buffer->data()[2] & 0x80) {
return firstSeqNo;
}
@@ -645,7 +635,7 @@
if (rtpTime + jit >= play) {
break;
}
- if ((data[2] & 0x80)) {
+ if (it->size() >= 3 && (data[2] & 0x80)) {
const int32_t seqNo = it->int32Data();
ALOGE("finding [HEAD] pkt. \t Seq# (%d ~ )[%d", firstSeqNo, seqNo);
firstSeqNo = seqNo;
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 717d8af..c5b0a1e 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -264,12 +264,12 @@
bool ARTPSource::queuePacket(const sp<ABuffer> &buffer) {
int64_t nowUs = ALooper::GetNowUs();
+ int64_t rtpTime = 0;
uint32_t seqNum = (uint32_t)buffer->int32Data();
- int32_t ssrc = 0, rtpTime = 0;
+ int32_t ssrc = 0;
buffer->meta()->findInt32("ssrc", &ssrc);
CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
- mLatestRtpTime = rtpTime;
if (mNumBuffersReceived++ == 0 && mFirstSysTime == 0) {
mFirstSysTime = nowUs;
@@ -277,7 +277,7 @@
mLastSysAnchorTimeUpdatedUs = nowUs;
mHighestSeqNumber = seqNum;
mBaseSeqNumber = seqNum;
- mFirstRtpTime = rtpTime;
+ mFirstRtpTime = (uint32_t)rtpTime;
mFirstSsrc = ssrc;
ALOGD("first-rtp arrived: first-rtp-time=%u, sys-time=%lld, seq-num=%u, ssrc=%d",
mFirstRtpTime, (long long)mFirstSysTime, mHighestSeqNumber, mFirstSsrc);
@@ -352,6 +352,18 @@
mQueue.insert(it, buffer);
+ /**
+ * RFC3550 calculates the interarrival jitter time for 'ALL packets'.
+ * We calculate another jitter only for all 'Head NAL units'
+ */
+ ALOGV("<======== Insert %d", seqNum);
+ rtpTime = mAssembler->findRTPTime(mFirstRtpTime, buffer);
+ if (rtpTime != mLatestRtpTime) {
+ mJitterCalc->putBaseData(rtpTime, nowUs);
+ }
+ mJitterCalc->putInterArrivalData(rtpTime, nowUs);
+ mLatestRtpTime = rtpTime;
+
return true;
}
@@ -680,14 +692,6 @@
mStaticJbTimeMs = jbTimeMs;
}
-void ARTPSource::putBaseJitterData(uint32_t timeStamp, int64_t arrivalTime) {
- mJitterCalc->putBaseData(timeStamp, arrivalTime);
-}
-
-void ARTPSource::putInterArrivalJitterData(uint32_t timeStamp, int64_t arrivalTime) {
- mJitterCalc->putInterArrivalData(timeStamp, arrivalTime);
-}
-
void ARTPSource::setJbTimer(const sp<AMessage> timer) {
mJbTimer = timer;
}
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
index 2f8b8ba..70ce388 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AAVCAssembler.h
@@ -50,7 +50,6 @@
bool mFirstIFrameProvided;
int32_t mLastCvo;
uint64_t mLastIFrameProvidedAtMs;
- int64_t mLastRtpTimeJitterDataUs;
int32_t mWidth;
int32_t mHeight;
List<sp<ABuffer> > mNALUnits;
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
index 9575d8c..ed3f1ae 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/AHEVCAssembler.h
@@ -51,7 +51,6 @@
bool mFirstIFrameProvided;
int32_t mLastCvo;
uint64_t mLastIFrameProvidedAtMs;
- int64_t mLastRtpTimeJitterDataUs;
int32_t mWidth;
int32_t mHeight;
List<sp<ABuffer> > mNALUnits;
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
index 39161b6..8f87642 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPAssembler.h
@@ -44,6 +44,13 @@
virtual void onByeReceived() = 0;
virtual bool initCheck() { return true; }
+ // Utility functions
+ inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
+ inline int64_t MsToRtp(int64_t ms, int64_t clockRate);
+ inline int64_t RtpToMs(int64_t rtp, int64_t clockRate);
+ inline void printNowTimeMs(int64_t start, int64_t now, int64_t play);
+ inline void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
+
protected:
virtual AssemblyStatus assembleMore(const sp<ARTPSource> &source) = 0;
virtual void packetLost() = 0;
@@ -64,13 +71,6 @@
bool mShowQueue;
int32_t mShowQueueCnt;
- // Utility functions
- inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
- inline int64_t MsToRtp(int64_t ms, int64_t clockRate);
- inline int64_t RtpToMs(int64_t rtp, int64_t clockRate);
- inline void printNowTimeMs(int64_t start, int64_t now, int64_t play);
- inline void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
-
private:
int64_t mFirstFailureTimeUs;
diff --git a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
index 3fa5713..7d1faf2 100644
--- a/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/include/media/stagefright/rtsp/ARTPSource.h
@@ -82,8 +82,6 @@
int32_t getBaseJitterTimeMs();
int32_t getInterArrivalJitterTimeMs();
void setStaticJitterTimeMs(const uint32_t jbTimeMs);
- void putBaseJitterData(uint32_t timeStamp, int64_t arrivalTime);
- void putInterArrivalJitterData(uint32_t timeStamp, int64_t arrivalTime);
void setJbTimer(const sp<AMessage> timer);
void setJbAlarmTime(int64_t nowTimeUs, int64_t alarmAfterUs);
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 4653f96..d2363d8 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -283,7 +283,6 @@
return opPackageLegacy == package; }) == packages.end()) {
ALOGW("The package name(%s) provided does not correspond to the uid %d",
attributionSource.packageName.value_or("").c_str(), attributionSource.uid);
- checkedAttributionSource.packageName = std::optional<std::string>();
}
}
return checkedAttributionSource;
@@ -582,6 +581,33 @@
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE;
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
audio_attributes_t localAttr = *attr;
+
+ // TODO b/182392553: refactor or make clearer
+ pid_t clientPid =
+ VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(client.attributionSource.pid));
+ bool updatePid = (clientPid == (pid_t)-1);
+ const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+
+ AttributionSourceState adjAttributionSource = client.attributionSource;
+ if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
+ uid_t clientUid =
+ VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(client.attributionSource.uid));
+ ALOGW_IF(clientUid != callingUid,
+ "%s uid %d tried to pass itself off as %d",
+ __FUNCTION__, callingUid, clientUid);
+ adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
+ updatePid = true;
+ }
+ if (updatePid) {
+ const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW_IF(clientPid != (pid_t)-1 && clientPid != callingPid,
+ "%s uid %d pid %d tried to pass itself off as pid %d",
+ __func__, callingUid, callingPid, clientPid);
+ adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
+ }
+ adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ adjAttributionSource);
+
if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
audio_config_t fullConfig = AUDIO_CONFIG_INITIALIZER;
fullConfig.sample_rate = config->sample_rate;
@@ -591,7 +617,7 @@
bool isSpatialized;
ret = AudioSystem::getOutputForAttr(&localAttr, &io,
actualSessionId,
- &streamType, client.attributionSource,
+ &streamType, adjAttributionSource,
&fullConfig,
(audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
AUDIO_OUTPUT_FLAG_DIRECT),
@@ -602,7 +628,7 @@
ret = AudioSystem::getInputForAttr(&localAttr, &io,
RECORD_RIID_INVALID,
actualSessionId,
- client.attributionSource,
+ adjAttributionSource,
config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ, deviceId, &portId);
}
@@ -1051,7 +1077,7 @@
audio_attributes_t localAttr = input.attr;
AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
- if (!isAudioServerOrMediaServerUid(callingUid)) {
+ if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
@@ -1067,6 +1093,8 @@
clientPid = callingPid;
adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
}
+ adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ adjAttributionSource);
audio_session_t sessionId = input.sessionId;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
@@ -2273,7 +2301,7 @@
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(
adjAttributionSource.uid));
- if (!isAudioServerOrMediaServerUid(callingUid)) {
+ if (!isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
ALOGW_IF(currentUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, currentUid);
@@ -2289,7 +2317,8 @@
__func__, callingUid, callingPid, currentPid);
adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
}
-
+ adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(
+ adjAttributionSource);
// we don't yet support anything other than linear PCM
if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
ALOGE("createRecord() invalid format %#x", input.config.format);
@@ -3704,6 +3733,12 @@
using namespace std::chrono_literals;
auto inChannelMask = audio_channel_mask_out_to_in(track->channelMask());
+ if (inChannelMask == AUDIO_CHANNEL_INVALID) {
+ // The downstream PatchTrack has the proper output channel mask,
+ // so if there is no input channel mask equivalent, we can just
+ // use an index mask here to create the PatchRecord.
+ inChannelMask = audio_channel_mask_out_to_in_index_mask(track->channelMask());
+ }
sp patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
track->sampleRate(),
inChannelMask,
@@ -3906,7 +3941,7 @@
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid));
- if (currentPid == -1 || !isAudioServerOrMediaServerUid(callingUid)) {
+ if (currentPid == -1 || !isAudioServerOrMediaServerOrSystemServerOrRootUid(callingUid)) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
ALOGW_IF(currentPid != -1 && currentPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
@@ -3914,6 +3949,7 @@
adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
currentPid = callingPid;
}
+ adjAttributionSource = AudioFlinger::checkAttributionSourcePackage(adjAttributionSource);
ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
adjAttributionSource.pid, effectClient.get(), priority, sessionId, io,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b72788c..7731339 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -269,6 +269,23 @@
return ss.str();
}
+static std::string toString(audio_latency_mode_t mode) {
+ // We convert to the AIDL type to print (eventually the legacy type will be removed).
+ const auto result = legacy2aidl_audio_latency_mode_t_LatencyMode(mode);
+ return result.has_value() ? media::toString(*result) : "UNKNOWN";
+}
+
+// Could be made a template, but other toString overloads for std::vector are confused.
+static std::string toString(const std::vector<audio_latency_mode_t>& elements) {
+ std::string s("{ ");
+ for (const auto& e : elements) {
+ s.append(toString(e));
+ s.append(" ");
+ }
+ s.append("}");
+ return s;
+}
+
static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
static void sFastTrackMultiplierInit()
@@ -7347,10 +7364,13 @@
void AudioFlinger::SpatializerThread::updateHalSupportedLatencyModes_l() {
std::vector<audio_latency_mode_t> latencyModes;
- if (mOutput->stream->getRecommendedLatencyModes(&latencyModes) != NO_ERROR) {
+ const status_t status = mOutput->stream->getRecommendedLatencyModes(&latencyModes);
+ if (status != NO_ERROR) {
latencyModes.clear();
}
if (latencyModes != mSupportedLatencyModes) {
+ ALOGD("%s: thread(%d) status %d supported latency modes: %s",
+ __func__, mId, status, toString(latencyModes).c_str());
mSupportedLatencyModes.swap(latencyModes);
sendHalLatencyModesChangedEvent_l();
}
@@ -7390,6 +7410,8 @@
if (latencyMode != mSetLatencyMode) {
status_t status = mOutput->stream->setLatencyMode(latencyMode);
+ ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
+ __func__, mId, toString(latencyMode).c_str(), status);
if (status == NO_ERROR) {
mSetLatencyMode = latencyMode;
}
@@ -7471,6 +7493,8 @@
std::vector<audio_latency_mode_t> modes) {
Mutex::Autolock _l(mLock);
if (modes != mSupportedLatencyModes) {
+ ALOGD("%s: thread(%d) supported latency modes: %s",
+ __func__, mId, toString(modes).c_str());
mSupportedLatencyModes.swap(modes);
sendHalLatencyModesChangedEvent_l();
}
@@ -8329,8 +8353,6 @@
audio_input_flags_t inputFlags = mInput->flags;
audio_input_flags_t requestedFlags = *flags;
uint32_t sampleRate;
- AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
- attributionSource);
lStatus = initCheck();
if (lStatus != NO_ERROR) {
@@ -8345,7 +8367,7 @@
}
if (maxSharedAudioHistoryMs != 0) {
- if (!captureHotwordAllowed(checkedAttributionSource)) {
+ if (!captureHotwordAllowed(attributionSource)) {
lStatus = PERMISSION_DENIED;
goto Exit;
}
@@ -8466,16 +8488,16 @@
Mutex::Autolock _l(mLock);
int32_t startFrames = -1;
if (!mSharedAudioPackageName.empty()
- && mSharedAudioPackageName == checkedAttributionSource.packageName
+ && mSharedAudioPackageName == attributionSource.packageName
&& mSharedAudioSessionId == sessionId
- && captureHotwordAllowed(checkedAttributionSource)) {
+ && captureHotwordAllowed(attributionSource)) {
startFrames = mSharedAudioStartFrames;
}
track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
- checkedAttributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
+ attributionSource, *flags, TrackBase::TYPE_DEFAULT, portId,
startFrames);
lStatus = track->initCheck();
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 0a1bc81..2a77d22 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -529,10 +529,7 @@
id, attr.flags);
return nullptr;
}
-
- AttributionSourceState checkedAttributionSource = AudioFlinger::checkAttributionSourcePackage(
- attributionSource);
- return new OpPlayAudioMonitor(checkedAttributionSource, attr.usage, id);
+ return new OpPlayAudioMonitor(attributionSource, attr.usage, id);
}
AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 31dabd9..fcf5e7f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -6343,10 +6343,10 @@
SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
uint32_t maxLatency = 0;
- bool invalidate = false;
+ std::vector<sp<SwAudioOutputDescriptor>> invalidatedOutputs;
// take into account dynamic audio policies related changes: if a client is now associated
// to a different policy mix than at creation time, invalidate corresponding stream
- for (size_t i = 0; i < mPreviousOutputs.size() && !invalidate; i++) {
+ for (size_t i = 0; i < mPreviousOutputs.size(); i++) {
const sp<SwAudioOutputDescriptor>& desc = mPreviousOutputs.valueAt(i);
if (desc->isDuplicated()) {
continue;
@@ -6362,16 +6362,15 @@
continue;
}
if (client->getPrimaryMix() != primaryMix || client->hasLostPrimaryMix()) {
- invalidate = true;
- if (desc->isStrategyActive(psId)) {
+ if (desc->isStrategyActive(psId) && maxLatency < desc->latency()) {
maxLatency = desc->latency();
}
- break;
+ invalidatedOutputs.push_back(desc);
}
}
}
- if (srcOutputs != dstOutputs || invalidate) {
+ if (srcOutputs != dstOutputs || !invalidatedOutputs.empty()) {
// get maximum latency of all source outputs to determine the minimum mute time guaranteeing
// audio from invalidated tracks will be rendered when unmuting
for (audio_io_handle_t srcOut : srcOutputs) {
@@ -6382,8 +6381,7 @@
maxLatency = desc->latency();
}
- if (invalidate) continue;
-
+ bool invalidate = false;
for (auto client : desc->clientsList(false /*activeOnly*/)) {
if (desc->isDuplicated() || !desc->mProfile->isDirectOutput()) {
// a client on a non direct outputs has necessarily a linear PCM format
@@ -6411,21 +6409,14 @@
}
}
}
- }
-
- ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
- "%s: strategy %d, moving from output %s to output %s", __func__, psId,
- std::to_string(srcOutputs[0]).c_str(),
- std::to_string(dstOutputs[0]).c_str());
- // mute strategy while moving tracks from one output to another
- for (audio_io_handle_t srcOut : srcOutputs) {
- sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
- if (desc == nullptr) continue;
-
- if (desc->isStrategyActive(psId)) {
- setStrategyMute(psId, true, desc);
- setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
- newDevices.types());
+ // mute strategy while moving tracks from one output to another
+ if (invalidate) {
+ invalidatedOutputs.push_back(desc);
+ if (desc->isStrategyActive(psId)) {
+ setStrategyMute(psId, true, desc);
+ setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
+ newDevices.types());
+ }
}
sp<SourceClientDescriptor> source = getSourceForAttributesOnOutput(srcOut, attr);
if (source != nullptr && !isCallRxAudioSource(source) && !source->isInternal()) {
@@ -6433,19 +6424,21 @@
}
}
+ ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
+ "%s: strategy %d, moving from output %s to output %s", __func__, psId,
+ std::to_string(srcOutputs[0]).c_str(),
+ std::to_string(dstOutputs[0]).c_str());
+
// Move effects associated to this stream from previous output to new output
if (followsSameRouting(attr, attributes_initializer(AUDIO_USAGE_MEDIA))) {
selectOutputForMusicEffects();
}
// Move tracks associated to this stream (and linked) from previous output to new output
- if (invalidate) {
+ if (!invalidatedOutputs.empty()) {
for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
mpClientInterface->invalidateStream(stream);
}
- for (audio_io_handle_t srcOut : srcOutputs) {
- sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
- if (desc == nullptr) continue;
-
+ for (sp<SwAudioOutputDescriptor> desc : invalidatedOutputs) {
desc->setTracksInvalidatedStatusByStrategy(psId);
}
}
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index df49bba..49224c5 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -352,31 +352,20 @@
ALOGV("%s()", __func__);
Mutex::Autolock _l(mLock);
- // TODO b/182392553: refactor or remove
- AttributionSourceState adjAttributionSource = attributionSource;
- const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isAudioServerOrMediaServerUid(callingUid) || attributionSource.uid == -1) {
- int32_t callingUidAidl = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_uid_t_int32_t(callingUid));
- ALOGW_IF(attributionSource.uid != -1 && attributionSource.uid != callingUidAidl,
- "%s uid %d tried to pass itself off as %d", __func__,
- callingUidAidl, attributionSource.uid);
- adjAttributionSource.uid = callingUidAidl;
- }
if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_uid_t(adjAttributionSource.uid)))) {
+ aidl2legacy_int32_t_uid_t(attributionSource.uid)))) {
attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
}
if (((attr.flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
- && !bypassInterruptionPolicyAllowed(adjAttributionSource)) {
+ && !bypassInterruptionPolicyAllowed(attributionSource)) {
attr.flags = static_cast<audio_flags_mask_t>(
attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
}
if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
- if (!accessUltrasoundAllowed(adjAttributionSource)) {
+ if (!accessUltrasoundAllowed(attributionSource)) {
ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
- __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+ __func__, attributionSource.uid, attributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
}
}
@@ -386,7 +375,7 @@
bool isSpatialized = false;
status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session,
&stream,
- adjAttributionSource,
+ attributionSource,
&config,
&flags, &selectedDeviceId, &portId,
&secondaryOutputs,
@@ -401,20 +390,20 @@
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
- && !callAudioInterceptionAllowed(adjAttributionSource)) {
+ && !callAudioInterceptionAllowed(attributionSource)) {
ALOGE("%s() permission denied: call redirection not allowed for uid %d",
- __func__, adjAttributionSource.uid);
+ __func__, attributionSource.uid);
result = PERMISSION_DENIED;
- } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+ } else if (!modifyPhoneStateAllowed(attributionSource)) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
- __func__, adjAttributionSource.uid);
+ __func__, attributionSource.uid);
result = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
- if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
+ if (!modifyAudioRoutingAllowed(attributionSource)) {
ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
- __func__, adjAttributionSource.uid);
+ __func__, attributionSource.uid);
result = PERMISSION_DENIED;
}
break;
@@ -427,7 +416,7 @@
if (result == NO_ERROR) {
sp<AudioPlaybackClient> client =
- new AudioPlaybackClient(attr, output, adjAttributionSource, session,
+ new AudioPlaybackClient(attr, output, attributionSource, session,
portId, selectedDeviceId, stream, isSpatialized);
mAudioPlaybackClients.add(portId, client);
@@ -613,33 +602,8 @@
return binderStatusFromStatusT(BAD_VALUE);
}
- // Make sure attribution source represents the current caller
- AttributionSourceState adjAttributionSource = attributionSource;
- // TODO b/182392553: refactor or remove
- bool updatePid = (attributionSource.pid == -1);
- const uid_t callingUid =IPCThreadState::self()->getCallingUid();
- const uid_t currentUid = VALUE_OR_RETURN_BINDER_STATUS(aidl2legacy_int32_t_uid_t(
- attributionSource.uid));
- if (!isAudioServerOrMediaServerUid(callingUid)) {
- ALOGW_IF(currentUid != (uid_t)-1 && currentUid != callingUid,
- "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid,
- currentUid);
- adjAttributionSource.uid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_uid_t_int32_t(
- callingUid));
- updatePid = true;
- }
-
- if (updatePid) {
- const int32_t callingPid = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_pid_t_int32_t(
- IPCThreadState::self()->getCallingPid()));
- ALOGW_IF(attributionSource.pid != -1 && attributionSource.pid != callingPid,
- "%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, adjAttributionSource.uid, callingPid, attributionSource.pid);
- adjAttributionSource.pid = callingPid;
- }
-
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
- adjAttributionSource)));
+ attributionSource)));
// check calling permissions.
// Capturing from the following sources does not require permission RECORD_AUDIO
@@ -650,17 +614,17 @@
// type is API_INPUT_MIX_EXT_POLICY_REROUTE and by AudioService if a media projection
// is used and input type is API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK
// - ECHO_REFERENCE source is controlled by captureAudioOutputAllowed()
- if (!(recordingAllowed(adjAttributionSource, inputSource)
+ if (!(recordingAllowed(attributionSource, inputSource)
|| inputSource == AUDIO_SOURCE_FM_TUNER
|| inputSource == AUDIO_SOURCE_REMOTE_SUBMIX
|| inputSource == AUDIO_SOURCE_ECHO_REFERENCE)) {
ALOGE("%s permission denied: recording not allowed for %s",
- __func__, adjAttributionSource.toString().c_str());
+ __func__, attributionSource.toString().c_str());
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
- bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+ bool canCaptureOutput = captureAudioOutputAllowed(attributionSource);
+ bool canInterceptCallAudio = callAudioInterceptionAllowed(attributionSource);
bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
|| inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
|| inputSource == AUDIO_SOURCE_VOICE_CALL;
@@ -674,11 +638,11 @@
}
if (inputSource == AUDIO_SOURCE_FM_TUNER
&& !canCaptureOutput
- && !captureTunerAudioInputAllowed(adjAttributionSource)) {
+ && !captureTunerAudioInputAllowed(attributionSource)) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
- bool canCaptureHotword = captureHotwordAllowed(adjAttributionSource);
+ bool canCaptureHotword = captureHotwordAllowed(attributionSource);
if ((inputSource == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return binderStatusFromStatusT(PERMISSION_DENIED);
}
@@ -686,14 +650,14 @@
if (((flags & AUDIO_INPUT_FLAG_HW_HOTWORD) != 0)
&& !canCaptureHotword) {
ALOGE("%s: permission denied: hotword mode not allowed"
- " for uid %d pid %d", __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+ " for uid %d pid %d", __func__, attributionSource.uid, attributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
}
if (attr.source == AUDIO_SOURCE_ULTRASOUND) {
- if (!accessUltrasoundAllowed(adjAttributionSource)) {
+ if (!accessUltrasoundAllowed(attributionSource)) {
ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
- __func__, adjAttributionSource.uid, adjAttributionSource.pid);
+ __func__, attributionSource.uid, attributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
}
}
@@ -708,7 +672,7 @@
AutoCallerClear acc;
// the audio_in_acoustics_t parameter is ignored by get_input()
status = mAudioPolicyManager->getInputForAttr(&attr, &input, riid, session,
- adjAttributionSource, &config,
+ attributionSource, &config,
flags, &selectedDeviceId,
&inputType, &portId);
@@ -737,7 +701,7 @@
}
break;
case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
- if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+ if (!(modifyAudioRoutingAllowed(attributionSource)
|| ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
&& canInterceptCallAudio))) {
ALOGE("%s permission denied for remote submix capture", __func__);
@@ -760,7 +724,7 @@
}
sp<AudioRecordClient> client = new AudioRecordClient(attr, input, session, portId,
- selectedDeviceId, adjAttributionSource,
+ selectedDeviceId, attributionSource,
canCaptureOutput, canCaptureHotword,
mOutputCommandThread);
mAudioRecordClients.add(portId, client);
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index dfdd351..83a11fb 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -1814,12 +1814,14 @@
void AudioPolicyService::SensorPrivacyPolicy::registerSelf() {
SensorPrivacyManager spm;
mSensorPrivacyEnabled = spm.isSensorPrivacyEnabled();
+ (void)spm.addToggleSensorPrivacyListener(this);
spm.addSensorPrivacyListener(this);
}
void AudioPolicyService::SensorPrivacyPolicy::unregisterSelf() {
SensorPrivacyManager spm;
spm.removeSensorPrivacyListener(this);
+ spm.removeToggleSensorPrivacyListener(this);
}
bool AudioPolicyService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index fdded34..277c91b 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -935,12 +935,15 @@
void Spatializer::checkSensorsState_l() {
audio_latency_mode_t requestedLatencyMode = AUDIO_LATENCY_MODE_FREE;
- bool lowLatencySupported = mSupportedLatencyModes.empty()
- || (std::find(mSupportedLatencyModes.begin(), mSupportedLatencyModes.end(),
- AUDIO_LATENCY_MODE_LOW) != mSupportedLatencyModes.end());
+ const bool supportsSetLatencyMode = !mSupportedLatencyModes.empty();
+ const bool supportsLowLatencyMode = supportsSetLatencyMode && std::find(
+ mSupportedLatencyModes.begin(), mSupportedLatencyModes.end(),
+ AUDIO_LATENCY_MODE_LOW) != mSupportedLatencyModes.end();
if (mSupportsHeadTracking) {
if (mPoseController != nullptr) {
- if (lowLatencySupported && mNumActiveTracks > 0 && mLevel != SpatializationLevel::NONE
+ // TODO(b/253297301, b/255433067) reenable low latency condition check
+ // for Head Tracking after Bluetooth HAL supports it correctly.
+ if (mNumActiveTracks > 0 && mLevel != SpatializationLevel::NONE
&& mDesiredHeadTrackingMode != HeadTrackingMode::STATIC
&& mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
if (mEngine != nullptr) {
@@ -949,7 +952,7 @@
}
mPoseController->setHeadSensor(mHeadSensor);
mPoseController->setScreenSensor(mScreenSensor);
- requestedLatencyMode = AUDIO_LATENCY_MODE_LOW;
+ if (supportsLowLatencyMode) requestedLatencyMode = AUDIO_LATENCY_MODE_LOW;
} else {
mPoseController->setHeadSensor(SpatializerPoseController::INVALID_SENSOR);
mPoseController->setScreenSensor(SpatializerPoseController::INVALID_SENSOR);
@@ -959,8 +962,11 @@
resetEngineHeadPose_l();
}
}
- if (mOutput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::setRequestedLatencyMode(mOutput, requestedLatencyMode);
+ if (mOutput != AUDIO_IO_HANDLE_NONE && supportsSetLatencyMode) {
+ const status_t status =
+ AudioSystem::setRequestedLatencyMode(mOutput, requestedLatencyMode);
+ ALOGD("%s: setRequestedLatencyMode for output thread(%d) to %s returned %d",
+ __func__, mOutput, toString(requestedLatencyMode).c_str(), status);
}
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index bcbd92b..7415b1e 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -165,14 +165,10 @@
std::string toString(unsigned level) const NO_THREAD_SAFETY_ANALYSIS;
static std::string toString(audio_latency_mode_t mode) {
- switch (mode) {
- case AUDIO_LATENCY_MODE_FREE:
- return "LATENCY_MODE_FREE";
- case AUDIO_LATENCY_MODE_LOW:
- return "LATENCY_MODE_LOW";
- }
- return "EnumNotImplemented";
- };
+ // We convert to the AIDL type to print (eventually the legacy type will be removed).
+ const auto result = legacy2aidl_audio_latency_mode_t_LatencyMode(mode);
+ return result.has_value() ? media::toString(*result) : "unknown_latency_mode";
+ }
/**
* Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 2daacd1..74423e5 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -59,6 +59,8 @@
m3aState.aeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
m3aState.afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
m3aState.awbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+
+ mLastFocalLength = l.mParameters.params.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
}
}
@@ -92,9 +94,32 @@
client->notifyRequestId(mCurrentRequestId);
}
+ processLensState(frame.mMetadata, client);
+
return FrameProcessorBase::processSingleFrame(frame, device);
}
+void FrameProcessor::processLensState(const CameraMetadata &frame,
+ const sp<Camera2Client> &client) {
+ ATRACE_CALL();
+ camera_metadata_ro_entry_t entry;
+
+ entry = frame.find(ANDROID_LENS_FOCAL_LENGTH);
+ if (entry.count == 0) {
+ return;
+ }
+
+ if (fabs(entry.data.f[0] - mLastFocalLength) > 0.001f) {
+ SharedParameters::Lock l(client->getParameters());
+ l.mParameters.params.setFloat(
+ CameraParameters::KEY_FOCAL_LENGTH,
+ entry.data.f[0]);
+ l.mParameters.paramsFlattened = l.mParameters.params.flatten();
+
+ mLastFocalLength = entry.data.f[0];
+ }
+}
+
status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
const sp<Camera2Client> &client) {
status_t res = BAD_VALUE;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index bb985f6..6c8d221 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -57,6 +57,9 @@
virtual bool processSingleFrame(CaptureResult &frame,
const sp<FrameProducer> &device);
+ void processLensState(const CameraMetadata &frame,
+ const sp<Camera2Client> &client);
+
status_t processFaceDetect(const CameraMetadata &frame,
const sp<Camera2Client> &client);
@@ -110,6 +113,9 @@
// Emit FaceDetection event to java if faces changed
void callbackFaceDetection(const sp<Camera2Client>& client,
const camera_frame_metadata &metadata);
+
+ // Track most recent focal length sent by the camera device
+ float mLastFocalLength;
};
diff --git a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
index d3377f4..ae4d5dd 100644
--- a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
+++ b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
@@ -31,47 +31,48 @@
std::map<int, std::vector<camera_metadata_tag>> static_api_level_to_keys{
{30, {
ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES,
+ ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
ANDROID_CONTROL_ZOOM_RATIO_RANGE,
ANDROID_SCALER_AVAILABLE_ROTATE_AND_CROP_MODES,
- ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
} },
{31, {
- ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION,
ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION,
- ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_PHYSICAL_CAMERA_MULTI_RESOLUTION_STREAM_CONFIGURATIONS,
- ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_MULTI_RESOLUTION_STREAM_SUPPORTED,
- ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION,
- ANDROID_SCALER_DEFAULT_SECURE_IMAGE_SIZE,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
- ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION,
+ ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_LENS_DISTORTION_MAXIMUM_RESOLUTION,
+ ANDROID_LENS_INTRINSIC_CALIBRATION_MAXIMUM_RESOLUTION,
+ ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP_MAXIMUM_RESOLUTION,
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_SCALER_DEFAULT_SECURE_IMAGE_SIZE,
+ ANDROID_SCALER_MULTI_RESOLUTION_STREAM_SUPPORTED,
+ ANDROID_SCALER_PHYSICAL_CAMERA_MULTI_RESOLUTION_STREAM_CONFIGURATIONS,
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
ANDROID_SENSOR_INFO_BINNING_FACTOR,
+ ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE_MAXIMUM_RESOLUTION,
+ ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION,
+ ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION,
} },
{32, {
ANDROID_INFO_DEVICE_STATE_ORIENTATIONS,
} },
{33, {
- ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL,
ANDROID_AUTOMOTIVE_LENS_FACING,
ANDROID_AUTOMOTIVE_LOCATION,
+ ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL,
+ ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP,
ANDROID_REQUEST_RECOMMENDED_TEN_BIT_DYNAMIC_RANGE_PROFILE,
ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES,
- ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP,
- ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL,
+ ANDROID_SENSOR_READOUT_TIMESTAMP,
} },
};
@@ -81,9 +82,9 @@
*/
std::map<int, std::vector<camera_metadata_tag>> dynamic_api_level_to_keys{
{30, {
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE,
ANDROID_CONTROL_ZOOM_RATIO,
ANDROID_SCALER_ROTATE_AND_CROP,
- ANDROID_CONTROL_EXTENDED_SCENE_MODE,
} },
{31, {
ANDROID_SENSOR_PIXEL_MODE,