Merge "audio: replace speaker to speaker safe if ring is active" into sc-dev
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1cde4c6..52cd4b4 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -8908,13 +8908,27 @@
* camera's crop region is set to maximum size, the FOV of the physical streams for the
* ultrawide lens will be the same as the logical stream, by making the crop region
* smaller than its active array size to compensate for the smaller focal length.</p>
- * <p>Even if the underlying physical cameras have different RAW characteristics (such as
- * size or CFA pattern), a logical camera can still advertise RAW capability. In this
- * case, when the application configures a RAW stream, the camera device will make sure
- * the active physical camera will remain active to ensure consistent RAW output
- * behavior, and not switch to other physical cameras.</p>
+ * <p>There are two ways for the application to capture RAW images from a logical camera
+ * with RAW capability:</p>
+ * <ul>
+ * <li>Because the underlying physical cameras may have different RAW capabilities (such
+ * as resolution or CFA pattern), to maintain backward compatibility, when a RAW stream
+ * is configured, the camera device makes sure the default active physical camera remains
+ * active and does not switch to other physical cameras. (One exception is that, if the
+ * logical camera consists of identical image sensors and advertises multiple focalLength
+ * due to different lenses, the camera device may generate RAW images from different
+ * physical cameras based on the focalLength being set by the application.) This
+ * backward-compatible approach usually results in loss of optical zoom, to telephoto
+ * lens or to ultrawide lens.</li>
+ * <li>Alternatively, to take advantage of the full zoomRatio range of the logical camera,
+ * the application should use <a href="https://developer.android.com/reference/android/hardware/camera2/MultiResolutionImageReader.html">MultiResolutionImageReader</a>
+ * to capture RAW images from the currently active physical camera. Because different
 + * physical cameras may have different RAW characteristics, the application needs to use
+ * the characteristics and result metadata of the active physical camera for the
+ * relevant RAW metadata.</li>
+ * </ul>
* <p>The capture request and result metadata tags required for backward compatible camera
- * functionalities will be solely based on the logical camera capabiltity. On the other
+ * functionalities will be solely based on the logical camera capability. On the other
* hand, the use of manual capture controls (sensor or post-processing) with a
* logical camera may result in unexpected behavior when the HAL decides to switch
* between physical cameras with different characteristics under the hood. For example,
diff --git a/drm/drmserver/drmserver.rc b/drm/drmserver/drmserver.rc
index de46fb9..eb176c1 100644
--- a/drm/drmserver/drmserver.rc
+++ b/drm/drmserver/drmserver.rc
@@ -1,5 +1,12 @@
service drm /system/bin/drmserver
+ disabled
class main
user drm
group drm system inet drmrpc readproc
writepid /dev/cpuset/foreground/tasks
+
+on property:drm.service.enabled=true
+ start drm
+
+on property:drm.service.enabled=1
+ start drm
diff --git a/drm/libdrmframework/DrmManagerClientImpl.cpp b/drm/libdrmframework/DrmManagerClientImpl.cpp
index b0a441b..a2cac3f 100644
--- a/drm/libdrmframework/DrmManagerClientImpl.cpp
+++ b/drm/libdrmframework/DrmManagerClientImpl.cpp
@@ -52,25 +52,13 @@
const sp<IDrmManagerService>& DrmManagerClientImpl::getDrmManagerService() {
Mutex::Autolock lock(sMutex);
if (NULL == sDrmManagerService.get()) {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("drm.service.enabled", value, NULL) == 0) {
- // Drm is undefined for this device
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("drm.drmManager"));
+ if (binder == NULL) {
+ // Do NOT retry; IServiceManager already waits for ~5 seconds
+ // in getService if a service doesn't yet exist.
return sDrmManagerService;
}
-
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder;
- do {
- binder = sm->getService(String16("drm.drmManager"));
- if (binder != 0) {
- break;
- }
- ALOGW("DrmManagerService not published, waiting...");
- struct timespec reqt;
- reqt.tv_sec = 0;
- reqt.tv_nsec = 500000000; //0.5 sec
- nanosleep(&reqt, NULL);
- } while (true);
if (NULL == sDeathNotifier.get()) {
sDeathNotifier = new DeathNotifier();
}
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index 7137767..30d7394 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "C2SoftMp3Dec"
+#include <inttypes.h>
#include <log/log.h>
#include <numeric>
@@ -485,10 +486,10 @@
}
}
- uint64_t outTimeStamp = mProcessedSamples * 1000000ll / samplingRate;
+ int64_t outTimeStamp = mProcessedSamples * 1000000ll / samplingRate;
mProcessedSamples += ((outSize - outOffset) / (numChannels * sizeof(int16_t)));
- ALOGV("out buffer attr. offset %d size %d timestamp %u", outOffset, outSize - outOffset,
- (uint32_t)(mAnchorTimeStamp + outTimeStamp));
+ ALOGV("out buffer attr. offset %d size %d timestamp %" PRId64 " ", outOffset,
+ outSize - outOffset, mAnchorTimeStamp + outTimeStamp);
decodedSizes.clear();
work->worklets.front()->output.flags = work->input.flags;
work->worklets.front()->output.buffers.clear();
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.h b/media/codec2/components/mp3/C2SoftMp3Dec.h
index 402bdc4..e2dfcf3 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.h
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.h
@@ -63,7 +63,7 @@
bool mSignalledError;
bool mSignalledOutputEos;
bool mGaplessBytes;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
status_t initDecoder();
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 6a00edd..a6507e7 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -334,6 +334,12 @@
int bytesCount = nWidth * nHeight * 3 >> 1;
int32_t timestampIncr = ENCODER_TIMESTAMP_INCREMENT;
c2_status_t err = C2_OK;
+
+ // Query component's memory usage flags
+ std::vector<std::unique_ptr<C2Param>> params;
+ C2StreamUsageTuning::input compUsage(0u, 0u);
+    component->query({&compUsage}, {}, C2_DONT_BLOCK, &params);
+
while (1) {
if (nFrames == 0) break;
uint32_t flags = 0;
@@ -384,7 +390,8 @@
}
std::shared_ptr<C2GraphicBlock> block;
err = graphicPool->fetchGraphicBlock(nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
- {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+ {C2MemoryUsage::CPU_READ | compUsage.value,
+ C2MemoryUsage::CPU_WRITE | compUsage.value},
&block);
if (err != C2_OK) {
fprintf(stderr, "fetchGraphicBlock failed : %d\n", err);
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6765bdb..5f802de 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -472,7 +472,7 @@
status = BAD_VALUE;
goto exit;
}
- mStreamType = streamType;
+ mOriginalStreamType = streamType;
} else {
// stream type shouldn't be looked at, this track has audio attributes
@@ -481,7 +481,7 @@
" usage=%d content=%d flags=0x%x tags=[%s]",
__func__,
mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
- mStreamType = AUDIO_STREAM_DEFAULT;
+ mOriginalStreamType = AUDIO_STREAM_DEFAULT;
audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
}
@@ -1605,9 +1605,6 @@
audio_stream_type_t AudioTrack::streamType() const
{
- if (mStreamType == AUDIO_STREAM_DEFAULT) {
- return AudioSystem::attributesToStreamType(mAttributes);
- }
return mStreamType;
}
@@ -1688,8 +1685,9 @@
}
IAudioFlinger::CreateTrackInput input;
- if (mStreamType != AUDIO_STREAM_DEFAULT) {
- input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
+ if (mOriginalStreamType != AUDIO_STREAM_DEFAULT) {
+ // Legacy: This is based on original parameters even if the track is recreated.
+ input.attr = AudioSystem::streamTypeToAttributes(mOriginalStreamType);
} else {
input.attr = mAttributes;
}
@@ -1745,6 +1743,7 @@
mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
mRoutedDeviceId = output.selectedDeviceId;
mSessionId = output.sessionId;
+ mStreamType = output.streamType;
mSampleRate = output.sampleRate;
if (mOriginalSampleRate == 0) {
@@ -3284,8 +3283,6 @@
result.appendFormat(" id(%d) status(%d), state(%d), session Id(%d), flags(%#x)\n",
mPortId, mStatus, mState, mSessionId, mFlags);
result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
- (mStreamType == AUDIO_STREAM_DEFAULT) ?
- AudioSystem::attributesToStreamType(mAttributes) :
mStreamType,
mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 0564cdf..e46b349 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -101,6 +101,8 @@
legacy2aidl_audio_port_handle_t_int32_t(selectedDeviceId));
aidl.sessionId = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(sessionId));
aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
+ aidl.streamType = VALUE_OR_RETURN(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(streamType));
aidl.afFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(afFrameCount));
aidl.afSampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(afSampleRate));
aidl.afLatencyMs = VALUE_OR_RETURN(convertIntegral<int32_t>(afLatencyMs));
@@ -122,6 +124,8 @@
aidl2legacy_int32_t_audio_port_handle_t(aidl.selectedDeviceId));
legacy.sessionId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.sessionId));
legacy.sampleRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
+ legacy.streamType = VALUE_OR_RETURN(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
legacy.afFrameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.afFrameCount));
legacy.afSampleRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.afSampleRate));
legacy.afLatencyMs = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.afLatencyMs));
diff --git a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
index 6bdd8e4..40473fa 100644
--- a/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateTrackResponse.aidl
@@ -16,6 +16,7 @@
package android.media;
+import android.media.AudioStreamType;
import android.media.IAudioTrack;
/**
@@ -34,6 +35,7 @@
int selectedDeviceId;
int sessionId;
int sampleRate;
+ AudioStreamType streamType;
long afFrameCount;
int afSampleRate;
int afLatencyMs;
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index f61eef2..cb00990 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -1164,8 +1164,9 @@
// constant after constructor or set()
audio_format_t mFormat; // as requested by client, not forced to 16-bit
- audio_stream_type_t mStreamType; // mStreamType == AUDIO_STREAM_DEFAULT implies
- // this AudioTrack has valid attributes
+ // mOriginalStreamType == AUDIO_STREAM_DEFAULT implies this AudioTrack has valid attributes
+ audio_stream_type_t mOriginalStreamType = AUDIO_STREAM_DEFAULT;
+ audio_stream_type_t mStreamType = AUDIO_STREAM_DEFAULT;
uint32_t mChannelCount;
audio_channel_mask_t mChannelMask;
sp<IMemory> mSharedBuffer;
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 327b37e..0e059f7 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -110,6 +110,7 @@
/* output */
uint32_t sampleRate;
+ audio_stream_type_t streamType;
size_t afFrameCount;
uint32_t afSampleRate;
uint32_t afLatencyMs;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 54a6425..65a163f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -947,6 +947,7 @@
output.frameCount = input.frameCount;
output.notificationFrameCount = input.notificationFrameCount;
output.flags = input.flags;
+ output.streamType = streamType;
track = thread->createTrack_l(client, streamType, localAttr, &output.sampleRate,
input.config.format, input.config.channel_mask,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 9e099ce..b9cdab8 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -8220,6 +8220,7 @@
status_t AudioFlinger::RecordThread::shareAudioHistory_l(
const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
int64_t sharedAudioStartMs) {
+
if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
return BAD_VALUE;
}
@@ -8234,18 +8235,21 @@
// after one wraparound
// We assume recent wraparounds on mRsmpInRear only given it is unlikely that the requesting
// app waits several hours after the start time was computed.
- const int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
+ int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
const int32_t sharedOffset = audio_utils::safe_sub_overflow(mRsmpInRear,
(int32_t)sharedAudioStartFrames);
- if (sharedOffset < 0
- || sharedOffset > mRsmpInFrames) {
- return BAD_VALUE;
+ // Bring the start frame position within the input buffer to match the documented
+ // "best effort" behavior of the API.
+ if (sharedOffset < 0) {
+ sharedAudioStartFrames = mRsmpInRear;
+ } else if (sharedOffset > mRsmpInFrames) {
+ sharedAudioStartFrames =
+ audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
}
mSharedAudioPackageName = sharedAudioPackageName;
if (mSharedAudioPackageName.empty()) {
- mSharedAudioSessionId = AUDIO_SESSION_NONE;
- mSharedAudioStartFrames = -1;
+ resetAudioHistory_l();
} else {
mSharedAudioSessionId = sharedSessionId;
mSharedAudioStartFrames = (int32_t)sharedAudioStartFrames;
@@ -8253,6 +8257,12 @@
return NO_ERROR;
}
+void AudioFlinger::RecordThread::resetAudioHistory_l() {
+ mSharedAudioSessionId = AUDIO_SESSION_NONE;
+ mSharedAudioStartFrames = -1;
+ mSharedAudioPackageName = "";
+}
+
void AudioFlinger::RecordThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
@@ -8862,23 +8872,22 @@
int32_t AudioFlinger::RecordThread::getOldestFront_l()
{
if (mTracks.size() == 0) {
- return 0;
+ return mRsmpInRear;
}
int32_t oldestFront = mRsmpInRear;
int32_t maxFilled = 0;
for (size_t i = 0; i < mTracks.size(); i++) {
int32_t front = mTracks[i]->mResamplerBufferProvider->getFront();
int32_t filled;
- if (front <= mRsmpInRear) {
- filled = mRsmpInRear - front;
- } else {
- filled = (int32_t)((int64_t)mRsmpInRear + UINT32_MAX + 1 - front);
- }
+ (void)__builtin_sub_overflow(mRsmpInRear, front, &filled);
if (filled > maxFilled) {
oldestFront = front;
maxFilled = filled;
}
}
+ if (maxFilled > mRsmpInFrames) {
+ (void)__builtin_sub_overflow(mRsmpInRear, mRsmpInFrames, &oldestFront);
+ }
return oldestFront;
}
@@ -8928,7 +8937,7 @@
"resizeInputBuffer_l() called with shared history and unallocated buffer");
size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
// never reduce resampler input buffer size
- if (rsmpInFrames < mRsmpInFrames) {
+ if (rsmpInFrames <= mRsmpInFrames) {
return;
}
mRsmpInFrames = rsmpInFrames;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index eee1f2b..16082a9 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1789,6 +1789,7 @@
status_t shareAudioHistory_l(const std::string& sharedAudioPackageName,
audio_session_t sharedSessionId = AUDIO_SESSION_NONE,
int64_t sharedAudioStartMs = -1);
+ void resetAudioHistory_l();
virtual bool isStreamInitialized() {
return !(mInput == nullptr || mInput->stream == nullptr);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index a6e3c06..d2a30b1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -2458,7 +2458,7 @@
RecordThread *recordThread = (RecordThread *) thread.get();
priorState = mState;
if (!mSharedAudioPackageName.empty()) {
- recordThread->shareAudioHistory_l("");
+ recordThread->resetAudioHistory_l();
}
recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
}
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index b4b6ddf..9987252 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -675,7 +675,7 @@
sp<AudioRecordClient> client = new AudioRecordClient(attr, input, session, portId,
selectedDeviceId, adjAttributionSource,
canCaptureOutput, canCaptureHotword,
- mAudioCommandThread);
+ mOutputCommandThread);
mAudioRecordClients.add(portId, client);
}