Merge "AudioTrack: Obtain stream type from AudioFlinger" into sc-dev
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index 7137767..30d7394 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "C2SoftMp3Dec"
+#include <inttypes.h>
#include <log/log.h>
#include <numeric>
@@ -485,10 +486,10 @@
}
}
- uint64_t outTimeStamp = mProcessedSamples * 1000000ll / samplingRate;
+ int64_t outTimeStamp = mProcessedSamples * 1000000ll / samplingRate;
mProcessedSamples += ((outSize - outOffset) / (numChannels * sizeof(int16_t)));
- ALOGV("out buffer attr. offset %d size %d timestamp %u", outOffset, outSize - outOffset,
- (uint32_t)(mAnchorTimeStamp + outTimeStamp));
+ ALOGV("out buffer attr. offset %d size %d timestamp %" PRId64 " ", outOffset,
+ outSize - outOffset, mAnchorTimeStamp + outTimeStamp);
decodedSizes.clear();
work->worklets.front()->output.flags = work->input.flags;
work->worklets.front()->output.buffers.clear();
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.h b/media/codec2/components/mp3/C2SoftMp3Dec.h
index 402bdc4..e2dfcf3 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.h
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.h
@@ -63,7 +63,7 @@
bool mSignalledError;
bool mSignalledOutputEos;
bool mGaplessBytes;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
status_t initDecoder();
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 1ed240a..09d9535 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -452,8 +452,8 @@
void* threadArg)
{
if (mHasThread) {
- ALOGE("%s() - mHasThread already true", __func__);
- return AAUDIO_ERROR_INVALID_STATE;
+ ALOGD("%s() - previous thread was not joined, join now to be safe", __func__);
+ joinThread_l(nullptr);
}
if (threadProc == nullptr) {
return AAUDIO_ERROR_NULL;
@@ -462,6 +462,7 @@
mThreadProc = threadProc;
mThreadArg = threadArg;
setPeriodNanoseconds(periodNanoseconds);
+ mHasThread = true;
// Prevent this object from getting deleted before the thread has a chance to create
// its strong pointer. Assume the thread will call decStrong().
this->incStrong(nullptr);
@@ -470,6 +471,7 @@
android::status_t status = -errno;
ALOGE("%s() - pthread_create() failed, %d", __func__, status);
this->decStrong(nullptr); // Because the thread won't do it.
+ mHasThread = false;
return AAudioConvert_androidToAAudioResult(status);
} else {
// TODO Use AAudioThread or maybe AndroidThread
@@ -484,7 +486,6 @@
err = pthread_setname_np(mThread, name);
ALOGW_IF((err != 0), "Could not set name of AAudio thread. err = %d", err);
- mHasThread = true;
return AAUDIO_OK;
}
}
@@ -498,7 +499,7 @@
// This must be called under mStreamLock.
aaudio_result_t AudioStream::joinThread_l(void** returnArg) {
if (!mHasThread) {
- ALOGD("joinThread() - but has no thread");
+ ALOGD("joinThread() - but has no thread or already join()ed");
return AAUDIO_ERROR_INVALID_STATE;
}
aaudio_result_t result = AAUDIO_OK;
@@ -515,8 +516,7 @@
result = AAudioConvert_androidToAAudioResult(-err);
} else {
ALOGD("%s() pthread_join succeeded", __func__);
- // This must be set false so that the callback thread can be created
- // when the stream is restarted.
+ // Prevent joining a second time, which has undefined behavior.
mHasThread = false;
}
} else {
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 2b45ed3..9835c8c 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -157,9 +157,13 @@
virtual aaudio_result_t setBufferSize(int32_t requestedFrames) = 0;
- virtual aaudio_result_t createThread_l(int64_t periodNanoseconds,
- aaudio_audio_thread_proc_t threadProc,
- void *threadArg);
+ aaudio_result_t createThread(int64_t periodNanoseconds,
+ aaudio_audio_thread_proc_t threadProc,
+ void *threadArg)
+ EXCLUDES(mStreamLock) {
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ return createThread_l(periodNanoseconds, threadProc, threadArg);
+ }
aaudio_result_t joinThread(void **returnArg);
@@ -535,6 +539,11 @@
mSessionId = sessionId;
}
+ aaudio_result_t createThread_l(int64_t periodNanoseconds,
+ aaudio_audio_thread_proc_t threadProc,
+ void *threadArg)
+ REQUIRES(mStreamLock);
+
aaudio_result_t joinThread_l(void **returnArg) REQUIRES(mStreamLock);
std::atomic<bool> mCallbackEnabled{false};
@@ -658,6 +667,7 @@
std::atomic<pid_t> mErrorCallbackThread{CALLBACK_THREAD_NONE};
// background thread ----------------------------------
+ // Use mHasThread to prevent joining twice, which has undefined behavior.
bool mHasThread GUARDED_BY(mStreamLock) = false;
pthread_t mThread GUARDED_BY(mStreamLock) = {};
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 9533ae5..8e05de8 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -145,15 +145,17 @@
return;
}
- // Close socket before posting message to RTSPSource message handler.
- if (mHandler != NULL) {
- close(mHandler->getARTSPConnection()->getSocket());
- }
-
sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
sp<AMessage> dummy;
msg->postAndAwaitResponse(&dummy);
+
+ // Close socket after posting message to RTSPSource message handler.
+ if (mHandler != NULL && mHandler->getARTSPConnection()->getSocket() >= 0) {
+ ALOGD("closing rtsp socket if not closed yet.");
+ close(mHandler->getARTSPConnection()->getSocket());
+ }
+
}
status_t NuPlayer::RTSPSource::feedMoreTSData() {
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 8f4df8e..169df46 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -594,4 +594,15 @@
return mFormat;
}
+bool APacketSource::isVideo() {
+ bool isVideo = false;
+
+ const char *mime;
+ if (mFormat->findCString(kKeyMIMEType, &mime)) {
+ isVideo = !strncasecmp(mime, "video/", 6);
+ }
+
+ return isVideo;
+}
+
} // namespace android
diff --git a/media/libstagefright/rtsp/APacketSource.h b/media/libstagefright/rtsp/APacketSource.h
index 530e537..2b9b5ba 100644
--- a/media/libstagefright/rtsp/APacketSource.h
+++ b/media/libstagefright/rtsp/APacketSource.h
@@ -33,6 +33,8 @@
virtual sp<MetaData> getFormat();
+ bool isVideo();
+
protected:
virtual ~APacketSource();
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 33c85a7..a4da433 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -104,6 +104,11 @@
msg->post();
}
+void ARTPConnection::seekStream() {
+ sp<AMessage> msg = new AMessage(kWhatSeekStream, this);
+ msg->post();
+}
+
void ARTPConnection::removeStream(int rtpSocket, int rtcpSocket) {
sp<AMessage> msg = new AMessage(kWhatRemoveStream, this);
msg->setInt32("rtp-socket", rtpSocket);
@@ -283,6 +288,12 @@
break;
}
+ case kWhatSeekStream:
+ {
+ onSeekStream(msg);
+ break;
+ }
+
case kWhatRemoveStream:
{
onRemoveStream(msg);
@@ -353,6 +364,18 @@
}
}
+void ARTPConnection::onSeekStream(const sp<AMessage> &msg) {
+ (void)msg; // unused param as of now.
+ List<StreamInfo>::iterator it = mStreams.begin();
+ while (it != mStreams.end()) {
+ for (size_t i = 0; i < it->mSources.size(); ++i) {
+ sp<ARTPSource> source = it->mSources.valueAt(i);
+ source->timeReset();
+ }
+ ++it;
+ }
+}
+
void ARTPConnection::onRemoveStream(const sp<AMessage> &msg) {
int32_t rtpSocket, rtcpSocket;
CHECK(msg->findInt32("rtp-socket", &rtpSocket));
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index ea0a374..adf9670 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -40,7 +40,7 @@
const sp<ASessionDescription> &sessionDesc, size_t index,
const sp<AMessage> &notify,
bool injected);
-
+ void seekStream();
void removeStream(int rtpSocket, int rtcpSocket);
void injectPacket(int index, const sp<ABuffer> &buffer);
@@ -69,6 +69,7 @@
private:
enum {
kWhatAddStream,
+ kWhatSeekStream,
kWhatRemoveStream,
kWhatPollStreams,
kWhatInjectPacket,
@@ -94,6 +95,7 @@
int32_t mCumulativeBytes;
void onAddStream(const sp<AMessage> &msg);
+ void onSeekStream(const sp<AMessage> &msg);
void onRemoveStream(const sp<AMessage> &msg);
void onPollStreams();
void onInjectPacket(const sp<AMessage> &msg);
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 8787d65..f960482 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -130,6 +130,24 @@
notify->post();
}
+void ARTPSource::timeReset() {
+ mFirstRtpTime = 0;
+ mFirstSysTime = 0;
+ mFirstSsrc = 0;
+ mHighestNackNumber = 0;
+ mHighestSeqNumber = 0;
+ mPrevExpected = 0;
+ mBaseSeqNumber = 0;
+ mNumBuffersReceived = 0;
+ mPrevNumBuffersReceived = 0;
+ mPrevExpectedForRR = 0;
+ mPrevNumBuffersReceivedForRR = 0;
+ mLastNTPTime = 0;
+ mLastNTPTimeUpdateUs = 0;
+ mIssueFIRByAssembler = false;
+ mLastFIRRequestUs = -1;
+}
+
bool ARTPSource::queuePacket(const sp<ABuffer> &buffer) {
uint32_t seqNum = (uint32_t)buffer->int32Data();
@@ -147,6 +165,11 @@
ALOGD("first-rtp arrived: first-rtp-time=%u, sys-time=%lld, seq-num=%u, ssrc=%d",
mFirstRtpTime, (long long)mFirstSysTime, mHighestSeqNumber, mFirstSsrc);
mJitterCalc->init(mFirstRtpTime, mFirstSysTime, 0, mStaticJbTimeMs * 1000);
+ if (mQueue.size() > 0) {
+ ALOGD("clearing buffers which belonged to previous timeline"
+ " since a base timeline has been changed.");
+ mQueue.clear();
+ }
mQueue.push_back(buffer);
return true;
}
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index 0edff23..2d804d8 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -56,6 +56,7 @@
};
void processRTPPacket(const sp<ABuffer> &buffer);
+ void timeReset();
void timeUpdate(uint32_t rtpTime, uint64_t ntpTime);
void byeReceived();
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 0fdf431..988cec7 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -74,7 +74,8 @@
// The allowed maximum number of stale access units at the beginning of
// a new sequence.
-static int32_t kMaxAllowedStaleAccessUnits = 20;
+static int32_t kMaxAllowedStaleAudioAccessUnits = 20;
+static int32_t kMaxAllowedStaleVideoAccessUnits = 400;
static int64_t kTearDownTimeoutUs = 3000000ll;
@@ -108,6 +109,10 @@
}
}
+static int32_t GetMaxAllowedStaleCount(bool isVideo) {
+ return isVideo ? kMaxAllowedStaleVideoAccessUnits : kMaxAllowedStaleAudioAccessUnits;
+}
+
struct MyHandler : public AHandler {
enum {
kWhatConnected = 'conn',
@@ -1330,6 +1335,8 @@
ALOGV("rtp-info: %s", response->mHeaders.valueAt(i).c_str());
+ mRTPConn->seekStream();
+
ALOGI("seek completed.");
}
}
@@ -1514,7 +1521,7 @@
TrackInfo *info = &mTracks.editItemAt(trackIndex);
info->mFirstSeqNumInSegment = seq;
info->mNewSegment = true;
- info->mAllowedStaleAccessUnits = kMaxAllowedStaleAccessUnits;
+ info->mAllowedStaleAccessUnits = GetMaxAllowedStaleCount(info->mIsVideo);
CHECK(GetAttribute((*it).c_str(), "rtptime", &val));
@@ -1556,6 +1563,7 @@
int mRTPSocket;
int mRTCPSocket;
bool mUsingInterleavedTCP;
+ bool mIsVideo;
uint32_t mFirstSeqNumInSegment;
bool mNewSegment;
int32_t mAllowedStaleAccessUnits;
@@ -1640,9 +1648,10 @@
info->mURL = trackURL;
info->mPacketSource = source;
info->mUsingInterleavedTCP = false;
+ info->mIsVideo = source->isVideo();
info->mFirstSeqNumInSegment = 0;
info->mNewSegment = true;
- info->mAllowedStaleAccessUnits = kMaxAllowedStaleAccessUnits;
+ info->mAllowedStaleAccessUnits = GetMaxAllowedStaleCount(info->mIsVideo);
info->mRTPSocket = -1;
info->mRTCPSocket = -1;
info->mRTPAnchor = 0;
@@ -1838,11 +1847,12 @@
// by ARTPSource. Only the low 16 bits of seq in RTP-Info of reply of
// RTSP "PLAY" command should be used to detect the first RTP packet
// after seeking.
+ int32_t maxAllowedStaleAccessUnits = GetMaxAllowedStaleCount(track->mIsVideo);
if (mSeekable) {
if (track->mAllowedStaleAccessUnits > 0) {
uint32_t seqNum16 = seqNum & 0xffff;
uint32_t firstSeqNumInSegment16 = track->mFirstSeqNumInSegment & 0xffff;
- if (seqNum16 > firstSeqNumInSegment16 + kMaxAllowedStaleAccessUnits
+ if (seqNum16 > firstSeqNumInSegment16 + maxAllowedStaleAccessUnits
|| seqNum16 < firstSeqNumInSegment16) {
// Not the first rtp packet of the stream after seeking, discarding.
track->mAllowedStaleAccessUnits--;
@@ -1857,7 +1867,7 @@
mNumAccessUnitsReceived = 0;
ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
"Still no first rtp packet after %d stale ones",
- kMaxAllowedStaleAccessUnits);
+ maxAllowedStaleAccessUnits);
track->mAllowedStaleAccessUnits = -1;
return UNKNOWN_ERROR;
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 9e099ce..b9cdab8 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -8220,6 +8220,7 @@
status_t AudioFlinger::RecordThread::shareAudioHistory_l(
const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
int64_t sharedAudioStartMs) {
+
if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
return BAD_VALUE;
}
@@ -8234,18 +8235,21 @@
// after one wraparound
// We assume recent wraparounds on mRsmpInRear only given it is unlikely that the requesting
// app waits several hours after the start time was computed.
- const int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
+ int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
const int32_t sharedOffset = audio_utils::safe_sub_overflow(mRsmpInRear,
(int32_t)sharedAudioStartFrames);
- if (sharedOffset < 0
- || sharedOffset > mRsmpInFrames) {
- return BAD_VALUE;
+ // Bring the start frame position within the input buffer to match the documented
+ // "best effort" behavior of the API.
+ if (sharedOffset < 0) {
+ sharedAudioStartFrames = mRsmpInRear;
+ } else if (sharedOffset > mRsmpInFrames) {
+ sharedAudioStartFrames =
+ audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
}
mSharedAudioPackageName = sharedAudioPackageName;
if (mSharedAudioPackageName.empty()) {
- mSharedAudioSessionId = AUDIO_SESSION_NONE;
- mSharedAudioStartFrames = -1;
+ resetAudioHistory_l();
} else {
mSharedAudioSessionId = sharedSessionId;
mSharedAudioStartFrames = (int32_t)sharedAudioStartFrames;
@@ -8253,6 +8257,12 @@
return NO_ERROR;
}
+void AudioFlinger::RecordThread::resetAudioHistory_l() {
+ mSharedAudioSessionId = AUDIO_SESSION_NONE;
+ mSharedAudioStartFrames = -1;
+ mSharedAudioPackageName = "";
+}
+
void AudioFlinger::RecordThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
@@ -8862,23 +8872,22 @@
int32_t AudioFlinger::RecordThread::getOldestFront_l()
{
if (mTracks.size() == 0) {
- return 0;
+ return mRsmpInRear;
}
int32_t oldestFront = mRsmpInRear;
int32_t maxFilled = 0;
for (size_t i = 0; i < mTracks.size(); i++) {
int32_t front = mTracks[i]->mResamplerBufferProvider->getFront();
int32_t filled;
- if (front <= mRsmpInRear) {
- filled = mRsmpInRear - front;
- } else {
- filled = (int32_t)((int64_t)mRsmpInRear + UINT32_MAX + 1 - front);
- }
+ (void)__builtin_sub_overflow(mRsmpInRear, front, &filled);
if (filled > maxFilled) {
oldestFront = front;
maxFilled = filled;
}
}
+ if (maxFilled > mRsmpInFrames) {
+ (void)__builtin_sub_overflow(mRsmpInRear, mRsmpInFrames, &oldestFront);
+ }
return oldestFront;
}
@@ -8928,7 +8937,7 @@
"resizeInputBuffer_l() called with shared history and unallocated buffer");
size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
// never reduce resampler input buffer size
- if (rsmpInFrames < mRsmpInFrames) {
+ if (rsmpInFrames <= mRsmpInFrames) {
return;
}
mRsmpInFrames = rsmpInFrames;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index eee1f2b..16082a9 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1789,6 +1789,7 @@
status_t shareAudioHistory_l(const std::string& sharedAudioPackageName,
audio_session_t sharedSessionId = AUDIO_SESSION_NONE,
int64_t sharedAudioStartMs = -1);
+ void resetAudioHistory_l();
virtual bool isStreamInitialized() {
return !(mInput == nullptr || mInput->stream == nullptr);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index a6e3c06..d2a30b1 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -2458,7 +2458,7 @@
RecordThread *recordThread = (RecordThread *) thread.get();
priorState = mState;
if (!mSharedAudioPackageName.empty()) {
- recordThread->shareAudioHistory_l("");
+ recordThread->resetAudioHistory_l();
}
recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
}
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index b4b6ddf..9987252 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -675,7 +675,7 @@
sp<AudioRecordClient> client = new AudioRecordClient(attr, input, session, portId,
selectedDeviceId, adjAttributionSource,
canCaptureOutput, canCaptureHotword,
- mAudioCommandThread);
+ mOutputCommandThread);
mAudioRecordClients.add(portId, client);
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 3deea6b..dc101ff 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -237,10 +237,16 @@
}
}
- //Derive primary rear/front cameras, and filter their charactierstics.
- //This needs to be done after all cameras are enumerated and camera ids are sorted.
+ // Derive primary rear/front cameras, and filter their characteristics.
+ // This needs to be done after all cameras are enumerated and camera ids are sorted.
if (SessionConfigurationUtils::IS_PERF_CLASS) {
- filterSPerfClassCharacteristics();
+ // Assume internal cameras are advertised from the same
+ // provider. If multiple providers are registered at different time,
+ // and each provider contains multiple internal color cameras, the current
+ // logic may filter the characteristics of more than one front/rear color
+ // cameras.
+ Mutex::Autolock l(mServiceLock);
+ filterSPerfClassCharacteristicsLocked();
}
return OK;
@@ -313,7 +319,7 @@
filterAPI1SystemCameraLocked(mNormalDeviceIds);
}
-void CameraService::filterSPerfClassCharacteristics() {
+void CameraService::filterSPerfClassCharacteristicsLocked() {
// To claim to be S Performance primary cameras, the cameras must be
// backward compatible. So performance class primary camera Ids must be API1
// compatible.
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 1fb7104..9021170 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -945,9 +945,10 @@
void updateCameraNumAndIds();
/**
- * Filter camera characteristics for S Performance class primary cameras
+ * Filter camera characteristics for S Performance class primary cameras.
+ * mServiceLock should be locked.
*/
- void filterSPerfClassCharacteristics();
+ void filterSPerfClassCharacteristicsLocked();
// File descriptor to temp file used for caching previous open
// session dumpsys info.
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 7045128..4f2b878 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -476,15 +476,16 @@
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
bool preexisting) {
+ status_t res = OK;
std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- addProviderLocked(name, preexisting);
+ res = addProviderLocked(name, preexisting);
}
sp<StatusListener> listener = getStatusListener();
- if (nullptr != listener.get()) {
+ if (nullptr != listener.get() && res == OK) {
listener->onNewProviderRegistered();
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index ab861ad..03b77fc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -657,17 +657,17 @@
size_t remainingBuffers = (mState == STATE_PREPARING ? mTotalBufferCount :
camera_stream::max_buffers) - mHandoutTotalBufferCount;
mLock.unlock();
- std::unique_lock<std::mutex> batchLock(mBatchLock);
nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
- if (mBatchSize == 1) {
+ size_t batchSize = mBatchSize.load();
+ if (batchSize == 1) {
sp<ANativeWindow> anw = consumer;
res = anw->dequeueBuffer(anw.get(), anb, fenceFd);
} else {
+ std::unique_lock<std::mutex> batchLock(mBatchLock);
res = OK;
if (mBatchedBuffers.size() == 0) {
- size_t batchSize = mBatchSize;
if (remainingBuffers == 0) {
ALOGE("%s: cannot get buffer while all buffers are handed out", __FUNCTION__);
return INVALID_OPERATION;
@@ -675,13 +675,17 @@
if (batchSize > remainingBuffers) {
batchSize = remainingBuffers;
}
+ batchLock.unlock();
// Refill batched buffers
- mBatchedBuffers.resize(batchSize);
- res = consumer->dequeueBuffers(&mBatchedBuffers);
+ std::vector<Surface::BatchBuffer> batchedBuffers;
+ batchedBuffers.resize(batchSize);
+ res = consumer->dequeueBuffers(&batchedBuffers);
+ batchLock.lock();
if (res != OK) {
ALOGE("%s: batch dequeueBuffers call failed! %s (%d)",
__FUNCTION__, strerror(-res), res);
- mBatchedBuffers.clear();
+ } else {
+ mBatchedBuffers = std::move(batchedBuffers);
}
}
@@ -692,7 +696,6 @@
mBatchedBuffers.pop_back();
}
}
- batchLock.unlock();
nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
@@ -1129,7 +1132,6 @@
status_t Camera3OutputStream::setBatchSize(size_t batchSize) {
Mutex::Autolock l(mLock);
- std::lock_guard<std::mutex> lock(mBatchLock);
if (batchSize == 0) {
ALOGE("%s: invalid batch size 0", __FUNCTION__);
return BAD_VALUE;
@@ -1145,31 +1147,36 @@
return INVALID_OPERATION;
}
- if (batchSize != mBatchSize) {
- if (mBatchedBuffers.size() != 0) {
- ALOGE("%s: change batch size from %zu to %zu dynamically is not supported",
- __FUNCTION__, mBatchSize, batchSize);
- return INVALID_OPERATION;
- }
-
- if (camera_stream::max_buffers < batchSize) {
- ALOGW("%s: batch size is capped by max_buffers %d", __FUNCTION__,
- camera_stream::max_buffers);
- batchSize = camera_stream::max_buffers;
- }
- mBatchSize = batchSize;
+ if (camera_stream::max_buffers < batchSize) {
+ ALOGW("%s: batch size is capped by max_buffers %d", __FUNCTION__,
+ camera_stream::max_buffers);
+ batchSize = camera_stream::max_buffers;
}
+
+ size_t defaultBatchSize = 1;
+ if (!mBatchSize.compare_exchange_strong(defaultBatchSize, batchSize)) {
+ ALOGE("%s: change batch size from %zu to %zu dynamically is not supported",
+ __FUNCTION__, defaultBatchSize, batchSize);
+ return INVALID_OPERATION;
+ }
+
return OK;
}
void Camera3OutputStream::returnPrefetchedBuffersLocked() {
- std::lock_guard<std::mutex> batchLock(mBatchLock);
- if (mBatchedBuffers.size() != 0) {
- ALOGW("%s: %zu extra prefetched buffers detected. Returning",
- __FUNCTION__, mBatchedBuffers.size());
+ std::vector<Surface::BatchBuffer> batchedBuffers;
- mConsumer->cancelBuffers(mBatchedBuffers);
- mBatchedBuffers.clear();
+ {
+ std::lock_guard<std::mutex> batchLock(mBatchLock);
+ if (mBatchedBuffers.size() != 0) {
+ ALOGW("%s: %zu extra prefetched buffers detected. Returning",
+ __FUNCTION__, mBatchedBuffers.size());
+ batchedBuffers = std::move(mBatchedBuffers);
+ }
+ }
+
+ if (batchedBuffers.size() > 0) {
+ mConsumer->cancelBuffers(batchedBuffers);
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 00e4854..ad03b53 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -312,15 +312,14 @@
bool mDropBuffers;
- // Protecting batch states below, must be acquired after mLock
- std::mutex mBatchLock;
// The batch size for buffer operation
- size_t mBatchSize = 1;
+ std::atomic_size_t mBatchSize = 1;
+ // Protecting batch states below, must be acquired after mLock
+ std::mutex mBatchLock;
// Prefetched buffers (ready to be handed to client)
std::vector<Surface::BatchBuffer> mBatchedBuffers;
-
// ---- End of mBatchLock protected scope ----
/**
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 0d453cf..5fbcadb 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -126,9 +126,9 @@
// Prevent this object from getting deleted before the thread has a chance to create
// its strong pointer. Assume the thread will call decStrong().
this->incStrong(nullptr);
- aaudio_result_t result = getStreamInternal()->createThread_l(periodNanos,
- aaudio_endpoint_thread_proc,
- this);
+ aaudio_result_t result = getStreamInternal()->createThread(periodNanos,
+ aaudio_endpoint_thread_proc,
+ this);
if (result != AAUDIO_OK) {
this->decStrong(nullptr); // Because the thread won't do it.
}