Merge "Revert "restrict binder transactions to audioserver""
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 741d084..2432cac 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -69,7 +69,8 @@
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
}
@@ -96,9 +97,10 @@
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
- mProxy(NULL)
+ mProxy(NULL),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
- (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+ mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
uid, pid, pAttributes, selectedDeviceId);
}
@@ -149,11 +151,6 @@
const audio_attributes_t* pAttributes,
audio_port_handle_t selectedDeviceId)
{
- status_t status = NO_ERROR;
- uint32_t channelCount;
- pid_t callingPid;
- pid_t myPid;
-
ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
"uid %d, pid %d",
@@ -173,8 +170,7 @@
case TRANSFER_CALLBACK:
if (cbf == NULL) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
- status = BAD_VALUE;
- goto exit;
+ return BAD_VALUE;
}
break;
case TRANSFER_OBTAIN:
@@ -182,16 +178,14 @@
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- status = BAD_VALUE;
- goto exit;
+ return BAD_VALUE;
}
mTransfer = transferType;
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
ALOGE("Track already in use");
- status = INVALID_OPERATION;
- goto exit;
+ return INVALID_OPERATION;
}
if (pAttributes == NULL) {
@@ -215,18 +209,16 @@
// AudioFlinger capture only supports linear PCM
if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
ALOGE("Format %#x is not linear pcm", format);
- status = BAD_VALUE;
- goto exit;
+ return BAD_VALUE;
}
mFormat = format;
if (!audio_is_input_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- status = BAD_VALUE;
- goto exit;
+ return BAD_VALUE;
}
mChannelMask = channelMask;
- channelCount = audio_channel_count_from_in_mask(channelMask);
+ uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
mChannelCount = channelCount;
if (audio_is_linear_pcm(format)) {
@@ -235,24 +227,28 @@
mFrameSize = sizeof(uint8_t);
}
- // mFrameCount is initialized in createRecord_l
+ // mFrameCount is initialized in openRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
- // mNotificationFramesAct is initialized in createRecord_l
+ // mNotificationFramesAct is initialized in openRecord_l
- mSessionId = sessionId;
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ } else {
+ mSessionId = sessionId;
+ }
ALOGV("set(): mSessionId %d", mSessionId);
- callingPid = IPCThreadState::self()->getCallingPid();
- myPid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
+ int callingpid = IPCThreadState::self()->getCallingPid();
+ int mypid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingPid != myPid)) {
- mClientPid = callingPid;
+ if (pid == -1 || (callingpid != mypid)) {
+ mClientPid = callingpid;
} else {
mClientPid = pid;
}
@@ -267,7 +263,7 @@
}
// create the IAudioRecord
- status = createRecord_l(0 /*epoch*/, mOpPackageName);
+ status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
if (status != NO_ERROR) {
if (mAudioRecordThread != 0) {
@@ -275,9 +271,10 @@
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
- goto exit;
+ return status;
}
+ mStatus = NO_ERROR;
mUserData = user;
// TODO: add audio hardware input latency here
mLatency = (1000LL * mFrameCount) / mSampleRate;
@@ -292,9 +289,7 @@
mFramesRead = 0;
mFramesReadServerOffset = 0;
-exit:
- mStatus = status;
- return status;
+ return NO_ERROR;
}
// -------------------------------------------------------------------------
@@ -545,29 +540,70 @@
}
// must be called with mLock held
-status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
+status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
- IAudioFlinger::CreateRecordInput input;
- IAudioFlinger::CreateRecordOutput output;
- audio_session_t originalSessionId;
- sp<media::IAudioRecord> record;
- void *iMemPointer;
- audio_track_cblk_t* cblk;
- status_t status;
-
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- status = NO_INIT;
- goto exit;
+ return NO_INIT;
}
+ audio_io_handle_t input;
+
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioRecord is re-created.
+ status_t status;
+
+ // Not a conventional loop, but a retry loop for at most two iterations total.
+ // Try first maybe with FAST flag then try again without FAST flag if that fails.
+ // Exits loop normally via a return at the bottom, or with error via a break.
+ // The sp<> references will be dropped when re-entering scope.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
+ for (;;) {
+ audio_config_base_t config = {
+ .sample_rate = mSampleRate,
+ .channel_mask = mChannelMask,
+ .format = mFormat
+ };
+ mRoutedDeviceId = mSelectedDeviceId;
+ status = AudioSystem::getInputForAttr(&mAttributes, &input,
+ mSessionId,
+ // FIXME compare to AudioTrack
+ mClientPid,
+ mClientUid,
+ &config,
+ mFlags, &mRoutedDeviceId, &mPortId);
+
+ if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
+ "format %#x, channel mask %#x, flags %#x",
+ mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
+ return BAD_VALUE;
+ }
+
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
+#if 0
+ size_t afFrameCount;
+ status = AudioSystem::getFrameCount(input, &afFrameCount);
+ if (status != NO_ERROR) {
+ ALOGE("getFrameCount(input=%d) status %d", input, status);
+ break;
+ }
+#endif
+
+ uint32_t afSampleRate;
+ status = AudioSystem::getSamplingRate(input, &afSampleRate);
+ if (status != NO_ERROR) {
+ ALOGE("getSamplingRate(input=%d) status %d", input, status);
+ break;
+ }
+ if (mSampleRate == 0) {
+ mSampleRate = afSampleRate;
+ }
+
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
@@ -586,41 +622,66 @@
if (!useCaseAllowed) {
ALOGW("AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
convertTransferToText(mTransfer));
+ }
+
+ // sample rates must also match
+ bool sampleRateAllowed = mSampleRate == afSampleRate;
+ if (!sampleRateAllowed) {
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
+ mSampleRate, afSampleRate);
+ }
+
+ bool fastAllowed = useCaseAllowed && sampleRateAllowed;
+ if (!fastAllowed) {
mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
AUDIO_INPUT_FLAG_RAW));
+ AudioSystem::releaseInput(input, mSessionId);
+ continue; // retry
}
}
- input.attr = mAttributes;
- input.config.sample_rate = mSampleRate;
- input.config.channel_mask = mChannelMask;
- input.config.format = mFormat;
- input.clientInfo.clientUid = mClientUid;
- input.clientInfo.clientPid = mClientPid;
- input.clientInfo.clientTid = -1;
- if (mFlags & AUDIO_INPUT_FLAG_FAST) {
- if (mAudioRecordThread != 0) {
- input.clientInfo.clientTid = mAudioRecordThread->getTid();
- }
- }
- input.opPackageName = opPackageName;
-
- input.flags = mFlags;
// The notification frame count is the period between callbacks, as suggested by the client
// but moderated by the server. For record, the calculations are done entirely on server side.
- input.frameCount = mReqFrameCount;
- input.notificationFrameCount = mNotificationFramesReq;
- input.selectedDeviceId = mSelectedDeviceId;
- input.sessionId = mSessionId;
- originalSessionId = mSessionId;
+ size_t notificationFrames = mNotificationFramesReq;
+ size_t frameCount = mReqFrameCount;
- record = audioFlinger->createRecord(input,
- output,
- &status);
+ audio_input_flags_t flags = mFlags;
+
+ pid_t tid = -1;
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ if (mAudioRecordThread != 0) {
+ tid = mAudioRecordThread->getTid();
+ }
+ }
+
+ size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
+ // but we will still need the original value also
+ audio_session_t originalSessionId = mSessionId;
+
+ sp<IMemory> iMem; // for cblk
+ sp<IMemory> bufferMem;
+ sp<media::IAudioRecord> record = audioFlinger->openRecord(input,
+ mSampleRate,
+ mFormat,
+ mChannelMask,
+ opPackageName,
+ &temp,
+ &flags,
+ mClientPid,
+ tid,
+ mClientUid,
+ &mSessionId,
+ &notificationFrames,
+ iMem,
+ bufferMem,
+ &status,
+ mPortId);
+ ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
+ "session ID changed from %d to %d", originalSessionId, mSessionId);
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
- goto exit;
+ break;
}
ALOG_ASSERT(record != 0);
@@ -628,41 +689,41 @@
// so we are no longer responsible for releasing it.
mAwaitBoost = false;
- if (output.flags & AUDIO_INPUT_FLAG_FAST) {
- ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
- mReqFrameCount, output.frameCount);
- mAwaitBoost = true;
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ if (flags & AUDIO_INPUT_FLAG_FAST) {
+ ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
+ mAwaitBoost = true;
+ } else {
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
+ mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
+ AUDIO_INPUT_FLAG_RAW));
+ continue; // retry
+ }
}
- mFlags = output.flags;
- mRoutedDeviceId = output.selectedDeviceId;
- mSessionId = output.sessionId;
- mSampleRate = output.sampleRate;
+ mFlags = flags;
- if (output.cblk == 0) {
+ if (iMem == 0) {
ALOGE("Could not get control block");
- status = NO_INIT;
- goto exit;
+ return NO_INIT;
}
- iMemPointer = output.cblk ->pointer();
+ void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
- status = NO_INIT;
- goto exit;
+ return NO_INIT;
}
- cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
+ audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
// Starting address of buffers in shared memory.
// The buffers are either immediately after the control block,
// or in a separate area at discretion of server.
void *buffers;
- if (output.buffers == 0) {
+ if (bufferMem == 0) {
buffers = cblk + 1;
} else {
- buffers = output.buffers->pointer();
+ buffers = bufferMem->pointer();
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
- status = NO_INIT;
- goto exit;
+ return NO_INIT;
}
}
@@ -672,42 +733,43 @@
mDeathNotifier.clear();
}
mAudioRecord = record;
- mCblkMemory = output.cblk;
- mBufferMemory = output.buffers;
+ mCblkMemory = iMem;
+ mBufferMemory = bufferMem;
IPCThreadState::self()->flushCommands();
mCblk = cblk;
- // note that output.frameCount is the (possibly revised) value of mReqFrameCount
- if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
- ALOGW("Requested frameCount %zu but received frameCount %zu",
- mReqFrameCount, output.frameCount);
+ // note that temp is the (possibly revised) value of frameCount
+ if (temp < frameCount || (frameCount == 0 && temp == 0)) {
+ ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
}
+ frameCount = temp;
// Make sure that application is notified with sufficient margin before overrun.
// The computation is done on server side.
- if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
+ if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
- mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
+ mNotificationFramesReq, notificationFrames, frameCount);
}
- mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+ mNotificationFramesAct = (uint32_t) notificationFrames;
+
//mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
- if (mDeviceCallback != 0 && mInput != output.inputId) {
+ if (mDeviceCallback != 0 && mInput != input) {
if (mInput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mInput);
}
- AudioSystem::addAudioDeviceCallback(this, output.inputId);
+ AudioSystem::addAudioDeviceCallback(this, input);
}
// We retain a copy of the I/O handle, but don't own the reference
- mInput = output.inputId;
+ mInput = input;
mRefreshRemaining = true;
- mFrameCount = output.frameCount;
+ mFrameCount = frameCount;
// If IAudioRecord is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
- if (mFrameCount > mReqFrameCount) {
- mReqFrameCount = mFrameCount;
+ if (frameCount > mReqFrameCount) {
+ mReqFrameCount = frameCount;
}
// update proxy
@@ -718,9 +780,17 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
-exit:
- mStatus = status;
- // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
+ return NO_ERROR;
+
+ // End of retry loop.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
+ }
+
+// Arrive here on error, via a break
+ AudioSystem::releaseInput(input, mSessionId);
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
return status;
}
@@ -1152,12 +1222,12 @@
mFlags = mOrigFlags;
- // if the new IAudioRecord is created, createRecord_l() will modify the
+ // if the new IAudioRecord is created, openRecord_l() will modify the
// following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
// It will also delete the strong references on previous IAudioRecord and IMemory
Modulo<uint32_t> position(mProxy->getPosition());
mNewPosition = position + mUpdatePeriod;
- status_t result = createRecord_l(position, mOpPackageName);
+ status_t result = openRecord_l(position, mOpPackageName);
if (result == NO_ERROR) {
if (mActive) {
// callback thread or sync event hasn't changed
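
The retry loop restored in openRecord_l() above attempts the open at most twice: once with AUDIO_INPUT_FLAG_FAST if requested, and once more with the flag stripped if the fast path is denied locally or by the server. Below is a minimal standalone sketch of that control flow; the tryOpen() helper and the flag values are placeholders for the AudioSystem/AudioFlinger calls, not real APIs.

    #include <cstdio>

    enum Flags { FLAG_NONE = 0, FLAG_FAST = 1, FLAG_RAW = 2 };

    // Hypothetical stand-in for the server-side open; denies FAST on the first call.
    static int tryOpen(int& flags, bool& fastDeniedByServer) {
        if ((flags & FLAG_FAST) && fastDeniedByServer) {
            flags &= ~(FLAG_FAST | FLAG_RAW);   // server clears the flags it rejects
            fastDeniedByServer = false;
            return 0;                           // opened, but without FAST
        }
        return 0;
    }

    // Mirrors the shape of the restored openRecord_l(): at most two iterations,
    // exit via return on success or break on error.
    static int openWithRetry(int requestedFlags) {
        int flags = requestedFlags;
        bool denied = true;                     // pretend the server denies FAST once
        for (;;) {
            int requested = flags;
            if (tryOpen(flags, denied) != 0) {
                break;                          // hard error: release resources below
            }
            if ((requested & FLAG_FAST) && !(flags & FLAG_FAST)) {
                std::printf("FAST denied, retrying without it\n");
                continue;                       // second and final attempt
            }
            std::printf("opened with flags %d\n", flags);
            return 0;
        }
        // error path: in AudioRecord this is where releaseInput() runs
        return -1;
    }

    int main() { return openWithRetry(FLAG_FAST); }
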
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 5db60f3..5cf2bdb 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -30,7 +30,7 @@
enum {
CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
- CREATE_RECORD,
+ OPEN_RECORD,
SAMPLE_RATE,
RESERVED, // obsolete, was CHANNEL_COUNT
FORMAT,
@@ -130,39 +130,102 @@
return track;
}
- virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
- CreateRecordOutput& output,
- status_t *status)
+ virtual sp<media::IAudioRecord> openRecord(
+ audio_io_handle_t input,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& opPackageName,
+ size_t *pFrameCount,
+ audio_input_flags_t *flags,
+ pid_t pid,
+ pid_t tid,
+ int clientUid,
+ audio_session_t *sessionId,
+ size_t *notificationFrames,
+ sp<IMemory>& cblk,
+ sp<IMemory>& buffers,
+ status_t *status,
+ audio_port_handle_t portId)
{
Parcel data, reply;
sp<media::IAudioRecord> record;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-
- if (status == nullptr) {
- return record;
+ data.writeInt32((int32_t) input);
+ data.writeInt32(sampleRate);
+ data.writeInt32(format);
+ data.writeInt32(channelMask);
+ data.writeString16(opPackageName);
+ size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
+ data.writeInt64(frameCount);
+ audio_input_flags_t lFlags = flags != NULL ? *flags : AUDIO_INPUT_FLAG_NONE;
+ data.writeInt32(lFlags);
+ data.writeInt32((int32_t) pid);
+ data.writeInt32((int32_t) tid);
+ data.writeInt32((int32_t) clientUid);
+ audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
+ if (sessionId != NULL) {
+ lSessionId = *sessionId;
}
-
- input.writeToParcel(&data);
-
- status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
+ data.writeInt32(lSessionId);
+ data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
+ data.writeInt32(portId);
+ cblk.clear();
+ buffers.clear();
+ status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
if (lStatus != NO_ERROR) {
- ALOGE("createRecord transaction error %d", lStatus);
- *status = DEAD_OBJECT;
- return record;
+ ALOGE("openRecord error: %s", strerror(-lStatus));
+ } else {
+ frameCount = reply.readInt64();
+ if (pFrameCount != NULL) {
+ *pFrameCount = frameCount;
+ }
+ lFlags = (audio_input_flags_t)reply.readInt32();
+ if (flags != NULL) {
+ *flags = lFlags;
+ }
+ lSessionId = (audio_session_t) reply.readInt32();
+ if (sessionId != NULL) {
+ *sessionId = lSessionId;
+ }
+ size_t lNotificationFrames = (size_t) reply.readInt64();
+ if (notificationFrames != NULL) {
+ *notificationFrames = lNotificationFrames;
+ }
+ lStatus = reply.readInt32();
+ record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
+ cblk = interface_cast<IMemory>(reply.readStrongBinder());
+ if (cblk != 0 && cblk->pointer() == NULL) {
+ cblk.clear();
+ }
+ buffers = interface_cast<IMemory>(reply.readStrongBinder());
+ if (buffers != 0 && buffers->pointer() == NULL) {
+ buffers.clear();
+ }
+ if (lStatus == NO_ERROR) {
+ if (record == 0) {
+ ALOGE("openRecord should have returned an IAudioRecord");
+ lStatus = UNKNOWN_ERROR;
+ } else if (cblk == 0) {
+ ALOGE("openRecord should have returned a cblk");
+ lStatus = NO_MEMORY;
+ }
+ // buffers is permitted to be 0
+ } else {
+ if (record != 0 || cblk != 0 || buffers != 0) {
+ ALOGE("openRecord returned an IAudioRecord, cblk, "
+ "or buffers but with status %d", lStatus);
+ }
+ }
+ if (lStatus != NO_ERROR) {
+ record.clear();
+ cblk.clear();
+ buffers.clear();
+ }
}
- *status = reply.readInt32();
- if (*status != NO_ERROR) {
- ALOGE("createRecord returned error %d", *status);
- return record;
+ if (status != NULL) {
+ *status = lStatus;
}
-
- record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
- if (record == 0) {
- ALOGE("createRecord returned a NULL IAudioRecord with status OK");
- *status = DEAD_OBJECT;
- return record;
- }
- output.readFromParcel(&reply);
return record;
}
@@ -842,7 +905,7 @@
// TODO should select more wisely the items from the list
switch (code) {
case CREATE_TRACK:
- case CREATE_RECORD:
+ case OPEN_RECORD:
case SET_MASTER_VOLUME:
case SET_MASTER_MUTE:
case SET_STREAM_VOLUME:
@@ -885,29 +948,37 @@
output.writeToParcel(reply);
return NO_ERROR;
} break;
- case CREATE_RECORD: {
+ case OPEN_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
-
- CreateRecordInput input;
- if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
- reply->writeInt32(DEAD_OBJECT);
- return NO_ERROR;
- }
-
- status_t status;
- CreateRecordOutput output;
-
- sp<media::IAudioRecord> record = createRecord(input,
- output,
- &status);
-
+ audio_io_handle_t input = (audio_io_handle_t) data.readInt32();
+ uint32_t sampleRate = data.readInt32();
+ audio_format_t format = (audio_format_t) data.readInt32();
+ audio_channel_mask_t channelMask = data.readInt32();
+ const String16& opPackageName = data.readString16();
+ size_t frameCount = data.readInt64();
+ audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
+ pid_t pid = (pid_t) data.readInt32();
+ pid_t tid = (pid_t) data.readInt32();
+ int clientUid = data.readInt32();
+ audio_session_t sessionId = (audio_session_t) data.readInt32();
+ size_t notificationFrames = data.readInt64();
+ audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
+ sp<IMemory> cblk;
+ sp<IMemory> buffers;
+ status_t status = NO_ERROR;
+ sp<media::IAudioRecord> record = openRecord(input,
+ sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
+ pid, tid, clientUid, &sessionId, &notificationFrames, cblk, buffers,
+ &status, portId);
LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
+ reply->writeInt64(frameCount);
+ reply->writeInt32(flags);
+ reply->writeInt32(sessionId);
+ reply->writeInt64(notificationFrames);
reply->writeInt32(status);
- if (status != NO_ERROR) {
- return NO_ERROR;
- }
reply->writeStrongBinder(IInterface::asBinder(record));
- output.writeToParcel(reply);
+ reply->writeStrongBinder(IInterface::asBinder(cblk));
+ reply->writeStrongBinder(IInterface::asBinder(buffers));
return NO_ERROR;
} break;
case SAMPLE_RATE: {
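
Reverting to openRecord() also brings back hand-rolled marshalling: BpAudioFlinger::openRecord() writes each argument with writeInt32/writeInt64 in a fixed order, and the OPEN_RECORD case in onTransact() must read them back in exactly the same order and width, or every later field is misinterpreted. A tiny standalone sketch of that ordering contract, using a FIFO-backed MiniParcel stand-in (an assumption for illustration, not the binder Parcel API):

    #include <cassert>
    #include <cstdint>
    #include <deque>

    // Hypothetical stand-in for android::Parcel: values come back out in the
    // exact order they were written, so proxy and stub must agree field by field.
    struct MiniParcel {
        std::deque<int64_t> fifo;
        void writeInt32(int32_t v) { fifo.push_back(v); }
        void writeInt64(int64_t v) { fifo.push_back(v); }
        int32_t readInt32() { int32_t v = (int32_t)fifo.front(); fifo.pop_front(); return v; }
        int64_t readInt64() { int64_t v = fifo.front(); fifo.pop_front(); return v; }
    };

    int main() {
        // Proxy side: same order as BpAudioFlinger::openRecord() above.
        MiniParcel data;
        data.writeInt32(7);        // input handle
        data.writeInt32(48000);    // sample rate
        data.writeInt64(1024);     // frame count (64-bit, like writeInt64(frameCount))
        data.writeInt32(42);       // port id

        // Stub side: must mirror the order, or every later field is misread.
        assert(data.readInt32() == 7);
        assert(data.readInt32() == 48000);
        assert(data.readInt64() == 1024);
        assert(data.readInt32() == 42);
        return 0;
    }
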
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index 7572671..50ce78f 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -16,7 +16,6 @@
package android.media;
-/* Native code must specify namespace media (media::IAudioRecord) when referring to this class */
interface IAudioRecord {
/* After it's created the track is not active. Call start() to
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 247af9e..108e326 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -19,13 +19,12 @@
#define ANDROID_AUDIO_CLIENT_H
#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include <system/audio.h>
#include <utils/String16.h>
namespace android {
-class AudioClient : public Parcelable {
+class AudioClient {
public:
AudioClient() :
clientUid(-1), clientPid(-1), clientTid(-1), packageName("") {}
@@ -35,7 +34,7 @@
pid_t clientTid;
String16 packageName;
- status_t readFromParcel(const Parcel *parcel) override {
+ status_t readFromParcel(Parcel *parcel) {
clientUid = parcel->readInt32();
clientPid = parcel->readInt32();
clientTid = parcel->readInt32();
@@ -43,7 +42,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const override {
+ status_t writeToParcel(Parcel *parcel) const {
parcel->writeInt32(clientUid);
parcel->writeInt32(clientPid);
parcel->writeInt32(clientTid);
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 00c2a88..51596a2 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -570,7 +570,7 @@
// caller must hold lock on mLock for all _l methods
- status_t createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
+ status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
// FIXME enum is faster than strcmp() for parameter 'from'
status_t restoreRecord_l(const char *from);
@@ -682,6 +682,7 @@
// May not match the app selection depending on other
// activity and connected devices
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ audio_port_handle_t mPortId; // unique ID allocated by audio policy
};
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 24a6e22..66601da 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -231,7 +231,7 @@
audio_stream_type_t stream,
audio_session_t session);
- // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
+ // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
// or release it with releaseInput().
static status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 57d9778..9061c26 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -25,7 +25,6 @@
#include <utils/Errors.h>
#include <binder/IInterface.h>
#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include <media/AudioClient.h>
#include <media/IAudioTrack.h>
#include <media/IAudioFlingerClient.h>
@@ -51,9 +50,9 @@
* when calling createTrack() including arguments that will be updated by AudioFlinger
* and returned in CreateTrackOutput object
*/
- class CreateTrackInput : public Parcelable {
+ class CreateTrackInput {
public:
- status_t readFromParcel(const Parcel *parcel) override {
+ status_t readFromParcel(Parcel *parcel) {
/* input arguments*/
memset(&attr, 0, sizeof(audio_attributes_t));
if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
@@ -64,9 +63,7 @@
if (parcel->read(&config, sizeof(audio_config_t)) != NO_ERROR) {
return DEAD_OBJECT;
}
- if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
- return DEAD_OBJECT;
- }
+ (void)clientInfo.readFromParcel(parcel);
if (parcel->readInt32() != 0) {
sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
@@ -85,7 +82,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const override {
+ status_t writeToParcel(Parcel *parcel) const {
/* input arguments*/
(void)parcel->write(&attr, sizeof(audio_attributes_t));
(void)parcel->write(&config, sizeof(audio_config_t));
@@ -128,9 +125,9 @@
* when calling createTrack() including arguments that were passed as I/O for update by
* CreateTrackInput.
*/
- class CreateTrackOutput : public Parcelable {
+ class CreateTrackOutput {
public:
- status_t readFromParcel(const Parcel *parcel) override {
+ status_t readFromParcel(Parcel *parcel) {
/* input/output arguments*/
(void)parcel->read(&flags, sizeof(audio_output_flags_t));
frameCount = parcel->readInt64();
@@ -147,7 +144,7 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const override {
+ status_t writeToParcel(Parcel *parcel) const {
/* input/output arguments*/
(void)parcel->write(&flags, sizeof(audio_output_flags_t));
(void)parcel->writeInt64(frameCount);
@@ -179,140 +176,6 @@
audio_io_handle_t outputId;
};
- /* CreateRecordInput contains all input arguments sent by AudioRecord to AudioFlinger
- * when calling createRecord() including arguments that will be updated by AudioFlinger
- * and returned in CreateRecordOutput object
- */
- class CreateRecordInput : public Parcelable {
- public:
- status_t readFromParcel(const Parcel *parcel) override {
- /* input arguments*/
- memset(&attr, 0, sizeof(audio_attributes_t));
- if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
- return DEAD_OBJECT;
- }
- attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE -1] = '\0';
- memset(&config, 0, sizeof(audio_config_base_t));
- if (parcel->read(&config, sizeof(audio_config_base_t)) != NO_ERROR) {
- return DEAD_OBJECT;
- }
- if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
- return DEAD_OBJECT;
- }
- opPackageName = parcel->readString16();
-
- /* input/output arguments*/
- (void)parcel->read(&flags, sizeof(audio_input_flags_t));
- frameCount = parcel->readInt64();
- notificationFrameCount = parcel->readInt64();
- (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
- (void)parcel->read(&sessionId, sizeof(audio_session_t));
- return NO_ERROR;
- }
-
- status_t writeToParcel(Parcel *parcel) const override {
- /* input arguments*/
- (void)parcel->write(&attr, sizeof(audio_attributes_t));
- (void)parcel->write(&config, sizeof(audio_config_base_t));
- (void)clientInfo.writeToParcel(parcel);
- (void)parcel->writeString16(opPackageName);
-
- /* input/output arguments*/
- (void)parcel->write(&flags, sizeof(audio_input_flags_t));
- (void)parcel->writeInt64(frameCount);
- (void)parcel->writeInt64(notificationFrameCount);
- (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
- (void)parcel->write(&sessionId, sizeof(audio_session_t));
- return NO_ERROR;
- }
-
- /* input */
- audio_attributes_t attr;
- audio_config_base_t config;
- AudioClient clientInfo;
- String16 opPackageName;
-
- /* input/output */
- audio_input_flags_t flags;
- size_t frameCount;
- size_t notificationFrameCount;
- audio_port_handle_t selectedDeviceId;
- audio_session_t sessionId;
- };
-
- /* CreateRecordOutput contains all output arguments returned by AudioFlinger to AudioRecord
- * when calling createRecord() including arguments that were passed as I/O for update by
- * CreateRecordInput.
- */
- class CreateRecordOutput : public Parcelable {
- public:
- status_t readFromParcel(const Parcel *parcel) override {
- /* input/output arguments*/
- (void)parcel->read(&flags, sizeof(audio_input_flags_t));
- frameCount = parcel->readInt64();
- notificationFrameCount = parcel->readInt64();
- (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
- (void)parcel->read(&sessionId, sizeof(audio_session_t));
-
- /* output arguments*/
- sampleRate = parcel->readUint32();
- (void)parcel->read(&inputId, sizeof(audio_io_handle_t));
- if (parcel->readInt32() != 0) {
- cblk = interface_cast<IMemory>(parcel->readStrongBinder());
- if (cblk == 0 || cblk->pointer() == NULL) {
- return BAD_VALUE;
- }
- }
- if (parcel->readInt32() != 0) {
- buffers = interface_cast<IMemory>(parcel->readStrongBinder());
- if (buffers == 0 || buffers->pointer() == NULL) {
- return BAD_VALUE;
- }
- }
- return NO_ERROR;
- }
-
- status_t writeToParcel(Parcel *parcel) const override {
- /* input/output arguments*/
- (void)parcel->write(&flags, sizeof(audio_input_flags_t));
- (void)parcel->writeInt64(frameCount);
- (void)parcel->writeInt64(notificationFrameCount);
- (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
- (void)parcel->write(&sessionId, sizeof(audio_session_t));
-
- /* output arguments*/
- (void)parcel->writeUint32(sampleRate);
- (void)parcel->write(&inputId, sizeof(audio_io_handle_t));
- if (cblk != 0) {
- (void)parcel->writeInt32(1);
- (void)parcel->writeStrongBinder(IInterface::asBinder(cblk));
- } else {
- (void)parcel->writeInt32(0);
- }
- if (buffers != 0) {
- (void)parcel->writeInt32(1);
- (void)parcel->writeStrongBinder(IInterface::asBinder(buffers));
- } else {
- (void)parcel->writeInt32(0);
- }
-
- return NO_ERROR;
- }
-
- /* input/output */
- audio_input_flags_t flags;
- size_t frameCount;
- size_t notificationFrameCount;
- audio_port_handle_t selectedDeviceId;
- audio_session_t sessionId;
-
- /* output */
- uint32_t sampleRate;
- audio_io_handle_t inputId;
- sp<IMemory> cblk;
- sp<IMemory> buffers;
- };
-
// invariant on exit for all APIs that return an sp<>:
// (return value != 0) == (*status == NO_ERROR)
@@ -323,9 +186,26 @@
CreateTrackOutput& output,
status_t *status) = 0;
- virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
- CreateRecordOutput& output,
- status_t *status) = 0;
+ virtual sp<media::IAudioRecord> openRecord(
+ // On successful return, AudioFlinger takes over the handle
+ // reference and will release it when the track is destroyed.
+ // However on failure, the client is responsible for release.
+ audio_io_handle_t input,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& callingPackage,
+ size_t *pFrameCount,
+ audio_input_flags_t *flags,
+ pid_t pid,
+ pid_t tid, // -1 means unused, otherwise must be valid non-0
+ int clientUid,
+ audio_session_t *sessionId,
+ size_t *notificationFrames,
+ sp<IMemory>& cblk,
+ sp<IMemory>& buffers, // return value 0 means it follows cblk
+ status_t *status,
+ audio_port_handle_t portId) = 0;
// FIXME Surprisingly, format/latency don't work for input handles
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
index c290aec..7b0f341 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
@@ -430,7 +430,15 @@
}
- if(bChange){
+ // During operating mode transition, there is a race condition where the mode
+ // is still LVEQNB_ON, but the effect is considered disabled in the upper layers.
+ // modeChange handles this special race condition.
+ const int /* bool */ modeChange = pParams->OperatingMode != OperatingModeSave
+ || (OperatingModeSave == LVEQNB_ON
+ && pInstance->bInOperatingModeTransition
+ && LVC_Mixer_GetTarget(&pInstance->BypassMixer.MixerStream[0]) == 0);
+
+ if (bChange || modeChange) {
/*
* If the sample rate has changed clear the history
@@ -462,8 +470,7 @@
LVEQNB_SetCoefficients(pInstance); /* Instance pointer */
}
- if(pParams->OperatingMode != OperatingModeSave)
- {
+ if (modeChange) {
if(pParams->OperatingMode == LVEQNB_ON)
{
#ifdef BUILD_FLOAT
@@ -479,6 +486,8 @@
else
{
/* Stay on the ON operating mode until the transition is done */
+ // This may introduce a state race condition if the effect is enabled again
+ // while in transition. This is fixed in the modeChange logic.
pInstance->Params.OperatingMode = LVEQNB_ON;
#ifdef BUILD_FLOAT
LVC_Mixer_SetTarget(&pInstance->BypassMixer.MixerStream[0], 0.0f);
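
The modeChange predicate introduced above can be read as: redo the operating-mode handling when the requested mode actually differs, or when the saved mode still reads LVEQNB_ON but the bypass mixer has already been asked to fade to zero during a transition (the race described in the comment). A small sketch of just that boolean, with dummy values standing in for the LVM instance state:

    #include <cassert>

    enum Mode { LVEQNB_OFF = 0, LVEQNB_ON = 1 };

    // Same shape as the predicate in LVEQNB_Control(): the second clause catches the
    // case where the mode still reads ON but the effect is already fading out.
    static bool needsModeHandling(Mode requested, Mode saved,
                                  bool inTransition, float bypassMixerTarget) {
        return requested != saved ||
               (saved == LVEQNB_ON && inTransition && bypassMixerTarget == 0.0f);
    }

    int main() {
        assert(needsModeHandling(LVEQNB_ON, LVEQNB_OFF, false, 1.0f));  // plain mode switch
        assert(needsModeHandling(LVEQNB_ON, LVEQNB_ON, true, 0.0f));    // re-enable mid-transition
        assert(!needsModeHandling(LVEQNB_ON, LVEQNB_ON, false, 1.0f));  // nothing to do
        return 0;
    }
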
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 146e9e8..8ebae11 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -3330,14 +3330,19 @@
//ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
- // 2 is for stereo input
+
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- for (size_t i=0; i < outBuffer->frameCount*2; i++){
- outBuffer->s16[i] =
- clamp16((LVM_INT32)outBuffer->s16[i] + (LVM_INT32)inBuffer->s16[i]);
+ for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+#ifdef NATIVE_FLOAT_BUFFER
+ outBuffer->f32[i] += inBuffer->f32[i];
+#else
+ outBuffer->s16[i] = clamp16((LVM_INT32)outBuffer->s16[i] + inBuffer->s16[i]);
+#endif
}
} else if (outBuffer->raw != inBuffer->raw) {
- memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount*sizeof(LVM_INT16)*2);
+ memcpy(outBuffer->raw,
+ inBuffer->raw,
+ outBuffer->frameCount * sizeof(effect_buffer_t) * FCC_2);
}
}
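
The EffectBundle change above makes the bypass path honor both sample layouts: under NATIVE_FLOAT_BUFFER the accumulate mode is a plain float add, otherwise the 16-bit samples are widened to 32 bits, summed, and saturated. A minimal sketch of the two accumulate variants; clamp16 is re-declared locally here for illustration, the real helper comes from the LVM headers.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Local re-declaration for the sketch; the real helper lives in the LVM headers.
    static int16_t clamp16(int32_t v) {
        if (v > INT16_MAX) return INT16_MAX;
        if (v < INT16_MIN) return INT16_MIN;
        return (int16_t)v;
    }

    static void accumulateS16(int16_t* out, const int16_t* in, size_t samples) {
        for (size_t i = 0; i < samples; ++i) {
            out[i] = clamp16((int32_t)out[i] + in[i]);   // widen, add, saturate
        }
    }

    static void accumulateF32(float* out, const float* in, size_t samples) {
        for (size_t i = 0; i < samples; ++i) {
            out[i] += in[i];                             // float path: no clamping here
        }
    }

    int main() {
        int16_t out16[2] = { 30000, -30000 };
        int16_t in16[2]  = { 30000, -30000 };
        accumulateS16(out16, in16, 2);
        std::printf("%d %d\n", out16[0], out16[1]);      // 32767 -32768, saturated

        float outF[2] = { 0.5f, -0.5f };
        float inF[2]  = { 0.75f, -0.75f };
        accumulateF32(outF, inF, 2);
        std::printf("%f %f\n", outF[0], outF[1]);
        return 0;
    }
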
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 0e82339..c33f9f5 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -594,7 +594,7 @@
deltaSmpl = CAPTURE_BUF_SIZE;
}
- int32_t capturePoint = pContext->mCaptureIdx - deltaSmpl;
+ int32_t capturePoint = (int32_t)pContext->mCaptureIdx - deltaSmpl;
// a negative capturePoint means we wrap the buffer.
if (capturePoint < 0) {
uint32_t size = -capturePoint;
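
For context on the capturePoint arithmetic touched above: mCaptureIdx is the write position in a circular capture buffer, and a negative capturePoint means the requested window starts before index 0 and wraps to the end of the buffer. A small standalone sketch of that wrap-around read; the buffer size and contents are made up for the example.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    constexpr int32_t kBufSize = 8;   // stand-in for CAPTURE_BUF_SIZE

    // Copy the 'want' samples that end just before writeIdx, wrapping if the
    // window starts before the beginning of the circular buffer.
    static void readLatest(const uint8_t (&buf)[kBufSize], int32_t writeIdx,
                           int32_t want, uint8_t* out) {
        int32_t capturePoint = writeIdx - want;
        if (capturePoint < 0) {                 // window wraps: copy the tail first
            int32_t size = -capturePoint;
            std::memcpy(out, buf + kBufSize - size, size);
            out += size;
            want -= size;
            capturePoint = 0;
        }
        std::memcpy(out, buf + capturePoint, want);
    }

    int main() {
        uint8_t buf[kBufSize] = { 10, 11, 12, 13, 14, 15, 16, 17 };
        uint8_t out[5];
        readLatest(buf, /*writeIdx=*/2, /*want=*/5, out);  // wraps: 15 16 17 10 11
        for (uint8_t v : out) std::printf("%d ", v);
        std::printf("\n");
        return 0;
    }
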
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index 062a07a..74d8ee8 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -34,7 +34,7 @@
: BpInterface<IMediaHTTPService>(impl) {
}
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() {
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
Parcel data, reply;
data.writeInterfaceToken(
IMediaHTTPService::getInterfaceDescriptor());
diff --git a/media/libmedia/include/media/IMediaHTTPConnection.h b/media/libmedia/include/media/IMediaHTTPConnection.h
index 2a63eb7..0fb6bb1 100644
--- a/media/libmedia/include/media/IMediaHTTPConnection.h
+++ b/media/libmedia/include/media/IMediaHTTPConnection.h
@@ -19,16 +19,15 @@
#define I_MEDIA_HTTP_CONNECTION_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPConnection.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/KeyedVector.h>
namespace android {
-struct IMediaHTTPConnection;
-
/** MUST stay in sync with IMediaHTTPConnection.aidl */
-struct IMediaHTTPConnection : public IInterface {
+struct IMediaHTTPConnection : public MediaHTTPConnection, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPConnection);
virtual bool connect(
diff --git a/media/libmedia/include/media/IMediaHTTPService.h b/media/libmedia/include/media/IMediaHTTPService.h
index f66d6c8..e948b78 100644
--- a/media/libmedia/include/media/IMediaHTTPService.h
+++ b/media/libmedia/include/media/IMediaHTTPService.h
@@ -19,18 +19,19 @@
#define I_MEDIA_HTTP_SERVICE_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABase.h>
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
/** MUST stay in sync with IMediaHTTPService.aidl */
-struct IMediaHTTPService : public IInterface {
+struct IMediaHTTPService : public MediaHTTPService, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPService);
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() = 0;
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
private:
DISALLOW_EVIL_CONSTRUCTORS(IMediaHTTPService);
diff --git a/media/libmedia/include/media/MediaHTTPConnection.h b/media/libmedia/include/media/MediaHTTPConnection.h
new file mode 100644
index 0000000..82a79e5
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPConnection.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_CONNECTION_H_
+
+#define MEDIA_HTTP_CONNECTION_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+struct MediaHTTPConnection : public virtual RefBase {
+ MediaHTTPConnection() {}
+
+ virtual bool connect(
+ const char *uri, const KeyedVector<String8, String8> *headers) = 0;
+
+ virtual void disconnect() = 0;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+ virtual off64_t getSize() = 0;
+ virtual status_t getMIMEType(String8 *mimeType) = 0;
+ virtual status_t getUri(String8 *uri) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPConnection);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_CONNECTION_H_
diff --git a/media/libmedia/include/media/MediaHTTPService.h b/media/libmedia/include/media/MediaHTTPService.h
new file mode 100644
index 0000000..6e9f125
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPService.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_SERVICE_H_
+
+#define MEDIA_HTTP_SERVICE_H_
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct MediaHTTPConnection;
+
+struct MediaHTTPService : public virtual RefBase {
+ MediaHTTPService() {}
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPService);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_SERVICE_H_
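
The two new headers above let libstagefright code depend on plain abstract MediaHTTPService/MediaHTTPConnection interfaces rather than the binder IMediaHTTP* classes. As a usage illustration, here is a sketch of an in-process service/connection pair written against simplified local re-declarations of those interfaces; the Fake* classes, the std::shared_ptr/std::string stand-ins, and the trimmed-down method set are assumptions for the example, not AOSP code.

    #include <algorithm>
    #include <cstdio>
    #include <cstring>
    #include <memory>
    #include <string>
    #include <sys/types.h>

    // Simplified local stand-ins for media/MediaHTTPConnection.h and
    // media/MediaHTTPService.h: std::shared_ptr/std::string replace sp<>/String8.
    struct MediaHTTPConnection {
        virtual ~MediaHTTPConnection() = default;
        virtual bool connect(const char* uri) = 0;
        virtual ssize_t readAt(int64_t offset, void* data, size_t size) = 0;
        virtual void disconnect() = 0;
    };

    struct MediaHTTPService {
        virtual ~MediaHTTPService() = default;
        virtual std::shared_ptr<MediaHTTPConnection> makeHTTPConnection() = 0;
    };

    // A purely in-process implementation; no binder involved, which is the point
    // of the revert: stagefright code only needs the abstract interface.
    struct FakeConnection : MediaHTTPConnection {
        std::string body = "hello";
        bool connect(const char* uri) override { std::printf("connect %s\n", uri); return true; }
        ssize_t readAt(int64_t offset, void* data, size_t size) override {
            if (offset >= (int64_t)body.size()) return 0;
            size_t n = std::min(size, body.size() - (size_t)offset);
            std::memcpy(data, body.data() + offset, n);
            return (ssize_t)n;
        }
        void disconnect() override {}
    };

    struct FakeService : MediaHTTPService {
        std::shared_ptr<MediaHTTPConnection> makeHTTPConnection() override {
            return std::make_shared<FakeConnection>();
        }
    };

    int main() {
        std::shared_ptr<MediaHTTPService> service = std::make_shared<FakeService>();
        auto conn = service->makeHTTPConnection();   // what DataSourceFactory does
        conn->connect("http://example.com/clip.mp4");
        char buf[8] = {};
        conn->readAt(0, buf, sizeof(buf) - 1);
        std::printf("read: %s\n", buf);
        return 0;
    }
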
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index fc9e53c..116b548 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -49,7 +49,7 @@
int index, int colorFormat, bool metaOnly) = 0;
virtual status_t getFrameAtIndex(
std::vector<VideoFrame*>* frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libstagefright/DataSourceFactory.cpp b/media/libstagefright/DataSourceFactory.cpp
index aee858c..54bf0cc 100644
--- a/media/libstagefright/DataSourceFactory.cpp
+++ b/media/libstagefright/DataSourceFactory.cpp
@@ -19,8 +19,8 @@
#include "include/HTTPBase.h"
#include "include/NuCachedSource2.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/DataURISource.h>
#include <media/stagefright/FileSource.h>
@@ -31,7 +31,7 @@
// static
sp<DataSource> DataSourceFactory::CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers,
String8 *contentType,
@@ -50,7 +50,7 @@
}
if (httpSource == NULL) {
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
if (conn == NULL) {
ALOGE("Failed to make http connection from http service!");
return NULL;
@@ -101,12 +101,12 @@
return source->initCheck() != OK ? nullptr : source;
}
-sp<DataSource> DataSourceFactory::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) {
+sp<DataSource> DataSourceFactory::CreateMediaHTTP(const sp<MediaHTTPService> &httpService) {
if (httpService == NULL) {
return NULL;
}
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
if (conn == NULL) {
return NULL;
} else {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1fe5f60..8db00f0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2951,212 +2951,215 @@
mGotStartKeyFrame = true;
}
////////////////////////////////////////////////////////////////////////////////
- if (mStszTableEntries->count() == 0) {
- mFirstSampleTimeRealUs = systemTime() / 1000;
- mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
- previousPausedDurationUs = mStartTimestampUs;
- }
- if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
- mResumed = false;
- }
- TimestampDebugHelperEntry timestampDebugEntry;
- timestampUs -= previousPausedDurationUs;
- timestampDebugEntry.pts = timestampUs;
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- if (mIsVideo) {
- /*
- * Composition time: timestampUs
- * Decoding time: decodingTimeUs
- * Composition time offset = composition time - decoding time
- */
- int64_t decodingTimeUs;
- CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
- decodingTimeUs -= previousPausedDurationUs;
-
- // ensure non-negative, monotonic decoding time
- if (mLastDecodingTimeUs < 0) {
- decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
- } else {
- // increase decoding time by at least the larger vaule of 1 tick and
- // 0.1 milliseconds. This needs to take into account the possible
- // delta adjustment in DurationTicks in below.
- decodingTimeUs = std::max(mLastDecodingTimeUs +
- std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
- }
-
- mLastDecodingTimeUs = decodingTimeUs;
- timestampDebugEntry.dts = decodingTimeUs;
- timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
- // Insert the timestamp into the mTimestampDebugHelper
- if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
- mTimestampDebugHelper.pop_front();
- }
- mTimestampDebugHelper.push_back(timestampDebugEntry);
-
- cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- timestampUs = decodingTimeUs;
- ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
- timestampUs, cttsOffsetTimeUs);
-
- // Update ctts box table if necessary
- currCttsOffsetTimeTicks =
- (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
+ if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
- // Force the first ctts table entry to have one single entry
- // so that we can do adjustment for the initial track start
- // time offset easily in writeCttsBox().
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
- cttsSampleCount = 0; // No sample in ctts box is pending
- } else {
- if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- cttsSampleCount = 1; // One sample in ctts box is pending
+ mFirstSampleTimeRealUs = systemTime() / 1000;
+ mStartTimestampUs = timestampUs;
+ mOwner->setStartTimestampUs(mStartTimestampUs);
+ previousPausedDurationUs = mStartTimestampUs;
+ }
+
+ if (mResumed) {
+ int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+ mResumed = false;
+ }
+ TimestampDebugHelperEntry timestampDebugEntry;
+ timestampUs -= previousPausedDurationUs;
+ timestampDebugEntry.pts = timestampUs;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mIsVideo) {
+ /*
+ * Composition time: timestampUs
+ * Decoding time: decodingTimeUs
+ * Composition time offset = composition time - decoding time
+ */
+ int64_t decodingTimeUs;
+ CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+ decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- ++cttsSampleCount;
+ // increase decoding time by at least the larger value of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+ // delta adjustment in DurationTicks in below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
- }
- // Update ctts time offset range
- if (mStszTableEntries->count() == 0) {
- mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else {
- if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mLastDecodingTimeUs = decodingTimeUs;
+ timestampDebugEntry.dts = decodingTimeUs;
+ timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ timestampUs = decodingTimeUs;
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mStszTableEntries->count() == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mStszTableEntries->count() == 0) {
mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+ mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ }
}
}
- }
- if (mOwner->isRealTimeRecording()) {
- if (mIsAudio) {
- updateDriftTime(meta_data);
- }
- }
-
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
- trackName, timestampUs, previousPausedDurationUs);
- if (timestampUs > mTrackDurationUs) {
- mTrackDurationUs = timestampUs;
- }
-
- // We need to use the time scale based ticks, rather than the
- // timestamp itself to determine whether we have to use a new
- // stts entry, since we may have rounding errors.
- // The calculation is intended to reduce the accumulated
- // rounding errors.
- currDurationTicks =
- ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
- (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
- if (currDurationTicks < 0ll) {
- ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
- (long long)timestampUs, (long long)lastTimestampUs, trackName);
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- // if the duration is different for this sample, see if it is close enough to the previous
- // duration that we can fudge it and use the same value, to avoid filling the stts table
- // with lots of near-identical entries.
- // "close enough" here means that the current duration needs to be adjusted by less
- // than 0.1 milliseconds
- if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
- int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
- + (mTimeScale / 2)) / mTimeScale;
- if (deltaUs > -100 && deltaUs < 100) {
- // use previous ticks, and adjust timestamp as if it was actually that number
- // of ticks
- currDurationTicks = lastDurationTicks;
- timestampUs += deltaUs;
- }
- }
- mStszTableEntries->add(htonl(sampleSize));
- if (mStszTableEntries->count() > 2) {
-
- // Force the first sample to have its own stts entry so that
- // we can adjust its value later to maintain the A/V sync.
- if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- sampleCount = 1;
- } else {
- ++sampleCount;
+ if (mOwner->isRealTimeRecording()) {
+ if (mIsAudio) {
+ updateDriftTime(meta_data);
+ }
}
- }
- if (mSamplesHaveSameSize) {
- if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
- mSamplesHaveSameSize = false;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
- previousSampleSize = sampleSize;
- }
- ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
- trackName, timestampUs, lastTimestampUs);
- lastDurationUs = timestampUs - lastTimestampUs;
- lastDurationTicks = currDurationTicks;
- lastTimestampUs = timestampUs;
- if (isSync != 0) {
- addOneStssTableEntry(mStszTableEntries->count());
- }
-
- if (mTrackingProgressStatus) {
- if (mPreviousTrackTimeUs <= 0) {
- mPreviousTrackTimeUs = mStartTimestampUs;
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
+ if (timestampUs > mTrackDurationUs) {
+ mTrackDurationUs = timestampUs;
}
- trackProgressStatus(timestampUs);
+
+ // We need to use the time scale based ticks, rather than the
+ // timestamp itself to determine whether we have to use a new
+ // stts entry, since we may have rounding errors.
+ // The calculation is intended to reduce the accumulated
+ // rounding errors.
+ currDurationTicks =
+ ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+ (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ if (currDurationTicks < 0ll) {
+ ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+ (long long)timestampUs, (long long)lastTimestampUs, trackName);
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+ mStszTableEntries->add(htonl(sampleSize));
+ if (mStszTableEntries->count() > 2) {
+
+ // Force the first sample to have its own stts entry so that
+ // we can adjust its value later to maintain the A/V sync.
+ if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ sampleCount = 1;
+ } else {
+ ++sampleCount;
+ }
+
+ }
+ if (mSamplesHaveSameSize) {
+ if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+ mSamplesHaveSameSize = false;
+ }
+ previousSampleSize = sampleSize;
+ }
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
+ lastDurationUs = timestampUs - lastTimestampUs;
+ lastDurationTicks = currDurationTicks;
+ lastTimestampUs = timestampUs;
+
+ if (isSync != 0) {
+ addOneStssTableEntry(mStszTableEntries->count());
+ }
+
+ if (mTrackingProgressStatus) {
+ if (mPreviousTrackTimeUs <= 0) {
+ mPreviousTrackTimeUs = mStartTimestampUs;
+ }
+ trackProgressStatus(timestampUs);
+ }
}
if (!hasMultipleTracks) {
size_t bytesWritten;
@@ -4331,9 +4334,12 @@
}
// patch up the mPrimaryItemId and count items with prop associations
+ uint16_t firstVisibleItemId = 0;
for (size_t index = 0; index < mItems.size(); index++) {
if (mItems[index].isPrimary) {
mPrimaryItemId = mItems[index].itemId;
+ } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+ firstVisibleItemId = mItems[index].itemId;
}
if (!mItems[index].properties.empty()) {
@@ -4342,8 +4348,13 @@
}
if (mPrimaryItemId == 0) {
- ALOGW("didn't find primary, using first item");
- mPrimaryItemId = mItems[0].itemId;
+ if (firstVisibleItemId > 0) {
+ ALOGW("didn't find primary, using first visible item");
+ mPrimaryItemId = firstVisibleItemId;
+ } else {
+ ALOGW("no primary and no visible item, using first item");
+ mPrimaryItemId = mItems[0].itemId;
+ }
}
beginBox("meta");
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index a176382..17c9648 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -72,7 +72,7 @@
}
status_t NuMediaExtractor::setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 358c743..32fdbd3 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1170,6 +1170,12 @@
ps_inp_raw_buf->e_color_fmt = mIvVideoColorFormat;
source = NULL;
if ((inputBufferHeader != NULL) && inputBufferHeader->nFilledLen) {
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ return error;
+ }
source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
if (mInputDataIsMeta) {
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 7b90a01..f6a7b0e 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -434,6 +434,14 @@
}
if (inHeader->nFilledLen > 0) {
+ OMX_ERRORTYPE error = validateInputBuffer(inHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ mSignalledError = true;
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *inputData = NULL;
if (mInputDataIsMeta) {
inputData =
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index a5666da..f6257b1 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -653,6 +653,13 @@
return;
}
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/27569635");
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
@@ -668,14 +675,6 @@
return;
}
} else {
- if (inputBufferHeader->nFilledLen < frameSize) {
- android_errorWriteLog(0x534e4554, "27569635");
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- } else if (inputBufferHeader->nFilledLen > frameSize) {
- ALOGW("Input buffer contains too many pixels");
- }
-
if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
ConvertYUV420SemiPlanarToYUV420Planar(
source, mConversionBuffer, mWidth, mHeight);
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 5b18814..84837e8 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -25,11 +25,11 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/Utils.h>
-#include <media/IMediaHTTPConnection.h>
+#include <media/MediaHTTPConnection.h>
namespace android {
-MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
+MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
: mInitCheck((conn != NULL) ? OK : NO_INIT),
mHTTPConnection(conn),
mCachedSizeValid(false),
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 3fef764..72604e3 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -22,8 +22,8 @@
#include "M3UParser.h"
#include <media/DataSource.h>
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaHTTP.h>
@@ -36,7 +36,7 @@
namespace android {
HTTPDownloader::HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers) :
mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
mExtraHeaders(headers),
diff --git a/media/libstagefright/httplive/HTTPDownloader.h b/media/libstagefright/httplive/HTTPDownloader.h
index 1db4a48..0d4bd31 100644
--- a/media/libstagefright/httplive/HTTPDownloader.h
+++ b/media/libstagefright/httplive/HTTPDownloader.h
@@ -28,12 +28,12 @@
struct ABuffer;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct M3UParser;
struct HTTPDownloader : public RefBase {
HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers);
void reconnect();
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 4c2e0d4..1e2e684 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -26,7 +26,7 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include <cutils/properties.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -274,7 +274,7 @@
LiveSession::LiveSession(
const sp<AMessage> ¬ify, uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mHTTPService(httpService),
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index abf8cf0..7a6d487 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -33,7 +33,7 @@
struct AnotherPacketSource;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct LiveDataSource;
struct M3UParser;
struct PlaylistFetcher;
@@ -71,7 +71,7 @@
LiveSession(
const sp<AMessage> ¬ify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void setBufferingSettings(const BufferingSettings &buffering);
@@ -187,7 +187,7 @@
sp<AMessage> mNotify;
uint32_t mFlags;
- sp<IMediaHTTPService> mHTTPService;
+ sp<MediaHTTPService> mHTTPService;
bool mBuffering;
bool mInPreparationPhase;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71d625f..bc3e57c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -898,6 +898,9 @@
}
}
+ if (meta->get() == NULL) {
+ return ERROR_MALFORMED;
+ }
return OK;
}
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h
index 2c4f543..b901c97 100644
--- a/media/libstagefright/include/SDPLoader.h
+++ b/media/libstagefright/include/SDPLoader.h
@@ -25,7 +25,7 @@
namespace android {
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct SDPLoader : public AHandler {
enum Flags {
@@ -38,7 +38,7 @@
SDPLoader(
const sp<AMessage> ¬ify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void load(const char* url, const KeyedVector<String8, String8> *headers);
diff --git a/media/libstagefright/include/media/stagefright/DataSourceFactory.h b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
index 89add13..2a1d491 100644
--- a/media/libstagefright/include/media/stagefright/DataSourceFactory.h
+++ b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
@@ -23,20 +23,20 @@
namespace android {
-struct IMediaHTTPService;
+struct MediaHTTPService;
class String8;
struct HTTPBase;
class DataSourceFactory {
public:
static sp<DataSource> CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers = NULL,
String8 *contentType = NULL,
HTTPBase *httpSource = NULL);
- static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
+ static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
};
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libstagefright/include/media/stagefright/MediaHTTP.h
index 006d8d8..94a2ecd 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libstagefright/include/media/stagefright/MediaHTTP.h
@@ -24,10 +24,10 @@
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
struct MediaHTTP : public HTTPBase {
- MediaHTTP(const sp<IMediaHTTPConnection> &conn);
+ MediaHTTP(const sp<MediaHTTPConnection> &conn);
virtual status_t connect(
const char *uri,
@@ -56,7 +56,7 @@
private:
status_t mInitCheck;
- sp<IMediaHTTPConnection> mHTTPConnection;
+ sp<MediaHTTPConnection> mHTTPConnection;
KeyedVector<String8, String8> mLastHeaders;
AString mLastURI;
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 5af0745..eed0f05 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -34,7 +34,7 @@
struct ABuffer;
struct AMessage;
class DataSource;
-struct IMediaHTTPService;
+struct MediaHTTPService;
class MediaBuffer;
class MediaExtractor;
struct MediaSource;
@@ -54,7 +54,7 @@
NuMediaExtractor();
status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers = NULL);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a70005e..f331dbb 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -46,6 +46,36 @@
namespace android {
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+// expectedNewFrameTimestamp - actualNewFrameTimestamp <
+// (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+// incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+// of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value causes a smaller
+// chance of dropping frames, but at the same time a higher bound on the
+// difference between the source timestamp and the interpreted (snapped)
+// timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
+}
+
/**
* A copiable object managing a buffer in the buffer cache managed by the producer. This object
* holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
@@ -732,14 +762,16 @@
mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = std::llround(
- (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
- if (nFrames <= 0) {
+ double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+ if (nFrames < 0.5 - kTimestampFluctuation) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
return false;
}
- mFrameCount += nFrames;
+ if (nFrames <= 1.0) {
+ nFrames = 1.0;
+ }
+ mFrameCount += std::llround(nFrames);
mPrevCaptureUs = mBaseCaptureUs + std::llround(
mFrameCount * 1000000 / mCaptureFps);
mPrevFrameUs = mBaseFrameUs + std::llround(
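
The snapping change above replaces the integer rounding of the frame count with a fractional count plus a jitter threshold: a frame is dropped only when it lands more than (0.5 - kTimestampFluctuation) of a period short of the next expected capture point, and otherwise advances by at least one period. The following is a minimal standalone sketch of that decision; framesToAdvance() and kJitterFrames are illustrative names, not part of GraphicBufferSource.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Upper bound on allowed timestamp jitter, in frames (mirrors kTimestampFluctuation above).
constexpr double kJitterFrames = 0.05;

// Returns the number of capture periods to advance, or 0 if the frame should be dropped
// because it arrived too close to the previous capture point.
static int64_t framesToAdvance(int64_t timeUs, int64_t prevCaptureUs, double captureFps) {
    double nFrames = (timeUs - prevCaptureUs) * captureFps / 1000000;
    if (nFrames < 0.5 - kJitterFrames) {
        return 0;          // too close to the previous capture: drop
    }
    if (nFrames <= 1.0) {
        nFrames = 1.0;     // never snap backwards: advance by at least one period
    }
    return std::llround(nFrames);
}

int main() {
    const double captureFps = 30.0;
    const int64_t prevCaptureUs = 1000000;

    // A frame arriving 30 ms later advances one period; one arriving 5 ms later is dropped.
    printf("dt=30ms -> advance %lld frame(s)\n",
           (long long)framesToAdvance(prevCaptureUs + 30000, prevCaptureUs, captureFps));
    printf("dt=5ms  -> advance %lld frame(s)\n",
           (long long)framesToAdvance(prevCaptureUs + 5000, prevCaptureUs, captureFps));
    return 0;
}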
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index fa15ab3..2fbbb44 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -664,4 +664,17 @@
return SimpleSoftOMXComponent::getExtensionIndex(name, index);
}
+OMX_ERRORTYPE SoftVideoEncoderOMXComponent::validateInputBuffer(
+ const OMX_BUFFERHEADERTYPE *inputBufferHeader) {
+ size_t frameSize = mInputDataIsMeta ?
+ max(sizeof(VideoNativeMetadata), sizeof(VideoGrallocMetadata))
+ : mWidth * mHeight * 3 / 2;
+ if (inputBufferHeader->nFilledLen < frameSize) {
+ return OMX_ErrorUndefined;
+ } else if (inputBufferHeader->nFilledLen > frameSize) {
+ ALOGW("Input buffer contains more data than expected.");
+ }
+ return OMX_ErrorNone;
+}
+
} // namespace android
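
validateInputBuffer() centralizes the minimum-size check the VPX encoder used to do inline: for raw (non-metadata) input, a YUV 4:2:0 frame must carry at least width * height * 3 / 2 bytes; anything shorter is rejected and anything longer is only logged. A tiny illustrative check outside the OMX component follows; the helper names here are hypothetical.

#include <cstddef>
#include <cstdio>

// Minimum number of bytes a raw YUV 4:2:0 frame must carry: a full-resolution
// luma plane plus two quarter-resolution chroma planes.
static size_t minYuv420FrameSize(size_t width, size_t height) {
    return width * height * 3 / 2;
}

// Mirrors the rejection rule in validateInputBuffer(): shorter buffers are an
// error, longer ones are merely warned about.
static bool inputBufferLargeEnough(size_t filledLen, size_t width, size_t height) {
    return filledLen >= minYuv420FrameSize(width, height);
}

int main() {
    // A 1280x720 frame needs 1,382,400 bytes, so a 1,000,000-byte buffer is rejected.
    printf("720p needs %zu bytes; 1000000-byte buffer ok? %d\n",
           minYuv420FrameSize(1280, 720),
           (int)inputBufferLargeEnough(1000000, 1280, 720));
    return 0;
}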
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
index db5496a..2d6f31b 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
@@ -67,6 +67,8 @@
virtual OMX_ERRORTYPE getExtensionIndex(const char *name, OMX_INDEXTYPE *index);
+ OMX_ERRORTYPE validateInputBuffer(const OMX_BUFFERHEADERTYPE *inputBufferHeader);
+
enum {
kInputPortIndex = 0,
kOutputPortIndex = 1,
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 0f46c83..d459cbd 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -22,8 +22,8 @@
#include "ASessionDescription.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -36,7 +36,7 @@
SDPLoader::SDPLoader(
const sp<AMessage> ¬ify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mNetLooper(new ALooper),
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 2395f4f..f3f9720 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -45,6 +45,8 @@
MtpObjectFormat format,
bool succeeded) = 0;
+ virtual void doScanDirectory(const char* path) = 0;
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent) = 0;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6080868..bb0414d 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1148,6 +1148,7 @@
ALOGV("Copying file from %s to %s", (const char*)fromPath, (const char*)path);
if (format == MTP_FORMAT_ASSOCIATION) {
int ret = makeFolder((const char *)path);
+ ret += copyRecursive(fromPath, path);
if (ret) {
result = MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1158,6 +1159,8 @@
}
mDatabase->endSendObject(path, handle, format, result);
+ if (format == MTP_FORMAT_ASSOCIATION)
+ mDatabase->doScanDirectory(path);
mResponse.setParameter(1, handle);
return result;
}
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 11dedbb..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
enum {
kWhatActivityNotify,
+ kWhatAsyncNotify,
kWhatRequestActivityNotifications,
kWhatStopActivityNotifications,
};
@@ -88,6 +89,11 @@
bool mRequestedActivityNotification;
OnCodecEvent mCallback;
void *mCallbackUserData;
+
+ sp<AMessage> mAsyncNotify;
+ mutable Mutex mAsyncCallbackLock;
+ AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+ void *mAsyncCallbackUserData;
};
CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
break;
}
+ case kWhatAsyncNotify:
+ {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatAsyncNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncInputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ AMediaCodecBufferInfo bufferInfo = {
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncOutputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index,
+ &bufferInfo);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ mCodec->mAsyncCallback.onAsyncFormatChanged(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ aMediaFormat);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_ERROR:
+ {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallback.onAsyncError != NULL) {
+ mCodec->mAsyncCallback.onAsyncError(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ translate_error(err),
+ actionCode,
+ detail.c_str());
+ }
+
+ break;
+ }
+
+ default:
+ {
+ ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
case kWhatStopActivityNotifications:
{
sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
size_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
- PRIORITY_FOREGROUND);
+ PRIORITY_AUDIO);
if (res != OK) {
ALOGE("Failed to start the looper");
AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
mData->mRequestedActivityNotification = false;
mData->mCallback = NULL;
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = NULL;
+
return mData;
}
@@ -222,6 +372,32 @@
}
EXPORT
+media_status_t AMediaCodec_getName(
+ AMediaCodec *mData,
+ char** out_name) {
+ if (out_name == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AString compName;
+ status_t err = mData->mCodec->getName(&compName);
+ if (err != OK) {
+ return translate_error(err);
+ }
+ *out_name = strdup(compName.c_str());
+ return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+ AMediaCodec * /* mData */,
+ char* name) {
+ if (name != NULL) {
+ free(name);
+ }
+}
+
+EXPORT
media_status_t AMediaCodec_configure(
AMediaCodec *mData,
const AMediaFormat* format,
@@ -236,8 +412,40 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface,
- crypto ? crypto->mCrypto : NULL, flags));
+ status_t err = mData->mCodec->configure(nativeFormat, surface,
+ crypto ? crypto->mCrypto : NULL, flags);
+ if (err != OK) {
+ ALOGE("configure: err(%d), failed with format: %s",
+ err, nativeFormat->debugString(0).c_str());
+ }
+ return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec *mData,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata) {
+ if (mData->mAsyncNotify == NULL && userdata != NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+ }
+
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
+
+ return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+ return translate_error(mData->mCodec->releaseCrypto());
}
EXPORT
@@ -282,6 +490,19 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -304,6 +525,19 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -367,6 +601,13 @@
}
EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+ sp<AMessage> format;
+ mData->mCodec->getInputFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
sp<AMessage> format;
mData->mCodec->getOutputFormat(index, &format);
@@ -542,6 +783,16 @@
return translate_error(err);
}
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
EXPORT
void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
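
A minimal sketch of how a client might exercise the new NDK entry points implemented above (AMediaCodec_getName, AMediaCodec_releaseName, AMediaCodec_getInputFormat); error handling is abbreviated and the mime type and dimensions are placeholders.

#include <cstdio>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

// Sketch only: create a decoder by type, query the resolved component name,
// and read back the input format accepted after configure().
static void inspectDecoder() {
    AMediaCodec *codec = AMediaCodec_createDecoderByType("video/avc");
    if (codec == NULL) {
        return;
    }

    char *name = NULL;
    if (AMediaCodec_getName(codec, &name) == AMEDIA_OK) {
        printf("resolved component: %s\n", name);
        AMediaCodec_releaseName(codec, name);  // frees the strdup'ed string
    }

    AMediaFormat *format = AMediaFormat_new();
    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, 1280);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, 720);

    if (AMediaCodec_configure(codec, format, NULL /*surface*/, NULL /*crypto*/, 0) == AMEDIA_OK) {
        AMediaFormat *inputFormat = AMediaCodec_getInputFormat(codec);
        printf("input format: %s\n", AMediaFormat_toString(inputFormat));
        AMediaFormat_delete(inputFormat);
    }

    AMediaFormat_delete(format);
    AMediaCodec_delete(codec);
}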
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..a9025c0 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -125,6 +125,14 @@
ret.appendFormat("double(%f)", val);
break;
}
+ case AMessage::kTypeRect:
+ {
+ int32_t left, top, right, bottom;
+ f->findRect(name, &left, &top, &right, &bottom);
+ ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+ left, top, right, bottom);
+ break;
+ }
case AMessage::kTypeString:
{
AString val;
@@ -165,11 +173,22 @@
}
EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+ return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
return format->mFormat->findSize(name, out);
}
EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+ return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
sp<ABuffer> buf;
if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +235,22 @@
}
EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+ format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+ format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
// AMessage::setString() makes a copy of the string
format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +268,61 @@
}
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_HEIGHT = "grid-height";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_WIDTH = "grid-width";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
} // extern "C"
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b15de38..f4a51d0 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -53,11 +53,63 @@
typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
enum {
+ AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
- AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+ AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+ AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+/**
+ * Called when the MediaCodec encountered an error.
+ * The specified actionCode indicates the possible actions that the client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+ AMediaCodec *codec,
+ void *userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+ AMediaCodecOnAsyncInputAvailable onAsyncInputAvailable;
+ AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+ AMediaCodecOnAsyncFormatChanged onAsyncFormatChanged;
+ AMediaCodecOnAsyncError onAsyncError;
};
#if __ANDROID_API__ >= 21
@@ -289,6 +341,71 @@
#endif /* __ANDROID_API__ >= 26 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, the component chosen is not known beforehand.
+ * The caller must call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed by name which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback for how each
+ * callback function is called and what is passed to it.
+ * The specified userdata is the pointer used when those callback functions are
+ * called.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy-duty work should be performed on the callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec*,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and that the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
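
A minimal sketch of registering the asynchronous callbacks documented above. The callback bodies and the ClientState struct are illustrative, and note that the implementation in NdkMediaCodec.cpp only installs the underlying MediaCodec callback when userdata is non-null.

#include <cstdio>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

struct ClientState {
    int framesOut = 0;  // illustrative per-client bookkeeping
};

static void onInput(AMediaCodec *codec, void *userdata, int32_t index) {
    (void)codec; (void)userdata;
    printf("input buffer %d available\n", index);
    // A real client would fill the buffer and call AMediaCodec_queueInputBuffer() here.
}

static void onOutput(AMediaCodec *codec, void *userdata, int32_t index,
                     AMediaCodecBufferInfo *info) {
    ClientState *state = static_cast<ClientState *>(userdata);
    state->framesOut++;
    printf("output buffer %d, size %d, pts %lld\n",
           index, info->size, (long long)info->presentationTimeUs);
    AMediaCodec_releaseOutputBuffer(codec, index, false /*render*/);
}

static void onFormatChanged(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
    (void)codec; (void)userdata;
    printf("new output format: %s\n", AMediaFormat_toString(format));
}

static void onError(AMediaCodec *codec, void *userdata, media_status_t error,
                    int32_t actionCode, const char *detail) {
    (void)codec; (void)userdata;
    // Recoverable errors allow stop/configure/start; transient ones may simply be retried.
    printf("codec error %d (%s): recoverable=%d transient=%d\n",
           error, detail,
           (int)AMediaCodecActionCode_isRecoverable(actionCode),
           (int)AMediaCodecActionCode_isTransient(actionCode));
}

static media_status_t enableAsyncMode(AMediaCodec *codec, ClientState *state) {
    AMediaCodecOnAsyncNotifyCallback cb = { onInput, onOutput, onFormatChanged, onError };
    // Register before start(); all callbacks fire on one NDK internal thread.
    return AMediaCodec_setAsyncNotifyCallback(codec, cb, state);
}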
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index da61b64..e48fcbe 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -35,6 +35,17 @@
typedef enum {
AMEDIA_OK = 0,
+ /**
+ * This indicates required resource was not able to be allocated.
+ */
+ AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+ /**
+ * This indicates the resource manager reclaimed the media resource used by the codec.
+ * With this error, the codec must be released, as it has moved to a terminal state.
+ */
+ AMEDIACODEC_ERROR_RECLAIMED = 1101,
+
AMEDIA_ERROR_BASE = -10000,
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 018ab76..b6489c7 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -51,6 +51,7 @@
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
@@ -80,33 +81,75 @@
/**
* XXX should these be ints/enums that we look up in a table as needed?
*/
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
extern const char* AMEDIAFORMAT_KEY_DURATION;
extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLS;
+extern const char* AMEDIAFORMAT_KEY_GRID_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_GRID_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
#endif /* __ANDROID_API__ >= 21 */
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
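
A short sketch of the new AMediaFormat accessors declared above (rect, double and size variants) together with two of the new keys. The key/type pairings are chosen purely to demonstrate the round trip through the underlying AMessage; codecs normally consume these keys with other types.

#include <cstdio>
#include <media/NdkMediaFormat.h>

int main() {
    AMediaFormat *format = AMediaFormat_new();

    // New setters introduced for API 28.
    AMediaFormat_setRect(format, AMEDIAFORMAT_KEY_DISPLAY_CROP, 0, 0, 1279, 719);
    AMediaFormat_setDouble(format, AMEDIAFORMAT_KEY_CAPTURE_RATE, 29.97);
    AMediaFormat_setSize(format, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, 1 << 20);

    // Matching getters; each returns false if the key is absent or stored with another type.
    int32_t left, top, right, bottom;
    if (AMediaFormat_getRect(format, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                             &left, &top, &right, &bottom)) {
        printf("crop: %d,%d - %d,%d\n", left, top, right, bottom);
    }

    double captureRate;
    if (AMediaFormat_getDouble(format, AMEDIAFORMAT_KEY_CAPTURE_RATE, &captureRate)) {
        printf("capture rate: %f\n", captureRate);
    }

    size_t maxInputSize;
    if (AMediaFormat_getSize(format, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, &maxInputSize)) {
        printf("max input size: %zu\n", maxInputSize);
    }

    AMediaFormat_delete(format);
    return 0;
}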
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..f2d97cd 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
AImage_getPlaneRowStride; # introduced=24
AImage_getTimestamp; # introduced=24
AImage_getWidth; # introduced=24
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+ AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
AMEDIAFORMAT_KEY_IS_ADTS; # var
AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
AMEDIAFORMAT_KEY_IS_DEFAULT; # var
AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+ AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
AMEDIAFORMAT_KEY_MAX_WIDTH; # var
AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+ AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+ AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+ AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
+ AMediaCodecActionCode_isRecoverable; # introduced=28
+ AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
AMediaCodecCryptoInfo_getClearBytes;
AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,16 @@
AMediaCodec_dequeueOutputBuffer;
AMediaCodec_flush;
AMediaCodec_getInputBuffer;
+ AMediaCodec_getInputFormat; # introduced=28
+ AMediaCodec_getName; # introduced=28
AMediaCodec_getOutputBuffer;
AMediaCodec_getOutputFormat;
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
+ AMediaCodec_releaseCrypto; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
+ AMediaCodec_setAsyncNotifyCallback; # introduced=28
AMediaCodec_setOutputSurface; # introduced=24
AMediaCodec_setParameters; # introduced=26
AMediaCodec_setInputSurface; # introduced=26
@@ -127,16 +164,21 @@
AMediaExtractor_unselectTrack;
AMediaFormat_delete;
AMediaFormat_getBuffer;
+ AMediaFormat_getDouble; # introduced=28
AMediaFormat_getFloat;
AMediaFormat_getInt32;
AMediaFormat_getInt64;
+ AMediaFormat_getRect; # introduced=28
AMediaFormat_getSize;
AMediaFormat_getString;
AMediaFormat_new;
AMediaFormat_setBuffer;
+ AMediaFormat_setDouble; # introduced=28
AMediaFormat_setFloat;
AMediaFormat_setInt32;
AMediaFormat_setInt64;
+ AMediaFormat_setRect; # introduced=28
+ AMediaFormat_setSize; # introduced=28
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index aeb32bb..9cb0357 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -674,11 +674,7 @@
audio_session_t sessionId = input.sessionId;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
- lStatus = BAD_VALUE;
- goto Exit;
}
-
output.sessionId = sessionId;
output.outputId = AUDIO_IO_HANDLE_NONE;
output.selectedDeviceId = input.selectedDeviceId;
@@ -1572,144 +1568,120 @@
// ----------------------------------------------------------------------------
-sp<media::IAudioRecord> AudioFlinger::createRecord(const CreateRecordInput& input,
- CreateRecordOutput& output,
- status_t *status)
+sp<media::IAudioRecord> AudioFlinger::openRecord(
+ audio_io_handle_t input,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& opPackageName,
+ size_t *frameCount,
+ audio_input_flags_t *flags,
+ pid_t pid,
+ pid_t tid,
+ int clientUid,
+ audio_session_t *sessionId,
+ size_t *notificationFrames,
+ sp<IMemory>& cblk,
+ sp<IMemory>& buffers,
+ status_t *status,
+ audio_port_handle_t portId)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
- audio_session_t sessionId = input.sessionId;
- audio_port_handle_t portId;
+ audio_session_t lSessionId;
- output.cblk.clear();
- output.buffers.clear();
+ cblk.clear();
+ buffers.clear();
- bool updatePid = (input.clientInfo.clientPid == -1);
+ bool updatePid = (pid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- uid_t clientUid = input.clientInfo.clientUid;
if (!isTrustedCallingUid(callingUid)) {
- ALOGW_IF(clientUid != callingUid,
- "%s uid %d tried to pass itself off as %d",
- __FUNCTION__, callingUid, clientUid);
+ ALOGW_IF((uid_t)clientUid != callingUid,
+ "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
clientUid = callingUid;
updatePid = true;
}
- pid_t clientPid = input.clientInfo.clientPid;
+
if (updatePid) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
- ALOGW_IF(clientPid != -1 && clientPid != callingPid,
+ ALOGW_IF(pid != -1 && pid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, callingUid, callingPid, clientPid);
- clientPid = callingPid;
+ __func__, callingUid, callingPid, pid);
+ pid = callingPid;
}
// check calling permissions
- if (!recordingAllowed(input.opPackageName, input.clientInfo.clientTid, clientUid)) {
- ALOGE("createRecord() permission denied: recording not allowed");
+ if (!recordingAllowed(opPackageName, tid, clientUid)) {
+ ALOGE("openRecord() permission denied: recording not allowed");
lStatus = PERMISSION_DENIED;
goto Exit;
}
+
+ // further sample rate checks are performed by createRecordTrack_l()
+ if (sampleRate == 0) {
+ ALOGE("openRecord() invalid sample rate %u", sampleRate);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
// we don't yet support anything other than linear PCM
- if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
- ALOGE("createRecord() invalid format %#x", input.config.format);
+ if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
+ ALOGE("openRecord() invalid format %#x", format);
lStatus = BAD_VALUE;
goto Exit;
}
// further channel mask checks are performed by createRecordTrack_l()
- if (!audio_is_input_channel(input.config.channel_mask)) {
- ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
+ if (!audio_is_input_channel(channelMask)) {
+ ALOGE("openRecord() invalid channel mask %#x", channelMask);
lStatus = BAD_VALUE;
goto Exit;
}
- if (sessionId == AUDIO_SESSION_ALLOCATE) {
- sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- output.sessionId = sessionId;
- output.inputId = AUDIO_IO_HANDLE_NONE;
- output.selectedDeviceId = input.selectedDeviceId;
- output.flags = input.flags;
-
- client = registerPid(clientPid);
-
- // Not a conventional loop, but a retry loop for at most two iterations total.
- // Try first maybe with FAST flag then try again without FAST flag if that fails.
- // Exits loop via break on no error of got exit on error
- // The sp<> references will be dropped when re-entering scope.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
- for (;;) {
- lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
- sessionId,
- // FIXME compare to AudioTrack
- clientPid,
- clientUid,
- &input.config,
- output.flags, &output.selectedDeviceId, &portId);
-
{
Mutex::Autolock _l(mLock);
- RecordThread *thread = checkRecordThread_l(output.inputId);
+ RecordThread *thread = checkRecordThread_l(input);
if (thread == NULL) {
- ALOGE("createRecord() checkRecordThread_l failed");
+ ALOGE("openRecord() checkRecordThread_l failed");
lStatus = BAD_VALUE;
goto Exit;
}
- ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);
+ client = registerPid(pid);
- output.sampleRate = input.config.sample_rate;
- output.frameCount = input.frameCount;
- output.notificationFrameCount = input.notificationFrameCount;
+ if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+ if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ lSessionId = *sessionId;
+ } else {
+ // if no audio session id is provided, create one here
+ lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ if (sessionId != NULL) {
+ *sessionId = lSessionId;
+ }
+ }
+ ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input);
- recordTrack = thread->createRecordTrack_l(client, &output.sampleRate,
- input.config.format, input.config.channel_mask,
- &output.frameCount, sessionId,
- &output.notificationFrameCount,
- clientUid, &output.flags,
- input.clientInfo.clientTid,
- &lStatus, portId);
+ recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
+ frameCount, lSessionId, notificationFrames,
+ clientUid, flags, tid, &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
- // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
- // audio policy manager without FAST constraint
- if (lStatus == BAD_TYPE) {
- AudioSystem::releaseInput(output.inputId, sessionId);
- recordTrack.clear();
- continue;
+ if (lStatus == NO_ERROR) {
+ // Check if one effect chain was awaiting for an AudioRecord to be created on this
+ // session and move it to this thread.
+ sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
+ if (chain != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->addEffectChain_l(chain);
+ }
}
-
- if (lStatus != NO_ERROR) {
- recordTrack.clear();
- goto Exit;
- }
-
- // Check if one effect chain was awaiting for an AudioRecord to be created on this
- // session and move it to this thread.
- sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
- if (chain != 0) {
- Mutex::Autolock _l(thread->mLock);
- thread->addEffectChain_l(chain);
- }
- break;
- }
- // End of retry loop.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
}
- output.cblk = recordTrack->getCblk();
- output.buffers = recordTrack->getBuffers();
-
- // return handle to client
- recordHandle = new RecordHandle(recordTrack);
-
-Exit:
if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the RecordTrack so that the
// Client destructor is called by the TrackBase destructor with mClientLock held
@@ -1719,8 +1691,17 @@
Mutex::Autolock _cl(mClientLock);
client.clear();
}
+ recordTrack.clear();
+ goto Exit;
}
+ cblk = recordTrack->getCblk();
+ buffers = recordTrack->getBuffers();
+
+ // return handle to client
+ recordHandle = new RecordHandle(recordTrack);
+
+Exit:
*status = lStatus;
return recordHandle;
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index bc73ffd..506420c 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -118,9 +118,23 @@
CreateTrackOutput& output,
status_t *status);
- virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
- CreateRecordOutput& output,
- status_t *status);
+ virtual sp<media::IAudioRecord> openRecord(
+ audio_io_handle_t input,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& opPackageName,
+ size_t *pFrameCount,
+ audio_input_flags_t *flags,
+ pid_t pid,
+ pid_t tid,
+ int clientUid,
+ audio_session_t *sessionId,
+ size_t *notificationFrames,
+ sp<IMemory>& cblk,
+ sp<IMemory>& buffers,
+ status_t *status /*non-NULL*/,
+ audio_port_handle_t portId);
virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const;
virtual audio_format_t format(audio_io_handle_t output) const;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e0d0d7b..bfb0fe2 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -296,6 +296,43 @@
const bool auxType =
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY;
+ // safeInputOutputSampleCount is 0 if the channel counts of the input and output
+ // buffers do not match. This prevents automatic accumulation or copying between the
+ // input and output effect buffers without an intermediary effect process.
+ // TODO: consider implementing channel conversion.
+ const size_t safeInputOutputSampleCount =
+ inChannelCount != outChannelCount ? 0
+ : outChannelCount * std::min(
+ mConfig.inputCfg.buffer.frameCount,
+ mConfig.outputCfg.buffer.frameCount);
+ const auto accumulateInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ accumulate_float(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount);
+#else
+ accumulate_i16(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount);
+#endif
+ };
+ const auto copyInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ memcpy(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.f32));
+
+#else
+ memcpy(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.s16));
+#endif
+ };
+
if (isProcessEnabled()) {
int ret;
if (isProcessImplemented()) {
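
The lambdas above gate the idle-effect bypass on matching channel counts and clamp to the smaller of the two frame counts. Below is a standalone sketch of the same accumulate-or-copy decision for the float path; the function names and buffer plumbing are illustrative.

#include <algorithm>
#include <cstddef>
#include <cstring>

// Mix the input into the output (the EFFECT_BUFFER_ACCESS_ACCUMULATE case).
static void accumulateFloat(float *out, const float *in, size_t sampleCount) {
    for (size_t i = 0; i < sampleCount; ++i) {
        out[i] += in[i];
    }
}

// Bypass an idle insert effect: accumulate or copy input to output, but only
// when the channel counts match, clamped to the smaller frame count.
static void bypass(float *out, const float *in,
                   size_t inFrames, size_t outFrames,
                   size_t inChannels, size_t outChannels,
                   bool accumulate) {
    const size_t safeSampleCount = (inChannels != outChannels)
            ? 0
            : outChannels * std::min(inFrames, outFrames);
    if (accumulate) {
        accumulateFloat(out, in, safeSampleCount);
    } else {
        memcpy(out, in, safeSampleCount * sizeof(*out));
    }
}

Zeroing the sample count on a channel-count mismatch simply skips the bypass rather than mixing mismatched layouts; channel conversion is left open, as the TODO above notes.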
@@ -308,97 +345,69 @@
static_assert(sizeof(float) <= sizeof(int32_t),
"in-place conversion requires sizeof(float) <= sizeof(int32_t)");
- const int32_t * const p32 = mConfig.inputCfg.buffer.s32;
- float * const pFloat = mConfig.inputCfg.buffer.f32;
- memcpy_to_float_from_q4_27(pFloat, p32, mConfig.inputCfg.buffer.frameCount);
- } else {
- memcpy_to_i16_from_q4_27(mConfig.inputCfg.buffer.s16,
+ memcpy_to_float_from_q4_27(
+ mConfig.inputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.frameCount);
+ } else
+#endif
+ {
+ memcpy_to_i16_from_q4_27(
+ mConfig.inputCfg.buffer.s16,
mConfig.inputCfg.buffer.s32,
mConfig.inputCfg.buffer.frameCount);
}
-#else
- memcpy_to_i16_from_q4_27(mConfig.inputCfg.buffer.s16,
- mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.frameCount);
-#endif
}
#ifdef FLOAT_EFFECT_CHAIN
- if (mSupportsFloat) {
- ret = mEffectInterface->process();
- } else {
- { // convert input to int16_t as effect doesn't support float.
- if (!auxType) {
- if (mInBuffer16.get() == nullptr) {
- ALOGW("%s: mInBuffer16 is null, bypassing", __func__);
- goto data_bypass;
- }
- const float * const pIn = mInBuffer->audioBuffer()->f32;
- int16_t * const pIn16 = mInBuffer16->audioBuffer()->s16;
- memcpy_to_i16_from_float(
- pIn16, pIn, inChannelCount * mConfig.inputCfg.buffer.frameCount);
+ if (!mSupportsFloat) { // convert input to int16_t as effect doesn't support float.
+ if (!auxType) {
+ if (mInConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
}
- if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- if (mOutBuffer16.get() == nullptr) {
- ALOGW("%s: mOutBuffer16 is null, bypassing", __func__);
- goto data_bypass;
- }
- int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
- const float * const pOut = mOutBuffer->audioBuffer()->f32;
- memcpy_to_i16_from_float(
- pOut16,
- pOut,
- outChannelCount * mConfig.outputCfg.buffer.frameCount);
- }
+ memcpy_to_i16_from_float(
+ mInConversionBuffer->audioBuffer()->s16,
+ mInBuffer->audioBuffer()->f32,
+ inChannelCount * mConfig.inputCfg.buffer.frameCount);
}
-
- ret = mEffectInterface->process();
-
- { // convert output back to float.
- const int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
- float * const pOut = mOutBuffer->audioBuffer()->f32;
- memcpy_to_float_from_i16(
- pOut, pOut16, outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ if (mOutConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
+ }
+ memcpy_to_i16_from_float(
+ mOutConversionBuffer->audioBuffer()->s16,
+ mOutBuffer->audioBuffer()->f32,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
}
}
-#else
+#endif
+
ret = mEffectInterface->process();
+
+#ifdef FLOAT_EFFECT_CHAIN
+ if (!mSupportsFloat) { // convert output int16_t back to float.
+ memcpy_to_float_from_i16(
+ mOutBuffer->audioBuffer()->f32,
+ mOutConversionBuffer->audioBuffer()->s16,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ }
#endif
} else {
#ifdef FLOAT_EFFECT_CHAIN
data_bypass:
#endif
if (!auxType /* aux effects do not require data bypass */
- && mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw
- && inChannelCount == outChannelCount) {
- const size_t sampleCount = std::min(
- mConfig.inputCfg.buffer.frameCount,
- mConfig.outputCfg.buffer.frameCount) * outChannelCount;
-
-#ifdef FLOAT_EFFECT_CHAIN
- const float * const in = mConfig.inputCfg.buffer.f32;
- float * const out = mConfig.outputCfg.buffer.f32;
-
+ && mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- accumulate_float(out, in, sampleCount);
+ accumulateInputToOutput();
} else {
- memcpy(mConfig.outputCfg.buffer.f32, mConfig.inputCfg.buffer.f32,
- sampleCount * sizeof(*mConfig.outputCfg.buffer.f32));
+ copyInputToOutput();
}
-
-#else
- const int16_t * const in = mConfig.inputCfg.buffer.s16;
- int16_t * const out = mConfig.outputCfg.buffer.s16;
-
- if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- accumulate_i16(out, in, sampleCount);
- } else {
- memcpy(mConfig.outputCfg.buffer.s16, mConfig.inputCfg.buffer.s16,
- sampleCount * sizeof(*mConfig.outputCfg.buffer.s16));
- }
-#endif
}
ret = -ENODATA;
}
+
// force transition to IDLE state when engine is ready
if (mState == STOPPED && ret == -ENODATA) {
mDisableWaitCnt = 1;
@@ -417,21 +426,8 @@
// If an insert effect is idle and input buffer is different from output buffer,
// accumulate input onto output
sp<EffectChain> chain = mChain.promote();
- if (chain != 0
- && chain->activeTrackCnt() != 0
- && inChannelCount == outChannelCount) {
- const size_t sampleCount = std::min(
- mConfig.inputCfg.buffer.frameCount,
- mConfig.outputCfg.buffer.frameCount) * outChannelCount;
-#ifdef FLOAT_EFFECT_CHAIN
- const float * const in = mConfig.inputCfg.buffer.f32;
- float * const out = mConfig.outputCfg.buffer.f32;
- accumulate_float(out, in, sampleCount);
-#else
- const int16_t * const in = mConfig.inputCfg.buffer.s16;
- int16_t * const out = mConfig.outputCfg.buffer.s16;
- accumulate_i16(out, in, sampleCount);
-#endif
+ if (chain.get() != nullptr && chain->activeTrackCnt() != 0) {
+ accumulateInputToOutput();
}
}
}
@@ -906,7 +902,7 @@
mEffectInterface->setInBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
+ // aux effects do in place conversion to float - we don't allocate mInConversionBuffer.
// Theoretically insert effects can also do in-place conversions (destroying
// the original buffer) when the output buffer is identical to the input buffer,
// but we don't optimize for it here.
@@ -920,17 +916,17 @@
ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
__func__, inChannels, inFrameCount, size);
- if (size > 0 && (mInBuffer16.get() == nullptr || size > mInBuffer16->getSize())) {
- mInBuffer16.clear();
- ALOGV("%s: allocating mInBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mInBuffer16);
+ if (size > 0 && (mInConversionBuffer.get() == nullptr
+ || size > mInConversionBuffer->getSize())) {
+ mInConversionBuffer.clear();
+ ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mInConversionBuffer);
}
- if (mInBuffer16.get() != nullptr) {
- // FIXME: confirm buffer has enough size.
- mInBuffer16->setFrameCount(inFrameCount);
- mEffectInterface->setInBuffer(mInBuffer16);
+ if (mInConversionBuffer.get() != nullptr) {
+ mInConversionBuffer->setFrameCount(inFrameCount);
+ mEffectInterface->setInBuffer(mInConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mInBuffer16", __func__);
+ ALOGE("%s cannot create mInConversionBuffer", __func__);
}
}
#endif
@@ -948,7 +944,7 @@
mEffectInterface->setOutBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // Note: Any effect that does not accumulate does not need mOutBuffer16 and
+ // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
// can do in-place conversion from int16_t to float. We don't optimize here.
if (!mSupportsFloat && mOutBuffer.get() != nullptr) {
const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
@@ -958,16 +954,17 @@
ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
__func__, outChannels, outFrameCount, size);
- if (size > 0 && (mOutBuffer16.get() == nullptr || size > mOutBuffer16->getSize())) {
- mOutBuffer16.clear();
- ALOGV("%s: allocating mOutBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mOutBuffer16);
+ if (size > 0 && (mOutConversionBuffer.get() == nullptr
+ || size > mOutConversionBuffer->getSize())) {
+ mOutConversionBuffer.clear();
+ ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mOutConversionBuffer);
}
- if (mOutBuffer16.get() != nullptr) {
- mOutBuffer16->setFrameCount(outFrameCount);
- mEffectInterface->setOutBuffer(mOutBuffer16);
+ if (mOutConversionBuffer.get() != nullptr) {
+ mOutConversionBuffer->setFrameCount(outFrameCount);
+ mEffectInterface->setOutBuffer(mOutConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mOutBuffer16", __func__);
+ ALOGE("%s cannot create mOutConversionBuffer", __func__);
}
}
#endif
@@ -1241,6 +1238,20 @@
return s;
}
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+ std::stringstream ss;
+
+ if (buffer.get() == nullptr) {
+        return "nullptr"; // printed as text, so it is distinguishable from the raw pointers below
+ } else if (buffer->externalData() != nullptr) {
+ ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+ << " -> "
+ << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ ss << buffer->audioBuffer()->raw;
+ }
+ return ss.str();
+}
void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
{
@@ -1305,19 +1316,13 @@
result.append(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- if (!mSupportsFloat) {
- int16_t* pIn16 = mInBuffer16 != 0 ? mInBuffer16->audioBuffer()->s16 : NULL;
- int16_t* pOut16 = mOutBuffer16 != 0 ? mOutBuffer16->audioBuffer()->s16 : NULL;
- result.append("\t\t- Float and int16 buffers\n");
- result.append("\t\t\tIn_float In_int16 Out_float Out_int16\n");
- snprintf(buffer, SIZE,"\t\t\t%p %p %p %p\n",
- mConfig.inputCfg.buffer.raw,
- pIn16,
- pOut16,
- mConfig.outputCfg.buffer.raw);
- result.append(buffer);
- }
+ result.appendFormat("\t\t- HAL buffers:\n"
+ "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+ dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+ dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
#endif
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -2161,19 +2166,6 @@
}
}
-static void dumpInOutBuffer(
- char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
- if (buffer == nullptr) {
- snprintf(dump, dumpSize, "%p", buffer);
- } else if (buffer->externalData() != nullptr) {
- snprintf(dump, dumpSize, "%p -> %p",
- isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
- isInput ? buffer->audioBuffer()->raw : buffer->externalData());
- } else {
- snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
- }
-}
-
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -2191,15 +2183,13 @@
result.append("\tCould not lock mutex:\n");
}
- char inBufferStr[64], outBufferStr[64];
- dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
- dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
- snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
- (int)strlen(inBufferStr), "In buffer ",
- (int)strlen(outBufferStr), "Out buffer ");
- result.append(buffer);
- snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
- result.append(buffer);
+ const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+ const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+ result.appendFormat("\t%-*s%-*s Active tracks:\n",
+ (int)inBufferStr.size(), "In buffer ",
+ (int)outBufferStr.size(), "Out buffer ");
+ result.appendFormat("\t%s %s %d\n",
+ inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
write(fd, result.string(), result.size());
for (size_t i = 0; i < numEffects; ++i) {
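
For effects that do not support float, the reworked path converts the float input into mInConversionBuffer before process(), also pre-converts the current output when the access mode is ACCUMULATE, and converts the int16 result back to float afterwards. A condensed sketch of that ordering (a hypothetical helper, not the EffectModule code; the conversion routines are the audio_utils primitives used in the patch, and process16 stands in for the 16-bit effect HAL call):

    #include <audio_utils/primitives.h>   // memcpy_to_i16_from_float / memcpy_to_float_from_i16
    #include <cstdint>
    #include <functional>

    static void processWithInt16Effect(const float* inF, float* outF,
                                       int16_t* in16, int16_t* out16,
                                       size_t inSamples, size_t outSamples,
                                       bool accumulate,
                                       const std::function<void(const int16_t*, int16_t*)>& process16) {
        memcpy_to_i16_from_float(in16, inF, inSamples);        // convert input float -> i16
        if (accumulate) {
            // the effect mixes into its output, so seed it with the current float contents
            memcpy_to_i16_from_float(out16, outF, outSamples);
        }
        process16(in16, out16);                                 // 16-bit effect processing
        memcpy_to_float_from_i16(outF, out16, outSamples);      // convert result i16 -> float
    }
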
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 1864e0f..eea3208 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -171,8 +171,8 @@
#ifdef FLOAT_EFFECT_CHAIN
bool mSupportsFloat; // effect supports float processing
- sp<EffectBufferHalInterface> mInBuffer16; // Buffers for interacting with HAL at 16 bits
- sp<EffectBufferHalInterface> mOutBuffer16;
+ sp<EffectBufferHalInterface> mInConversionBuffer; // Buffers for HAL conversion if needed.
+ sp<EffectBufferHalInterface> mOutConversionBuffer;
#endif
};
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 6475f22..2e4fb8c 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -78,7 +78,12 @@
uint32_t bounds = mBounds;
uint32_t newestOpen = bounds & 0xFFFF;
uint32_t oldestClosed = bounds >> 16;
- uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+
+    // compute (newestOpen - oldestClosed) & 0xFFFF with a wrapping subtraction so the
+    // intentional unsigned overflow stays well defined under integer sanitizers.
+ uint32_t n;
+ __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+ n = n & 0xFFFF;
+
if (n > mSamplingN) {
ALOGE("too many samples %u", n);
n = mSamplingN;
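
__builtin_sub_overflow() keeps the wraparound of newestOpen - oldestClosed well defined even with overflow instrumentation enabled, and the result is still masked to 16 bits to get the circular distance between the two counters. A standalone illustration of the GCC/Clang builtin:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t newestOpen = 0x0002;    // counter has wrapped past...
        uint32_t oldestClosed = 0xFFFE;  // ...the older value
        uint32_t n;
        // Computes newestOpen - oldestClosed modulo 2^32; the bool return value
        // (ignored here, as in the patch) reports that a borrow occurred.
        __builtin_sub_overflow(newestOpen, oldestClosed, &n);
        n &= 0xFFFF;                     // keep the 16-bit circular distance
        printf("%u\n", n);               // prints 4
        return 0;
    }
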
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7636df6..b2a1e18 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6708,12 +6708,12 @@
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t *pSampleRate,
+ uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *pNotificationFrameCount,
+ size_t *notificationFrames,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
@@ -6721,30 +6721,16 @@
audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
- size_t notificationFrameCount = *pNotificationFrameCount;
sp<RecordTrack> track;
status_t lStatus;
audio_input_flags_t inputFlags = mInput->flags;
- audio_input_flags_t requestedFlags = *flags;
- uint32_t sampleRate;
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGE("createRecordTrack_l() audio driver not initialized");
- goto Exit;
- }
-
- if (*pSampleRate == 0) {
- *pSampleRate = mSampleRate;
- }
- sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast capture is present
if (hasFastCapture()) {
inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
}
- // Check if requested flags are compatible with input stream flags
+    // Check if requested flags are compatible with input stream flags
if ((*flags & inputFlags) != *flags) {
ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
" input flags (%08x)",
@@ -6799,20 +6785,12 @@
}
}
- // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
- if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
- (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
- *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
- lStatus = BAD_TYPE;
- goto Exit;
- }
-
// compute track buffer size in frames, and suggest the notification frame count
if (*flags & AUDIO_INPUT_FLAG_FAST) {
// fast track: frame count is exactly the pipe depth
frameCount = mPipeFramesP2;
// ignore requested notificationFrames, and always notify exactly once every HAL buffer
- notificationFrameCount = mFrameCount;
+ *notificationFrames = mFrameCount;
} else {
// not fast track: max notification period is resampled equivalent of one HAL buffer time
// or 20 ms if there is a fast capture
@@ -6831,12 +6809,17 @@
const size_t minFrameCount = maxNotificationFrames *
max(kMinNotifications, minNotificationsByMs);
frameCount = max(frameCount, minFrameCount);
- if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
- notificationFrameCount = maxNotificationFrames;
+ if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) {
+ *notificationFrames = maxNotificationFrames;
}
}
*pFrameCount = frameCount;
- *pNotificationFrameCount = notificationFrameCount;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGE("createRecordTrack_l() audio driver not initialized");
+ goto Exit;
+ }
{ // scope for mLock
Mutex::Autolock _l(mLock);
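
For a normal (non-fast) capture, the sizing rule above keeps the track buffer at least a few notification periods long and never lets the notification interval exceed maxNotificationFrames. Restated as a small helper (a sketch only; maxNotificationFrames, kMinNotifications and minNotificationsByMs are assumed to be computed as described in the surrounding code):

    #include <algorithm>
    #include <cstddef>

    static void sizeRecordBuffer(size_t* frameCount, size_t* notificationFrames,
                                 size_t maxNotificationFrames,
                                 size_t kMinNotifications, size_t minNotificationsByMs) {
        // the track buffer must hold at least a few notification periods
        const size_t minFrameCount =
                maxNotificationFrames * std::max(kMinNotifications, minNotificationsByMs);
        *frameCount = std::max(*frameCount, minFrameCount);
        // never notify less often than once per maxNotificationFrames
        if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) {
            *notificationFrames = maxNotificationFrames;
        }
    }
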
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 17f26c5..c7b60d6 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1327,12 +1327,12 @@
sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t *pSampleRate,
+ uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *pNotificationFrameCount,
+ size_t *notificationFrames,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index a3ea756..d4ce0b4 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -192,7 +192,7 @@
// where for AudioTrack (but not AudioRecord),
// 8-bit PCM samples are stored as 16-bit
const size_t mFrameCount;// size of track buffer given at createTrack() or
- // createRecord(), and then adjusted as needed
+ // openRecord(), and then adjusted as needed
const audio_session_t mSessionId;
uid_t mUid;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1445572..cdd8ca0 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1102,11 +1102,12 @@
void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
- for (size_t i = 0; i < mSyncEvents.size(); i++) {
+ for (size_t i = 0; i < mSyncEvents.size();) {
if (mSyncEvents[i]->type() == type) {
mSyncEvents[i]->trigger();
mSyncEvents.removeAt(i);
- i--;
+ } else {
+ ++i;
}
}
}
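
The rewritten triggerEvents() loop advances the index only when no element was removed, the usual way to erase from a vector while iterating; the old form relied on the unsigned index wrapping through i-- / i++ when a match sat at position 0. The same pattern with std::vector:

    #include <cstddef>
    #include <vector>

    // Advance the index only when nothing was erased, so every element is
    // visited exactly once and none is skipped after a removal.
    static void removeMatching(std::vector<int>& v, int match) {
        for (size_t i = 0; i < v.size();) {
            if (v[i] == match) {
                v.erase(v.begin() + i);   // next element slides into slot i
            } else {
                ++i;
            }
        }
    }

With std::vector the erase-remove idiom would do the same job; the index form matches the Vector API used here.
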
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 118f0d2..ec04ef7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -34,11 +34,7 @@
{
public:
IOProfile(const String8 &name, audio_port_role_t role)
- : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
- maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
- curOpenCount(0),
- maxActiveCount(1),
- curActiveCount(0) {}
+ : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
@@ -107,34 +103,6 @@
const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
- bool canOpenNewIo() {
- if (maxOpenCount == 0 || curOpenCount < maxOpenCount) {
- return true;
- }
- return false;
- }
-
- bool canStartNewIo() {
- if (maxActiveCount == 0 || curActiveCount < maxActiveCount) {
- return true;
- }
- return false;
- }
-
- // Maximum number of input or output streams that can be simultaneously opened for this profile.
- // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
- // profiles and 0 for input profiles
- uint32_t maxOpenCount;
- // Number of streams currently opened for this profile.
- uint32_t curOpenCount;
- // Maximum number of input or output streams that can be simultaneously active for this profile.
- // By convention 0 means no limit. To respect legacy behavior, initialized to 0 for output
- // profiles and 1 for input profiles
- uint32_t maxActiveCount;
- // Number of streams currently active for this profile. This is not the number of active clients
- // (AudioTrack or AudioRecord) but the number of active HAL streams.
- uint32_t curActiveCount;
-
private:
DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
};
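
Although the per-profile open/active limits go away here, the deleted comments document the convention they followed: a count of zero means unlimited, and the open/start checks compare the current count against the maximum. Restated in isolation for reference (a sketch, not the IOProfile class):

    #include <cstdint>

    struct IoLimits {
        uint32_t maxOpenCount;    // 0 means "no limit" (see the removed comments for the legacy defaults)
        uint32_t curOpenCount;
        uint32_t maxActiveCount;  // 0 means "no limit"
        uint32_t curActiveCount;

        bool canOpenNewIo() const  { return maxOpenCount == 0  || curOpenCount  < maxOpenCount; }
        bool canStartNewIo() const { return maxActiveCount == 0 || curActiveCount < maxActiveCount; }
    };
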
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
index 3b0e209..078b582 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Serializer.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -92,8 +92,6 @@
static const char name[];
static const char role[];
static const char flags[];
- static const char maxOpenCount[];
- static const char maxActiveCount[];
};
typedef IOProfile Element;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index 635fe4d..4316307 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -27,14 +27,12 @@
sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
{
- sp<AudioPort> port = 0;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- port = itemAt(i);
- break;
+ for (const auto& port : *this) {
+ if (port->getTagName() == tagName) {
+ return port;
}
}
- return port;
+ return nullptr;
}
status_t AudioRouteVector::dump(int fd, int spaces) const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 737872d..624e688 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -236,7 +236,6 @@
mFormat = lConfig.format;
mId = AudioPort::getNextUniqueId();
mIoHandle = *input;
- mProfile->curOpenCount++;
}
return status;
@@ -247,10 +246,6 @@
{
if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
mClientInterface->closeInput(mIoHandle);
- LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
- __FUNCTION__, mProfile->curOpenCount);
- mProfile->curOpenCount--;
- mIoHandle = AUDIO_IO_HANDLE_NONE;
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index be5a1c1..f96c5bc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -444,7 +444,6 @@
mFormat = lConfig.format;
mId = AudioPort::getNextUniqueId();
mIoHandle = *output;
- mProfile->curOpenCount++;
}
return status;
@@ -459,11 +458,6 @@
mClientInterface->setParameters(mIoHandle, param.toString());
mClientInterface->closeOutput(mIoHandle);
-
- LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
- __FUNCTION__, mProfile->curOpenCount);
- mProfile->curOpenCount--;
- mIoHandle = AUDIO_IO_HANDLE_NONE;
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index fcf9070..53e694b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -74,11 +74,11 @@
SortedVector<audio_format_t> flatenedFormats;
SampleRateVector flatenedRates;
ChannelsVector flatenedChannels;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (mProfiles[profileIndex]->isValid()) {
- audio_format_t formatToExport = mProfiles[profileIndex]->getFormat();
- const SampleRateVector &ratesToExport = mProfiles[profileIndex]->getSampleRates();
- const ChannelsVector &channelsToExport = mProfiles[profileIndex]->getChannels();
+ for (const auto& profile : mProfiles) {
+ if (profile->isValid()) {
+ audio_format_t formatToExport = profile->getFormat();
+ const SampleRateVector &ratesToExport = profile->getSampleRates();
+ const ChannelsVector &channelsToExport = profile->getChannels();
if (flatenedFormats.indexOf(formatToExport) < 0) {
flatenedFormats.add(formatToExport);
@@ -130,14 +130,12 @@
void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
{
- size_t indexToImport;
- for (indexToImport = 0; indexToImport < port->mProfiles.size(); indexToImport++) {
- const sp<AudioProfile> &profileToImport = port->mProfiles[indexToImport];
+ for (const auto& profileToImport : port->mProfiles) {
if (profileToImport->isValid()) {
// Import only valid port, i.e. valid format, non empty rates and channels masks
bool hasSameProfile = false;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (*mProfiles[profileIndex] == *profileToImport) {
+ for (const auto& profile : mProfiles) {
+ if (*profile == *profileToImport) {
// never import a profile twice
hasSameProfile = true;
break;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 98f7a94..7657c4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -233,8 +233,7 @@
return NO_ERROR;
}
- for (size_t i = 0; i < size(); i++) {
- const sp<AudioProfile> profile = itemAt(i);
+ for (const auto& profile : *this) {
if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a2c1165..fdeea29 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -60,7 +60,7 @@
void DeviceVector::refreshTypes()
{
mDeviceTypes = AUDIO_DEVICE_NONE;
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
mDeviceTypes |= itemAt(i)->type();
}
ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
@@ -68,7 +68,7 @@
ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
{
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
if (item->equals(itemAt(i))) {
return i;
}
@@ -78,12 +78,15 @@
void DeviceVector::add(const DeviceVector &devices)
{
- for (size_t i = 0; i < devices.size(); i++) {
- sp<DeviceDescriptor> device = devices.itemAt(i);
+ bool added = false;
+ for (const auto& device : devices) {
if (indexOf(device) < 0 && SortedVector::add(device) >= 0) {
- refreshTypes();
+ added = true;
}
}
+ if (added) {
+ refreshTypes();
+ }
}
ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
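
DeviceVector::add(const DeviceVector&) now recomputes the aggregate type mask once per batch instead of once per inserted device. The same collect-then-recompute shape in a standalone form (hypothetical types, not the policy classes):

    #include <cstdint>
    #include <set>
    #include <vector>

    struct DeviceSet {
        std::set<uint32_t> devices;     // each value doubles as its "type" bit here
        uint32_t typeMask = 0;

        void refreshTypes() {
            typeMask = 0;
            for (uint32_t d : devices) typeMask |= d;
        }

        void addAll(const std::vector<uint32_t>& batch) {
            bool added = false;
            for (uint32_t d : batch) {
                added |= devices.insert(d).second;   // true only for newly inserted elements
            }
            if (added) refreshTypes();               // one recompute per batch
        }
    };
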
@@ -148,14 +151,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getId() == id) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getId() == id) {
+ return device;
}
}
- return device;
+ return nullptr;
}
DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
@@ -180,11 +181,9 @@
audio_devices_t type, const String8& address) const
{
DeviceVector devices;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->type() == type) {
- if (itemAt(i)->mAddress == address) {
- devices.add(itemAt(i));
- }
+ for (const auto& device : *this) {
+ if (device->type() == type && device->mAddress == address) {
+ devices.add(device);
}
}
return devices;
@@ -192,14 +191,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getTagName() == tagName) {
+ return device;
}
}
- return device;
+ return nullptr;
}
status_t DeviceVector::dump(int fd, const String8 &tag, int spaces, bool verbose) const
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index cc56fb8..b4feb4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -154,10 +154,9 @@
DeviceVector HwModule::getRouteSourceDevices(const sp<AudioRoute> &route) const
{
DeviceVector sourceDevices;
- Vector <sp<AudioPort> > sources = route->getSources();
- for (size_t i = 0; i < sources.size(); i++) {
- if (sources[i]->getType() == AUDIO_PORT_TYPE_DEVICE) {
- sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(sources[i]->getTagName()));
+ for (const auto& source : route->getSources()) {
+ if (source->getType() == AUDIO_PORT_TYPE_DEVICE) {
+ sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(source->getTagName()));
}
}
return sourceDevices;
@@ -173,17 +172,15 @@
void HwModule::refreshSupportedDevices()
{
// Now updating the streams (aka IOProfile until now) supported devices
- for (size_t i = 0; i < mInputProfiles.size(); i++) {
- sp<IOProfile> stream = mInputProfiles[i];
+ for (const auto& stream : mInputProfiles) {
DeviceVector sourceDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> sink = routes[j]->getSink();
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> sink = route->getSink();
if (sink == 0 || stream != sink) {
ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
continue;
}
- DeviceVector sourceDevicesForRoute = getRouteSourceDevices(routes[j]);
+ DeviceVector sourceDevicesForRoute = getRouteSourceDevices(route);
if (sourceDevicesForRoute.isEmpty()) {
ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -196,17 +193,15 @@
}
stream->setSupportedDevices(sourceDevices);
}
- for (size_t i = 0; i < mOutputProfiles.size(); i++) {
- sp<IOProfile> stream = mOutputProfiles[i];
+ for (const auto& stream : mOutputProfiles) {
DeviceVector sinkDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> source = routes[j]->getSources().findByTagName(stream->getTagName());
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> source = route->getSources().findByTagName(stream->getTagName());
if (source == 0 || stream != source) {
ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
continue;
}
- sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(routes[j]);
+ sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(route);
if (sinkDevice == 0) {
ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -252,60 +247,40 @@
sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++)
- {
- if (strcmp(itemAt(i)->getName(), name) == 0) {
- return itemAt(i);
+ for (const auto& module : *this) {
+ if (strcmp(module->getName(), name) == 0) {
+ return module;
}
}
- return module;
+ return nullptr;
}
-
sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getHandle() == 0) {
- continue;
- }
- if (audio_is_output_device(device)) {
- for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
- {
- if (itemAt(i)->mOutputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
- }
- } else {
- for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
- if (itemAt(i)->mInputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
+ for (const auto& module : *this) {
+ IOProfileCollection& profiles = audio_is_output_device(device) ?
+ module->mOutputProfiles : module->mInputProfiles;
+ for (const auto& profile : profiles) {
+ if (profile->supportDevice(device)) {
+ return module;
}
}
}
- return module;
+ return nullptr;
}
-sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
- const char *device_address,
- const char *device_name,
- bool matchAdress) const
+sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
+ const char *device_address,
+ const char *device_name,
+ bool matchAdress) const
{
- String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+ String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
// handle legacy remote submix case where the address was not always specified
if (device_distinguishes_on_address(device) && (address.length() == 0)) {
address = String8("0");
}
- for (size_t i = 0; i < size(); i++) {
- const sp<HwModule> hwModule = itemAt(i);
- if (hwModule->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : *this) {
DeviceVector declaredDevices = hwModule->getDeclaredDevices();
DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
if (!deviceList.isEmpty()) {
@@ -340,4 +315,5 @@
return NO_ERROR;
}
+
} //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index fc89672..74ef4ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -122,16 +122,6 @@
result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
-
- result.clear();
- snprintf(buffer, SIZE, "\n - maxOpenCount: %u - curOpenCount: %u\n",
- maxOpenCount, curOpenCount);
- result.append(buffer);
- snprintf(buffer, SIZE, " - maxActiveCount: %u - curActiveCount: %u\n",
- maxActiveCount, curActiveCount);
- result.append(buffer);
-
- write(fd, result.string(), result.size());
}
void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index aa589f4..0908ffc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -217,8 +217,6 @@
const char MixPortTraits::Attributes::name[] = "name";
const char MixPortTraits::Attributes::role[] = "role";
const char MixPortTraits::Attributes::flags[] = "flags";
-const char MixPortTraits::Attributes::maxOpenCount[] = "maxOpenCount";
-const char MixPortTraits::Attributes::maxActiveCount[] = "maxActiveCount";
status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
PtrSerializingCtx /*serializingContext*/)
@@ -261,14 +259,6 @@
mixPort->setFlags(InputFlagConverter::maskFromString(flags));
}
}
- string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
- if (!maxOpenCount.empty()) {
- convertTo(maxOpenCount, mixPort->maxOpenCount);
- }
- string maxActiveCount = getXmlAttribute(child, Attributes::maxActiveCount);
- if (!maxActiveCount.empty()) {
- convertTo(maxActiveCount, mixPort->maxActiveCount);
- }
// Deserialize children
AudioGainTraits::Collection gains;
deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index b363779..7366378 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -88,7 +88,7 @@
const char *device_name)
{
ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
-- device, state, device_address, device_name);
+ device, state, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
@@ -183,14 +183,14 @@
checkOutputForAllStrategies();
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have been
// opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
- closeOutput(outputs[i]);
+ closeOutput(output);
}
}
// check again after closing A2DP output to reset mA2dpSuspended if needed
@@ -499,9 +499,7 @@
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
@@ -683,9 +681,7 @@
}
}
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device can not be reached via current input
if (activeDesc->mProfile->getSupportedDevices().types() &
@@ -721,12 +717,8 @@
sp<IOProfile> profile;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
- sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->mOutputProfiles) {
if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
@@ -827,12 +819,7 @@
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableOutputDevices[i];
- break;
- }
- }
+ deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
}
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
@@ -926,29 +913,37 @@
}
if (profile != 0) {
+ sp<SwAudioOutputDescriptor> outputDesc = NULL;
+
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
+ outputDesc = desc;
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((config->sample_rate == desc->mSamplingRate) &&
- audio_formats_match(config->format, desc->mFormat) &&
- (config->channel_mask == desc->mChannelMask) &&
- (session == desc->mDirectClientSession)) {
- desc->mDirectOpenCount++;
- ALOGV("getOutputForDevice() reusing direct output %d for session %d",
- mOutputs.keyAt(i), session);
- return mOutputs.keyAt(i);
+ if ((config->sample_rate == outputDesc->mSamplingRate) &&
+ audio_formats_match(config->format, outputDesc->mFormat) &&
+ (config->channel_mask == outputDesc->mChannelMask)) {
+ if (session == outputDesc->mDirectClientSession) {
+ outputDesc->mDirectOpenCount++;
+ ALOGV("getOutputForDevice() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
+ return mOutputs.keyAt(i);
+ } else {
+                    ALOGV("getOutputForDevice() not reusing direct output because"
+                            " current client (%d) is not the same as requesting client (%d)",
+ outputDesc->mDirectClientSession, session);
+ goto non_direct_output;
+ }
}
}
}
-
- if (!profile->canOpenNewIo()) {
- goto non_direct_output;
+ // close direct output if currently open and configured with different parameters
+ if (outputDesc != NULL) {
+ closeOutput(outputDesc->mIoHandle);
}
- sp<SwAudioOutputDescriptor> outputDesc =
- new SwAudioOutputDescriptor(profile, mpClientInterface);
+ outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
status = outputDesc->open(config, device, String8(""), stream, flags, &output);
// only accept an output with the requested parameters
@@ -1025,21 +1020,21 @@
// 4: the first output in the list
if (outputs.size() == 0) {
- return 0;
+ return AUDIO_IO_HANDLE_NONE;
}
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
- audio_io_handle_t outputForFlags = 0;
- audio_io_handle_t outputForPrimary = 0;
- audio_io_handle_t outputForFormat = 0;
+ audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
@@ -1052,7 +1047,7 @@
}
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
- outputForFormat = outputs[i];
+ outputForFormat = output;
bestFormat = outputDesc->mFormat;
}
}
@@ -1063,29 +1058,29 @@
if (format != AUDIO_FORMAT_INVALID
&& AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormatForFlags, format)) {
- outputForFlags = outputs[i];
+ outputForFlags = output;
bestFormatForFlags = outputDesc->mFormat;
}
} else {
- outputForFlags = outputs[i];
+ outputForFlags = output;
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
- ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
+ ALOGV("selectOutput() commonFlags for output %d, %04x", output, commonFlags);
}
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
- outputForPrimary = outputs[i];
+ outputForPrimary = output;
}
}
}
- if (outputForFlags != 0) {
+ if (outputForFlags != AUDIO_IO_HANDLE_NONE) {
return outputForFlags;
}
- if (outputForFormat != 0) {
+ if (outputForFormat != AUDIO_IO_HANDLE_NONE) {
return outputForFormat;
}
- if (outputForPrimary != 0) {
+ if (outputForPrimary != AUDIO_IO_HANDLE_NONE) {
return outputForPrimary;
}
@@ -1106,13 +1101,6 @@
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
- if (!outputDesc->isActive()) {
- if (!outputDesc->mProfile->canStartNewIo()) {
- return INVALID_OPERATION;
- }
- outputDesc->mProfile->curActiveCount++;
- }
-
// Routing?
mOutputRoutes.incRouteActivity(session);
@@ -1140,12 +1128,6 @@
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
- if (!outputDesc->isActive()) {
- LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
- "%s invalid profile active count %u",
- __FUNCTION__, outputDesc->mProfile->curActiveCount);
- outputDesc->mProfile->curActiveCount--;
- }
return status;
}
// Automatically enable the remote submix input when output is started on a re routing mix
@@ -1334,15 +1316,7 @@
}
}
- status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
-
- if (status == NO_ERROR && !outputDesc->isActive()) {
- LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
- "%s invalid profile active count %u",
- __FUNCTION__, outputDesc->mProfile->curActiveCount);
- outputDesc->mProfile->curActiveCount--;
- }
- return status;
+ return stopSource(outputDesc, stream, forceDeviceUpdate);
}
status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
@@ -1464,12 +1438,7 @@
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableInputDevices[i];
- break;
- }
- }
+        deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
@@ -1728,10 +1697,6 @@
}
#endif
- if (!profile->canOpenNewIo()) {
- return AUDIO_IO_HANDLE_NONE;
- }
-
sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
@@ -1796,9 +1761,7 @@
return true;
}
- Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeInput = activeInputs[i];
+ for (const auto& activeInput : mInputs.getActiveInputs()) {
if (!isConcurrentSource(activeInput->inputSource(true)) &&
!is_virtual_input_device(activeInput->mDevice)) {
return false;
@@ -1884,9 +1847,7 @@
}
Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-
+ for (const auto& activeDesc : activeInputs) {
if (is_virtual_input_device(activeDesc->mDevice)) {
continue;
}
@@ -1926,9 +1887,7 @@
inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
// if capture is allowed, preempt currently active HOTWORD captures
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-
+ for (const auto& activeDesc : activeInputs) {
if (is_virtual_input_device(activeDesc->mDevice)) {
continue;
}
@@ -1969,13 +1928,6 @@
setInputDevice(input, device, true /* force */);
if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
- if (!inputDesc->mProfile->canStartNewIo()) {
- mInputRoutes.decRouteActivity(session);
- audioSession->changeActiveCount(-1);
- return INVALID_OPERATION;
- }
- inputDesc->mProfile->curActiveCount++;
-
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2045,11 +1997,6 @@
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
- LOG_ALWAYS_FATAL_IF(inputDesc->mProfile->curActiveCount < 1,
- "%s invalid profile active count %u",
- __FUNCTION__, inputDesc->mProfile->curActiveCount);
- inputDesc->mProfile->curActiveCount--;
-
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2135,7 +2082,7 @@
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
- for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+ for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
@@ -2299,21 +2246,21 @@
audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
continue;
}
- ALOGV("selectOutputForMusicEffects activeOnly %d outputs[%zu] flags 0x%08x",
- activeOnly, i, desc->mFlags);
+ ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
+ activeOnly, output, desc->mFlags);
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- outputOffloaded = outputs[i];
+ outputOffloaded = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
- outputDeepBuffer = outputs[i];
+ outputDeepBuffer = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
- outputPrimary = outputs[i];
+ outputPrimary = output;
}
}
if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
@@ -2424,23 +2371,16 @@
break;
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
- // Loop back through "remote submix"
- if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
- }
- }
-
ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
-
if (rSubmixModule == 0) {
- ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
- res = INVALID_OPERATION;
- break;
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
+ i);
+ res = INVALID_OPERATION;
+ break;
+ }
}
String8 address = mixes[i].mDeviceAddress;
@@ -2519,24 +2459,19 @@
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
- for (size_t i = 0; i < mixes.size(); i++) {
- if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
+ for (const auto& mix : mixes) {
+ if ((mix.mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ res = INVALID_OPERATION;
+ continue;
}
}
- if (rSubmixModule == 0) {
- res = INVALID_OPERATION;
- continue;
- }
- String8 address = mixes[i].mDeviceAddress;
+ String8 address = mix.mDeviceAddress;
if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
res = INVALID_OPERATION;
@@ -2558,8 +2493,8 @@
rSubmixModule->removeOutputProfile(address);
rSubmixModule->removeInputProfile(address);
- } if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
- if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
+ } if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ if (mPolicyMixes.unregisterMix(mix.mDeviceAddress) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
}
@@ -2611,7 +2546,7 @@
mAvailableOutputDevices.dump(fd, String8("Available output"));
mAvailableInputDevices.dump(fd, String8("Available input"));
- mHwModules.dump(fd);
+ mHwModulesAll.dump(fd);
mOutputs.dump(fd);
mInputs.dump(fd);
mVolumeCurves->dump(fd);
@@ -2716,23 +2651,23 @@
// do not report devices with type AUDIO_DEVICE_IN_STUB or AUDIO_DEVICE_OUT_STUB
// as they are used by stub HALs by convention
if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->type() == AUDIO_DEVICE_OUT_STUB) {
+ for (const auto& dev : mAvailableOutputDevices) {
+ if (dev->type() == AUDIO_DEVICE_OUT_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
}
if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_STUB) {
+ for (const auto& dev : mAvailableInputDevices) {
+ if (dev->type() == AUDIO_DEVICE_IN_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
@@ -3227,8 +3162,8 @@
}
}
// reroute outputs if necessary
- for (size_t i = 0; i < affectedStrategies.size(); i++) {
- checkStrategyRoute(affectedStrategies[i], AUDIO_IO_HANDLE_NONE);
+ for (const auto& strategy : affectedStrategies) {
+ checkStrategyRoute(strategy, AUDIO_IO_HANDLE_NONE);
}
// remove input routes associated with this uid
@@ -3250,8 +3185,8 @@
inputsToClose.add(inputDesc->mIoHandle);
}
}
- for (size_t i = 0; i < inputsToClose.size(); i++) {
- closeInput(inputsToClose[i]);
+ for (const auto& input : inputsToClose) {
+ closeInput(input);
}
}
@@ -3428,8 +3363,8 @@
offloaded.push(desc->mIoHandle);
}
}
- for (size_t i = 0; i < offloaded.size(); ++i) {
- closeOutput(offloaded[i]);
+ for (const auto& handle : offloaded) {
+ closeOutput(handle);
}
}
// update master mono for all remaining outputs
@@ -3555,13 +3490,13 @@
#ifdef USE_XML_AUDIO_POLICY_CONF
mVolumeCurves = new VolumeCurvesCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+ AudioPolicyConfig config(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+ AudioPolicyConfig config(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled);
if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
(ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
@@ -3593,28 +3528,20 @@
// open all output streams needed to access attached devices
audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->getName());
- if (mHwModules[i]->mHandle == 0) {
- ALOGW("could not open HW module %s", mHwModules[i]->getName());
+ for (const auto& hwModule : mHwModulesAll) {
+ hwModule->mHandle = mpClientInterface->loadHwModule(hwModule->getName());
+ if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
+ ALOGW("could not open HW module %s", hwModule->getName());
continue;
}
+ mHwModules.push_back(hwModule);
// open all output streams needed to access attached devices
// except for direct output streams that are only opened when they are actually
// required by an app.
// This also validates mAvailableOutputDevices list
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
-
- if (!outProfile->canOpenNewIo()) {
- ALOGE("Invalid Output profile max open count %u for profile %s",
- outProfile->maxOpenCount, outProfile->getTagName().c_str());
- continue;
- }
-
+ for (const auto& outProfile : hwModule->mOutputProfiles) {
if (!outProfile->hasSupportedDevices()) {
- ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Output profile contains no device on module %s", hwModule->getName());
continue;
}
if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
@@ -3648,13 +3575,13 @@
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
- mHwModules[i]->getName());
+ hwModule->getName());
} else {
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : supportedDevices) {
+ ssize_t index = mAvailableOutputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
- mAvailableOutputDevices[index]->attach(mHwModules[i]);
+ mAvailableOutputDevices[index]->attach(hwModule);
}
}
if (mPrimaryOutput == 0 &&
@@ -3672,18 +3599,9 @@
}
// open input streams needed to access attached devices to validate
// mAvailableInputDevices list
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
-
- if (!inProfile->canOpenNewIo()) {
- ALOGE("Invalid Input profile max open count %u for profile %s",
- inProfile->maxOpenCount, inProfile->getTagName().c_str());
- continue;
- }
-
+ for (const auto& inProfile : hwModule->mInputProfiles) {
if (!inProfile->hasSupportedDevices()) {
- ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Input profile contains no device on module %s", hwModule->getName());
continue;
}
// chose first device present in profile's SupportedDevices also part of
@@ -3705,14 +3623,13 @@
&input);
if (status == NO_ERROR) {
- const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableInputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : inProfile->getSupportedDevices()) {
+ ssize_t index = mAvailableInputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0) {
sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
if (!devDesc->isAttached()) {
- devDesc->attach(mHwModules[i]);
+ devDesc->attach(hwModule);
devDesc->importAudioPort(inProfile, true);
}
}
@@ -3721,7 +3638,7 @@
} else {
ALOGW("Cannot open input stream for device %08x on hw module %s",
profileType,
- mHwModules[i]->getName());
+ hwModule->getName());
}
}
}
@@ -3771,6 +3688,7 @@
mOutputs.clear();
mInputs.clear();
mHwModules.clear();
+ mHwModulesAll.clear();
}
status_t AudioPolicyManager::initCheck()
@@ -3842,19 +3760,15 @@
}
// then look for output profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->mOutputProfiles.size(); j++) {
+ sp<IOProfile> profile = hwModule->mOutputProfiles[j];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
+ ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
+ j, hwModule->getName());
}
}
}
@@ -3889,12 +3803,6 @@
continue;
}
- if (!profile->canOpenNewIo()) {
- ALOGW("Max Output number %u already opened for this profile %s",
- profile->maxOpenCount, profile->getTagName().c_str());
- continue;
- }
-
ALOGV("opening output for device %08x with params %s profile %p name %s",
device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
@@ -4024,17 +3932,13 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->mOutputProfiles.size(); j++) {
+ sp<IOProfile> profile = hwModule->mOutputProfiles[j];
if (profile->supportDevice(device)) {
ALOGV("checkOutputsForDevice(): "
- "clearing direct output profile %zu on module %zu", j, i);
+ "clearing direct output profile %zu on module %s",
+ j, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4068,23 +3972,18 @@
// then look for input profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t module_idx = 0; module_idx < mHwModules.size(); module_idx++)
- {
- if (mHwModules[module_idx]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_idx]->mInputProfiles.size();
- profile_index++)
- {
- sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
+ profile_index < hwModule->mInputProfiles.size();
+ profile_index++) {
+ sp<IOProfile> profile = hwModule->mInputProfiles[profile_index];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
- profile_index, module_idx);
+ ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
+ profile_index, hwModule->getName());
}
}
}
@@ -4100,7 +3999,6 @@
for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
sp<IOProfile> profile = profiles[profile_index];
-
// nothing to do if one input is already opened for this profile
size_t input_index;
for (input_index = 0; input_index < mInputs.size(); input_index++) {
@@ -4116,12 +4014,6 @@
continue;
}
- if (!profile->canOpenNewIo()) {
- ALOGW("Max Input number %u already opened for this profile %s",
- profile->maxOpenCount, profile->getTagName().c_str());
- continue;
- }
-
desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
status_t status = desc->open(nullptr,
@@ -4178,17 +4070,14 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t module_index = 0; module_index < mHwModules.size(); module_index++) {
- if (mHwModules[module_index]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_index]->mInputProfiles.size();
+ profile_index < hwModule->mInputProfiles.size();
profile_index++) {
- sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
+ sp<IOProfile> profile = hwModule->mInputProfiles[profile_index];
if (profile->supportDevice(device)) {
- ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
- profile_index, module_index);
+ ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
+ profile_index, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4340,14 +4229,14 @@
ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
strategy, srcOutputs[0], dstOutputs[0]);
// mute strategy while moving tracks from one output to another
- for (size_t i = 0; i < srcOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
+ for (audio_io_handle_t srcOut : srcOutputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOut);
if (isStrategyActive(desc, strategy)) {
setStrategyMute(strategy, true, desc);
setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
}
sp<AudioSourceDescriptor> source =
- getSourceForStrategyOnOutput(srcOutputs[i], strategy);
+ getSourceForStrategyOnOutput(srcOut, strategy);
if (source != 0){
connectAudioSource(source);
}
@@ -4541,9 +4430,8 @@
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curDevices =
getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : getOutputsForDevice(curDevices, mOutputs)) {
+ sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
curDevices |= outputDesc->device();
}
@@ -5027,14 +4915,8 @@
// TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
// the best matching profile, not the first one.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& profile : hwModule->mInputProfiles) {
// profile->log();
if (profile->isCompatibleProfile(device, address, samplingRate,
&samplingRate /*updatedSamplingRate*/,
@@ -5514,7 +5396,7 @@
bool supportsAC3 = false;
bool supportsOtherSurround = false;
bool supportsIEC61937 = false;
- for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
+ for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
audio_format_t format = formats[formatIndex];
switch (format) {
case AUDIO_FORMAT_AC3:
@@ -5610,8 +5492,7 @@
} else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
bool supports5dot1 = false;
// Are there any channel masks that can be considered "surround"?
- for (size_t maskIndex = 0; maskIndex < channelMasks.size(); maskIndex++) {
- audio_channel_mask_t channelMask = channelMasks[maskIndex];
+ for (audio_channel_mask_t channelMask : channelMasks) {
if ((channelMask & AUDIO_CHANNEL_OUT_5POINT1) == AUDIO_CHANNEL_OUT_5POINT1) {
supports5dot1 = true;
break;
@@ -5648,10 +5529,8 @@
}
profiles.setFormats(formats);
}
- const FormatVector &supportedFormats = profiles.getSupportedFormats();
- for (size_t formatIndex = 0; formatIndex < supportedFormats.size(); formatIndex++) {
- audio_format_t format = supportedFormats[formatIndex];
+ for (audio_format_t format : profiles.getSupportedFormats()) {
ChannelsVector channelMasks;
SampleRateVector samplingRates;
AudioParameter requestedParameters;
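
The AudioPolicyManager.cpp hunks above consistently replace index-based loops over mHwModules (and their `mHandle == 0` guards) with range-based for loops, and switch the log messages from module indices to module names. A minimal, self-contained sketch of the resulting loop shape, using simplified stand-in types rather than the real HwModule/IOProfile classes:

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    // Simplified stand-ins for the real HwModule/IOProfile classes.
    struct IOProfile {
        std::string tagName;
        bool hasSupportedDevices() const { return !tagName.empty(); }
    };

    struct HwModule {
        std::string name;
        std::vector<std::shared_ptr<IOProfile>> outputProfiles;
        const char* getName() const { return name.c_str(); }
    };

    int main() {
        // The collection being iterated now holds only modules that loaded
        // successfully, so the old "if (mHwModules[i]->mHandle == 0) continue;"
        // guard and the index variables used for logging both disappear.
        std::vector<std::shared_ptr<HwModule>> hwModules;
        hwModules.push_back(std::make_shared<HwModule>(
                HwModule{"primary", {std::make_shared<IOProfile>(IOProfile{"speaker"})}}));

        for (const auto& hwModule : hwModules) {
            for (const auto& outProfile : hwModule->outputProfiles) {
                if (!outProfile->hasSupportedDevices()) {
                    std::printf("Output profile contains no device on module %s\n",
                                hwModule->getName());
                    continue;
                }
                std::printf("opening profile %s on module %s\n",
                            outProfile->tagName.c_str(), hwModule->getName());
            }
        }
        return 0;
    }

Because the iterated collection only contains successfully loaded modules, the per-iteration handle check becomes unnecessary; the header change below documents that split.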
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 2d41bd1..b61bc2d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -266,7 +266,7 @@
{
return mDefaultOutputDevice;
}
-protected:
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
@@ -530,7 +530,9 @@
EffectDescriptorCollection mEffects; // list of registered audio effects
bool mA2dpSuspended; // true if A2DP output is suspended
sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
- HwModuleCollection mHwModules;
+ HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
+ HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
+ // dumps
volatile int32_t mAudioPortGeneration;
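
Per the comments added above, mHwModules is narrowed to modules that loaded successfully, while mHwModulesAll keeps every configured module for use during construction and in dumps. A minimal sketch of that split, assuming a hypothetical loadHwModule() helper standing in for the real client-interface call:

    #include <memory>
    #include <string>
    #include <vector>

    struct HwModule {
        std::string name;
        int handle = 0;          // 0 means "could not be loaded"
    };

    using HwModuleCollection = std::vector<std::shared_ptr<HwModule>>;

    // Hypothetical stand-in for the real loadHwModule() client call.
    int loadHwModule(const std::string& /*name*/) { return 42; }

    int main() {
        HwModuleCollection hwModulesAll;   // every configured module, kept for dumps
        hwModulesAll.push_back(std::make_shared<HwModule>(HwModule{"primary"}));
        hwModulesAll.push_back(std::make_shared<HwModule>(HwModule{"a2dp"}));

        HwModuleCollection hwModules;      // only modules that actually loaded
        for (const auto& module : hwModulesAll) {
            module->handle = loadHwModule(module->name);
            if (module->handle != 0) {
                hwModules.push_back(module);
            }
        }
        // Later iteration over hwModules no longer needs a handle check.
        return 0;
    }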
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 51ae665..ac3202b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -142,7 +142,31 @@
}
}
+// If a close request is pending then close the stream
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+ bool closed = false;
+ if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
+ // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
+ // then only one will get the pointer and do the close.
+ sp<AAudioServiceStreamBase> foundStream = mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
+ if (foundStream.get() != nullptr) {
+ foundStream->close();
+ pid_t pid = foundStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+ }
+ closed = true;
+ }
+ return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+ const sp<AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult) {
+ return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ ALOGD("closeStream(0x%08X)", streamHandle);
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
@@ -150,22 +174,13 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("closeStream(0x%08X)", streamHandle);
- // Remove handle from tracker so that we cannot look up the raw address any more.
- // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
- // then only one will get the pointer and do the close.
- serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
- if (serviceStream.get() != nullptr) {
- serviceStream->close();
- pid_t pid = serviceStream->getOwnerProcessId();
- AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
- return AAUDIO_OK;
- } else {
- ALOGW("closeStream(0x%0x) being handled by another thread", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
-}
+ pid_t pid = serviceStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
+ serviceStream->setCloseNeeded(true);
+ (void) releaseStream(serviceStream);
+ return AAUDIO_OK;
+}
sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
aaudio_handle_t streamHandle) {
@@ -181,7 +196,9 @@
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
- serviceStream = nullptr;
+ serviceStream.clear();
+ } else {
+ serviceStream->incrementServiceReferenceCount();
}
}
return serviceStream;
@@ -198,7 +215,7 @@
aaudio_result_t result = serviceStream->getDescription(parcelable);
// parcelable.dump();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
@@ -208,7 +225,8 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->start();
+ aaudio_result_t result = serviceStream->start();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
@@ -218,7 +236,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->pause();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
@@ -228,7 +246,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->stop();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
@@ -237,48 +255,51 @@
ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->flush();
+ aaudio_result_t result = serviceStream->flush();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
- ALOGE("registerAudioThread(), thread already registered");
- return AAUDIO_ERROR_INVALID_STATE;
- }
-
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
- serviceStream->setRegisteredThread(clientThreadId);
- int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
- if (err != 0){
- ALOGE("registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
- return AAUDIO_ERROR_INTERNAL;
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ result = AAUDIO_ERROR_INVALID_STATE;
} else {
- return AAUDIO_OK;
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(ownerPid, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0) {
+ ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+ clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ result = AAUDIO_ERROR_INTERNAL;
+ }
}
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("unregisterAudioThread(), wrong thread");
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ serviceStream->setRegisteredThread(0);
}
- serviceStream->setRegisteredThread(0);
- return AAUDIO_OK;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -289,7 +310,8 @@
ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->startClient(client, clientHandle);
+ aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
@@ -299,5 +321,6 @@
ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->stopClient(clientHandle);
+ aaudio_result_t result = serviceStream->stopClient(clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
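
The AAudioService.cpp changes above replace an immediate close with a deferred one: convertHandleToServiceStream() now increments a per-stream service reference count, every entry point returns through checkForPendingClose(), and closeStream() only marks the stream as needing close; whichever call drops the count to zero performs the actual close. A self-contained sketch of that pattern with toy types (the real service also removes the stream from mStreamTracker under a lock and unregisters it with AAudioClientTracker):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <memory>

    // Toy stand-in for AAudioServiceStreamBase.
    struct Stream {
        std::atomic<int32_t> callingCount{0};
        std::atomic<bool> closeNeeded{false};
        void close() { std::printf("closing stream\n"); }
    };

    // Called on every return path of a service method that looked the stream up.
    bool releaseStream(const std::shared_ptr<Stream>& stream) {
        if (--stream->callingCount == 0 && stream->closeNeeded.load()) {
            stream->close();               // the last in-flight call does the close
            return true;
        }
        return false;
    }

    int checkForPendingClose(const std::shared_ptr<Stream>& stream, int defaultResult) {
        return releaseStream(stream) ? -1 /* e.g. an invalid-state error */ : defaultResult;
    }

    int startStream(const std::shared_ptr<Stream>& stream) {
        ++stream->callingCount;            // done by the handle lookup in the real service
        int result = 0;                    // pretend the underlying start() succeeded
        return checkForPendingClose(stream, result);
    }

    int closeStream(const std::shared_ptr<Stream>& stream) {
        ++stream->callingCount;            // close itself counts as an in-flight call
        stream->closeNeeded.store(true);
        (void) releaseStream(stream);      // closes now unless another call is still running
        return 0;
    }

    int main() {
        auto stream = std::make_shared<Stream>();
        startStream(stream);
        closeStream(stream);
        return 0;
    }

The point of the pattern is that a stream stays alive while another binder call is still using it, so a concurrent closeStream() cannot tear the object down underneath an in-flight start/stop/flush.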
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..bdd9e0b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -94,9 +94,15 @@
aaudio::aaudio_handle_t streamHandle);
- android::AudioClient mAudioClient;
- aaudio::AAudioStreamTracker mStreamTracker;
+ bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+ aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult);
+
+ android::AudioClient mAudioClient;
+
+ aaudio::AAudioStreamTracker mStreamTracker;
enum constants {
DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 635b45c..53d2860 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -402,3 +402,13 @@
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
}
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return --mCallingCount;
+}
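
The counter added above is a mutex-guarded increment/decrement over a member that the header also declares std::atomic. As a standalone type the same operation might look like the sketch below; whether both the lock and the atomic are strictly needed for a bare counter is left open here rather than asserted about the patch's intent:

    #include <cstdint>
    #include <mutex>

    class ServiceReferenceCount {
    public:
        int32_t increment() {
            std::lock_guard<std::mutex> lock(mLock);
            return ++mCount;
        }
        int32_t decrement() {
            std::lock_guard<std::mutex> lock(mLock);
            return --mCount;
        }
    private:
        std::mutex mLock;
        int32_t mCount = 0;
    };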
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 29987f6..5f5bb98 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -199,6 +199,26 @@
return mFlowing;
}
+ /**
+ * Atomically increment the number of active references to the stream by AAudioService.
+ * @return value after the increment
+ */
+ int32_t incrementServiceReferenceCount();
+
+ /**
+ * Atomically decrement the number of active references to the stream by AAudioService.
+ * @return value after the decrement
+ */
+ int32_t decrementServiceReferenceCount();
+
+ bool isCloseNeeded() const {
+ return mCloseNeeded.load();
+ }
+
+ void setCloseNeeded(bool needed) {
+ mCloseNeeded.store(needed);
+ }
+
protected:
/**
@@ -256,8 +276,11 @@
private:
aaudio_handle_t mHandle = -1;
-
bool mFlowing = false;
+
+ std::mutex mCallingCountLock;
+ std::atomic<int32_t> mCallingCount{0};
+ std::atomic<bool> mCloseNeeded{false};
};
} /* namespace aaudio */
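
The header comments above spell out the contract: each service call that looks up the stream increments the reference count, and the call that decrements it to zero while isCloseNeeded() is set performs the close. A hypothetical call sequence against a toy stream type, illustrating why closeStream() defers the close when another call is still in flight:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Toy stream exposing the same members as the header above.
    struct ToyStream {
        std::atomic<int32_t> count{0};
        std::atomic<bool> closeNeeded{false};
        int32_t incrementServiceReferenceCount() { return ++count; }
        int32_t decrementServiceReferenceCount() { return --count; }
        bool isCloseNeeded() const { return closeNeeded.load(); }
        void setCloseNeeded(bool needed) { closeNeeded.store(needed); }
    };

    int main() {
        ToyStream s;
        s.incrementServiceReferenceCount();            // thread A: startStream() looked it up
        s.incrementServiceReferenceCount();            // thread B: closeStream() looked it up
        s.setCloseNeeded(true);                        // thread B: mark for close

        int32_t afterB = s.decrementServiceReferenceCount();   // thread B returns first
        assert(!(afterB == 0 && s.isCloseNeeded()));            // A still busy: no close yet

        int32_t afterA = s.decrementServiceReferenceCount();   // thread A returns
        assert(afterA == 0 && s.isCloseNeeded());               // last caller performs the close
        return 0;
    }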