Merge "Add NBAIO support for more sample rates"
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 49e1afc..2218fad 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -87,9 +87,12 @@
static float linearToLog(int volume);
static int logToLinear(float volume);
- static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
- static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
- static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputSamplingRate(int* samplingRate,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputFrameCount(int* frameCount,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getOutputLatency(uint32_t* latency,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
static status_t getSamplingRate(audio_io_handle_t output,
audio_stream_type_t streamType,
int* samplingRate);
@@ -126,7 +129,8 @@
// - BAD_VALUE: invalid parameter
// NOTE: this feature is not supported on all hardware platforms and it is
// necessary to check returned status before using the returned values.
- static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
// return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
static unsigned int getInputFramesLost(audio_io_handle_t ioHandle);
@@ -147,8 +151,8 @@
NUM_CONFIG_EVENTS
};
- // audio output descriptor used to cache output configurations in client process to avoid frequent calls
- // through IAudioFlinger
+ // audio output descriptor used to cache output configurations in client process to avoid
+ // frequent calls through IAudioFlinger
class OutputDescriptor {
public:
OutputDescriptor()
@@ -162,8 +166,8 @@
};
// Events used to synchronize actions between audio sessions.
- // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until playback
- // is complete on another audio session.
+ // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
+ // playback is complete on another audio session.
// See definitions in MediaSyncEvent.java
enum sync_event_t {
SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event
@@ -183,8 +187,10 @@
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
- static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address);
- static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address);
+ static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
+ const char *device_address);
+ static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+ const char *device_address);
static status_t setPhoneState(audio_mode_t state);
static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
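As the NOTE above says, getRenderPosition() is not supported on all hardware platforms, so the returned status must be checked before the frame counters are used. A minimal caller sketch (illustrative only, not part of this change), assuming nothing beyond the public AudioSystem API declared above:

    #include <media/AudioSystem.h>
    #include <utils/Errors.h>

    using android::AudioSystem;
    using android::status_t;
    using android::NO_ERROR;

    // Returns true only if both counters are valid for the music stream.
    static bool queryRenderPosition(uint32_t *halFrames, uint32_t *dspFrames) {
        status_t status = AudioSystem::getRenderPosition(halFrames, dspFrames,
                AUDIO_STREAM_MUSIC);
        // PERMISSION_DENIED, INVALID_OPERATION or BAD_VALUE all mean the
        // returned values must not be used.
        return status == NO_ERROR;
    }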
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 34108b3..3d45503 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -49,13 +49,17 @@
};
/* Events used by AudioTrack callback function (audio_track_cblk_t).
+ * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
*/
enum event_type {
EVENT_MORE_DATA = 0, // Request to write more data to PCM buffer.
- EVENT_UNDERRUN = 1, // PCM buffer underrun occured.
- EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from loop start if loop count was not 0.
- EVENT_MARKER = 3, // Playback head is at the specified marker position (See setMarkerPosition()).
- EVENT_NEW_POS = 4, // Playback head is at a new position (See setPositionUpdatePeriod()).
+ EVENT_UNDERRUN = 1, // PCM buffer underrun occurred.
+ EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from
+ // loop start if loop count was not 0.
+ EVENT_MARKER = 3, // Playback head is at the specified marker position
+ // (See setMarkerPosition()).
+ EVENT_NEW_POS = 4, // Playback head is at a new position
+ // (See setPositionUpdatePeriod()).
EVENT_BUFFER_END = 5 // Playback head is at the end of the buffer.
};
@@ -70,7 +74,7 @@
MUTE = 0x00000001
};
uint32_t flags; // 0 or MUTE
- audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT
+ audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT
// accessed directly by WebKit ANP callback
int channelCount; // will be removed in the future, do not use
@@ -123,7 +127,7 @@
*/
AudioTrack();
- /* Creates an audio track and registers it with AudioFlinger.
+ /* Creates an AudioTrack object and registers it with AudioFlinger.
* Once created, the track needs to be started before it can be used.
* Unspecified values are set to the audio hardware's current
* values.
@@ -137,12 +141,13 @@
* 16 bits per sample).
* channelMask: Channel mask.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the
* latency of the track. The actual size selected by the AudioTrack could be
* larger if the requested size is not compatible with current audio HAL
* latency. Zero means to use a default value.
* flags: See comments on audio_output_flags_t in <system/audio.h>.
* cbf: Callback function. If not null, this function is called periodically
- * to request new PCM data.
+ * to provide new PCM data.
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames have been consumed from track input buffer.
@@ -206,7 +211,7 @@
* - INVALID_OPERATION: AudioTrack is already initialized
* - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
* - NO_INIT: audio server or audio hardware not initialized
- * */
+ */
status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
uint32_t sampleRate = 0,
audio_format_t format = AUDIO_FORMAT_DEFAULT,
@@ -290,7 +295,7 @@
status_t setAuxEffectSendLevel(float level);
void getAuxEffectSendLevel(float* level) const;
- /* Set sample rate for this track, mostly used for games' sound effects
+ /* Set sample rate for this track in Hz, mostly used for games' sound effects
*/
status_t setSampleRate(int sampleRate);
uint32_t getSampleRate() const;
@@ -312,7 +317,8 @@
/* Sets marker position. When playback reaches the number of frames specified, a callback with
* event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
* notification callback.
- * If the AudioTrack has been opened with no callback function associated, the operation will fail.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
*
* Parameters:
*
@@ -330,7 +336,8 @@
* a callback with event type EVENT_NEW_POS is called.
* Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
* callback.
- * If the AudioTrack has been opened with no callback function associated, the operation will fail.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
*
* Parameters:
*
@@ -359,7 +366,8 @@
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation
* - INVALID_OPERATION: the AudioTrack is not stopped.
- * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer
+ * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack
+ * buffer
*/
status_t setPosition(uint32_t position);
status_t getPosition(uint32_t *position);
@@ -413,7 +421,7 @@
* If the track is stopped, obtainBuffer() returns
* STOPPED instead of NO_ERROR as long as there are buffers available,
* at which point NO_MORE_BUFFERS is returned.
- * Buffers will be returned until the pool (buffercount())
+ * Buffers will be returned until the pool
* is exhausted, at which point obtainBuffer() will either block
* or return WOULD_BLOCK depending on the value of the "blocking"
* parameter.
@@ -517,16 +525,21 @@
bool mActive; // protected by mLock
callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
- uint32_t mNotificationFramesReq; // requested number of frames between each notification callback
- uint32_t mNotificationFramesAct; // actual number of frames between each notification callback
+ void* mUserData; // for client callback handler
+
+ // for notification APIs
+ uint32_t mNotificationFramesReq; // requested number of frames between each
+ // notification callback
+ uint32_t mNotificationFramesAct; // actual number of frames between each
+ // notification callback
sp<IMemory> mSharedBuffer;
int mLoopCount;
uint32_t mRemainingFrames;
- uint32_t mMarkerPosition;
+ uint32_t mMarkerPosition; // in frames
bool mMarkerReached;
- uint32_t mNewPosition;
- uint32_t mUpdatePeriod;
+ uint32_t mNewPosition; // in frames
+ uint32_t mUpdatePeriod; // in frames
+
bool mFlushed; // FIXME will be made obsolete by making flush() synchronous
audio_output_flags_t mFlags;
int mSessionId;
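The event_type values and the set() comments above describe the callback contract; here is a minimal client sketch (illustrative only, not part of this change), assuming the classic callback_t signature void(int event, void* user, void* info) and that EVENT_MORE_DATA passes an AudioTrack::Buffer through info:

    #include <string.h>
    #include <media/AudioTrack.h>

    using namespace android;

    static void audioCallback(int event, void* /*user*/, void* info) {
        switch (event) {
        case AudioTrack::EVENT_MORE_DATA: {
            // Fill buffer->raw with PCM; buffer->size is the number of bytes
            // provided. Zeros here just produce silence, for illustration.
            AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
            memset(buffer->raw, 0, buffer->size);
            break;
        }
        case AudioTrack::EVENT_UNDERRUN:
            // PCM buffer underrun occurred; a real client would log or recover.
            break;
        default:
            break;
        }
    }

    // 44.1 kHz 16-bit stereo streaming track pulling data via the callback;
    // frameCount 0 lets AudioTrack pick a default buffer size.
    static sp<AudioTrack> createSilentTrack() {
        sp<AudioTrack> track = new AudioTrack(
                AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
                audioCallback, NULL /*user*/);
        if (track->initCheck() == NO_ERROR) {
            track->start();
        }
        return track;
    }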
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index 65c26f4..b1ed7b0 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -74,7 +74,8 @@
// -ENOENT no more effect available
// -ENODEV factory failed to initialize
// -EINVAL invalid pDescriptor
-// -ENOSYS effect list has changed since last execution of EffectQueryNumberEffects()
+// -ENOSYS effect list has changed since last execution of
+// EffectQueryNumberEffects()
// *pDescriptor: updated with the effect descriptor.
//
////////////////////////////////////////////////////////////////////////////////
@@ -91,12 +92,12 @@
//
// Input:
// pEffectUuid: pointer to the effect uuid.
-// sessionId: audio session to which this effect instance will be attached. All effects created
-// with the same session ID are connected in series and process the same signal stream.
-// Knowing that two effects are part of the same effect chain can help the library implement
-// some kind of optimizations.
-// ioId: identifies the output or input stream this effect is directed to at audio HAL. For future
-// use especially with tunneled HW accelerated effects
+// sessionId: audio session to which this effect instance will be attached. All effects
+// created with the same session ID are connected in series and process the same signal
+// stream. Knowing that two effects are part of the same effect chain can help the
+// library implement some kind of optimizations.
+// ioId: identifies the output or input stream this effect is directed to at audio HAL.
+// For future use especially with tunneled HW accelerated effects
//
// Input/Output:
// pHandle: address where to return the effect handle.
@@ -109,7 +110,8 @@
// *pHandle: updated with the effect handle.
//
////////////////////////////////////////////////////////////////////////////////
-int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle);
+int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ effect_handle_t *pHandle);
////////////////////////////////////////////////////////////////////////////////
//
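A minimal sketch (illustrative only, not part of this change) of creating an effect instance bound to an audio session with the rewrapped EffectCreate() prototype; the negative-errno status codes mentioned in the factory comments above are assumed:

    #include <media/EffectsFactoryApi.h>

    // Create one effect instance on the given session. Effects created with the
    // same sessionId are chained in series by the framework.
    static int createEffectForSession(const effect_uuid_t *uuid, int32_t sessionId,
            int32_t ioId, effect_handle_t *handle) {
        int status = EffectCreate(uuid, sessionId, ioId, handle);
        if (status != 0) {
            // e.g. -ENOENT (unknown uuid), -EINVAL, -ENODEV (factory init failed)
            return status;
        }
        // The handle can now be configured through (**handle)->command(...),
        // and must eventually be released back to the factory.
        return 0;
    }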
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5170a87..359780e 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -123,7 +123,8 @@
virtual status_t setParameters(audio_io_handle_t ioHandle,
const String8& keyValuePairs) = 0;
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0;
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
+ const = 0;
// register a current process for audio output change notifications
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index cc2e069..f5b0604 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -44,9 +44,10 @@
audio_policy_dev_state_t state,
const char *device_address) = 0;
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
- const char *device_address) = 0;
+ const char *device_address) = 0;
virtual status_t setPhoneState(audio_mode_t state) = 0;
- virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
+ virtual status_t setForceUse(audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config) = 0;
virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
uint32_t samplingRate = 0,
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 002b045..7bf3069 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -65,8 +65,10 @@
sp<IMemory> getIMemory() { return mData; }
// hack
- void init(int numChannels, int sampleRate, audio_format_t format, size_t size, sp<IMemory> data ) {
- mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size; mData = data; }
+ void init(int numChannels, int sampleRate, audio_format_t format, size_t size,
+ sp<IMemory> data ) {
+ mNumChannels = numChannels; mSampleRate = sampleRate; mFormat = format; mSize = size;
+ mData = data; }
private:
void init();
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index d27f463..cba8a6b 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -242,7 +242,10 @@
status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
- status_t configureBitrate(int32_t bitrate);
+
+ status_t configureBitrate(
+ int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode);
+
status_t setupErrorCorrectionParameters();
status_t initNativeWindow();
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5b133f3..fe42afa 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -27,7 +27,8 @@
// ----------------------------------------------------------------------------
// Maximum cumulated timeout milliseconds before restarting audioflinger thread
-#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP init time
+#define MAX_STARTUP_TIMEOUT_MS 3000 // Longer timeout period at startup to cope with A2DP
+ // init time
#define MAX_RUN_TIMEOUT_MS 1000
#define WAIT_PERIOD_MS 10
#define RESTORE_TIMEOUT_MS 5000 // Maximum waiting time for a track to be restored
@@ -100,7 +101,8 @@
uint8_t mName; // normal tracks: track name, fast tracks: track index
// used by client only
- uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
+ uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting
+ // audioflinger
uint16_t waitTimeMs; // Cumulated wait time, used by client only
private:
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 680604b..3317d57 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -152,7 +152,8 @@
mCblk->buffer = (uint8_t *)mCblk + bufOffset;
iEffect->asBinder()->linkToDeath(mIEffectClient);
- ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, mStatus, mEnabled);
+ ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId,
+ mStatus, mEnabled);
return mStatus;
}
@@ -266,9 +267,11 @@
uint32_t size = sizeof(int);
uint32_t psize = ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize;
- ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1);
+ ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data,
+ (param->psize == 8) ? *((int *)param->data + 1): -1);
- return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size, &param->status);
+ return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size,
+ &param->status);
}
status_t AudioEffect::setParameterDeferred(effect_param_t *param)
@@ -321,11 +324,14 @@
return BAD_VALUE;
}
- ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data, (param->psize == 8) ? *((int *)param->data + 1): -1);
+ ALOGV("getParameter: param: %d, param2: %d", *(int *)param->data,
+ (param->psize == 8) ? *((int *)param->data + 1): -1);
- uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize;
+ uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
+ param->vsize;
- return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param, &psize, param);
+ return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param,
+ &psize, param);
}
@@ -346,7 +352,8 @@
void AudioEffect::controlStatusChanged(bool controlGranted)
{
- ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf, mUserData);
+ ALOGV("controlStatusChanged %p control %d callback %p mUserData %p", this, controlGranted, mCbf,
+ mUserData);
if (controlGranted) {
if (mStatus == ALREADY_EXISTS) {
mStatus = NO_ERROR;
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 8ea6306..bdbee0d 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -127,7 +127,8 @@
int sessionId)
{
- ALOGV("set(): sampleRate %d, channelMask %#x, frameCount %d",sampleRate, channelMask, frameCount);
+ ALOGV("set(): sampleRate %d, channelMask %#x, frameCount %d",sampleRate, channelMask,
+ frameCount);
AutoMutex lock(mLock);
@@ -701,7 +702,8 @@
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
- ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up.");
+ ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
+ "Error obtaining an audio buffer, giving up.");
return false;
}
break;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 0e5c149..767c452 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -246,7 +246,8 @@
gLock.unlock();
}
- ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);
+ ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output,
+ *samplingRate);
return NO_ERROR;
}
@@ -290,7 +291,8 @@
gLock.unlock();
}
- ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount);
+ ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output,
+ *frameCount);
return NO_ERROR;
}
@@ -369,7 +371,8 @@
return af->setVoiceVolume(value);
}
-status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream)
+status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ audio_stream_type_t stream)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
@@ -449,8 +452,10 @@
OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
gOutputs.add(ioHandle, outputDesc);
- ALOGV("ioConfigChanged() new output samplingRate %d, format %d channels %#x frameCount %d latency %d",
- outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency);
+ ALOGV("ioConfigChanged() new output samplingRate %d, format %d channels %#x frameCount %d "
+ "latency %d",
+ outputDesc->samplingRate, outputDesc->format, outputDesc->channels,
+ outputDesc->frameCount, outputDesc->latency);
} break;
case OUTPUT_CLOSED: {
if (gOutputs.indexOfKey(ioHandle) < 0) {
@@ -471,7 +476,8 @@
if (param2 == NULL) break;
desc = (const OutputDescriptor *)param2;
- ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %#x frameCount %d latency %d",
+ ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %#x "
+ "frameCount %d latency %d",
ioHandle, desc->samplingRate, desc->format,
desc->channels, desc->frameCount, desc->latency);
OutputDescriptor *outputDesc = gOutputs.valueAt(index);
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 362d022..5fc9b07 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -198,7 +198,8 @@
int sessionId)
{
- ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
+ ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+ sharedBuffer->size());
ALOGV("set() streamType %d frameCount %d flags %04x", streamType, frameCount, flags);
@@ -617,12 +618,14 @@
if (loopStart >= loopEnd ||
loopEnd - loopStart > cblk->frameCount ||
cblk->server > loopStart) {
- ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, user %d", loopStart, loopEnd, loopCount, cblk->frameCount, cblk->user);
+ ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, "
+ "user %d", loopStart, loopEnd, loopCount, cblk->frameCount, cblk->user);
return BAD_VALUE;
}
if ((mSharedBuffer != 0) && (loopEnd > cblk->frameCount)) {
- ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, framecount %d",
+ ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, "
+ "framecount %d",
loopStart, loopEnd, cblk->frameCount);
return BAD_VALUE;
}
@@ -924,7 +927,8 @@
mCblk->stepUser(mCblk->frameCount);
}
- mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000));
+ mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
+ uint16_t(mVolume[LEFT] * 0x1000));
mCblk->setSendLevel(mSendLevel);
mAudioTrack->attachAuxEffect(mAuxEffectId);
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
@@ -994,8 +998,8 @@
// timing out when a loop has been set and we have already written upto loop end
// is a normal condition: no need to wake AudioFlinger up.
if (cblk->user < cblk->loopEnd) {
- ALOGW( "obtainBuffer timed out (is the CPU pegged?) %p name=%#x"
- "user=%08x, server=%08x", this, cblk->mName, cblk->user, cblk->server);
+ ALOGW("obtainBuffer timed out (is the CPU pegged?) %p name=%#x user=%08x, "
+ "server=%08x", this, cblk->mName, cblk->user, cblk->server);
//unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
cblk->lock.unlock();
result = mAudioTrack->start();
@@ -1265,7 +1269,8 @@
status_t err = obtainBuffer(&audioBuffer, waitCount);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
- ALOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up.");
+ ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
+ "Error obtaining an audio buffer, giving up.");
return false;
}
break;
@@ -1439,11 +1444,14 @@
String8 result;
result.append(" AudioTrack::dump\n");
- snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, mVolume[0], mVolume[1]);
+ snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
+ mVolume[0], mVolume[1]);
result.append(buffer);
- snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, mChannelCount, mCblk->frameCount);
+ snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat,
+ mChannelCount, mCblk->frameCount);
result.append(buffer);
- snprintf(buffer, 255, " sample rate(%d), status(%d), muted(%d)\n", (mCblk == 0) ? 0 : mCblk->sampleRate, mStatus, mMuted);
+ snprintf(buffer, 255, " sample rate(%d), status(%d), muted(%d)\n",
+ (mCblk == 0) ? 0 : mCblk->sampleRate, mStatus, mMuted);
result.append(buffer);
snprintf(buffer, 255, " active(%d), latency (%d)\n", mActive, mLatency);
result.append(buffer);
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index ce8ffc4..f412591 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -865,7 +865,8 @@
case REGISTER_CLIENT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(data.readStrongBinder());
+ sp<IAudioFlingerClient> client = interface_cast<IAudioFlingerClient>(
+ data.readStrongBinder());
registerClient(client);
return NO_ERROR;
} break;
@@ -1043,7 +1044,8 @@
int id;
int enabled;
- sp<IEffect> effect = createEffect(pid, &desc, client, priority, output, sessionId, &status, &id, &enabled);
+ sp<IEffect> effect = createEffect(pid, &desc, client, priority, output, sessionId,
+ &status, &id, &enabled);
reply->writeInt32(status);
reply->writeInt32(id);
reply->writeInt32(enabled);
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 4178b29..2d1e0f8 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -50,7 +50,8 @@
ALOGV("ioConfigChanged stream %d", stream);
data.writeInt32(stream);
} else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
- const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2;
+ const AudioSystem::OutputDescriptor *desc =
+ (const AudioSystem::OutputDescriptor *)param2;
data.writeInt32(desc->samplingRate);
data.writeInt32(desc->format);
data.writeInt32(desc->channels);
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 401437c..769deae 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -399,13 +399,15 @@
case SET_PHONE_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- reply->writeInt32(static_cast <uint32_t>(setPhoneState((audio_mode_t) data.readInt32())));
+ reply->writeInt32(static_cast <uint32_t>(setPhoneState(
+ (audio_mode_t) data.readInt32())));
return NO_ERROR;
} break;
case SET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(
+ data.readInt32());
audio_policy_forced_cfg_t config =
static_cast <audio_policy_forced_cfg_t>(data.readInt32());
reply->writeInt32(static_cast <uint32_t>(setForceUse(usage, config)));
@@ -414,7 +416,8 @@
case GET_FORCE_USE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(data.readInt32());
+ audio_policy_force_use_t usage = static_cast <audio_policy_force_use_t>(
+ data.readInt32());
reply->writeInt32(static_cast <uint32_t>(getForceUse(usage)));
return NO_ERROR;
} break;
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 8196e10..5b4071b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -88,7 +88,8 @@
return status;
}
-status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate)
+status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
+ uint32_t rate)
{
if (rate > CAPTURE_RATE_MAX) {
return BAD_VALUE;
@@ -334,7 +335,8 @@
//-------------------------------------------------------------------------
-Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava)
+Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate,
+ bool bCanCallJava)
: Thread(bCanCallJava), mReceiver(receiver)
{
mSleepTimeUs = 1000000000 / captureRate;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 2b4220f..0ca027b 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1636,6 +1636,15 @@
return ret;
}
+static OMX_VIDEO_CONTROLRATETYPE getBitrateMode(const sp<AMessage> &msg) {
+ int32_t tmp;
+ if (!msg->findInt32("bitrate-mode", &tmp)) {
+ return OMX_Video_ControlRateVariable;
+ }
+
+ return static_cast<OMX_VIDEO_CONTROLRATETYPE>(tmp);
+}
+
status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) {
int32_t bitrate, iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
@@ -1643,6 +1652,8 @@
return INVALID_OPERATION;
}
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
int32_t tmp;
@@ -1706,7 +1717,7 @@
return err;
}
- err = configureBitrate(bitrate);
+ err = configureBitrate(bitrate, bitrateMode);
if (err != OK) {
return err;
@@ -1722,6 +1733,8 @@
return INVALID_OPERATION;
}
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
int32_t tmp;
@@ -1780,7 +1793,7 @@
return err;
}
- err = configureBitrate(bitrate);
+ err = configureBitrate(bitrate, bitrateMode);
if (err != OK) {
return err;
@@ -1796,6 +1809,8 @@
return INVALID_OPERATION;
}
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
int32_t tmp;
@@ -1881,7 +1896,7 @@
return err;
}
- return configureBitrate(bitrate);
+ return configureBitrate(bitrate, bitrateMode);
}
status_t ACodec::verifySupportForProfileAndLevel(
@@ -1910,7 +1925,8 @@
}
}
-status_t ACodec::configureBitrate(int32_t bitrate) {
+status_t ACodec::configureBitrate(
+ int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode) {
OMX_VIDEO_PARAM_BITRATETYPE bitrateType;
InitOMXParams(&bitrateType);
bitrateType.nPortIndex = kPortIndexOutput;
@@ -1923,7 +1939,7 @@
return err;
}
- bitrateType.eControlRate = OMX_Video_ControlRateVariable;
+ bitrateType.eControlRate = bitrateMode;
bitrateType.nTargetBitrate = bitrate;
return mOMX->setParameter(
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
index 0edafb9..dbe02b8 100644
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ b/media/libstagefright/include/FragmentedMP4Parser.h
@@ -263,7 +263,7 @@
void copyBuffer(
sp<ABuffer> *dst,
- size_t offset, uint64_t size, size_t extra = 0) const;
+ size_t offset, uint64_t size) const;
DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser);
};
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
index 7fe4e63..54c3d63 100644
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
@@ -1971,8 +1971,8 @@
}
void FragmentedMP4Parser::copyBuffer(
- sp<ABuffer> *dst, size_t offset, uint64_t size, size_t extra) const {
- sp<ABuffer> buf = new ABuffer(size + extra);
+ sp<ABuffer> *dst, size_t offset, uint64_t size) const {
+ sp<ABuffer> buf = new ABuffer(size);
memcpy(buf->data(), mBuffer->data() + offset, size);
*dst = buf;
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 93ae9a3..01a394f 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -33,6 +33,8 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
+#include <OMX_Video.h>
+
namespace android {
Converter::Converter(
@@ -152,6 +154,7 @@
mOutputFormat->setInt32("bitrate", audioBitrate);
} else {
mOutputFormat->setInt32("bitrate", videoBitrate);
+ mOutputFormat->setInt32("bitrate-mode", OMX_Video_ControlRateConstant);
mOutputFormat->setInt32("frame-rate", 30);
mOutputFormat->setInt32("i-frame-interval", 1); // Iframes every 1 secs
mOutputFormat->setInt32("prepend-sps-pps-to-idr-frames", 1);
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 58c4be4..4416b52 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -70,6 +70,10 @@
# 47.5 seconds at 44.1 kHz, 8 megabytes
# LOCAL_CFLAGS += -DTEE_SINK_FRAMES=0x200000
+# uncomment for dumpsys to write most recent audio input to .wav file
+# 47.5 seconds at 44.1 kHz, 8 megabytes
+# LOCAL_CFLAGS += -DTEE_SINK_INPUT_FRAMES=0x200000
+
# uncomment to enable the audio watchdog
# LOCAL_SRC_FILES += AudioWatchdog.cpp
# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
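The figures quoted in the tee-sink comments follow directly from the frame count; a sketch of the arithmetic, assuming 16-bit stereo input (4 bytes per frame):

    // 0x200000 frames = 2,097,152 frames
    // 2,097,152 frames / 44,100 frames per second ~= 47.55 seconds
    // 2,097,152 frames * 4 bytes per frame = 8,388,608 bytes = 8 MiB
    static const unsigned kTeeSinkInputFrames = 0x200000;
    static const double   kTeeSeconds = kTeeSinkInputFrames / 44100.0;
    static const unsigned kTeeBytes   = kTeeSinkInputFrames * 4;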
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 096a0f0..35bd431 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -417,6 +417,12 @@
audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
dev->dump(dev, fd);
}
+
+ // dump the serially shared record tee sink
+ if (mRecordTeeSource != 0) {
+ dumpTee(fd, mRecordTeeSource);
+ }
+
if (locked) mLock.unlock();
}
return NO_ERROR;
@@ -1112,7 +1118,8 @@
// removeClient_l() must be called with AudioFlinger::mLock held
void AudioFlinger::removeClient_l(pid_t pid)
{
- ALOGV("removeClient_l() pid %d, tid %d, calling tid %d", pid, gettid(), IPCThreadState::self()->getCallingPid());
+ ALOGV("removeClient_l() pid %d, tid %d, calling tid %d", pid, gettid(),
+ IPCThreadState::self()->getCallingPid());
mClients.removeItem(pid);
}
@@ -1215,7 +1222,8 @@
{
IoConfigEvent *ioEvent = new IoConfigEvent(event, param);
mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent));
- ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param);
+ ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event,
+ param);
mWaitWorkCV.signal();
}
@@ -1244,7 +1252,8 @@
PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio());
if (err != 0) {
- ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
+ ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
+ "error %d",
prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
}
} break;
@@ -1661,7 +1670,8 @@
snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
result.append(buffer);
- snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
+ snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
+ ns2ms(systemTime() - mLastWriteTime));
result.append(buffer);
snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
result.append(buffer);
@@ -1791,7 +1801,7 @@
if (mType == DIRECT) {
if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
- ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
+ ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x "
"for output %p with format %d",
sampleRate, format, channelMask, mOutput, mFormat);
lStatus = BAD_VALUE;
@@ -1959,7 +1969,8 @@
if (track->mainBuffer() != mMixBuffer) {
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
- ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(), track->sessionId());
+ ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
+ track->sessionId());
chain->incActiveTrackCnt();
}
}
@@ -2025,7 +2036,8 @@
AudioSystem::OutputDescriptor desc;
void *param2 = NULL;
- ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, param);
+ ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event,
+ param);
switch (event) {
case AudioSystem::OUTPUT_OPENED:
@@ -2033,7 +2045,8 @@
desc.channels = mChannelMask;
desc.samplingRate = mSampleRate;
desc.format = mFormat;
- desc.frameCount = mNormalFrameCount; // FIXME see AudioFlinger::frameCount(audio_io_handle_t)
+ desc.frameCount = mNormalFrameCount; // FIXME see
+ // AudioFlinger::frameCount(audio_io_handle_t)
desc.latency = latency();
param2 = &desc;
break;
@@ -2062,7 +2075,8 @@
// Calculate size of normal mix buffer relative to the HAL output buffer size
double multiplier = 1.0;
- if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) {
+ if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
+ kUseFastMixer == FastMixer_Dynamic)) {
size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000;
// round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
@@ -2081,9 +2095,10 @@
multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
}
} else {
- // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL SRC
- // (it would be unusual for the normal mix buffer size to not be a multiple of fast
- // track, but we sometimes have to do this to satisfy the maximum frame count constraint)
+ // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
+ // SRC (it would be unusual for the normal mix buffer size to not be a multiple of fast
+ // track, but we sometimes have to do this to satisfy the maximum frame count
+ // constraint)
// FIXME this rounding up should not be done if no HAL SRC
uint32_t truncMult = (uint32_t) multiplier;
if ((truncMult & 1)) {
@@ -2097,7 +2112,8 @@
mNormalFrameCount = multiplier * mFrameCount;
// round up to nearest 16 frames to satisfy AudioMixer
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
- ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, mNormalFrameCount);
+ ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
+ mNormalFrameCount);
delete[] mMixBuffer;
mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
@@ -2235,7 +2251,8 @@
return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
}
-void AudioFlinger::PlaybackThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
+void AudioFlinger::PlaybackThread::threadLoop_removeTracks(
+ const Vector< sp<Track> >& tracksToRemove)
{
size_t count = tracksToRemove.size();
if (CC_UNLIKELY(count)) {
@@ -2891,7 +2908,8 @@
} else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
memset (mMixBuffer, 0, mixBufferSize);
sleepTime = 0;
- ALOGV_IF((mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED)), "anticipated start");
+ ALOGV_IF((mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED)),
+ "anticipated start");
}
// TODO add standby time extension fct of effect tail
}
@@ -3125,7 +3143,8 @@
if ((track->framesReady() >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
- ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this);
+ ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
+ this);
mixedTracks++;
@@ -3138,7 +3157,8 @@
if (chain != 0) {
tracksWithEffect++;
} else {
- ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on session %d",
+ ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on "
+ "session %d",
name, track->sessionId());
}
}
@@ -3268,7 +3288,8 @@
chain->clearInputBuffer();
}
- ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user, cblk->server, this);
+ ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user,
+ cblk->server, this);
if ((track->sharedBuffer() != 0) || track->isTerminated() ||
track->isStopped() || track->isPaused()) {
// We have consumed all the buffers of this track.
@@ -3362,7 +3383,8 @@
if (track->mainBuffer() != mMixBuffer) {
chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
- ALOGV("stopping track on chain %p for session Id: %d", chain.get(), track->sessionId());
+ ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
+ track->sessionId());
chain->decActiveTrackCnt();
}
}
@@ -3375,7 +3397,8 @@
// mix buffer must be cleared if all tracks are connected to an
// effect chain as in this case the mixer will not write to
// mix buffer and track effects will accumulate into it
- if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || (mixedTracks == 0 && fastTracks > 0)) {
+ if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+ (mixedTracks == 0 && fastTracks > 0)) {
// FIXME as a performance optimization, should remember previous zero status
memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
}
@@ -3580,39 +3603,18 @@
return reconfig;
}
-void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- PlaybackThread::dumpInternals(fd, args);
-
- snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
- result.append(buffer);
- write(fd, result.string(), result.size());
-
- // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
- FastMixerDumpState copy = mFastMixerDumpState;
- copy.dump(fd);
-
-#ifdef STATE_QUEUE_DUMP
- // Similar for state queue
- StateQueueObserverDump observerCopy = mStateQueueObserverDump;
- observerCopy.dump(fd);
- StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
- mutatorCopy.dump(fd);
-#endif
-
- // Write the tee output to a .wav file
- NBAIO_Source *teeSource = mTeeSource.get();
+ NBAIO_Source *teeSource = source.get();
if (teeSource != NULL) {
- char teePath[64];
+ char teeTime[16];
struct timeval tv;
gettimeofday(&tv, NULL);
struct tm tm;
localtime_r(&tv.tv_sec, &tm);
- strftime(teePath, sizeof(teePath), "/data/misc/media/%T.wav", &tm);
+ strftime(teeTime, sizeof(teeTime), "%T", &tm);
+ char teePath[64];
+ sprintf(teePath, "/data/misc/media/%s_%d.wav", teeTime, id);
int teeFd = open(teePath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
if (teeFd >= 0) {
char wavHeader[44];
@@ -3660,6 +3662,34 @@
fdprintf(fd, "FastMixer unable to create tee %s: \n", strerror(errno));
}
}
+}
+
+void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ PlaybackThread::dumpInternals(fd, args);
+
+ snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames());
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+
+ // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+ FastMixerDumpState copy = mFastMixerDumpState;
+ copy.dump(fd);
+
+#ifdef STATE_QUEUE_DUMP
+ // Similar for state queue
+ StateQueueObserverDump observerCopy = mStateQueueObserverDump;
+ observerCopy.dump(fd);
+ StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
+ mutatorCopy.dump(fd);
+#endif
+
+ // Write the tee output to a .wav file
+ dumpTee(fd, mTeeSource, mId);
#ifdef AUDIO_WATCHDOG
if (mAudioWatchdog != 0) {
@@ -3982,7 +4012,8 @@
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
- : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(), DUPLICATING),
+ : MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
+ DUPLICATING),
mWaitTimeMs(UINT_MAX)
{
addOutputTrack(mainThread);
@@ -4103,18 +4134,21 @@
}
-bool AudioFlinger::DuplicatingThread::outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks)
+bool AudioFlinger::DuplicatingThread::outputsReady(
+ const SortedVector< sp<OutputTrack> > &outputTracks)
{
for (size_t i = 0; i < outputTracks.size(); i++) {
sp<ThreadBase> thread = outputTracks[i]->thread().promote();
if (thread == 0) {
- ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", outputTracks[i].get());
+ ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p",
+ outputTracks[i].get());
return false;
}
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
// see note at standby() declaration
if (playbackThread->standby() && !playbackThread->isSuspended()) {
- ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(), thread.get());
+ ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(),
+ thread.get());
return false;
}
}
@@ -4161,7 +4195,8 @@
// mChannelCount
// mChannelMask
{
- ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
+ ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+ sharedBuffer->size());
// ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
size_t size = sizeof(audio_track_cblk_t);
@@ -4322,7 +4357,8 @@
const sp<IMemory>& sharedBuffer,
int sessionId,
IAudioFlinger::track_flags_t flags)
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId),
+ : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
+ sessionId),
mMute(false),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
@@ -4341,7 +4377,8 @@
if (mCblk != NULL) {
// NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
// 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
- mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) : sizeof(uint8_t);
+ mCblk->frameSize = audio_is_linear_pcm(format) ? mChannelCount * sizeof(int16_t) :
+ sizeof(uint8_t);
// to avoid leaking a track name, do not allocate one unless there is an mCblk
mName = thread->getTrackName_l(channelMask, sessionId);
mCblk->mName = mName;
@@ -4366,7 +4403,8 @@
thread->mFastTrackAvailMask &= ~(1 << i);
}
}
- ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
+ ALOGV("Track constructor name %d, calling pid %d", mName,
+ IPCThreadState::self()->getCallingPid());
}
AudioFlinger::PlaybackThread::Track::~Track()
@@ -4408,8 +4446,8 @@
/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
- result.append(" Name Client Type Fmt Chn mask Session mFrCnt fCount S M F SRate L dB R dB "
- " Server User Main buf Aux Buf Flags Underruns\n");
+ result.append(" Name Client Type Fmt Chn mask Session mFrCnt fCount S M F SRate "
+ "L dB R dB Server User Main buf Aux Buf Flags Underruns\n");
}
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
@@ -4636,7 +4674,8 @@
// and then to STOPPED and reset() when presentation is complete
mState = STOPPING_1;
}
- ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName, playbackThread);
+ ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
+ playbackThread);
}
if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
thread->mLock.unlock();
@@ -5395,7 +5434,8 @@
}
// AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts)
{
audio_track_cblk_t* cblk = this->cblk();
uint32_t framesAvail;
@@ -5587,7 +5627,8 @@
mOutBuffer.frameCount = pInBuffer->frameCount;
nsecs_t startTime = systemTime();
if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
- ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this, mThread.unsafe_get());
+ ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
+ mThread.unsafe_get());
outputBufferFull = true;
break;
}
@@ -5599,7 +5640,8 @@
}
}
- uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount : pInBuffer->frameCount;
+ uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
+ pInBuffer->frameCount;
memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
mCblk->stepUser(outFrames);
pInBuffer->frameCount -= outFrames;
@@ -5612,7 +5654,8 @@
mBufferQueue.removeAt(0);
delete [] pInBuffer->mBuffer;
delete pInBuffer;
- ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size());
+ ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
+ mThread.unsafe_get(), mBufferQueue.size());
} else {
break;
}
@@ -5628,11 +5671,14 @@
pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
pInBuffer->frameCount = inBuffer.frameCount;
pInBuffer->i16 = pInBuffer->mBuffer;
- memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount * sizeof(int16_t));
+ memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
+ sizeof(int16_t));
mBufferQueue.add(pInBuffer);
- ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this, mThread.unsafe_get(), mBufferQueue.size());
+ ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
+ mThread.unsafe_get(), mBufferQueue.size());
} else {
- ALOGW("OutputTrack::write() %p thread %p no more overflow buffers", mThread.unsafe_get(), this);
+ ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
+ mThread.unsafe_get(), this);
}
}
}
@@ -5657,7 +5703,8 @@
return outputBufferFull;
}
-status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
+status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
+ AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
int active;
status_t result;
@@ -5921,13 +5968,14 @@
*sessionId = lSessionId;
}
}
- // create new record track. The record track uses one track in mHardwareMixerThread by convention.
+ // create new record track.
+ // The record track uses one track in mHardwareMixerThread by convention.
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, flags, tid, &lStatus);
}
if (lStatus != NO_ERROR) {
- // remove local strong reference to Client before deleting the RecordTrack so that the Client
- // destructor is called by the TrackBase destructor with mLock held
+ // remove local strong reference to Client before deleting the RecordTrack so that the
+ // Client destructor is called by the TrackBase destructor with mLock held
client.clear();
recordTrack.clear();
goto Exit;
@@ -5946,7 +5994,8 @@
// ----------------------------------------------------------------------------
-AudioFlinger::RecordHandle::RecordHandle(const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
+AudioFlinger::RecordHandle::RecordHandle(
+ const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
: BnAudioRecord(),
mRecordTrack(recordTrack)
{
@@ -5961,7 +6010,8 @@
return mRecordTrack->getCblk();
}
-status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) {
+status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
+ int triggerSession) {
ALOGV("RecordHandle::start()");
return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
}
@@ -5988,18 +6038,21 @@
uint32_t sampleRate,
audio_channel_mask_t channelMask,
audio_io_handle_t id,
- audio_devices_t device) :
+ audio_devices_t device,
+ const sp<NBAIO_Sink>& teeSink) :
ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
// mRsmpInIndex and mInputBytes set by readInputParameters()
mReqChannelCount(popcount(channelMask)),
- mReqSampleRate(sampleRate)
+ mReqSampleRate(sampleRate),
// mBytesRead is only meaningful while active, and so is cleared in start()
// (but might be better to also clear here for dump?)
+ mTeeSink(teeSink)
{
snprintf(mName, kNameLength, "AudioIn_%X", id);
readInputParameters();
+
}
@@ -6106,7 +6159,8 @@
size_t framesIn = mFrameCount - mRsmpInIndex;
if (framesIn) {
int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
- int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * mActiveTrack->mCblk->frameSize;
+ int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
+ mActiveTrack->mCblk->frameSize;
if (framesIn > framesOut)
framesIn = framesOut;
mRsmpInIndex += framesIn;
@@ -6125,14 +6179,17 @@
}
}
if (framesOut && mFrameCount == mRsmpInIndex) {
+ void *readInto;
if (framesOut == mFrameCount &&
- ((int)mChannelCount == mReqChannelCount || mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
- mBytesRead = mInput->stream->read(mInput->stream, buffer.raw, mInputBytes);
+ ((int)mChannelCount == mReqChannelCount ||
+ mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
+ readInto = buffer.raw;
framesOut = 0;
} else {
- mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
+ readInto = mRsmpInBuffer;
mRsmpInIndex = 0;
}
+ mBytesRead = mInput->stream->read(mInput->stream, readInto, mInputBytes);
if (mBytesRead <= 0) {
if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
{
@@ -6145,6 +6202,9 @@
mRsmpInIndex = mFrameCount;
framesOut = 0;
buffer.frameCount = 0;
+ } else if (mTeeSink != 0) {
+ (void) mTeeSink->write(readInto,
+ mBytesRead >> Format_frameBitShift(mTeeSink->format()));
}
}
}
@@ -6156,12 +6216,14 @@
if (mChannelCount == 1 && mReqChannelCount == 1) {
framesOut >>= 1;
}
- mResampler->resample(mRsmpOutBuffer, framesOut, this /* AudioBufferProvider* */);
- // ditherAndClamp() works as long as all buffers returned by mActiveTrack->getNextBuffer()
- // are 32 bit aligned which should be always true.
+ mResampler->resample(mRsmpOutBuffer, framesOut,
+ this /* AudioBufferProvider* */);
+ // ditherAndClamp() works as long as all buffers returned by
+ // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
if (mChannelCount == 2 && mReqChannelCount == 1) {
ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
- // the resampler always outputs stereo samples: do post stereo to mono conversion
+ // the resampler always outputs stereo samples:
+ // do post stereo to mono conversion
downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
framesOut);
} else {
@@ -6635,7 +6697,8 @@
status = BAD_VALUE;
} else {
mInDevice = value;
- // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
+ // disable AEC and NS if the device is a BT SCO headset supporting those
+ // pre processings
if (mTracks.size() > 0) {
bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
mAudioFlinger->btNrecIsOff();
@@ -6657,7 +6720,8 @@
mAudioSource = (audio_source_t)value;
}
if (status == NO_ERROR) {
- status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
+ status = mInput->stream->common.set_parameters(&mInput->stream->common,
+ keyValuePair.string());
if (status == INVALID_OPERATION) {
inputStandBy();
status = mInput->stream->common.set_parameters(&mInput->stream->common,
@@ -6667,8 +6731,10 @@
if (status == BAD_VALUE &&
reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
- ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
- popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
+ ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common)
+ <= (2 * reqSamplingRate)) &&
+ popcount(mInput->stream->common.get_channels(&mInput->stream->common))
+ <= FCC_2 &&
(reqChannelCount <= FCC_2)) {
status = NO_ERROR;
}
@@ -6762,7 +6828,8 @@
mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
mRsmpOutBuffer = new int32_t[mFrameCount * 2];
- // optmization: if mono to mono, alter input frame count as if we were inputing stereo samples
+ // optimization: if mono to mono, alter input frame count as if we were inputting
+ // stereo samples
if (mChannelCount == 1 && mReqChannelCount == 1) {
mFrameCount >>= 1;
}
@@ -6989,7 +7056,8 @@
&outStream);
mHardwareStatus = AUDIO_HW_IDLE;
- ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
+ ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, "
+ "Channels %x, status %d",
outStream,
config.sample_rate,
config.format,
@@ -7042,7 +7110,8 @@
MixerThread *thread2 = checkMixerThread_l(output2);
if (thread1 == NULL || thread2 == NULL) {
- ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2);
+ ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1,
+ output2);
return 0;
}
@@ -7077,7 +7146,8 @@
if (thread->type() == ThreadBase::MIXER) {
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
if (mPlaybackThreads.valueAt(i)->type() == ThreadBase::DUPLICATING) {
- DuplicatingThread *dupThread = (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
+ DuplicatingThread *dupThread =
+ (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
dupThread->removeOutputTrack((MixerThread *)thread.get());
}
}
@@ -7164,16 +7234,17 @@
status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
&inStream);
- ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, status %d",
+ ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, "
+ "status %d",
inStream,
config.sample_rate,
config.format,
config.channel_mask,
status);
- // If the input could not be opened with the requested parameters and we can handle the conversion internally,
- // try to open again with the proposed parameters. The AudioFlinger can resample the input and do mono to stereo
- // or stereo to mono conversions on 16 bit PCM inputs.
+ // If the input could not be opened with the requested parameters and we can handle the
+ // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
+ // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
if (status == BAD_VALUE &&
reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
(config.sample_rate <= 2 * reqSamplingRate) &&
@@ -7184,18 +7255,66 @@
}
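// Reviewer example (illustrative, not part of the patch): with this fallback a request
// for 16-bit mono PCM at 44100 Hz can be served by a HAL that proposes 16-bit stereo at
// 48000 Hz, since 48000 <= 2 * 44100 and both channel counts are <= FCC_2; the
// RecordThread then resamples and downmixes to the requested parameters.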
if (status == NO_ERROR && inStream != NULL) {
+
+ // Try to re-use the most recently used Pipe to archive a copy of the input for dumpsys,
+ // or (re-)create one if the current Pipe is idle and does not match the new format
+ sp<NBAIO_Sink> teeSink;
+#ifdef TEE_SINK_INPUT_FRAMES
+ enum {
+ TEE_SINK_NO, // don't copy input
+ TEE_SINK_NEW, // copy input using a new pipe
+ TEE_SINK_OLD, // copy input using an existing pipe
+ } kind;
+ NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common),
+ popcount(inStream->common.get_channels(&inStream->common)));
+ if (format == Format_Invalid) {
+ kind = TEE_SINK_NO;
+ } else if (mRecordTeeSink == 0) {
+ kind = TEE_SINK_NEW;
+ } else if (mRecordTeeSink->getStrongCount() != 1) {
+ kind = TEE_SINK_NO;
+ } else if (format == mRecordTeeSink->format()) {
+ kind = TEE_SINK_OLD;
+ } else {
+ kind = TEE_SINK_NEW;
+ }
+ switch (kind) {
+ case TEE_SINK_NEW: {
+ Pipe *pipe = new Pipe(TEE_SINK_INPUT_FRAMES, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mRecordTeeSink = pipe;
+ mRecordTeeSource = pipeReader;
+ teeSink = pipe;
+ }
+ break;
+ case TEE_SINK_OLD:
+ teeSink = mRecordTeeSink;
+ break;
+ case TEE_SINK_NO:
+ default:
+ break;
+ }
+#endif
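// Reviewer notes (not part of the patch):
//  - TEE_SINK_INPUT_FRAMES doubles as the compile-time switch and the pipe depth in
//    frames; when it is not defined, teeSink stays NULL and RecordThread behaves as
//    before.
//  - mRecordTeeSink->getStrongCount() != 1 means a record thread still holds the
//    previous pipe, so the existing tee is neither reused nor replaced.
//  - negotiate() is offered exactly one format and is expected to accept it (index 0);
//    the ALOG_ASSERTs document that a Pipe/PipeReader created with that same format
//    cannot refuse it.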
AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
// Start record thread
// RecordThread requires both input and output device indication to forward to audio
// pre-processing modules
audio_devices_t device = (*pDevices) | primaryOutputDevice_l();
+
thread = new RecordThread(this,
input,
reqSamplingRate,
reqChannels,
id,
- device);
+ device, teeSink);
mRecordThreads.add(id, thread);
ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
@@ -8003,7 +8122,8 @@
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(), buffer);
+ ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
+ buffer);
track->setMainBuffer(buffer);
chain->incTrackCnt();
}
@@ -8845,12 +8965,15 @@
result.append("\t\tDescriptor:\n");
snprintf(buffer, SIZE, "\t\t- UUID: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
mDescriptor.uuid.timeLow, mDescriptor.uuid.timeMid, mDescriptor.uuid.timeHiAndVersion,
- mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1],mDescriptor.uuid.node[2],
+ mDescriptor.uuid.clockSeq, mDescriptor.uuid.node[0], mDescriptor.uuid.node[1],
+ mDescriptor.uuid.node[2],
mDescriptor.uuid.node[3],mDescriptor.uuid.node[4],mDescriptor.uuid.node[5]);
result.append(buffer);
snprintf(buffer, SIZE, "\t\t- TYPE: %08X-%04X-%04X-%04X-%02X%02X%02X%02X%02X%02X\n",
- mDescriptor.type.timeLow, mDescriptor.type.timeMid, mDescriptor.type.timeHiAndVersion,
- mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1],mDescriptor.type.node[2],
+ mDescriptor.type.timeLow, mDescriptor.type.timeMid,
+ mDescriptor.type.timeHiAndVersion,
+ mDescriptor.type.clockSeq, mDescriptor.type.node[0], mDescriptor.type.node[1],
+ mDescriptor.type.node[2],
mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]);
result.append(buffer);
snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n",
@@ -8934,7 +9057,8 @@
mBuffer = (uint8_t *)mCblk + bufOffset;
}
} else {
- ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t));
+ ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
+ sizeof(effect_param_cblk_t));
return;
}
}
@@ -9061,8 +9185,9 @@
// handle commands that are not forwarded transparently to effect engine
if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) {
- // No need to trylock() here as this function is executed in the binder thread serving a particular client process:
- // no risk to block the whole media server process or mixer threads is we are stuck here
+ // No need to trylock() here as this function is executed in the binder thread serving a
+ // particular client process: no risk to block the whole media server process or mixer
+ // threads if we are stuck here
Mutex::Autolock _l(mCblk->lock);
if (mCblk->clientIndex > EFFECT_PARAM_BUFFER_SIZE ||
mCblk->serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
@@ -9202,7 +9327,8 @@
}
// getEffectFromDesc_l() must be called with ThreadBase::mLock held
-sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(effect_descriptor_t *descriptor)
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(
+ effect_descriptor_t *descriptor)
{
size_t size = mEffects.size();
@@ -9361,7 +9487,8 @@
// check invalid effect chaining combinations
if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
- ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s", desc.name, d.name);
+ ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s",
+ desc.name, d.name);
return INVALID_OPERATION;
}
// remember position of first insert effect and by default
@@ -9412,7 +9539,8 @@
}
mEffects.insertAt(effect, idx_insert);
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this, idx_insert);
+ ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this,
+ idx_insert);
}
effect->configure();
return NO_ERROR;
@@ -9443,7 +9571,8 @@
}
}
mEffects.removeAt(i);
- ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(), this, i);
+ ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(),
+ this, i);
break;
}
}
@@ -9667,7 +9796,8 @@
for (size_t i = 0; i < types.size(); i++) {
setEffectSuspended_l(types[i], false);
}
- ALOGV("setEffectSuspendedAll_l() remove entry for %08x", mSuspendedEffects.keyAt(index));
+ ALOGV("setEffectSuspendedAll_l() remove entry for %08x",
+ mSuspendedEffects.keyAt(index));
mSuspendedEffects.removeItem((int)kKeyForSuspendAll);
}
}
@@ -9693,7 +9823,8 @@
return true;
}
-void AudioFlinger::EffectChain::getSuspendEligibleEffects(Vector< sp<AudioFlinger::EffectModule> > &effects)
+void AudioFlinger::EffectChain::getSuspendEligibleEffects(
+ Vector< sp<AudioFlinger::EffectModule> > &effects)
{
effects.clear();
for (size_t i = 0; i < mEffects.size(); i++) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 8c55f7c..2251b45 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -269,12 +269,14 @@
virtual ~AudioFlinger();
// call in any IAudioFlinger method that accesses mPrimaryHardwareDev
- status_t initCheck() const { return mPrimaryHardwareDev == NULL ? NO_INIT : NO_ERROR; }
+ status_t initCheck() const { return mPrimaryHardwareDev == NULL ?
+ NO_INIT : NO_ERROR; }
// RefBase
virtual void onFirstRef();
- AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module, audio_devices_t devices);
+ AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module,
+ audio_devices_t devices);
void purgeStaleEffects_l();
// standby delay for MIXER and DUPLICATING playback threads is read from property
@@ -746,7 +748,8 @@
const sp<PMDeathRecipient> mDeathRecipient;
// list of suspended effects per session and per type. The first vector is
// keyed by session ID, the second by type UUID timeLow field
- KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > mSuspendedSessions;
+ KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >
+ mSuspendedSessions;
};
struct stream_type_t {
@@ -788,7 +791,8 @@
static void appendDumpHeader(String8& result);
void dump(char* buffer, size_t size);
- virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
virtual void stop();
void pause();
@@ -823,7 +827,8 @@
Track& operator = (const Track&);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts = kInvalidPTS);
// releaseBuffer() not overridden
virtual size_t framesReady() const;
@@ -877,8 +882,8 @@
int32_t *mAuxBuffer;
int mAuxEffectId;
bool mHasVolumeController;
- size_t mPresentationCompleteFrames; // number of frames written to the audio HAL
- // when this track will be fully rendered
+ size_t mPresentationCompleteFrames; // number of frames written to the
+ // audio HAL when this track will be fully rendered
private:
IAudioFlinger::track_flags_t mFlags;
@@ -1000,7 +1005,8 @@
int frameCount);
virtual ~OutputTrack();
- virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
virtual void stop();
bool write(int16_t* data, uint32_t frames);
@@ -1014,7 +1020,8 @@
NO_MORE_BUFFERS = 0x80000001, // same in AudioTrack.h, ok to be different value
};
- status_t obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs);
+ status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
+ uint32_t waitTimeMs);
void clearBufferQueue();
// Maximum number of pending buffers allocated by OutputTrack::write()
@@ -1186,7 +1193,8 @@
void dumpTracks(int fd, const Vector<String16>& args);
SortedVector< sp<Track> > mTracks;
- // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread
+ // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by
+ // DuplicatingThread
stream_type_t mStreamTypes[AUDIO_STREAM_CNT + 1];
AudioStreamOut *mOutput;
@@ -1454,7 +1462,8 @@
// clear the buffer overflow flag
void clearOverflow() { mOverflow = false; }
// set the buffer overflow flag and return previous value
- bool setOverflow() { bool tmp = mOverflow; mOverflow = true; return tmp; }
+ bool setOverflow() { bool tmp = mOverflow; mOverflow = true;
+ return tmp; }
static void appendDumpHeader(String8& result);
void dump(char* buffer, size_t size);
@@ -1466,7 +1475,8 @@
RecordTrack& operator = (const RecordTrack&);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts = kInvalidPTS);
// releaseBuffer() not overridden
bool mOverflow; // overflow on most recent attempt to fill client buffer
@@ -1477,7 +1487,8 @@
uint32_t sampleRate,
audio_channel_mask_t channelMask,
audio_io_handle_t id,
- audio_devices_t device);
+ audio_devices_t device,
+ const sp<NBAIO_Sink>& teeSink);
virtual ~RecordThread();
// no addTrack_l ?
@@ -1573,6 +1584,9 @@
// when < 0, maximum frames to drop before starting capture even if sync event is
// not received
ssize_t mFramestoDrop;
+
+ // For dumpsys
+ const sp<NBAIO_Sink> mTeeSink;
};
// server side of the client's IAudioRecord
@@ -1782,7 +1796,8 @@
sp<IEffectClient> mEffectClient; // callback interface for client notifications
/*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect()
sp<IMemory> mCblkMemory; // shared memory for control block
- effect_param_cblk_t* mCblk; // control block for deferred parameter setting via shared memory
+ effect_param_cblk_t* mCblk; // control block for deferred parameter setting via
+ // shared memory
uint8_t* mBuffer; // pointer to parameter area in shared memory
int mPriority; // client application priority to control the effect
bool mHasControl; // true if this handle is controlling the effect
@@ -1795,10 +1810,10 @@
// the EffectChain class represents a group of effects associated to one audio session.
// There can be any number of EffectChain objects per output mixer thread (PlaybackThread).
// The EffectChain with session ID 0 contains global effects applied to the output mix.
- // Effects in this chain can be insert or auxiliary. Effects in other chains (attached to tracks)
- // are insert only. The EffectChain maintains an ordered list of effect module, the order corresponding
- // in the effect process order. When attached to a track (session ID != 0), it also provide it's own
- // input buffer used by the track as accumulation buffer.
+ // Effects in this chain can be insert or auxiliary. Effects in other chains (attached to
+ // tracks) are insert only. The EffectChain maintains an ordered list of effect modules, the
+ // order corresponding to the effect process order. When attached to a track (session ID != 0),
+ // it also provides its own input buffer used by the track as an accumulation buffer.
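// Reviewer note (illustrative): an insert effect (e.g. an equalizer) processes the
// session's buffer in place, while an auxiliary effect (e.g. a reverb) on the output
// mix reads from a send buffer that the mixer accumulates from the tracks routed to it.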
class EffectChain : public RefBase {
public:
EffectChain(const wp<ThreadBase>& wThread, int sessionId);
@@ -2065,6 +2080,13 @@
// for use from destructor
status_t closeOutput_nonvirtual(audio_io_handle_t output);
status_t closeInput_nonvirtual(audio_io_handle_t input);
+
+ // all record threads serially share a common tee sink, which is re-created on format change
+ sp<NBAIO_Sink> mRecordTeeSink;
+ sp<NBAIO_Source> mRecordTeeSource;
+
+public:
+ static void dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_handle_t id = 0);
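// Reviewer note: dumpTee() is presumably the consumer side of the tee, draining the
// given NBAIO_Source and writing the captured PCM to fd during dumpsys; keeping the
// shared sink/source pair in AudioFlinger (rather than only in RecordThread) lets the
// captured data survive record thread teardown.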
};
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index a4ed445..b3ca877 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -765,7 +765,8 @@
}
-void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount,
+ int32_t* temp, int32_t* aux)
{
t->resampler->setSampleRate(t->sampleRate);
@@ -798,11 +799,13 @@
}
}
-void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp,
+ int32_t* aux)
{
}
-void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
int32_t vl = t->prevVolume[0];
int32_t vr = t->prevVolume[1];
@@ -844,7 +847,8 @@
t->adjustVolumeRamp(aux != NULL);
}
-void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t vl = t->volume[0];
const int16_t vr = t->volume[1];
@@ -872,7 +876,8 @@
}
}
-void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t *in = static_cast<const int16_t *>(t->in);
@@ -962,7 +967,8 @@
t->in = in;
}
-void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
{
const int16_t *in = static_cast<int16_t const *>(t->in);
@@ -1147,7 +1153,8 @@
while (outFrames) {
size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
if (inFrames) {
- t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
+ t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames,
+ state->resampleTemp, aux);
t.frameCount -= inFrames;
outFrames -= inFrames;
if (CC_UNLIKELY(aux != NULL)) {
@@ -1156,7 +1163,8 @@
}
if (t.frameCount == 0 && outFrames) {
t.bufferProvider->releaseBuffer(&t.buffer);
- t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
+ t.buffer.frameCount = (state->frameCount - numFrames) -
+ (BLOCKSIZE - outFrames);
int64_t outputPTS = calculateOutputPTS(
t, pts, numFrames + (BLOCKSIZE - outFrames));
t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
@@ -1246,7 +1254,8 @@
if (CC_UNLIKELY(aux != NULL)) {
aux += outFrames;
}
- t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
+ t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount,
+ state->resampleTemp, aux);
outFrames += t.buffer.frameCount;
t.bufferProvider->releaseBuffer(&t.buffer);
}
@@ -1286,7 +1295,8 @@
// been enabled for mixing.
if (in == NULL || ((unsigned long)in & 3)) {
memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t));
- ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: buffer %p track %d, channels %d, needs %08x",
+ ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment problem: "
+ "buffer %p track %d, channels %d, needs %08x",
in, i, t.channelCount, t.needs);
return;
}
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index e60a298..fd21fda 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -146,7 +146,8 @@
struct track_t;
class DownmixerBufferProvider;
- typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+ typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
+ int32_t* aux);
static const int BLOCKSIZE = 16; // 4 cache lines
struct track_t {
@@ -261,12 +262,17 @@
static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
- static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
- static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+ static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
+ static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
static void process__validate(state_t* state, int64_t pts);
static void process__nop(state_t* state, int64_t pts);
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 8b99bd2..ea130ba 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -227,7 +227,8 @@
}
ALOGV("getOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask, flags);
+ return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask,
+ flags);
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -280,7 +281,7 @@
Mutex::Autolock _l(mLock);
// the audio_in_acoustics_t parameter is ignored by get_input()
audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
- format, channelMask, (audio_in_acoustics_t) 0);
+ format, channelMask, (audio_in_acoustics_t) 0);
if (input == 0) {
return input;
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 63f9549..92653c1 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -142,11 +142,11 @@
status_t dumpInternals(int fd);
// Thread used for tone playback and to send audio config commands to audio flinger
- // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because startTone()
- // and stopTone() are normally called with mLock locked and requesting a tone start or stop will cause
- // calls to AudioPolicyService and an attempt to lock mLock.
- // For audio config commands, it is necessary because audio flinger requires that the calling process (user)
- // has permission to modify audio settings.
+ // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
+ // startTone() and stopTone() are normally called with mLock locked and requesting a tone start
+ // or stop will cause calls to AudioPolicyService and an attempt to lock mLock.
+ // For audio config commands, it is necessary because audio flinger requires that the calling
+ // process (user) has permission to modify audio settings.
class AudioCommandThread : public Thread {
class AudioCommand;
public:
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
index a8e23e4..151313b 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/services/audioflinger/test-resample.cpp
@@ -61,7 +61,8 @@
};
static int usage(const char* name) {
- fprintf(stderr,"Usage: %s [-p] [-h] [-q <dq|lq|mq|hq|vhq>] [-i <input-sample-rate>] [-o <output-sample-rate>] <input-file> <output-file>\n", name);
+ fprintf(stderr,"Usage: %s [-p] [-h] [-q <dq|lq|mq|hq|vhq>] [-i <input-sample-rate>] "
+ "[-o <output-sample-rate>] <input-file> <output-file>\n", name);
fprintf(stderr,"-p - enable profiling\n");
fprintf(stderr,"-h - create wav file\n");
fprintf(stderr,"-q - resampler quality\n");