Merge "Camera3: Provide consumer usage flags to HAL for each stream"
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
index 188bd8e..3cec1f4 100644
--- a/camera/camera2/ICameraDeviceCallbacks.cpp
+++ b/camera/camera2/ICameraDeviceCallbacks.cpp
@@ -57,11 +57,12 @@
data.writeNoException();
}
- void onResultReceived(int32_t frameId, const CameraMetadata& result) {
+ void onResultReceived(int32_t requestId, const CameraMetadata& result) {
ALOGV("onResultReceived");
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- data.writeInt32(frameId);
+ data.writeInt32(requestId);
+ data.writeInt32(1); // to mark presence of metadata object
result.writeToParcel(&data);
remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
data.writeNoException();
@@ -91,10 +92,14 @@
case RESULT_RECEIVED: {
ALOGV("RESULT_RECEIVED");
CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- int32_t frameId = data.readInt32();
+ int32_t requestId = data.readInt32();
CameraMetadata result;
- result.readFromParcel(const_cast<Parcel*>(&data));
- onResultReceived(frameId, result);
+ if (data.readInt32() != 0) {
+ result.readFromParcel(const_cast<Parcel*>(&data));
+ } else {
+ ALOGW("No metadata object is present in result");
+ }
+ onResultReceived(requestId, result);
data.readExceptionCode();
return NO_ERROR;
break;
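The new writeInt32(1) / readInt32() pair is a presence marker for an optional
parcelable. As a rough sketch (hypothetical helpers, not part of this change,
assuming only the CameraMetadata parcel methods already used above):

    // Writer side: announce whether a metadata object follows.
    void writeOptionalMetadata(Parcel& data, const CameraMetadata* result) {
        if (result != NULL) {
            data.writeInt32(1);            // marker: metadata object present
            result->writeToParcel(&data);
        } else {
            data.writeInt32(0);            // marker: no metadata object
        }
    }

    // Reader side: only unparcel if the marker says an object is there,
    // mirroring the ALOGW fallback in the RESULT_RECEIVED handler above.
    void readOptionalMetadata(const Parcel& data, CameraMetadata* result) {
        if (data.readInt32() != 0) {
            result->readFromParcel(const_cast<Parcel*>(&data));
        }
    }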
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 3e79ee0..28fc00f 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -44,8 +44,9 @@
// Command-line parameters.
static bool gVerbose = false; // chatty on stdout
static bool gRotate = false; // rotate 90 degrees
-static uint32_t gVideoWidth = 1280; // 720p
-static uint32_t gVideoHeight = 720;
+static bool gSizeSpecified = false; // was size explicitly requested?
+static uint32_t gVideoWidth = 0; // default width+height
+static uint32_t gVideoHeight = 0;
static uint32_t gBitRate = 4000000; // 4Mbps
// Set by signal handler to stop recording.
@@ -107,6 +108,14 @@
}
/*
+ * Returns "true" if the device is rotated 90 degrees.
+ */
+static bool isDeviceRotated(int orientation) {
+ return orientation != DISPLAY_ORIENTATION_0 &&
+ orientation != DISPLAY_ORIENTATION_180;
+}
+
+/*
* Configures and starts the MediaCodec encoder. Obtains an input surface
* from the codec.
*/
@@ -114,6 +123,11 @@
sp<IGraphicBufferProducer>* pBufferProducer) {
status_t err;
+ if (gVerbose) {
+ printf("Configuring recorder for %dx%d video at %.2fMbps\n",
+ gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
+ }
+
sp<AMessage> format = new AMessage;
format->setInt32("width", gVideoWidth);
format->setInt32("height", gVideoHeight);
@@ -152,6 +166,7 @@
return err;
}
+ ALOGV("Codec prepared");
*pCodec = codec;
*pBufferProducer = bufferProducer;
return 0;
@@ -169,8 +184,7 @@
// Set the region of the layer stack we're interested in, which in our
// case is "all of it". If the app is rotated (so that the width of the
// app is based on the height of the display), reverse width/height.
- bool deviceRotated = mainDpyInfo.orientation != DISPLAY_ORIENTATION_0 &&
- mainDpyInfo.orientation != DISPLAY_ORIENTATION_180;
+ bool deviceRotated = isDeviceRotated(mainDpyInfo.orientation);
uint32_t sourceWidth, sourceHeight;
if (!deviceRotated) {
sourceWidth = mainDpyInfo.w;
@@ -295,6 +309,12 @@
bufIndex, size, ptsUsec);
CHECK(trackIdx != -1);
+ // If the virtual display isn't providing us with timestamps,
+ // use the current time.
+ if (ptsUsec == 0) {
+ ptsUsec = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;
+ }
+
// The MediaMuxer docs are unclear, but it appears that we
// need to pass either the full set of BufferInfo flags, or
// (flags & BUFFER_FLAG_SYNCFRAME).
@@ -370,11 +390,6 @@
static status_t recordScreen(const char* fileName) {
status_t err;
- if (gVerbose) {
- printf("Recording %dx%d video at %.2fMbps\n",
- gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
- }
-
// Configure signal handler.
err = configureSignals();
if (err != NO_ERROR) return err;
@@ -399,11 +414,31 @@
mainDpyInfo.orientation);
}
+ bool rotated = isDeviceRotated(mainDpyInfo.orientation);
+ if (gVideoWidth == 0) {
+ gVideoWidth = rotated ? mainDpyInfo.h : mainDpyInfo.w;
+ }
+ if (gVideoHeight == 0) {
+ gVideoHeight = rotated ? mainDpyInfo.w : mainDpyInfo.h;
+ }
+
// Configure and start the encoder.
sp<MediaCodec> encoder;
sp<IGraphicBufferProducer> bufferProducer;
err = prepareEncoder(mainDpyInfo.fps, &encoder, &bufferProducer);
- if (err != NO_ERROR) return err;
+ if (err != NO_ERROR && !gSizeSpecified) {
+ ALOGV("Retrying with 720p");
+ if (gVideoWidth != 1280 || gVideoHeight != 720) {
+ fprintf(stderr, "WARNING: failed at %dx%d, retrying at 720p\n",
+ gVideoWidth, gVideoHeight);
+ gVideoWidth = 1280;
+ gVideoHeight = 720;
+ err = prepareEncoder(mainDpyInfo.fps, &encoder, &bufferProducer);
+ }
+ }
+ if (err != NO_ERROR) {
+ return err;
+ }
// Configure virtual display.
sp<IBinder> dpy;
@@ -478,6 +513,8 @@
fprintf(stderr,
"Usage: screenrecord [options] <filename>\n"
"\n"
+ "Records the device's display to a .mp4 file.\n"
+ "\n"
"Options:\n"
"--size WIDTHxHEIGHT\n"
" Set the video size, e.g. \"1280x720\". For best results, use\n"
@@ -485,8 +522,7 @@
"--bit-rate RATE\n"
" Set the video bit rate, in megabits per second. Default 4Mbps.\n"
"--rotate\n"
- " Rotate the output 90 degrees. Useful for filling the frame\n"
- " when in portrait mode.\n"
+ " Rotate the output 90 degrees.\n"
"--verbose\n"
" Display interesting information on stdout.\n"
"--help\n"
@@ -536,6 +572,7 @@
gVideoWidth, gVideoHeight);
return 2;
}
+ gSizeSpecified = true;
break;
case 'b':
gBitRate = atoi(optarg);
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 7aa3c24..d245414 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -35,8 +35,6 @@
{
public:
- static const int DEFAULT_SAMPLE_RATE = 8000;
-
/* Events used by AudioRecord callback function (callback_t).
* Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
*/
@@ -131,7 +129,7 @@
* sampleRate: Data sink sampling rate in Hz.
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
- * channelMask: Channel mask.
+ * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
* application's contribution to the
* latency of the track. The actual size selected by the AudioRecord could
@@ -148,9 +146,9 @@
*/
AudioRecord(audio_source_t inputSource,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
callback_t cbf = NULL,
void* user = NULL,
@@ -178,10 +176,10 @@
*
* threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
*/
- status_t set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+ status_t set(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
callback_t cbf = NULL,
void* user = NULL,
@@ -476,6 +474,7 @@
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
+ bool mAwaitBoost; // thread should wait for priority boost before running
// The proxy should only be referenced while a lock is held because the proxy isn't
// multi-thread safe.
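With the default argument values removed (and DEFAULT_SAMPLE_RATE gone), callers
must now spell out the sink parameters. An assumed usage sketch, not taken from
this patch (the parameter values are illustrative):

    // Explicit sample rate, format, and input channel mask are now required;
    // a sample rate of 0 is rejected with BAD_VALUE (see AudioRecord.cpp below).
    sp<AudioRecord> record = new AudioRecord(
            AUDIO_SOURCE_MIC,
            44100,                         // sampleRate in Hz
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_IN_MONO);        // audio_is_input_channel() must hold
    if (record->initCheck() != NO_ERROR) {
        // construction failed; do not call start()
    }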
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 523bd32..aa2dd4e 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -170,9 +170,9 @@
*/
AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
@@ -194,10 +194,10 @@
*/
AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- const sp<IMemory>& sharedBuffer = 0,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const sp<IMemory>& sharedBuffer,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
void* user = NULL,
@@ -227,10 +227,10 @@
*
* threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
*/
- status_t set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
- uint32_t sampleRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
+ status_t set(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
int frameCount = 0,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
callback_t cbf = NULL,
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index 2e5fd73..940fc55 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -30,6 +30,7 @@
const sp<ALooper> looper, const sp<AHandler> &handler);
void unregisterHandler(ALooper::handler_id handlerID);
+ void unregisterStaleHandlers();
status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0);
void deliverMessage(const sp<AMessage> &msg);
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 6d778dd..1379379 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -415,6 +415,13 @@
virtual void framesReadyIsCalledByMultipleThreads() { }
bool setStreamEndDone(); // and return previous value
+
+ // Add to the tally of underrun frames, and inform client of underrun
+ virtual void tallyUnderrunFrames(uint32_t frameCount);
+
+ // Return the total number of frames which AudioFlinger desired but were unavailable,
+ // and thus which resulted in an underrun.
+ virtual uint32_t getUnderrunFrames() const { return mCblk->u.mStreaming.mUnderrunFrames; }
};
class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
@@ -429,6 +436,8 @@
virtual void framesReadyIsCalledByMultipleThreads();
virtual status_t obtainBuffer(Buffer* buffer);
virtual void releaseBuffer(Buffer* buffer);
+ virtual void tallyUnderrunFrames(uint32_t frameCount);
+ virtual uint32_t getUnderrunFrames() const { return 0; }
private:
ssize_t pollPosition(); // poll for state queue update, and return current position
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 0e7e17f..d88c1ed 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -60,10 +60,9 @@
// We double the size of input buffer for ping pong use of record buffer.
size <<= 1;
- if (audio_is_linear_pcm(format)) {
- uint32_t channelCount = popcount(channelMask);
- size /= channelCount * audio_bytes_per_sample(format);
- }
+ // Assumes audio_is_linear_pcm(format)
+ uint32_t channelCount = popcount(channelMask);
+ size /= channelCount * audio_bytes_per_sample(format);
*frameCount = size;
return NO_ERROR;
@@ -176,7 +175,8 @@
}
if (sampleRate == 0) {
- sampleRate = DEFAULT_SAMPLE_RATE;
+ ALOGE("Invalid sample rate %u", sampleRate);
+ return BAD_VALUE;
}
mSampleRate = sampleRate;
@@ -205,11 +205,8 @@
uint32_t channelCount = popcount(channelMask);
mChannelCount = channelCount;
- if (audio_is_linear_pcm(format)) {
- mFrameSize = channelCount * audio_bytes_per_sample(format);
- } else {
- mFrameSize = sizeof(uint8_t);
- }
+ // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
if (sessionId == 0 ) {
mSessionId = AudioSystem::newAudioSessionId();
@@ -665,6 +662,26 @@
nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
{
mLock.lock();
+ if (mAwaitBoost) {
+ mAwaitBoost = false;
+ mLock.unlock();
+ static const int32_t kMaxTries = 5;
+ int32_t tryCounter = kMaxTries;
+ uint32_t pollUs = 10000;
+ do {
+ int policy = sched_getscheduler(0);
+ if (policy == SCHED_FIFO || policy == SCHED_RR) {
+ break;
+ }
+ usleep(pollUs);
+ pollUs <<= 1;
+ } while (tryCounter-- > 0);
+ if (tryCounter < 0) {
+ ALOGE("did not receive expected priority boost on time");
+ }
+ // Run again immediately
+ return 0;
+ }
// Can only reference mCblk while locked
int32_t flags = android_atomic_and(~CBLK_OVERRUN, &mCblk->mFlags);
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 3b7616f..e7abb40 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -661,6 +661,14 @@
return old;
}
+void AudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
+{
+ mCblk->u.mStreaming.mUnderrunFrames += frameCount;
+
+ // FIXME also wake futex so that underrun is noticed more quickly
+ (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+}
+
// ---------------------------------------------------------------------------
StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
@@ -817,6 +825,17 @@
buffer->mNonContig = 0;
}
+void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
+{
+ // Unlike AudioTrackServerProxy::tallyUnderrunFrames() used for streaming tracks,
+ // we don't have a location to count underrun frames. The underrun frame counter
+ // only exists in AudioTrackSharedStreaming. Fortunately, underruns are not
+ // possible for static buffer tracks other than at end of buffer, so this is not a loss.
+
+ // FIXME also wake futex so that underrun is noticed more quickly
+ (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+}
+
// ---------------------------------------------------------------------------
} // namespace android
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index befd4cc..b082c3a 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -65,10 +65,8 @@
// reference once the ctor ends, as that would cause the refcount of 'this'
// dropping to 0 at the end of the ctor. Since all we need is a wp<...>
// that's what we create.
- wp<BufferQueue::ConsumerListener> listener;
- sp<BufferQueue::ConsumerListener> proxy;
- listener = static_cast<BufferQueue::ConsumerListener*>(this);
- proxy = new BufferQueue::ProxyConsumerListener(listener);
+ wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
+ sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
status_t err = mBufferQueue->consumerConnect(proxy, false);
if (err != NO_ERROR) {
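The wp<>/sp<> dance above exists because taking a strong reference to 'this'
inside a constructor briefly raises the refcount and then drops it back to zero,
destroying the object before the caller ever holds it. A comment-only sketch of
the hazard, assuming standard RefBase lifetime rules:

    // Inside a RefBase-derived constructor:
    //   sp<BufferQueue::ConsumerListener> self = this;   // refcount 0 -> 1
    //   mBufferQueue->consumerConnect(self, false);
    //   // 'self' goes out of scope: refcount 1 -> 0, 'this' is destroyed
    //
    // Holding only a wp<> avoids owning a strong reference; the
    // ProxyConsumerListener promotes it to an sp<> when a callback fires.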
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index 22777a2..ebf9d8d 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -72,6 +72,10 @@
ALooper::~ALooper() {
stop();
+
+ // Since this looper is "dead" (or as good as dead by now),
+ // have ALooperRoster unregister any handlers still registered for it.
+ gLooperRoster.unregisterStaleHandlers();
}
void ALooper::setName(const char *name) {
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index ad10d2b..0c181ff 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -71,6 +71,20 @@
mHandlers.removeItemsAt(index);
}
+void ALooperRoster::unregisterStaleHandlers() {
+ Mutex::Autolock autoLock(mLock);
+
+ for (size_t i = mHandlers.size(); i-- > 0;) {
+ const HandlerInfo &info = mHandlers.valueAt(i);
+
+ sp<ALooper> looper = info.mLooper.promote();
+ if (looper == NULL) {
+ ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
+ mHandlers.removeItemsAt(i);
+ }
+ }
+}
+
status_t ALooperRoster::postMessage(
const sp<AMessage> &msg, int64_t delayUs) {
Mutex::Autolock autoLock(mLock);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index d6fd95b..325ffcf 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -69,11 +69,8 @@
// reference once the ctor ends, as that would cause the refcount of 'this'
// dropping to 0 at the end of the ctor. Since all we need is a wp<...>
// that's what we create.
- wp<BufferQueue::ConsumerListener> listener;
- listener = static_cast<BufferQueue::ConsumerListener*>(this);
-
- sp<BufferQueue::ConsumerListener> proxy;
- proxy = new BufferQueue::ProxyConsumerListener(listener);
+ wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
+ sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
mInitCheck = mBufferQueue->consumerConnect(proxy, false);
if (mInitCheck != NO_ERROR) {
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 906aef3..5116550 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -571,6 +571,9 @@
if (sawCR && c == '\n') {
line->erase(line->size() - 1, 1);
return true;
+ } else if (c == '\n') {
+ // some response lines end with '\n' instead of '\r\n'.
+ return true;
}
line->append(&c, 1);
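The extra branch makes the parser tolerant of servers that terminate a response
line with a bare '\n'. A hypothetical helper expressing the same idea as a
post-processing step (assumes only the AString methods already used above):

    // After reading up to and including '\n', drop a trailing '\r' if the
    // peer sent the canonical "\r\n" terminator; a bare '\n' needs no fixup.
    static void stripTrailingCR(AString *line) {
        if (line->size() > 0 && line->c_str()[line->size() - 1] == '\r') {
            line->erase(line->size() - 1, 1);
        }
    }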
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1ae51ca..00e8a57 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1744,7 +1744,7 @@
AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
// Start record thread
- // RecorThread require both input and output device indication to forward to audio
+ // RecordThread requires both input and output device indication to forward to audio
// pre processing modules
thread = new RecordThread(this,
input,
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 29dc5b6..33e64ce 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -56,6 +56,14 @@
// set the PTS of the next buffer output by the resampler
virtual void setPTS(int64_t pts);
+ // Resample int16_t samples from provider and accumulate into 'out'.
+ // A mono provider delivers a sequence of samples.
+ // A stereo provider delivers a sequence of interleaved pairs of samples.
+ // Multi-channel providers are not supported.
+ // In either case, 'out' holds interleaved pairs of fixed-point signed Q19.12.
+ // That is, for a mono provider, there is an implicit up-channeling.
+ // Since this method accumulates, the caller is responsible for clearing 'out' initially.
+ // FIXME assumes provider is always successful; it should return the actual frame count.
virtual void resample(int32_t* out, size_t outFrameCount,
AudioBufferProvider* provider) = 0;
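A hedged illustration of the Q19.12 convention referred to above (assumed from
the comment rather than the resampler internals): the low 12 bits are fractional,
so unity is 1 << 12 == 4096, matching AudioMixer::UNITY_GAIN passed to
setVolume() in Threads.cpp below.

    // A 16-bit sample scaled by a gain where 0x1000 represents 1.0 lands in Q19.12.
    static inline int32_t pcm16ToQ19_12(int16_t s, int32_t gainQ4_12) {
        return static_cast<int32_t>(s) * gainQ4_12;    // e.g. 100 * 0x1000 == 409600
    }

    // Going back to 16-bit PCM: shift out the 12 fractional bits and clamp,
    // which is conceptually what ditherAndClamp() does for interleaved pairs.
    static inline int16_t q19_12ToPcm16(int32_t q) {
        int32_t v = q >> 12;
        if (v > 32767) v = 32767;
        if (v < -32768) v = -32768;
        return static_cast<int16_t>(v);
    }

Because resample() accumulates into 'out', callers clear the buffer first, as the
RecordThread memset() of mRsmpOutBuffer in Threads.cpp below does.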
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 628f5af..5600411c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -140,7 +140,6 @@
// but the slot is only used if track is active
FastTrackUnderruns mObservedUnderruns; // Most recently observed value of
// mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
- uint32_t mUnderrunCount; // Counter of total number of underruns, never reset
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e6a9d65c..f0c27c3 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2739,8 +2739,10 @@
track->mObservedUnderruns = underruns;
// don't count underruns that occur while stopping or pausing
// or stopped which can occur when flush() is called while active
- if (!(track->isStopping() || track->isPausing() || track->isStopped())) {
- track->mUnderrunCount += recentUnderruns;
+ if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
+ recentUnderruns > 0) {
+ // FIXME fast mixer will pull & mix partial buffers, but we count it as a full underrun
+ track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
}
// This is similar to the state machine for normal tracks,
@@ -3056,12 +3058,8 @@
mixerStatus = MIXER_TRACKS_READY;
}
} else {
- // only implemented for normal tracks, not fast tracks
if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
- // we missed desiredFrames whatever the actual number of frames missing was
- cblk->u.mStreaming.mUnderrunFrames += desiredFrames;
- // FIXME also wake futex so that underrun is noticed more quickly
- (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
+ track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
}
// clear effect chain input buffer if an active track underruns to avoid sending
// previous audio buffer again to effects
@@ -3086,7 +3084,6 @@
tracksToRemove->add(track);
}
} else {
- track->mUnderrunCount++;
// No buffers for this track. Give it a few chances to
// fill a buffer, then remove it from active list.
if (--(track->mRetryCount) <= 0) {
@@ -4337,7 +4334,8 @@
} else {
// resampling
- memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
+ // resampler accumulates, but we only have one source track
+ memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
// alter output frame count as if we were expecting stereo samples
if (mChannelCount == 1 && mReqChannelCount == 1) {
framesOut >>= 1;
@@ -4347,6 +4345,7 @@
// ditherAndClamp() works as long as all buffers returned by
// mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
if (mChannelCount == 2 && mReqChannelCount == 1) {
+ // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
// the resampler always outputs stereo samples:
// do post stereo to mono conversion
@@ -4355,6 +4354,7 @@
} else {
ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
}
+ // now done with mRsmpOutBuffer
}
if (mFramestoDrop == 0) {
@@ -4908,9 +4908,9 @@
void AudioFlinger::RecordThread::readInputParameters()
{
- delete mRsmpInBuffer;
+ delete[] mRsmpInBuffer;
// mRsmpInBuffer is always assigned a new[] below
- delete mRsmpOutBuffer;
+ delete[] mRsmpOutBuffer;
mRsmpOutBuffer = NULL;
delete mResampler;
mResampler = NULL;
@@ -4940,7 +4940,7 @@
mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
mResampler->setSampleRate(mSampleRate);
mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
- mRsmpOutBuffer = new int32_t[mFrameCount * 2];
+ mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2];
// optimization: if mono to mono, alter input frame count as if we were inputting
// stereo samples
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7be6043..aa04fd4 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -898,8 +898,9 @@
// updated by RecordThread::readInputParameters()
AudioResampler *mResampler;
+ // interleaved stereo pairs of fixed-point signed Q19.12
int32_t *mRsmpOutBuffer;
- int16_t *mRsmpInBuffer;
+ int16_t *mRsmpInBuffer; // [mFrameCount * mChannelCount]
size_t mRsmpInIndex;
size_t mBufferSize; // stream buffer size for read()
const uint32_t mReqChannelCount;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1f75468..e676365 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -316,7 +316,6 @@
mPresentationCompleteFrames(0),
mFlags(flags),
mFastIndex(-1),
- mUnderrunCount(0),
mCachedVolume(1.0),
mIsInvalid(false),
mAudioTrackServerProxy(NULL),
@@ -389,7 +388,7 @@
/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
result.append(" Name Client Type Fmt Chn mask Session fCount S F SRate "
- "L dB R dB Server Main buf Aux Buf Flags Underruns\n");
+ "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n");
}
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
@@ -470,7 +469,7 @@
(int)mMainBuffer,
(int)mAuxBuffer,
mCblk->mFlags,
- mUnderrunCount,
+ mAudioTrackServerProxy->getUnderrunFrames(),
nowInUnderrun);
}
@@ -489,10 +488,7 @@
buffer->frameCount = buf.mFrameCount;
buffer->raw = buf.mRaw;
if (buf.mFrameCount == 0) {
- // only implemented so far for normal tracks, not fast tracks
- mCblk->u.mStreaming.mUnderrunFrames += desiredFrames;
- // FIXME also wake futex so that underrun is noticed more quickly
- (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+ mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
}
return status;
}
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 10bc6ea..e7b440a 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -143,7 +143,7 @@
__FUNCTION__, device->getId());
return BAD_VALUE;
}
- int32_t frameId = entry.data.i32[0];
+ int32_t requestId = entry.data.i32[0];
List<sp<FilteredListener> > listeners;
{
@@ -151,8 +151,8 @@
List<RangeListener>::iterator item = mRangeListeners.begin();
while (item != mRangeListeners.end()) {
- if (frameId >= item->minId &&
- frameId < item->maxId) {
+ if (requestId >= item->minId &&
+ requestId < item->maxId) {
sp<FilteredListener> listener = item->listener.promote();
if (listener == 0) {
item = mRangeListeners.erase(item);
@@ -167,7 +167,7 @@
ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
List<sp<FilteredListener> >::iterator item = listeners.begin();
for (; item != listeners.end(); item++) {
- (*item)->onFrameAvailable(frameId, frame);
+ (*item)->onFrameAvailable(requestId, frame);
}
return OK;
}