Merge "Fix resampler to allow output of single frame"
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 6d59ea7..bcbbc04 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -25,6 +25,8 @@
namespace android {
+class String8;
+
class NBLog {
public:
@@ -187,6 +189,10 @@
const Shared* const mShared; // raw pointer to shared memory
const sp<IMemory> mIMemory; // ref-counted version
int32_t mFront; // index of oldest acknowledged Entry
+ int mFd; // file descriptor
+ int mIndent; // indentation level
+
+ void dumpLine(const String8& timestamp, String8& body);
static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps
};
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 69cfbd0..dd0a106 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -172,7 +172,7 @@
const sp<IGraphicBufferProducer>& surface,
bool storeMetaDataInVideoBuffers);
- virtual void startCameraRecording();
+ virtual status_t startCameraRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& frame);
// Returns true if need to skip the current frame.
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index fc4b2a5..e0acae6 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -60,16 +60,12 @@
LOCAL_SRC_FILES += ../libnbaio/roundup.c
-# for <cutils/atomic-inline.h>
-LOCAL_CFLAGS += -DANDROID_SMP=$(if $(findstring true,$(TARGET_CPU_SMP)),1,0)
-LOCAL_SRC_FILES += SingleStateQueue.cpp
-LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
-# Consider a separate a library for SingleStateQueueInstantiations.
-
LOCAL_SHARED_LIBRARIES := \
libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
libcamera_client libstagefright_foundation \
- libgui libdl libaudioutils
+ libgui libdl libaudioutils libnbaio
+
+LOCAL_STATIC_LIBRARIES += libinstantssq
LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
@@ -84,3 +80,15 @@
$(call include-path-for, audio-utils)
include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# for <cutils/atomic-inline.h>
+LOCAL_CFLAGS += -DANDROID_SMP=$(if $(findstring true,$(TARGET_CPU_SMP)),1,0)
+LOCAL_SRC_FILES += SingleStateQueue.cpp
+LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
+
+LOCAL_MODULE := libinstantssq
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 6ca499b..700718d 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -462,7 +462,9 @@
audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
mChannelMask, mSessionId);
if (input == 0) {
- ALOGE("Could not get audio input for record source %d", mInputSource);
+ ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
+ "channel mask %#x, session %d",
+ mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
return BAD_VALUE;
}
{
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 7b15e68..e696323 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -988,7 +988,7 @@
&latency,
flags,
hasOffloadInfo ? &offloadInfo : NULL);
- ALOGV("OPEN_OUTPUT output, %p", output);
+ ALOGV("OPEN_OUTPUT output, %d", output);
reply->writeInt32((int32_t) output);
reply->writeInt32(devices);
reply->writeInt32(samplingRate);
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 69c75b8..9707c4a 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -31,9 +31,8 @@
libcommon_time_client \
libcutils \
libutils \
- liblog \
- libmedia
-# This dependency on libmedia is for SingleStateQueueInstantiations.
-# Consider a separate a library for SingleStateQueueInstantiations.
+ liblog
+
+LOCAL_STATIC_LIBRARIES += libinstantssq
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index 190824d..96738a7 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -26,6 +26,7 @@
#include <cutils/atomic.h>
#include <media/nbaio/NBLog.h>
#include <utils/Log.h>
+#include <utils/String8.h>
namespace android {
@@ -337,25 +338,25 @@
}
i -= length + 3;
}
- if (i > 0) {
- lost += i;
- if (fd >= 0) {
- fdprintf(fd, "%*swarning: lost %zu bytes worth of events\n", indent, "", lost);
- } else {
- ALOGI("%*swarning: lost %u bytes worth of events\n", indent, "", lost);
- }
+ mFd = fd;
+ mIndent = indent;
+ String8 timestamp, body;
+ lost += i;
+ if (lost > 0) {
+ body.appendFormat("warning: lost %u bytes worth of events", lost);
+    // TODO timestamp is empty here; the only other choice is to wait for the first timestamp
+    // event in the log to push it out. Consider keeping the timestamp/body between calls to readAt().
+ dumpLine(timestamp, body);
}
size_t width = 1;
while (maxSec >= 10) {
++width;
maxSec /= 10;
}
- char prefix[32];
if (maxSec >= 0) {
- snprintf(prefix, sizeof(prefix), "[%*s] ", width + 4, "");
- } else {
- prefix[0] = '\0';
+ timestamp.appendFormat("[%*s]", width + 4, "");
}
+ bool deferredTimestamp = false;
while (i < avail) {
event = (Event) copy[i];
length = copy[i + 1];
@@ -363,11 +364,8 @@
size_t advance = length + 3;
switch (event) {
case EVENT_STRING:
- if (fd >= 0) {
- fdprintf(fd, "%*s%s%.*s\n", indent, "", prefix, length, (const char *) data);
- } else {
- ALOGI("%*s%s%.*s", indent, "", prefix, length, (const char *) data);
- } break;
+ body.appendFormat("%.*s", length, (const char *) data);
+ break;
case EVENT_TIMESTAMP: {
// already checked that length == sizeof(struct timespec);
memcpy(&ts, data, sizeof(struct timespec));
@@ -400,45 +398,53 @@
prevNsec = tsNext.tv_nsec;
}
size_t n = (j - i) / (sizeof(struct timespec) + 3);
+ if (deferredTimestamp) {
+ dumpLine(timestamp, body);
+ deferredTimestamp = false;
+ }
+ timestamp.clear();
if (n >= kSquashTimestamp) {
- if (fd >= 0) {
- fdprintf(fd, "%*s[%d.%03d to .%.03d by .%.03d to .%.03d]\n", indent, "",
- (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
- (int) ((ts.tv_nsec + deltaTotal) / 1000000),
- (int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
- } else {
- ALOGI("%*s[%d.%03d to .%.03d by .%.03d to .%.03d]\n", indent, "",
- (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
- (int) ((ts.tv_nsec + deltaTotal) / 1000000),
- (int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
- }
+ timestamp.appendFormat("[%d.%03d to .%.03d by .%.03d to .%.03d]",
+ (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
+ (int) ((ts.tv_nsec + deltaTotal) / 1000000),
+ (int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
i = j;
advance = 0;
break;
}
- if (fd >= 0) {
- fdprintf(fd, "%*s[%d.%03d]\n", indent, "", (int) ts.tv_sec,
- (int) (ts.tv_nsec / 1000000));
- } else {
- ALOGI("%*s[%d.%03d]", indent, "", (int) ts.tv_sec,
- (int) (ts.tv_nsec / 1000000));
- }
+ timestamp.appendFormat("[%d.%03d]", (int) ts.tv_sec,
+ (int) (ts.tv_nsec / 1000000));
+ deferredTimestamp = true;
} break;
case EVENT_RESERVED:
default:
- if (fd >= 0) {
- fdprintf(fd, "%*s%swarning: unknown event %d\n", indent, "", prefix, event);
- } else {
- ALOGI("%*s%swarning: unknown event %d", indent, "", prefix, event);
- }
+ body.appendFormat("warning: unknown event %d", event);
break;
}
i += advance;
+
+ if (!body.isEmpty()) {
+ dumpLine(timestamp, body);
+ deferredTimestamp = false;
+ }
+ }
+ if (deferredTimestamp) {
+ dumpLine(timestamp, body);
}
// FIXME it would be more efficient to put a char mCopy[256] as a member variable of the dumper
delete[] copy;
}
+void NBLog::Reader::dumpLine(const String8& timestamp, String8& body)
+{
+ if (mFd >= 0) {
+ fdprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
+ } else {
+ ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
+ }
+ body.clear();
+}
+
bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
{
return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
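The NBLog.cpp changes above replace the per-line fdprintf/ALOGI calls with a timestamp prefix and a body accumulated in String8 and flushed through dumpLine(), so that a timestamp with no body yet can be deferred until the next event or the end of the dump. A minimal standalone sketch of that accumulate-and-flush pattern, using plain std::string and printf rather than the NBLog types; all names here are illustrative:

```cpp
#include <cstdio>
#include <string>

// Hypothetical stand-in for NBLog::Reader's deferred-timestamp dump loop.
// The timestamp prefix is kept separately from the line body so that a
// timestamp with no body yet can be deferred, and squashed-timestamp ranges
// can reuse the same flush path.
struct LineDumper {
    std::string timestamp;          // e.g. "[12.345]"
    std::string body;               // accumulated event text
    bool deferredTimestamp = false;

    void dumpLine() {
        std::printf("%s %s\n", timestamp.c_str(), body.c_str());
        body.clear();               // timestamp is kept; body always starts fresh
        deferredTimestamp = false;
    }

    void onTimestamp(int sec, int msec) {
        if (deferredTimestamp) {
            dumpLine();             // flush a timestamp that never got a body
        }
        timestamp = "[" + std::to_string(sec) + "." + std::to_string(msec) + "]";
        deferredTimestamp = true;
    }

    void onString(const std::string& s) {
        body += s;
        dumpLine();                 // a non-empty body always forces a flush
    }
};

int main() {
    LineDumper d;
    d.onTimestamp(12, 345);
    d.onString("warning: unknown event 7");
    d.onTimestamp(12, 350);                    // deferred...
    if (d.deferredTimestamp) d.dumpLine();     // ...flushed at end of dump
    return 0;
}
```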
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index f3ff792..b31e9e8 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -586,14 +586,15 @@
}
}
-void CameraSource::startCameraRecording() {
+status_t CameraSource::startCameraRecording() {
ALOGV("startCameraRecording");
// Reset the identity to the current thread because media server owns the
// camera and recording is started by the applications. The applications
// will connect to the camera in ICameraRecordingProxy::startRecording.
int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t err;
if (mNumInputBuffers > 0) {
- status_t err = mCamera->sendCommand(
+ err = mCamera->sendCommand(
CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
// This could happen for CameraHAL1 clients; thus the failure is
@@ -604,17 +605,25 @@
}
}
+ err = OK;
if (mCameraFlags & FLAGS_HOT_CAMERA) {
mCamera->unlock();
mCamera.clear();
- CHECK_EQ((status_t)OK,
- mCameraRecordingProxy->startRecording(new ProxyListener(this)));
+ if ((err = mCameraRecordingProxy->startRecording(
+ new ProxyListener(this))) != OK) {
+ ALOGE("Failed to start recording, received error: %s (%d)",
+ strerror(-err), err);
+ }
} else {
mCamera->setListener(new CameraSourceListener(this));
mCamera->startRecording();
- CHECK(mCamera->recordingEnabled());
+ if (!mCamera->recordingEnabled()) {
+ err = -EINVAL;
+ ALOGE("Failed to start recording");
+ }
}
IPCThreadState::self()->restoreCallingIdentity(token);
+ return err;
}
status_t CameraSource::start(MetaData *meta) {
@@ -646,10 +655,12 @@
}
}
- startCameraRecording();
+ status_t err;
+ if ((err = startCameraRecording()) == OK) {
+ mStarted = true;
+ }
- mStarted = true;
- return OK;
+ return err;
}
void CameraSource::stopCameraRecording() {
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 4756b3e..f80772a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -488,12 +488,12 @@
break;
}
uint32_t chunk_type = ntohl(hdr[1]);
- if (chunk_type == FOURCC('s', 'i', 'd', 'x')) {
- // parse the sidx box too
- continue;
- } else if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
// store the offset of the first segment
mMoofOffset = offset;
+ } else if (chunk_type != FOURCC('m', 'd', 'a', 't')) {
+ // keep parsing until we get to the data
+ continue;
}
break;
}
@@ -1921,9 +1921,10 @@
ALOGW("sub-sidx boxes not supported yet");
}
bool sap = d3 & 0x80000000;
- bool saptype = d3 >> 28;
- if (!sap || saptype > 2) {
- ALOGW("not a stream access point, or unsupported type");
+ uint32_t saptype = (d3 >> 28) & 7;
+ if (!sap || (saptype != 1 && saptype != 2)) {
+ // type 1 and 2 are sync samples
+ ALOGW("not a stream access point, or unsupported type: %08x", d3);
}
total_duration += d2;
offset += 12;
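The SAP check above extracts two fields from the third 32-bit word of each sidx reference: bit 31 is the starts-with-SAP flag and bits 30..28 are the SAP type, where types 1 and 2 are sync samples. A tiny worked example of that extraction using the same masks as the hunk (the sample value of d3 is made up):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    // d3 as read from the sidx reference, already converted to host byte order.
    uint32_t d3 = 0x90000000u;               // SAP flag set, SAP type = 1

    bool sap = d3 & 0x80000000;              // bit 31: starts_with_SAP
    uint32_t saptype = (d3 >> 28) & 7;       // bits 30..28: SAP_type

    assert(sap && saptype == 1);             // types 1 and 2 are sync samples

    // The removed code computed `bool saptype = d3 >> 28`, which collapses any
    // non-zero type (and the SAP flag itself) to 1 -- the bug fixed above by
    // masking with 7.
    return 0;
}
```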
@@ -2899,9 +2900,20 @@
}
}
if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
- // *offset points to the mdat box following this moof
- parseChunk(offset); // doesn't actually parse it, just updates offset
- mNextMoofOffset = *offset;
+ // *offset points to the box following this moof. Find the next moof from there.
+
+ while (true) {
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ return ERROR_END_OF_STREAM;
+ }
+ chunk_size = ntohl(hdr[0]);
+ chunk_type = ntohl(hdr[1]);
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ mNextMoofOffset = *offset;
+ break;
+ }
+ *offset += chunk_size;
+ }
}
break;
}
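The replacement hunk above locates the next 'moof' by walking box headers forward from *offset rather than re-parsing the chunk. A self-contained sketch of that walk over a fake in-memory stream; FOURCC() and readAt() are illustrative stand-ins for the extractor's helpers, and, like the hunk, the sketch does not handle the 64-bit "largesize" form:

```cpp
#include <arpa/inet.h>   // ntohl
#include <sys/types.h>   // ssize_t, off_t
#include <cstdint>
#include <cstdio>
#include <cstring>

// Same semantics as the FOURCC macro used throughout MPEG4Extractor.cpp.
static constexpr uint32_t FOURCC(char a, char b, char c, char d) {
    return (uint32_t(a) << 24) | (uint32_t(b) << 16) | (uint32_t(c) << 8) | uint32_t(d);
}

// Flat-buffer stand-in for mDataSource->readAt().
static ssize_t readAt(const uint8_t *src, size_t srcSize, off_t off, void *dst, size_t n) {
    if (off < 0 || (size_t) off + n > srcSize) return -1;
    memcpy(dst, src + off, n);
    return (ssize_t) n;
}

int main() {
    // Two fake boxes: an 8-byte 'skip' box followed by a 'moof' box.
    const uint8_t stream[] = {
        0,0,0,8,  's','k','i','p',
        0,0,0,16, 'm','o','o','f', 0,0,0,0, 0,0,0,0,
    };
    off_t offset = 0;
    uint32_t hdr[2];
    while (readAt(stream, sizeof(stream), offset, hdr, 8) == 8) {
        uint32_t chunk_size = ntohl(hdr[0]);   // 32-bit box size
        uint32_t chunk_type = ntohl(hdr[1]);   // box type
        if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
            std::printf("next moof at offset %lld\n", (long long) offset);
            break;
        }
        offset += chunk_size;                  // skip this box entirely
    }
    return 0;
}
```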
@@ -3706,7 +3718,7 @@
const SidxEntry *se = &mSegments[i];
if (totalTime + se->mDurationUs > seekTimeUs) {
// The requested time is somewhere in this segment
- if ((mode == ReadOptions::SEEK_NEXT_SYNC) ||
+ if ((mode == ReadOptions::SEEK_NEXT_SYNC && seekTimeUs > totalTime) ||
(mode == ReadOptions::SEEK_CLOSEST_SYNC &&
(seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
// requested next sync, or closest sync and it was closer to the end of
@@ -3719,11 +3731,19 @@
totalTime += se->mDurationUs;
totalOffset += se->mSize;
}
- mCurrentMoofOffset = totalOffset;
- mCurrentSamples.clear();
- mCurrentSampleIndex = 0;
- parseChunk(&totalOffset);
- mCurrentTime = totalTime * mTimescale / 1000000ll;
+ mCurrentMoofOffset = totalOffset;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ parseChunk(&totalOffset);
+ mCurrentTime = totalTime * mTimescale / 1000000ll;
+ } else {
+ // without sidx boxes, we can only seek to 0
+ mCurrentMoofOffset = mFirstMoofOffset;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ off64_t tmp = mCurrentMoofOffset;
+ parseChunk(&tmp);
+ mCurrentTime = 0;
}
if (mBuffer != NULL) {
@@ -3743,16 +3763,18 @@
newBuffer = true;
if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- // move to next fragment
- Sample lastSample = mCurrentSamples[mCurrentSamples.size() - 1];
- off64_t nextMoof = mNextMoofOffset; // lastSample.offset + lastSample.size;
+ // move to next fragment if there is one
+ if (mNextMoofOffset <= mCurrentMoofOffset) {
+ return ERROR_END_OF_STREAM;
+ }
+ off64_t nextMoof = mNextMoofOffset;
mCurrentMoofOffset = nextMoof;
mCurrentSamples.clear();
mCurrentSampleIndex = 0;
parseChunk(&nextMoof);
- if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- return ERROR_END_OF_STREAM;
- }
+ if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+ return ERROR_END_OF_STREAM;
+ }
}
const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 7672204..7144efd 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -574,7 +574,7 @@
if (originalTimeUs < mPrevOriginalTimeUs) {
// Drop the frame if it's going backward in time. Bad timestamp
// could disrupt encoder's rate control completely.
- ALOGV("Dropping frame that's going backward in time");
+ ALOGW("Dropping frame that's going backward in time");
return -1;
}
int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
@@ -593,6 +593,12 @@
status_t GraphicBufferSource::submitBuffer_l(
const BufferQueue::BufferItem &item, int cbi) {
ALOGV("submitBuffer_l cbi=%d", cbi);
+
+ int64_t timeUs = getTimestamp(item);
+ if (timeUs < 0ll) {
+ return UNKNOWN_ERROR;
+ }
+
CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
codecBuffer.mGraphicBuffer = mBufferSlot[item.mBuf];
codecBuffer.mBuf = item.mBuf;
@@ -606,12 +612,6 @@
memcpy(data, &type, 4);
memcpy(data + 4, &handle, sizeof(buffer_handle_t));
- int64_t timeUs = getTimestamp(item);
- if (timeUs < 0ll) {
- ALOGE("Dropping frame with bad timestamp");
- return UNKNOWN_ERROR;
- }
-
status_t err = mNodeInstance->emptyDirectBuffer(header, 0,
4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME,
timeUs);
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b74fa89..788559d 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1847,8 +1847,6 @@
// pre processing modules
RecordThread *thread = new RecordThread(this,
input,
- reqSamplingRate,
- reqChannelMask,
id,
primaryOutputDevice_l(),
*pDevices
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index fc3171f..3ec9889 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -47,6 +47,9 @@
static void appendDumpHeader(String8& result);
void dump(char* buffer, size_t size, bool active);
+ void handleSyncStartEvent(const sp<SyncEvent>& event);
+ void clearSyncStartEvent();
+
private:
friend class AudioFlinger; // for mState
@@ -59,4 +62,33 @@
// releaseBuffer() not overridden
bool mOverflow; // overflow on most recent attempt to fill client buffer
+
+ // updated by RecordThread::readInputParameters_l()
+ AudioResampler *mResampler;
+
+ // interleaved stereo pairs of fixed-point signed Q19.12
+ int32_t *mRsmpOutBuffer;
+ // current allocated frame count for the above, which may be larger than needed
+ size_t mRsmpOutFrameCount;
+
+ size_t mRsmpInUnrel; // unreleased frames remaining from
+ // most recent getNextBuffer
+ // for debug only
+
+ // rolling counter that is never cleared
+ int32_t mRsmpInFront; // next available frame
+
+ AudioBufferProvider::Buffer mSink; // references client's buffer sink in shared memory
+
+ // sync event triggering actual audio capture. Frames read before this event will
+ // be dropped and therefore not read by the application.
+ sp<SyncEvent> mSyncStartEvent;
+
+ // number of captured frames to drop after the start sync event has been received.
+ // when < 0, maximum frames to drop before starting capture even if sync event is
+ // not received
+ ssize_t mFramesToDrop;
+
+ // used by resampler to find source frames
+ ResamplerBufferProvider *mResamplerBufferProvider;
};
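The new per-track mRsmpInFront above pairs with the thread's mRsmpInRear: both are rolling counters that are never cleared, the fill level is simply rear - front, and the physical buffer position is the counter masked by the power-of-two capacity (mRsmpInFramesP2). A minimal sketch of that indexing convention under the same assumptions (power-of-two capacity, one writer, per-track readers); the type and member names are illustrative:

```cpp
#include <sys/types.h>   // ssize_t
#include <cassert>
#include <cstdint>
#include <cstddef>

// Rolling-counter ring indexing as used by mRsmpInFront / mRsmpInRear.
// Counters only ever increase; the difference gives the fill level, and
// masking by the power-of-two capacity gives the physical index.
struct RollingRing {
    size_t  capacityP2;   // must be a power of two (cf. mRsmpInFramesP2)
    int32_t rear;         // thread-owned: last filled frame + 1
    int32_t front;        // track-owned: next frame this client will read

    ssize_t filled() const                   { return rear - front; }
    size_t  index(int32_t counter) const     { return counter & (capacityP2 - 1); }

    // First contiguous readable chunk, mirroring ResamplerBufferProvider::getNextBuffer().
    size_t contiguous() const {
        ssize_t f = filled();
        assert(0 <= f && (size_t) f <= capacityP2);
        size_t part1 = capacityP2 - index(front);
        return part1 > (size_t) f ? (size_t) f : part1;
    }
};

int main() {
    RollingRing r{ /*capacityP2=*/ 1024, /*rear=*/ 3000, /*front=*/ 2952 };
    assert(r.filled() == 48);          // 48 frames buffered, counters never reset
    assert(r.index(r.front) == 904);   // 2952 & 1023: physical read position
    assert(r.contiguous() == 48);      // whole backlog happens to be contiguous here
    return 0;
}
```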
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b064e89..33c1178 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -274,7 +274,8 @@
mType(type),
mAudioFlinger(audioFlinger),
// mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
- // are set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
+ // are set by PlaybackThread::readOutputParameters_l() or
+ // RecordThread::readInputParameters_l()
mParamStatus(NO_ERROR),
//FIXME: mStandby should be true here. Is this some kind of hack?
mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
@@ -1108,7 +1109,7 @@
}
}
- readOutputParameters();
+ readOutputParameters_l();
// mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
// There is no AUDIO_STREAM_MIN, and ++ operator does not compile
@@ -1677,7 +1678,7 @@
return 0;
}
-void AudioFlinger::PlaybackThread::readOutputParameters()
+void AudioFlinger::PlaybackThread::readOutputParameters_l()
{
// unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
@@ -1765,7 +1766,7 @@
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
- // Note that mLock is not held when readOutputParameters() is called from the constructor
+ // Note that mLock is not held when readOutputParameters_l() is called from the constructor
// but in this case nothing is done below as no audio sessions have effect yet so it doesn't
// matter.
// create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
@@ -3485,7 +3486,7 @@
keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
- readOutputParameters();
+ readOutputParameters_l();
delete mAudioMixer;
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
for (size_t i = 0; i < mTracks.size() ; i++) {
@@ -3827,7 +3828,7 @@
keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
- readOutputParameters();
+ readOutputParameters_l();
sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
}
}
@@ -4450,8 +4451,6 @@
AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamIn *input,
- uint32_t sampleRate,
- audio_channel_mask_t channelMask,
audio_io_handle_t id,
audio_devices_t outDevice,
audio_devices_t inDevice
@@ -4460,14 +4459,9 @@
#endif
) :
ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
- mInput(input), mActiveTracksGen(0), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
- // mRsmpInFrames, mRsmpInFramesP2, mRsmpInUnrel, mRsmpInFront, and mRsmpInRear
- // are set by readInputParameters()
- // mRsmpInIndex LEGACY
- mReqChannelCount(popcount(channelMask)),
- mReqSampleRate(sampleRate)
- // mBytesRead is only meaningful while active, and so is cleared in start()
- // (but might be better to also clear here for dump?)
+ mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
+ // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
+ mRsmpInRear(0)
#ifdef TEE_SINK
, mTeeSink(teeSink)
#endif
@@ -4475,7 +4469,7 @@
snprintf(mName, kNameLength, "AudioIn_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
- readInputParameters();
+ readInputParameters_l();
}
@@ -4483,8 +4477,6 @@
{
mAudioFlinger->unregisterWriter(mNBLogWriter);
delete[] mRsmpInBuffer;
- delete mResampler;
- delete[] mRsmpOutBuffer;
}
void AudioFlinger::RecordThread::onFirstRef()
@@ -4498,12 +4490,6 @@
inputStandBy();
- // used to verify we've read at least once before evaluating how many bytes were read
- bool readOnce = false;
-
- // used to request a deferred sleep, to be executed later while mutex is unlocked
- bool doSleep = false;
-
reacquire_wakelock:
sp<RecordTrack> activeTrack;
int activeTracksGen;
@@ -4527,17 +4513,22 @@
}
}
- // start recording
+ // used to request a deferred sleep, to be executed later while mutex is unlocked
+ uint32_t sleepUs = 0;
+
+ // loop while there is work to do
for (;;) {
- TrackBase::track_state activeTrackState;
Vector< sp<EffectChain> > effectChains;
// sleep with mutex unlocked
- if (doSleep) {
- doSleep = false;
- usleep(kRecordThreadSleepUs);
+ if (sleepUs > 0) {
+ usleep(sleepUs);
+ sleepUs = 0;
}
+ // activeTracks accumulates a copy of a subset of mActiveTracks
+ Vector< sp<RecordTrack> > activeTracks;
+
{ // scope for mLock
Mutex::Autolock _l(mLock);
@@ -4571,236 +4562,300 @@
tmp.add(mActiveTracks[i]->uid());
}
updateWakeLockUids_l(tmp);
- // FIXME an arbitrary choice
- activeTrack = mActiveTracks[0];
}
- if (activeTrack->isTerminated()) {
- removeTrack_l(activeTrack);
- mActiveTracks.remove(activeTrack);
- mActiveTracksGen++;
- continue;
- }
+ bool doBroadcast = false;
+ for (size_t i = 0; i < size; ) {
- activeTrackState = activeTrack->mState;
- switch (activeTrackState) {
- case TrackBase::PAUSING:
- standbyIfNotAlreadyInStandby();
- mActiveTracks.remove(activeTrack);
- mActiveTracksGen++;
- mStartStopCond.broadcast();
- doSleep = true;
- continue;
-
- case TrackBase::RESUMING:
- mStandby = false;
- if (mReqChannelCount != activeTrack->channelCount()) {
+ activeTrack = mActiveTracks[i];
+ if (activeTrack->isTerminated()) {
+ removeTrack_l(activeTrack);
mActiveTracks.remove(activeTrack);
mActiveTracksGen++;
- mStartStopCond.broadcast();
+ size--;
continue;
}
- if (readOnce) {
- mStartStopCond.broadcast();
- // record start succeeds only if first read from audio input succeeds
- if (mBytesRead < 0) {
- mActiveTracks.remove(activeTrack);
- mActiveTracksGen++;
- continue;
- }
+
+ TrackBase::track_state activeTrackState = activeTrack->mState;
+ switch (activeTrackState) {
+
+ case TrackBase::PAUSING:
+ mActiveTracks.remove(activeTrack);
+ mActiveTracksGen++;
+ doBroadcast = true;
+ size--;
+ continue;
+
+ case TrackBase::STARTING_1:
+ sleepUs = 10000;
+ i++;
+ continue;
+
+ case TrackBase::STARTING_2:
+ doBroadcast = true;
+ mStandby = false;
activeTrack->mState = TrackBase::ACTIVE;
+ break;
+
+ case TrackBase::ACTIVE:
+ break;
+
+ case TrackBase::IDLE:
+ i++;
+ continue;
+
+ default:
+ LOG_FATAL("Unexpected activeTrackState %d", activeTrackState);
}
- break;
- case TrackBase::ACTIVE:
- break;
+ activeTracks.add(activeTrack);
+ i++;
- case TrackBase::IDLE:
- doSleep = true;
- continue;
-
- default:
- LOG_FATAL("Unexpected activeTrackState %d", activeTrackState);
}
+ if (doBroadcast) {
+ mStartStopCond.broadcast();
+ }
+
+ // sleep if there are no active tracks to process
+ if (activeTracks.size() == 0) {
+ if (sleepUs == 0) {
+ sleepUs = kRecordThreadSleepUs;
+ }
+ continue;
+ }
+ sleepUs = 0;
lockEffectChains_l(effectChains);
}
- // thread mutex is now unlocked, mActiveTracks unknown, activeTrack != 0, kept, immutable
- // activeTrack->mState unknown, activeTrackState immutable and is ACTIVE or RESUMING
+ // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
- for (size_t i = 0; i < effectChains.size(); i ++) {
+ size_t size = effectChains.size();
+ for (size_t i = 0; i < size; i++) {
// thread mutex is not locked, but effect chain is locked
effectChains[i]->process_l();
}
- AudioBufferProvider::Buffer buffer;
- buffer.frameCount = mFrameCount;
- status_t status = activeTrack->getNextBuffer(&buffer);
- if (status == NO_ERROR) {
- readOnce = true;
- size_t framesOut = buffer.frameCount;
- if (mResampler == NULL) {
- // no resampling
- while (framesOut) {
- size_t framesIn = mFrameCount - mRsmpInIndex;
- if (framesIn > 0) {
- int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize;
- int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) *
- activeTrack->mFrameSize;
- if (framesIn > framesOut) {
- framesIn = framesOut;
- }
- mRsmpInIndex += framesIn;
- framesOut -= framesIn;
- if (mChannelCount == mReqChannelCount) {
- memcpy(dst, src, framesIn * mFrameSize);
- } else {
- if (mChannelCount == 1) {
- upmix_to_stereo_i16_from_mono_i16((int16_t *)dst,
- (int16_t *)src, framesIn);
- } else {
- downmix_to_mono_i16_from_stereo_i16((int16_t *)dst,
- (int16_t *)src, framesIn);
- }
- }
- }
- if (framesOut > 0 && mFrameCount == mRsmpInIndex) {
- void *readInto;
- if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
- readInto = buffer.raw;
- framesOut = 0;
- } else {
- readInto = mRsmpInBuffer;
- mRsmpInIndex = 0;
- }
- mBytesRead = mInput->stream->read(mInput->stream, readInto, mBufferSize);
- if (mBytesRead <= 0) {
- // TODO: verify that it's benign to use a stale track state
- if ((mBytesRead < 0) && (activeTrackState == TrackBase::ACTIVE))
- {
- ALOGE("Error reading audio input");
- // Force input into standby so that it tries to
- // recover at next read attempt
- inputStandBy();
- doSleep = true;
- }
- mRsmpInIndex = mFrameCount;
- framesOut = 0;
- buffer.frameCount = 0;
- }
-#ifdef TEE_SINK
- else if (mTeeSink != 0) {
- (void) mTeeSink->write(readInto,
- mBytesRead >> Format_frameBitShift(mTeeSink->format()));
- }
-#endif
- }
- }
- } else {
- // resampling
+ // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
+ // Only the client(s) that are too slow will overrun. But if even the fastest client is too
+ // slow, then this RecordThread will overrun by not calling HAL read often enough.
+ // If destination is non-contiguous, first read past the nominal end of buffer, then
+ // copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
- // avoid busy-waiting if client doesn't keep up
- bool madeProgress = false;
-
- // keep mRsmpInBuffer full so resampler always has sufficient input
- for (;;) {
- int32_t rear = mRsmpInRear;
- ssize_t filled = rear - mRsmpInFront;
- ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2);
- // exit once there is enough data in buffer for resampler
- if ((size_t) filled >= mRsmpInFrames) {
- break;
- }
- size_t avail = mRsmpInFramesP2 - filled;
- // Only try to read full HAL buffers.
- // But if the HAL read returns a partial buffer, use it.
- if (avail < mFrameCount) {
- ALOGE("insufficient space to read: avail %d < mFrameCount %d",
- avail, mFrameCount);
- break;
- }
- // If 'avail' is non-contiguous, first read past the nominal end of buffer, then
- // copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
- rear &= mRsmpInFramesP2 - 1;
- mBytesRead = mInput->stream->read(mInput->stream,
- &mRsmpInBuffer[rear * mChannelCount], mBufferSize);
- if (mBytesRead <= 0) {
- ALOGE("read failed: mBytesRead=%d < %u", mBytesRead, mBufferSize);
- break;
- }
- ALOG_ASSERT((size_t) mBytesRead <= mBufferSize);
- size_t framesRead = mBytesRead / mFrameSize;
- ALOG_ASSERT(framesRead > 0);
- madeProgress = true;
- // If 'avail' was non-contiguous, we now correct for reading past end of buffer.
- size_t part1 = mRsmpInFramesP2 - rear;
- if (framesRead > part1) {
- memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount],
- (framesRead - part1) * mFrameSize);
- }
- mRsmpInRear += framesRead;
- }
-
- if (!madeProgress) {
- ALOGV("Did not make progress");
- usleep(((mFrameCount * 1000) / mSampleRate) * 1000);
- }
-
- // resampler accumulates, but we only have one source track
- memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
- mResampler->resample(mRsmpOutBuffer, framesOut,
- this /* AudioBufferProvider* */);
- // ditherAndClamp() works as long as all buffers returned by
- // activeTrack->getNextBuffer() are 32 bit aligned which should be always true.
- if (mReqChannelCount == 1) {
- // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
- ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
- // the resampler always outputs stereo samples:
- // do post stereo to mono conversion
- downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer,
- framesOut);
- } else {
- ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
- }
- // now done with mRsmpOutBuffer
-
- }
- if (mFramestoDrop == 0) {
- activeTrack->releaseBuffer(&buffer);
- } else {
- if (mFramestoDrop > 0) {
- mFramestoDrop -= buffer.frameCount;
- if (mFramestoDrop <= 0) {
- clearSyncStartEvent();
- }
- } else {
- mFramestoDrop += buffer.frameCount;
- if (mFramestoDrop >= 0 || mSyncStartEvent == 0 ||
- mSyncStartEvent->isCancelled()) {
- ALOGW("Synced record %s, session %d, trigger session %d",
- (mFramestoDrop >= 0) ? "timed out" : "cancelled",
- activeTrack->sessionId(),
- (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0);
- clearSyncStartEvent();
- }
- }
- }
- activeTrack->clearOverflow();
+ int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
+ ssize_t bytesRead = mInput->stream->read(mInput->stream,
+ &mRsmpInBuffer[rear * mChannelCount], mBufferSize);
+ if (bytesRead <= 0) {
+ ALOGE("read failed: bytesRead=%d < %u", bytesRead, mBufferSize);
+ // Force input into standby so that it tries to recover at next read attempt
+ inputStandBy();
+ sleepUs = kRecordThreadSleepUs;
+ continue;
}
- // client isn't retrieving buffers fast enough
- else {
- if (!activeTrack->setOverflow()) {
- nsecs_t now = systemTime();
- if ((now - lastWarning) > kWarningThrottleNs) {
- ALOGW("RecordThread: buffer overflow");
- lastWarning = now;
+ ALOG_ASSERT((size_t) bytesRead <= mBufferSize);
+ size_t framesRead = bytesRead / mFrameSize;
+ ALOG_ASSERT(framesRead > 0);
+ if (mTeeSink != 0) {
+ (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead);
+ }
+ // If destination is non-contiguous, we now correct for reading past end of buffer.
+ size_t part1 = mRsmpInFramesP2 - rear;
+ if (framesRead > part1) {
+ memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount],
+ (framesRead - part1) * mFrameSize);
+ }
+ rear = mRsmpInRear += framesRead;
+
+ size = activeTracks.size();
+ // loop over each active track
+ for (size_t i = 0; i < size; i++) {
+ activeTrack = activeTracks[i];
+
+ enum {
+ OVERRUN_UNKNOWN,
+ OVERRUN_TRUE,
+ OVERRUN_FALSE
+ } overrun = OVERRUN_UNKNOWN;
+
+ // loop over getNextBuffer to handle circular sink
+ for (;;) {
+
+ activeTrack->mSink.frameCount = ~0;
+ status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
+ size_t framesOut = activeTrack->mSink.frameCount;
+ LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
+
+ int32_t front = activeTrack->mRsmpInFront;
+ ssize_t filled = rear - front;
+ size_t framesIn;
+
+ if (filled < 0) {
+ // should not happen, but treat like a massive overrun and re-sync
+ framesIn = 0;
+ activeTrack->mRsmpInFront = rear;
+ overrun = OVERRUN_TRUE;
+ } else if ((size_t) filled <= mRsmpInFramesP2) {
+ framesIn = (size_t) filled;
+ } else {
+ // client is not keeping up with server, but give it latest data
+ framesIn = mRsmpInFramesP2;
+ activeTrack->mRsmpInFront = rear - framesIn;
+ overrun = OVERRUN_TRUE;
+ }
+
+ if (framesOut == 0 || framesIn == 0) {
+ break;
+ }
+
+ if (activeTrack->mResampler == NULL) {
+ // no resampling
+ if (framesIn > framesOut) {
+ framesIn = framesOut;
+ } else {
+ framesOut = framesIn;
+ }
+ int8_t *dst = activeTrack->mSink.i8;
+ while (framesIn > 0) {
+ front &= mRsmpInFramesP2 - 1;
+ size_t part1 = mRsmpInFramesP2 - front;
+ if (part1 > framesIn) {
+ part1 = framesIn;
+ }
+ int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize);
+ if (mChannelCount == activeTrack->mChannelCount) {
+ memcpy(dst, src, part1 * mFrameSize);
+ } else if (mChannelCount == 1) {
+ upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (int16_t *)src,
+ part1);
+ } else {
+ downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (int16_t *)src,
+ part1);
+ }
+ dst += part1 * activeTrack->mFrameSize;
+ front += part1;
+ framesIn -= part1;
+ }
+ activeTrack->mRsmpInFront += framesOut;
+
+ } else {
+ // resampling
+ // FIXME framesInNeeded should really be part of resampler API, and should
+ // depend on the SRC ratio
+ // to keep mRsmpInBuffer full so resampler always has sufficient input
+ size_t framesInNeeded;
+ // FIXME only re-calculate when it changes, and optimize for common ratios
+ double inOverOut = (double) mSampleRate / activeTrack->mSampleRate;
+ double outOverIn = (double) activeTrack->mSampleRate / mSampleRate;
+ framesInNeeded = ceil(framesOut * inOverOut) + 1;
+ if (framesIn < framesInNeeded) {
+ ALOGV("not enough to resample: have %u but need %u to produce %u "
+ "given in/out ratio of %.4g",
+ framesIn, framesInNeeded, framesOut, inOverOut);
+ size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * outOverIn) : 0;
+ size_t newFramesInNeeded = ceil(newFramesOut * inOverOut) + 1;
+ ALOGV("now need %u frames to produce %u given out/in ratio of %.4g",
+ newFramesInNeeded, newFramesOut, outOverIn);
+ if (framesIn < newFramesInNeeded) {
+ ALOGE("failure: have %u but need %u", framesIn, newFramesInNeeded);
+ framesOut = 0;
+ } else {
+ ALOGV("success 2: have %u and need %u to produce %u "
+ "given in/out ratio of %.4g",
+ framesIn, newFramesInNeeded, newFramesOut, inOverOut);
+ LOG_ALWAYS_FATAL_IF(newFramesOut > framesOut);
+ framesOut = newFramesOut;
+ }
+ } else {
+ ALOGI("success 1: have %u and need %u to produce %u "
+ "given in/out ratio of %.4g",
+ framesIn, framesInNeeded, framesOut, inOverOut);
+ }
+
+ // reallocate mRsmpOutBuffer as needed; we will grow but never shrink
+ if (activeTrack->mRsmpOutFrameCount < framesOut) {
+ delete[] activeTrack->mRsmpOutBuffer;
+ // resampler always outputs stereo
+ activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2];
+ activeTrack->mRsmpOutFrameCount = framesOut;
+ }
+
+ // resampler accumulates, but we only have one source track
+ memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
+ activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut,
+ activeTrack->mResamplerBufferProvider
+ /*this*/ /* AudioBufferProvider* */);
+ // ditherAndClamp() works as long as all buffers returned by
+ // activeTrack->getNextBuffer() are 32 bit aligned which should be always true.
+ if (activeTrack->mChannelCount == 1) {
+ // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
+ ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer,
+ framesOut);
+ // the resampler always outputs stereo samples:
+ // do post stereo to mono conversion
+ downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16,
+ (int16_t *)activeTrack->mRsmpOutBuffer, framesOut);
+ } else {
+ ditherAndClamp((int32_t *)activeTrack->mSink.raw,
+ activeTrack->mRsmpOutBuffer, framesOut);
+ }
+ // now done with mRsmpOutBuffer
+
+ }
+
+ if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
+ overrun = OVERRUN_FALSE;
+ }
+
+ if (activeTrack->mFramesToDrop == 0) {
+ if (framesOut > 0) {
+ activeTrack->mSink.frameCount = framesOut;
+ activeTrack->releaseBuffer(&activeTrack->mSink);
+ }
+ } else {
+ // FIXME could do a partial drop of framesOut
+ if (activeTrack->mFramesToDrop > 0) {
+ activeTrack->mFramesToDrop -= framesOut;
+ if (activeTrack->mFramesToDrop <= 0) {
+ activeTrack->clearSyncStartEvent();
+ }
+ } else {
+ activeTrack->mFramesToDrop += framesOut;
+ if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 ||
+ activeTrack->mSyncStartEvent->isCancelled()) {
+ ALOGW("Synced record %s, session %d, trigger session %d",
+ (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
+ activeTrack->sessionId(),
+ (activeTrack->mSyncStartEvent != 0) ?
+ activeTrack->mSyncStartEvent->triggerSession() : 0);
+ activeTrack->clearSyncStartEvent();
+ }
+ }
+ }
+
+ if (framesOut == 0) {
+ break;
}
}
- // Release the processor for a while before asking for a new buffer.
- // This will give the application more chance to read from the buffer and
- // clear the overflow.
- doSleep = true;
+
+ switch (overrun) {
+ case OVERRUN_TRUE:
+ // client isn't retrieving buffers fast enough
+ if (!activeTrack->setOverflow()) {
+ nsecs_t now = systemTime();
+                    // FIXME should lastWarning be per track?
+ if ((now - lastWarning) > kWarningThrottleNs) {
+ ALOGW("RecordThread: buffer overflow");
+ lastWarning = now;
+ }
+ }
+ break;
+ case OVERRUN_FALSE:
+ activeTrack->clearOverflow();
+ break;
+ case OVERRUN_UNKNOWN:
+ break;
+ }
+
}
// enable changes in effect chain
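The "not enough to resample" branch in the hunk above is what the commit subject refers to: instead of requiring a full client buffer of input, the thread now shrinks framesOut to whatever the buffered input can actually drive, down to a single frame. A short worked instance of the same arithmetic, with a 48 kHz HAL feeding a 44.1 kHz client chosen purely for illustration:

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>

// Reproduces the framesInNeeded / newFramesOut arithmetic from the hunk above.
int main() {
    const double inOverOut = 48000.0 / 44100.0;   // HAL rate / client rate, ~1.0884
    const double outOverIn = 44100.0 / 48000.0;   // ~0.91875

    size_t framesOut = 440;                       // what the client sink can accept
    size_t framesIn  = 100;                       // what is currently buffered

    // Would need ceil(440 * 1.0884) + 1 = 480 input frames -- not available.
    size_t framesInNeeded = (size_t) ceil(framesOut * inOverOut) + 1;
    assert(framesInNeeded == 480 && framesIn < framesInNeeded);

    // Shrink the output to what the buffered input can drive:
    // floor((100 - 1) * 0.91875) = 90 frames, needing ceil(90 * 1.0884) + 1 = 99 <= 100.
    size_t newFramesOut = framesIn > 0 ? (size_t) floor((framesIn - 1) * outOverIn) : 0;
    size_t newFramesInNeeded = (size_t) ceil(newFramesOut * inOverOut) + 1;
    assert(newFramesOut == 90 && newFramesInNeeded <= framesIn);

    // With only 3 input frames buffered the same formula still yields 1 output
    // frame -- the "output of single frame" case the commit subject allows.
    framesIn = 3;
    newFramesOut = (size_t) floor((framesIn - 1) * outOverIn);
    assert(newFramesOut == 1);
    return 0;
}
```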
@@ -4959,114 +5014,89 @@
status_t status = NO_ERROR;
if (event == AudioSystem::SYNC_EVENT_NONE) {
- clearSyncStartEvent();
+ recordTrack->clearSyncStartEvent();
} else if (event != AudioSystem::SYNC_EVENT_SAME) {
- mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
+ recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
triggerSession,
recordTrack->sessionId(),
syncStartEventCallback,
- this);
+ recordTrack);
// Sync event can be cancelled by the trigger session if the track is not in a
// compatible state in which case we start record immediately
- if (mSyncStartEvent->isCancelled()) {
- clearSyncStartEvent();
+ if (recordTrack->mSyncStartEvent->isCancelled()) {
+ recordTrack->clearSyncStartEvent();
} else {
// do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs
- mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000);
+ recordTrack->mFramesToDrop = -
+ ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000);
}
}
{
// This section is a rendezvous between binder thread executing start() and RecordThread
AutoMutex lock(mLock);
- if (mActiveTracks.size() > 0) {
- // FIXME does not work for multiple active tracks
- if (mActiveTracks.indexOf(recordTrack) != 0) {
- status = -EBUSY;
- } else if (recordTrack->mState == TrackBase::PAUSING) {
+ if (mActiveTracks.indexOf(recordTrack) >= 0) {
+ if (recordTrack->mState == TrackBase::PAUSING) {
+ ALOGV("active record track PAUSING -> ACTIVE");
recordTrack->mState = TrackBase::ACTIVE;
+ } else {
+ ALOGV("active record track state %d", recordTrack->mState);
}
return status;
}
- // FIXME why? already set in constructor, 'STARTING_1' would be more accurate
- recordTrack->mState = TrackBase::IDLE;
+    // TODO consider other ways of handling this, such as changing the state to STARTING_1 and
+ // adding the track to mActiveTracks after returning from AudioSystem::startInput(),
+ // or using a separate command thread
+ recordTrack->mState = TrackBase::STARTING_1;
mActiveTracks.add(recordTrack);
mActiveTracksGen++;
mLock.unlock();
status_t status = AudioSystem::startInput(mId);
mLock.lock();
- // FIXME should verify that mActiveTrack is still == recordTrack
+ // FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
mActiveTracks.remove(recordTrack);
mActiveTracksGen++;
- clearSyncStartEvent();
+ recordTrack->clearSyncStartEvent();
return status;
}
- // FIXME LEGACY
- mRsmpInIndex = mFrameCount;
- mRsmpInFront = 0;
- mRsmpInRear = 0;
- mRsmpInUnrel = 0;
- mBytesRead = 0;
- if (mResampler != NULL) {
- mResampler->reset();
+ // Catch up with current buffer indices if thread is already running.
+    // This is what makes a new client discard all buffered data. If the track's mRsmpInFront
+    // was initialized to some value behind the thread's mRsmpInRear, then the track could
+    // see previously buffered data before it called start(), but with greater risk of overrun.
+
+ recordTrack->mRsmpInFront = mRsmpInRear;
+ recordTrack->mRsmpInUnrel = 0;
+ // FIXME why reset?
+ if (recordTrack->mResampler != NULL) {
+ recordTrack->mResampler->reset();
}
- // FIXME hijacking a playback track state name which was intended for start after pause;
- // here 'STARTING_2' would be more accurate
- recordTrack->mState = TrackBase::RESUMING;
+ recordTrack->mState = TrackBase::STARTING_2;
// signal thread to start
- ALOGV("Signal record thread");
mWaitWorkCV.broadcast();
- // do not wait for mStartStopCond if exiting
- if (exitPending()) {
- mActiveTracks.remove(recordTrack);
- mActiveTracksGen++;
- status = INVALID_OPERATION;
- goto startError;
- }
- // FIXME incorrect usage of wait: no explicit predicate or loop
- mStartStopCond.wait(mLock);
if (mActiveTracks.indexOf(recordTrack) < 0) {
ALOGV("Record failed to start");
status = BAD_VALUE;
goto startError;
}
- ALOGV("Record started OK");
return status;
}
startError:
AudioSystem::stopInput(mId);
- clearSyncStartEvent();
+ recordTrack->clearSyncStartEvent();
+ // FIXME I wonder why we do not reset the state here?
return status;
}
-void AudioFlinger::RecordThread::clearSyncStartEvent()
-{
- if (mSyncStartEvent != 0) {
- mSyncStartEvent->cancel();
- }
- mSyncStartEvent.clear();
- mFramestoDrop = 0;
-}
-
void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
{
sp<SyncEvent> strongEvent = event.promote();
if (strongEvent != 0) {
- RecordThread *me = (RecordThread *)strongEvent->cookie();
- me->handleSyncStartEvent(strongEvent);
- }
-}
-
-void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event)
-{
- if (event == mSyncStartEvent) {
- // TODO: use actual buffer filling status instead of 2 buffers when info is available
- // from audio HAL
- mFramestoDrop = mFrameCount * 2;
+ RecordTrack *recordTrack = (RecordTrack *)strongEvent->cookie();
+ recordTrack->handleSyncStartEvent(strongEvent);
}
}
@@ -5151,13 +5181,9 @@
fdprintf(fd, "\nInput thread %p:\n", this);
if (mActiveTracks.size() > 0) {
- fdprintf(fd, " In index: %zu\n", mRsmpInIndex);
fdprintf(fd, " Buffer size: %zu bytes\n", mBufferSize);
- fdprintf(fd, " Resampling: %d\n", (mResampler != NULL));
- fdprintf(fd, " Out channel count: %u\n", mReqChannelCount);
- fdprintf(fd, " Out sample rate: %u\n", mReqSampleRate);
} else {
- fdprintf(fd, " No active record client\n");
+ fdprintf(fd, " No active record clients\n");
}
dumpBase(fd, args);
@@ -5209,15 +5235,25 @@
}
// AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
+status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
{
- int32_t rear = mRsmpInRear;
- int32_t front = mRsmpInFront;
+ RecordTrack *activeTrack = mRecordTrack;
+ sp<ThreadBase> threadBase = activeTrack->mThread.promote();
+ if (threadBase == 0) {
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+ }
+ RecordThread *recordThread = (RecordThread *) threadBase.get();
+ int32_t rear = recordThread->mRsmpInRear;
+ int32_t front = activeTrack->mRsmpInFront;
ssize_t filled = rear - front;
- ALOG_ASSERT(0 <= filled && (size_t) filled <= mRsmpInFramesP2);
+ // FIXME should not be P2 (don't want to increase latency)
+ // FIXME if client not keeping up, discard
+ ALOG_ASSERT(0 <= filled && (size_t) filled <= recordThread->mRsmpInFramesP2);
// 'filled' may be non-contiguous, so return only the first contiguous chunk
- front &= mRsmpInFramesP2 - 1;
- size_t part1 = mRsmpInFramesP2 - front;
+ front &= recordThread->mRsmpInFramesP2 - 1;
+ size_t part1 = recordThread->mRsmpInFramesP2 - front;
if (part1 > (size_t) filled) {
part1 = filled;
}
@@ -5228,29 +5264,31 @@
}
if (part1 == 0) {
// Higher-level should keep mRsmpInBuffer full, and not call resampler if empty
- ALOGE("RecordThread::getNextBuffer() starved");
+ LOG_FATAL("RecordThread::getNextBuffer() starved");
buffer->raw = NULL;
buffer->frameCount = 0;
- mRsmpInUnrel = 0;
+ activeTrack->mRsmpInUnrel = 0;
return NOT_ENOUGH_DATA;
}
- buffer->raw = mRsmpInBuffer + front * mChannelCount;
+ buffer->raw = recordThread->mRsmpInBuffer + front * recordThread->mChannelCount;
buffer->frameCount = part1;
- mRsmpInUnrel = part1;
+ activeTrack->mRsmpInUnrel = part1;
return NO_ERROR;
}
// AudioBufferProvider interface
-void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer(
+ AudioBufferProvider::Buffer* buffer)
{
+ RecordTrack *activeTrack = mRecordTrack;
size_t stepCount = buffer->frameCount;
if (stepCount == 0) {
return;
}
- ALOG_ASSERT(stepCount <= mRsmpInUnrel);
- mRsmpInUnrel -= stepCount;
- mRsmpInFront += stepCount;
+ ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel);
+ activeTrack->mRsmpInUnrel -= stepCount;
+ activeTrack->mRsmpInFront += stepCount;
buffer->raw = NULL;
buffer->frameCount = 0;
}
@@ -5265,11 +5303,14 @@
AudioParameter param = AudioParameter(keyValuePair);
int value;
audio_format_t reqFormat = mFormat;
- uint32_t reqSamplingRate = mReqSampleRate;
- audio_channel_mask_t reqChannelMask = audio_channel_in_mask_from_count(mReqChannelCount);
+ uint32_t samplingRate = mSampleRate;
+ audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(mChannelCount);
+ // TODO Investigate when this code runs. Check with audio policy when a sample rate and
+ // channel count change can be requested. Do we mandate the first client defines the
+ // HAL sampling rate and channel count or do we allow changes on the fly?
if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
- reqSamplingRate = value;
+ samplingRate = value;
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
@@ -5285,7 +5326,7 @@
if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) {
status = BAD_VALUE;
} else {
- reqChannelMask = mask;
+ channelMask = mask;
reconfig = true;
}
}
@@ -5350,15 +5391,15 @@
reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
(mInput->stream->common.get_sample_rate(&mInput->stream->common)
- <= (2 * reqSamplingRate)) &&
+ <= (2 * samplingRate)) &&
popcount(mInput->stream->common.get_channels(&mInput->stream->common))
<= FCC_2 &&
- (reqChannelMask == AUDIO_CHANNEL_IN_MONO ||
- reqChannelMask == AUDIO_CHANNEL_IN_STEREO)) {
+ (channelMask == AUDIO_CHANNEL_IN_MONO ||
+ channelMask == AUDIO_CHANNEL_IN_STEREO)) {
status = NO_ERROR;
}
if (status == NO_ERROR) {
- readInputParameters();
+ readInputParameters_l();
sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
}
}
@@ -5410,15 +5451,8 @@
mAudioFlinger->audioConfigChanged_l(event, mId, param2);
}
-void AudioFlinger::RecordThread::readInputParameters()
+void AudioFlinger::RecordThread::readInputParameters_l()
{
- delete[] mRsmpInBuffer;
- // mRsmpInBuffer is always assigned a new[] below
- delete[] mRsmpOutBuffer;
- mRsmpOutBuffer = NULL;
- delete mResampler;
- mResampler = NULL;
-
mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
mChannelCount = popcount(mChannelMask);
@@ -5429,24 +5463,20 @@
mFrameSize = audio_stream_frame_size(&mInput->stream->common);
mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
mFrameCount = mBufferSize / mFrameSize;
+ // This is the formula for calculating the temporary buffer size.
// With 3 HAL buffers, we can guarantee ability to down-sample the input by ratio of 2:1 to
// 1 full output buffer, regardless of the alignment of the available input.
+ // The "3" is somewhat arbitrary, and could probably be larger.
+ // A larger value should allow more old data to be read after a track calls start(),
+ // without increasing latency.
mRsmpInFrames = mFrameCount * 3;
mRsmpInFramesP2 = roundup(mRsmpInFrames);
+ delete[] mRsmpInBuffer;
// Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount];
- mRsmpInFront = 0;
- mRsmpInRear = 0;
- mRsmpInUnrel = 0;
- if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) {
- mResampler = AudioResampler::create(16, (int) mChannelCount, mReqSampleRate);
- mResampler->setSampleRate(mSampleRate);
- mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
- // resampler always outputs stereo
- mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2];
- }
- mRsmpInIndex = mFrameCount;
+ // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
+ // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
}
uint32_t AudioFlinger::RecordThread::getInputFramesLost()
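The sizing in readInputParameters_l() above combines three ingredients: three HAL buffers of history (mRsmpInFrames), rounding up to a power of two so the rolling-index masking works (mRsmpInFramesP2), and over-allocating by almost one HAL buffer so a read landing near the end of the ring may spill past it before being copied back to the start. A worked instance, assuming a HAL buffer of 320 stereo frames (values are illustrative):

```cpp
#include <cassert>
#include <cstddef>

// Round up to the next power of two (stand-in for libnbaio's roundup()).
static size_t roundup(size_t v) {
    size_t p = 1;
    while (p < v) p <<= 1;
    return p;
}

int main() {
    // Assumed HAL configuration, for illustration only.
    const size_t frameCount   = 320;   // frames per HAL buffer (mFrameCount)
    const size_t channelCount = 2;     // stereo capture (mChannelCount)

    size_t rsmpInFrames   = frameCount * 3;        // 960: 3 HAL buffers of history
    size_t rsmpInFramesP2 = roundup(rsmpInFrames); // 1024: power of two for index masking

    // Over-allocate so a HAL read starting near the end of the ring can run past
    // the nominal end; the overshoot is then memcpy'd back to the start.
    size_t allocFrames  = rsmpInFramesP2 + frameCount - 1;   // 1343 frames
    size_t allocSamples = allocFrames * channelCount;        // 2686 int16_t samples

    assert(rsmpInFrames == 960 && rsmpInFramesP2 == 1024);
    assert(allocFrames == 1343 && allocSamples == 2686);
    return 0;
}
```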
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 999fea3..fa3563c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -270,8 +270,8 @@
const sp<AudioFlinger> mAudioFlinger;
- // updated by PlaybackThread::readOutputParameters() or
- // RecordThread::readInputParameters()
+ // updated by PlaybackThread::readOutputParameters_l() or
+ // RecordThread::readInputParameters_l()
uint32_t mSampleRate;
size_t mFrameCount; // output HAL, direct output, record
audio_channel_mask_t mChannelMask;
@@ -478,7 +478,7 @@
status_t getTimestamp_l(AudioTimestamp& timestamp);
protected:
- // updated by readOutputParameters()
+ // updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
int16_t* mMixBuffer; // frame size aligned mix buffer
@@ -541,7 +541,7 @@
void removeTrack_l(const sp<Track>& track);
void broadcast_l();
- void readOutputParameters();
+ void readOutputParameters_l();
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -839,17 +839,28 @@
// record thread
-class RecordThread : public ThreadBase, public AudioBufferProvider
- // derives from AudioBufferProvider interface for use by resampler
+class RecordThread : public ThreadBase
{
public:
+ class RecordTrack;
+ class ResamplerBufferProvider : public AudioBufferProvider
+ // derives from AudioBufferProvider interface for use by resampler
+ {
+ public:
+ ResamplerBufferProvider(RecordTrack* recordTrack) : mRecordTrack(recordTrack) { }
+ virtual ~ResamplerBufferProvider() { }
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+ private:
+ RecordTrack * const mRecordTrack;
+ };
+
#include "RecordTracks.h"
RecordThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamIn *input,
- uint32_t sampleRate,
- audio_channel_mask_t channelMask,
audio_io_handle_t id,
audio_devices_t outDevice,
audio_devices_t inDevice
@@ -898,14 +909,11 @@
AudioStreamIn* clearInput();
virtual audio_stream_t* stream() const;
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
- virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
virtual bool checkForNewParameters_l();
virtual String8 getParameters(const String8& keys);
virtual void audioConfigChanged_l(int event, int param = 0);
- void readInputParameters();
+ void readInputParameters_l();
virtual uint32_t getInputFramesLost();
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
@@ -921,14 +929,11 @@
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
static void syncStartEventCallback(const wp<SyncEvent>& event);
- void handleSyncStartEvent(const sp<SyncEvent>& event);
virtual size_t frameCount() const { return mFrameCount; }
bool hasFastRecorder() const { return false; }
private:
- void clearSyncStartEvent();
-
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
@@ -944,34 +949,13 @@
int mActiveTracksGen;
Condition mStartStopCond;
- // updated by RecordThread::readInputParameters()
- AudioResampler *mResampler;
- // interleaved stereo pairs of fixed-point signed Q19.12
- int32_t *mRsmpOutBuffer;
-
// resampler converts input at HAL Hz to output at AudioRecord client Hz
int16_t *mRsmpInBuffer; // see new[] for details on the size
size_t mRsmpInFrames; // size of resampler input in frames
size_t mRsmpInFramesP2;// size rounded up to a power-of-2
- size_t mRsmpInUnrel; // unreleased frames remaining from
- // most recent getNextBuffer
- // these are rolling counters that are never cleared
- int32_t mRsmpInFront; // next available frame
+
+ // rolling index that is never cleared
int32_t mRsmpInRear; // last filled frame + 1
- size_t mRsmpInIndex; // FIXME legacy
-
- // client's requested configuration, which may differ from the HAL configuration
- const uint32_t mReqChannelCount;
- const uint32_t mReqSampleRate;
-
- ssize_t mBytesRead;
- // sync event triggering actual audio capture. Frames read before this event will
- // be dropped and therefore not read by the application.
- sp<SyncEvent> mSyncStartEvent;
- // number of captured frames to drop after the start sync event has been received.
- // when < 0, maximum frames to drop before starting capture even if sync event is
- // not received
- ssize_t mFramestoDrop;
// For dumpsys
const sp<NBAIO_Sink> mTeeSink;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 05fde7c..58705c4 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -34,7 +34,9 @@
RESUMING,
ACTIVE,
PAUSING,
- PAUSED
+ PAUSED,
+ STARTING_1, // for RecordTrack only
+ STARTING_2, // for RecordTrack only
};
TrackBase(ThreadBase *thread,
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e5152b8..92ed46a 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1785,17 +1785,34 @@
int uid)
: TrackBase(thread, client, sampleRate, format,
channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
- mOverflow(false)
+ mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
+ // See real initialization of mRsmpInFront at RecordThread::start()
+ mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
{
ALOGV("RecordTrack constructor");
if (mCblk != NULL) {
mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
}
+
+ uint32_t channelCount = popcount(channelMask);
+ // FIXME I don't understand either of the channel count checks
+ if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
+ channelCount <= FCC_2) {
+ // sink SR
+ mResampler = AudioResampler::create(16, thread->mChannelCount, sampleRate);
+ // source SR
+ mResampler->setSampleRate(thread->mSampleRate);
+ mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
+ mResamplerBufferProvider = new ResamplerBufferProvider(this);
+ }
}
AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
{
ALOGV("%s", __func__);
+ delete mResampler;
+ delete[] mRsmpOutBuffer;
+ delete mResamplerBufferProvider;
}
// AudioBufferProvider interface
@@ -1868,12 +1885,12 @@
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append(" Active Client Fmt Chn mask Session S Server fCount\n");
+ result.append(" Active Client Fmt Chn mask Session S Server fCount Resampling\n");
}
void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
{
- snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu\n",
+ snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n",
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
mFormat,
@@ -1881,7 +1898,32 @@
mSessionId,
mState,
mCblk->mServer,
- mFrameCount);
+ mFrameCount,
+ mResampler != NULL);
+
+}
+
+void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
+{
+ if (event == mSyncStartEvent) {
+ ssize_t framesToDrop = 0;
+ sp<ThreadBase> threadBase = mThread.promote();
+ if (threadBase != 0) {
+ // TODO: use actual buffer filling status instead of 2 buffers when info is available
+ // from audio HAL
+ framesToDrop = threadBase->mFrameCount * 2;
+ }
+ mFramesToDrop = framesToDrop;
+ }
+}
+
+void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
+{
+ if (mSyncStartEvent != 0) {
+ mSyncStartEvent->cancel();
+ mSyncStartEvent.clear();
+ }
+ mFramesToDrop = 0;
}
}; // namespace android