Merge "Tolerate 0 valid bits value in WAV_EXT" into jb-dev
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index a68ab4e..1fad383 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -162,6 +162,7 @@
INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
INVOKE_ID_SELECT_TRACK = 4,
INVOKE_ID_UNSELECT_TRACK = 5,
+ INVOKE_ID_SET_VIDEO_SCALING_MODE = 6,
};
// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
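A hedged sketch of how a client could exercise the new INVOKE_ID_SET_VIDEO_SCALING_MODE id through the generic MediaPlayer::invoke() path. It assumes an AOSP jb-dev tree with this patch applied; the helper name is hypothetical, and the parcel layout mirrors what AwesomePlayer::invoke() reads further down (method id first, then one int32 mode).

    #include <binder/Parcel.h>
    #include <media/mediaplayer.h>

    // Hypothetical helper, not part of this patch.
    static android::status_t setVideoScalingMode(
            const android::sp<android::MediaPlayer> &player, int32_t mode) {
        android::Parcel request, reply;
        request.writeInt32(android::INVOKE_ID_SET_VIDEO_SCALING_MODE);
        request.writeInt32(mode);  // e.g. NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW
        return player->invoke(request, &reply);
    }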
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 3891809..6d304e0 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -75,9 +75,8 @@
AUDIO_ENCODER_AMR_NB = 1,
AUDIO_ENCODER_AMR_WB = 2,
AUDIO_ENCODER_AAC = 3,
- AUDIO_ENCODER_AAC_PLUS = 4,
- AUDIO_ENCODER_EAAC_PLUS = 5,
- AUDIO_ENCODER_AAC_ELD = 6,
+ AUDIO_ENCODER_HE_AAC = 4,
+ AUDIO_ENCODER_AAC_ELD = 5,
AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
};
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index b8d925e..72827c1 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -22,6 +22,7 @@
#include <android/native_window.h>
#include <media/IOMX.h>
#include <media/stagefright/foundation/AHierarchicalStateMachine.h>
+#include <media/stagefright/SkipCutBuffer.h>
#include <OMX_Audio.h>
namespace android {
@@ -120,6 +121,9 @@
sp<ExecutingToIdleState> mExecutingToIdleState;
sp<IdleToLoadedState> mIdleToLoadedState;
sp<FlushingState> mFlushingState;
+ int32_t mEncoderDelay;
+ int32_t mEncoderPadding;
+ sp<SkipCutBuffer> mSkipCutBuffer;
AString mComponentName;
uint32_t mFlags;
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 887ce5d..81350ca 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -204,7 +204,7 @@
ReadOptions::SeekMode mSeekMode;
int64_t mTargetTimeUs;
bool mOutputPortSettingsChangedPending;
- SkipCutBuffer *mSkipCutBuffer;
+ sp<SkipCutBuffer> mSkipCutBuffer;
MediaBuffer *mLeftOverBuffer;
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 27851ca..2653b53 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -27,12 +27,11 @@
* utility class to cut the start and end off a stream of data in MediaBuffers
*
*/
-class SkipCutBuffer {
+class SkipCutBuffer: public RefBase {
public:
// 'skip' is the number of bytes to skip from the beginning
// 'cut' is the number of bytes to cut from the end
SkipCutBuffer(int32_t skip, int32_t cut);
- virtual ~SkipCutBuffer();
// Submit one MediaBuffer for skipping and cutting. This may consume all or
// some of the data in the buffer, or it may add data to it.
@@ -42,6 +41,9 @@
void clear();
size_t size(); // how many bytes are currently stored in the buffer
+ protected:
+ virtual ~SkipCutBuffer();
+
private:
void write(const char *src, size_t num);
size_t read(char *dst, size_t num);
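Since SkipCutBuffer now derives from RefBase and its destructor is protected, it has to be held through a strong pointer rather than deleted directly. A minimal usage sketch, assuming the AOSP sp<> template; skipBytes, cutBytes and mediaBuffer are placeholders:

    sp<SkipCutBuffer> scb = new SkipCutBuffer(skipBytes, cutBytes);
    scb->submit(mediaBuffer);  // trims the MediaBuffer in place
    scb.clear();               // dropping the last strong reference runs the protected dtor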
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index cd419bd..9391d16 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -896,9 +896,9 @@
int32_t old = android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags);
if (flags & AUDIO_OUTPUT_FLAG_FAST) {
if (old & CBLK_FAST) {
- ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", mCblk->frameCount);
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", mCblk->frameCount);
} else {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", mCblk->frameCount);
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", mCblk->frameCount);
}
if (sharedBuffer == 0) {
mNotificationFramesAct = mCblk->frameCount/2;
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index c08f033..6929efa 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -44,7 +44,8 @@
{"amrnb", AUDIO_ENCODER_AMR_NB},
{"amrwb", AUDIO_ENCODER_AMR_WB},
{"aac", AUDIO_ENCODER_AAC},
- {"aaceld", AUDIO_ENCODER_AAC_ELD},
+ {"heaac", AUDIO_ENCODER_HE_AAC},
+ {"aaceld", AUDIO_ENCODER_AAC_ELD}
};
const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
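A stand-alone sketch of the name-to-encoder lookup after this change. The table mirrors sAudioEncoderNameMap above, and the enum values restate the renumbered constants from mediarecorder.h (HE-AAC takes the old AAC+ slot 4, ELD moves to 5); the rest is illustrative.

    #include <cstdio>
    #include <cstring>

    // Restates the renumbered enum from include/media/mediarecorder.h.
    enum { AUDIO_ENCODER_AAC = 3, AUDIO_ENCODER_HE_AAC = 4, AUDIO_ENCODER_AAC_ELD = 5 };

    struct NameToTag { const char *name; int tag; };
    static const NameToTag kAudioEncoderNames[] = {
        {"aac", AUDIO_ENCODER_AAC},
        {"heaac", AUDIO_ENCODER_HE_AAC},
        {"aaceld", AUDIO_ENCODER_AAC_ELD},
    };

    static int audioEncoderForName(const char *name) {
        for (const NameToTag &entry : kAudioEncoderNames) {
            if (!strcmp(entry.name, name)) return entry.tag;
        }
        return -1;  // unknown encoder name
    }

    int main() {
        printf("heaac -> %d, aaceld -> %d\n",
               audioEncoderForName("heaac"), audioEncoderForName("aaceld"));  // 4, 5
        return 0;
    }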
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 4b318ed..abc8899 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -611,7 +611,7 @@
channels, sample->getIMemory(), AUDIO_OUTPUT_FLAG_NONE, callback, userData);
#else
newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
- channels, frameCount, AUDIO_OUTPUT_FLAG_NONE, callback, userData,
+ channels, frameCount, AUDIO_OUTPUT_FLAG_FAST, callback, userData,
bufferFrames);
#endif
oldTrack = mAudioTrack;
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index eac71c5..253602d 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1024,7 +1024,7 @@
AUDIO_FORMAT_PCM_16_BIT,
AUDIO_CHANNEL_OUT_MONO,
0, // frameCount
- AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_OUTPUT_FLAG_FAST,
audioCallback,
this, // user
0, // notificationFrames
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index b676cc7..727fd0d 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -820,10 +820,15 @@
mime = MEDIA_MIMETYPE_AUDIO_AAC;
encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectLC);
break;
+ case AUDIO_ENCODER_HE_AAC:
+ mime = MEDIA_MIMETYPE_AUDIO_AAC;
+ encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectHE);
+ break;
case AUDIO_ENCODER_AAC_ELD:
mime = MEDIA_MIMETYPE_AUDIO_AAC;
encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectELD);
break;
+
default:
ALOGE("Unknown audio encoder: %d", mAudioEncoder);
return NULL;
@@ -844,7 +849,6 @@
OMXClient client;
CHECK_EQ(client.connect(), (status_t)OK);
-
sp<MediaSource> audioEncoder =
OMXCodec::Create(client.interface(), encMeta,
true /* createEncoder */, audioSource);
@@ -859,6 +863,7 @@
CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
CHECK(mAudioEncoder == AUDIO_ENCODER_AAC ||
+ mAudioEncoder == AUDIO_ENCODER_HE_AAC ||
mAudioEncoder == AUDIO_ENCODER_AAC_ELD);
CHECK(mAudioSource != AUDIO_SOURCE_CNT);
@@ -977,7 +982,9 @@
sp<MediaWriter> writer = new MPEG2TSWriter(mOutputFd);
if (mAudioSource != AUDIO_SOURCE_CNT) {
- if (mAudioEncoder != AUDIO_ENCODER_AAC) {
+ if (mAudioEncoder != AUDIO_ENCODER_AAC &&
+ mAudioEncoder != AUDIO_ENCODER_HE_AAC &&
+ mAudioEncoder != AUDIO_ENCODER_AAC_ELD) {
return ERROR_UNSUPPORTED;
}
@@ -1442,6 +1449,7 @@
case AUDIO_ENCODER_AMR_NB:
case AUDIO_ENCODER_AMR_WB:
case AUDIO_ENCODER_AAC:
+ case AUDIO_ENCODER_HE_AAC:
case AUDIO_ENCODER_AAC_ELD:
break;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index f1467c4..2a770cd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -39,7 +39,6 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/SkipCutBuffer.h>
#include <gui/ISurfaceTexture.h>
#include "avc_utils.h"
@@ -64,13 +63,10 @@
mSkipRenderingVideoUntilMediaTimeUs(-1ll),
mVideoLateByUs(0ll),
mNumFramesTotal(0ll),
- mNumFramesDropped(0ll),
- mSkipCutBuffer(NULL) {
+ mNumFramesDropped(0ll) {
}
NuPlayer::~NuPlayer() {
- delete mSkipCutBuffer;
- mSkipCutBuffer = NULL;
}
void NuPlayer::setUID(uid_t uid) {
@@ -238,32 +234,6 @@
mSource->start();
- sp<MetaData> meta = mSource->getFormat(true /* audio */);
- if (meta != NULL) {
- int32_t delay = 0;
- if (!meta->findInt32(kKeyEncoderDelay, &delay)) {
- delay = 0;
- }
- int32_t padding = 0;
- if (!meta->findInt32(kKeyEncoderPadding, &padding)) {
- padding = 0;
- }
- int32_t numchannels = 0;
- if (delay + padding) {
- if (meta->findInt32(kKeyChannelCount, &numchannels)) {
- size_t frameSize = numchannels * sizeof(int16_t);
- if (mSkipCutBuffer) {
- size_t prevbuffersize = mSkipCutBuffer->size();
- if (prevbuffersize != 0) {
- ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
- }
- delete mSkipCutBuffer;
- }
- mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
- }
- }
- }
-
mRenderer = new Renderer(
mAudioSink,
new AMessage(kWhatRendererNotify, id()));
@@ -892,10 +862,6 @@
skipUntilMediaTimeUs = -1;
}
- if (audio && mSkipCutBuffer) {
- mSkipCutBuffer->submit(buffer);
- }
-
mRenderer->queueBuffer(audio, buffer, reply);
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index f917f64..25766e0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -27,7 +27,6 @@
struct ACodec;
struct MetaData;
struct NuPlayerDriver;
-class SkipCutBuffer;
struct NuPlayer : public AHandler {
NuPlayer();
@@ -129,8 +128,6 @@
int64_t mVideoLateByUs;
int64_t mNumFramesTotal, mNumFramesDropped;
- SkipCutBuffer *mSkipCutBuffer;
-
status_t instantiateDecoder(bool audio, sp<Decoder> *decoder);
status_t feedDecoderInputData(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 25974b6..d18d146 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -124,6 +124,15 @@
msg->setInt32("channel-count", numChannels);
msg->setInt32("sample-rate", sampleRate);
+ int32_t delay = 0;
+ if (meta->findInt32(kKeyEncoderDelay, &delay)) {
+ msg->setInt32("encoder-delay", delay);
+ }
+ int32_t padding = 0;
+ if (meta->findInt32(kKeyEncoderPadding, &padding)) {
+ msg->setInt32("encoder-padding", padding);
+ }
+
int32_t isADTS;
if (meta->findInt32(kKeyIsADTS, &isADTS) && isADTS != 0) {
msg->setInt32("is-adts", true);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 253bc2f..441cbf3 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -103,7 +103,7 @@
}
status_t NuPlayerDriver::prepare() {
- sendEvent(MEDIA_SET_VIDEO_SIZE, 320, 240);
+ sendEvent(MEDIA_SET_VIDEO_SIZE, 0, 0);
return OK;
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index c303146..0de2d0a 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -889,6 +889,13 @@
}
}
+ if (!msg->findInt32("encoder-delay", &mEncoderDelay)) {
+ mEncoderDelay = 0;
+ }
+ if (!msg->findInt32("encoder-padding", &mEncoderPadding)) {
+ mEncoderPadding = 0;
+ }
+
int32_t maxInputSize;
if (msg->findInt32("max-input-size", &maxInputSize)) {
err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
@@ -2003,6 +2010,17 @@
notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ if (mEncoderDelay + mEncoderPadding) {
+ size_t frameSize = params.nChannels * sizeof(int16_t);
+ if (mSkipCutBuffer != NULL) {
+ size_t prevbufsize = mSkipCutBuffer->size();
+ if (prevbufsize != 0) {
+ ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize);
+ }
+ }
+ mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize,
+ mEncoderPadding * frameSize);
+ }
break;
}
@@ -2417,6 +2435,9 @@
info->mData->setRange(rangeOffset, rangeLength);
}
+ if (mCodec->mSkipCutBuffer != NULL) {
+ mCodec->mSkipCutBuffer->submit(info->mData);
+ }
info->mData->meta()->setInt64("timeUs", timeUs);
sp<AMessage> notify = mCodec->mNotify->dup();
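The values handed to SkipCutBuffer are byte counts derived from the per-channel frame counts carried in "encoder-delay" and "encoder-padding". A stand-alone sketch of the arithmetic; the 2112/288 figures are illustrative (a commonly quoted AAC encoder delay), not taken from this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t encoderDelay   = 2112;  // leading samples per channel (illustrative)
        int32_t encoderPadding = 288;   // trailing samples per channel (illustrative)
        int32_t channels       = 2;     // decoder outputs 16-bit PCM

        size_t frameSize = channels * sizeof(int16_t);  // bytes per PCM frame
        size_t skipBytes = encoderDelay * frameSize;    // dropped from the stream start
        size_t cutBytes  = encoderPadding * frameSize;  // dropped from the stream end

        printf("skip %zu bytes, cut %zu bytes\n", skipBytes, cutBytes);  // 8448 / 1152
        return 0;
    }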
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2169cac..e2e5091 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -91,8 +91,6 @@
LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
libstagefright_aacenc \
- libstagefright_avcenc \
- libstagefright_m4vh263enc \
libstagefright_matroska \
libstagefright_timedtext \
libvpx \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 2e0b013..8fb0d8d 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -117,7 +117,9 @@
CHECK(success);
if(!format->findInt32(kKeyChannelMask, &channelMask)) {
- ALOGW("source format didn't specify channel mask, using channel order");
+ // log only when there's a risk of ambiguity of channel mask selection
+ ALOGI_IF(numChannels > 2,
+ "source format didn't specify channel mask, using (%d) channel order", numChannels);
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
}
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 1387e74..cc3fae6 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -184,6 +184,7 @@
mAudioPlayer(NULL),
mDisplayWidth(0),
mDisplayHeight(0),
+ mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
mFlags(0),
mExtractorFlags(0),
mVideoBuffer(NULL),
@@ -1081,6 +1082,8 @@
// before creating a new one.
IPCThreadState::self()->flushCommands();
+ // Even if setting the scaling mode fails, we continue anyway

+ setVideoScalingMode_l(mVideoScalingMode);
if (USE_SURFACE_ALLOC
&& !strncmp(component, "OMX.", 4)
&& strncmp(component, "OMX.google.", 11)
@@ -2362,6 +2365,23 @@
return mExtractor->countTracks() + mTextDriver->countExternalTracks();
}
+status_t AwesomePlayer::setVideoScalingMode(int32_t mode) {
+ Mutex::Autolock lock(mLock);
+ return setVideoScalingMode_l(mode);
+}
+
+status_t AwesomePlayer::setVideoScalingMode_l(int32_t mode) {
+ mVideoScalingMode = mode;
+ if (mNativeWindow != NULL) {
+ status_t err = native_window_set_scaling_mode(
+ mNativeWindow.get(), mVideoScalingMode);
+ if (err != OK) {
+ ALOGW("Failed to set scaling mode: %d", err);
+ }
+ }
+ return OK;
+}
+
status_t AwesomePlayer::invoke(const Parcel &request, Parcel *reply) {
if (NULL == reply) {
return android::BAD_VALUE;
@@ -2372,6 +2392,12 @@
return ret;
}
switch(methodId) {
+ case INVOKE_ID_SET_VIDEO_SCALING_MODE:
+ {
+ int mode = request.readInt32();
+ return setVideoScalingMode(mode);
+ }
+
case INVOKE_ID_GET_TRACK_INFO:
{
return getTrackInfo(reply);
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index c39aa77..9f6d4a3 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -61,11 +61,6 @@
// These are currently still used by the video editing suite.
addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
- addMediaCodec(true /* encoder */, "AVCEncoder", "video/avc");
-
- addMediaCodec(true /* encoder */, "M4vH263Encoder");
- addType("video/3gpp");
- addType("video/mp4v-es");
addMediaCodec(
false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 78b2469..d6075cd 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -220,6 +220,15 @@
msg->setInt32("channel-count", numChannels);
msg->setInt32("sample-rate", sampleRate);
+ int32_t delay = 0;
+ if (meta->findInt32(kKeyEncoderDelay, &delay)) {
+ msg->setInt32("encoder-delay", delay);
+ }
+ int32_t padding = 0;
+ if (meta->findInt32(kKeyEncoderPadding, &padding)) {
+ msg->setInt32("encoder-padding", padding);
+ }
+
int32_t isADTS;
if (meta->findInt32(kKeyIsADTS, &isADTS)) {
msg->setInt32("is-adts", true);
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 245d941..56016a8 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -19,8 +19,6 @@
#include <utils/Log.h>
#include "include/AACEncoder.h"
-#include "include/AVCEncoder.h"
-#include "include/M4vH263Encoder.h"
#include "include/ESDS.h"
@@ -67,8 +65,6 @@
#define FACTORY_REF(name) { #name, Make##name },
FACTORY_CREATE_ENCODER(AACEncoder)
-FACTORY_CREATE_ENCODER(AVCEncoder)
-FACTORY_CREATE_ENCODER(M4vH263Encoder)
static sp<MediaSource> InstantiateSoftwareEncoder(
const char *name, const sp<MediaSource> &source,
@@ -80,8 +76,6 @@
static const FactoryInfo kFactoryInfo[] = {
FACTORY_REF(AACEncoder)
- FACTORY_REF(AVCEncoder)
- FACTORY_REF(M4vH263Encoder)
};
for (size_t i = 0;
i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
@@ -1434,9 +1428,6 @@
free(mMIME);
mMIME = NULL;
-
- delete mSkipCutBuffer;
- mSkipCutBuffer = NULL;
}
status_t OMXCodec::init() {
@@ -1610,14 +1601,13 @@
}
int32_t numchannels = 0;
if (delay + padding) {
- if (meta->findInt32(kKeyChannelCount, &numchannels)) {
+ if (mOutputFormat->findInt32(kKeyChannelCount, &numchannels)) {
size_t frameSize = numchannels * sizeof(int16_t);
- if (mSkipCutBuffer) {
+ if (mSkipCutBuffer != NULL) {
size_t prevbuffersize = mSkipCutBuffer->size();
if (prevbuffersize != 0) {
ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
}
- delete mSkipCutBuffer;
}
mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
}
@@ -1693,13 +1683,6 @@
return err;
}
- err = native_window_set_scaling_mode(mNativeWindow.get(),
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-
- if (err != OK) {
- return err;
- }
-
err = native_window_set_buffers_geometry(
mNativeWindow.get(),
def.format.video.nFrameWidth,
@@ -1923,14 +1906,6 @@
return err;
}
- err = native_window_set_scaling_mode(mNativeWindow.get(),
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- if (err != NO_ERROR) {
- ALOGE("error pushing blank frames: set_buffers_geometry failed: %s (%d)",
- strerror(-err), -err);
- goto error;
- }
-
err = native_window_set_buffers_geometry(mNativeWindow.get(), 1, 1,
HAL_PIXEL_FORMAT_RGBX_8888);
if (err != NO_ERROR) {
@@ -2541,7 +2516,7 @@
CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
mPortBuffers[portIndex].size());
- if (mSkipCutBuffer && mPortStatus[kPortIndexOutput] == ENABLED) {
+ if (mSkipCutBuffer != NULL && mPortStatus[kPortIndexOutput] == ENABLED) {
mSkipCutBuffer->clear();
}
@@ -3863,7 +3838,7 @@
info->mStatus = OWNED_BY_CLIENT;
info->mMediaBuffer->add_ref();
- if (mSkipCutBuffer) {
+ if (mSkipCutBuffer != NULL) {
mSkipCutBuffer->submit(info->mMediaBuffer);
}
*buffer = info->mMediaBuffer;
diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
index 76f7946..3f0b2c2 100644
--- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
+++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
@@ -100,7 +100,7 @@
mDelegate->initiateConnection(mURI.c_str(), &mHeaders, offset);
- while (mState == CONNECTING) {
+ while (mState == CONNECTING || mState == DISCONNECTING) {
mCondition.wait(mLock);
}
@@ -110,6 +110,13 @@
void ChromiumHTTPDataSource::onConnectionEstablished(
int64_t contentSize, const char *contentType) {
Mutex::Autolock autoLock(mLock);
+
+ if (mState != CONNECTING) {
+ // We may have initiated disconnection.
+ CHECK_EQ(mState, DISCONNECTING);
+ return;
+ }
+
mState = CONNECTED;
mContentSize = (contentSize < 0) ? -1 : contentSize + mCurrentOffset;
mContentType = String8(contentType);
@@ -255,6 +262,7 @@
mState = DISCONNECTED;
// mURI.clear();
+ mIOResult = -ENOTCONN;
mCondition.broadcast();
}
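The connect path now also waits out a DISCONNECTING state, so a racing disconnect cannot leave the caller stuck behind a stale CONNECTING flag, and mIOResult is primed with -ENOTCONN so an interrupted connect reports a failure rather than success. A minimal stand-alone sketch of the same wait pattern with standard C++ primitives; only the state names mirror the data source, everything else is illustrative:

    #include <condition_variable>
    #include <mutex>

    enum State { DISCONNECTED, CONNECTING, CONNECTED, DISCONNECTING };

    struct Connection {
        std::mutex lock;
        std::condition_variable cond;
        State state = DISCONNECTED;
        int ioResult = 0;  // set to a negative errno (e.g. -ENOTCONN) on teardown

        int waitForConnection() {
            std::unique_lock<std::mutex> l(lock);
            // Block while either a connect or a concurrent disconnect is in flight.
            cond.wait(l, [this] {
                return state != CONNECTING && state != DISCONNECTING;
            });
            return state == CONNECTED ? 0 : ioResult;
        }
    };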
diff --git a/media/libstagefright/chromium_http/support.cpp b/media/libstagefright/chromium_http/support.cpp
index f15014e..13ae3df 100644
--- a/media/libstagefright/chromium_http/support.cpp
+++ b/media/libstagefright/chromium_http/support.cpp
@@ -490,6 +490,10 @@
}
void SfDelegate::onInitiateDisconnect() {
+ if (mURLRequest == NULL) {
+ return;
+ }
+
mURLRequest->Cancel();
delete mURLRequest;
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 547a554..bf7befd 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -142,9 +142,9 @@
aacParams->nSampleRate = 44100;
aacParams->nFrameLength = 0;
} else {
- aacParams->nChannels = mStreamInfo->channelConfig;
- aacParams->nSampleRate = mStreamInfo->aacSampleRate;
- aacParams->nFrameLength = mStreamInfo->aacSamplesPerFrame;
+ aacParams->nChannels = mStreamInfo->numChannels;
+ aacParams->nSampleRate = mStreamInfo->sampleRate;
+ aacParams->nFrameLength = mStreamInfo->frameSize;
}
return OMX_ErrorNone;
@@ -175,7 +175,7 @@
pcmParams->nChannels = 1;
pcmParams->nSamplingRate = 44100;
} else {
- pcmParams->nChannels = mStreamInfo->channelConfig;
+ pcmParams->nChannels = mStreamInfo->numChannels;
pcmParams->nSamplingRate = mStreamInfo->sampleRate;
}
@@ -185,6 +185,7 @@
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
+
}
OMX_ERRORTYPE SoftAAC2::internalSetParameter(
@@ -254,7 +255,6 @@
UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};
- AAC_DECODER_ERROR decoderErr;
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
@@ -277,7 +277,6 @@
notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
return;
}
-
inQueue.erase(inQueue.begin());
info->mOwnedByUs = false;
notifyEmptyBufferDone(header);
@@ -303,10 +302,16 @@
// the AACDEC_FLUSH flag set
INT_PCM *outBuffer =
reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
- decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
- outBuffer,
- outHeader->nAllocLen,
- AACDEC_FLUSH);
+ AAC_DECODER_ERROR decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+ outBuffer,
+ outHeader->nAllocLen,
+ AACDEC_FLUSH);
+ if (decoderErr != AAC_DEC_OK) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+
outHeader->nFilledLen =
mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
@@ -352,23 +357,27 @@
inBufferLength[0] = inHeader->nFilledLen;
}
-
// Fill and decode
INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
bytesValid[0] = inBufferLength[0];
int flags = mInputDiscontinuity ? AACDEC_INTR : 0;
int prevSampleRate = mStreamInfo->sampleRate;
- decoderErr = aacDecoder_Fill(mAACDecoder,
- inBuffer,
- inBufferLength,
- bytesValid);
+ int prevNumChannels = mStreamInfo->numChannels;
- decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
- outBuffer,
- outHeader->nAllocLen,
- flags);
+ AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
+ while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+ aacDecoder_Fill(mAACDecoder,
+ inBuffer,
+ inBufferLength,
+ bytesValid);
+ decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+ outBuffer,
+ outHeader->nAllocLen,
+ flags);
+
+ }
mInputDiscontinuity = false;
/*
@@ -386,7 +395,8 @@
* AAC+/eAAC+ until the first data frame is decoded.
*/
if (mInputBufferCount <= 2) {
- if (mStreamInfo->sampleRate != prevSampleRate) {
+ if (mStreamInfo->sampleRate != prevSampleRate ||
+ mStreamInfo->numChannels != prevNumChannels) {
// We're going to want to revisit this input buffer, but
// may have already advanced the offset. Undo that if
// necessary.
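The previous single Fill/DecodeFrame pair could come back with "not enough bits" when an input buffer held only part of an access unit; the new loop keeps feeding the decoder until it either produces a frame or the input runs dry. A stand-alone sketch of that control flow; the stubs only stand in for aacDecoder_Fill and aacDecoder_DecodeFrame and are not the FDK API:

    #include <cstdio>

    enum DecErr { DEC_OK, DEC_NOT_ENOUGH_BITS };

    // Illustrative stubs: "fill" consumes up to 64 bytes per call, "decodeFrame"
    // succeeds once a whole 128-byte frame has been buffered.
    static void fill(unsigned &bytesValid) { bytesValid = bytesValid > 64 ? bytesValid - 64 : 0; }
    static DecErr decodeFrame(unsigned buffered) { return buffered >= 128 ? DEC_OK : DEC_NOT_ENOUGH_BITS; }

    int main() {
        unsigned bytesValid = 200;  // unconsumed bytes left in this input buffer
        unsigned buffered = 0;
        DecErr err = DEC_NOT_ENOUGH_BITS;
        while (bytesValid > 0 && err == DEC_NOT_ENOUGH_BITS) {
            unsigned before = bytesValid;
            fill(bytesValid);              // hand more of the buffer to the decoder
            buffered += before - bytesValid;
            err = decodeFrame(buffered);   // retry until a full frame is available
        }
        printf("decode result: %s\n", err == DEC_OK ? "frame" : "starved");
        return 0;
    }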
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 4947fb2..7719435 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -239,7 +239,6 @@
mBitRate = aacParams->nBitRate;
mNumChannels = aacParams->nChannels;
mSampleRate = aacParams->nSampleRate;
-
if (aacParams->eAACProfile != OMX_AUDIO_AACObjectNull) {
mAACProfile = aacParams->eAACProfile;
}
@@ -262,7 +261,6 @@
mNumChannels = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
-
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
@@ -275,7 +273,7 @@
}
}
-CHANNEL_MODE getChannelMode(OMX_U32 nChannels) {
+static CHANNEL_MODE getChannelMode(OMX_U32 nChannels) {
CHANNEL_MODE chMode = MODE_INVALID;
switch (nChannels) {
case 1: chMode = MODE_1; break;
@@ -289,6 +287,19 @@
return chMode;
}
+static AUDIO_OBJECT_TYPE getAOTFromProfile(OMX_U32 profile) {
+ if (profile == OMX_AUDIO_AACObjectLC) {
+ return AOT_AAC_LC;
+ } else if (profile == OMX_AUDIO_AACObjectHE) {
+ return AOT_SBR;
+ } else if (profile == OMX_AUDIO_AACObjectELD) {
+ return AOT_ER_AAC_ELD;
+ } else {
+ ALOGW("Unsupported AAC profile - defaulting to AAC-LC");
+ return AOT_AAC_LC;
+ }
+}
+
status_t SoftAACEncoder2::setAudioParams() {
// We call this whenever sample rate, number of channels or bitrate change
// in response to setParameter calls.
@@ -297,7 +308,7 @@
mSampleRate, mNumChannels, mBitRate);
if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_AOT,
- mAACProfile == OMX_AUDIO_AACObjectELD ? AOT_ER_AAC_ELD : AOT_AAC_LC)) {
+ getAOTFromProfile(mAACProfile))) {
ALOGE("Failed to set AAC encoder parameters");
return UNKNOWN_ERROR;
}
@@ -341,12 +352,17 @@
}
if (AACENC_OK != aacEncEncode(mAACEncoder, NULL, NULL, NULL, NULL)) {
- ALOGE("Failed to initialize AAC encoder");
+ ALOGE("Unable to initialize encoder for profile / sample-rate / bit-rate / channels");
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
return;
}
+ OMX_U32 actualBitRate = aacEncoder_GetParam(mAACEncoder, AACENC_BITRATE);
+ if (mBitRate != actualBitRate) {
+ ALOGW("Requested bitrate %lu unsupported, using %lu", mBitRate, actualBitRate);
+ }
+
AACENC_InfoStruct encInfo;
if (AACENC_OK != aacEncInfo(mAACEncoder, &encInfo)) {
ALOGE("Failed to get AAC encoder info");
@@ -373,7 +389,7 @@
size_t numBytesPerInputFrame =
mNumChannels * kNumSamplesPerFrame * sizeof(int16_t);
- // BUGBUG: Fraunhofer's decoder chokes on large chunks of AAC-ELD
+ // Limit input size so we only get one ELD frame
if (mAACProfile == OMX_AUDIO_AACObjectELD && numBytesPerInputFrame > 512) {
numBytesPerInputFrame = 512;
}
@@ -402,7 +418,7 @@
}
if (mInputFrame == NULL) {
- mInputFrame = new int16_t[kNumSamplesPerFrame * mNumChannels];
+ mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
}
if (mInputSize == 0) {
@@ -490,6 +506,7 @@
// Encode the mInputFrame, which is treated as a modulo buffer
AACENC_ERROR encoderErr = AACENC_OK;
size_t nOutputBytes = 0;
+
do {
memset(&outargs, 0, sizeof(outargs));
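Two details in this hunk interact: the per-call input size is clamped to 512 bytes for ELD so the encoder is fed exactly one frame at a time, and mInputFrame is now allocated from that clamped byte count instead of always kNumSamplesPerFrame * channels samples. A stand-alone sketch of the sizing, assuming kNumSamplesPerFrame is 1024 as in the LC case:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const unsigned kNumSamplesPerFrame = 1024;  // assumed AAC-LC frame length
        const unsigned channels = 2;
        const bool isEld = true;

        size_t numBytesPerInputFrame = channels * kNumSamplesPerFrame * sizeof(int16_t);
        if (isEld && numBytesPerInputFrame > 512) {
            numBytesPerInputFrame = 512;  // keep ELD input to a single frame per call
        }
        // The input staging buffer now matches what is actually fed per call.
        size_t samplesToAllocate = numBytesPerInputFrame / sizeof(int16_t);
        printf("bytes per call: %zu, int16 samples allocated: %zu\n",
               numBytesPerInputFrame, samplesToAllocate);  // 512 / 256 for stereo ELD
        return 0;
    }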
diff --git a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
deleted file mode 100644
index 7533f07..0000000
--- a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AVCEncoder"
-#include <utils/Log.h>
-
-#include "AVCEncoder.h"
-
-#include "avcenc_api.h"
-#include "avcenc_int.h"
-#include "OMX_Video.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static status_t ConvertOmxAvcProfileToAvcSpecProfile(
- int32_t omxProfile, AVCProfile* pvProfile) {
- ALOGV("ConvertOmxAvcProfileToAvcSpecProfile: %d", omxProfile);
- switch (omxProfile) {
- case OMX_VIDEO_AVCProfileBaseline:
- *pvProfile = AVC_BASELINE;
- return OK;
- default:
- ALOGE("Unsupported omx profile: %d", omxProfile);
- }
- return BAD_VALUE;
-}
-
-static status_t ConvertOmxAvcLevelToAvcSpecLevel(
- int32_t omxLevel, AVCLevel *pvLevel) {
- ALOGV("ConvertOmxAvcLevelToAvcSpecLevel: %d", omxLevel);
- AVCLevel level = AVC_LEVEL5_1;
- switch (omxLevel) {
- case OMX_VIDEO_AVCLevel1:
- level = AVC_LEVEL1_B;
- break;
- case OMX_VIDEO_AVCLevel1b:
- level = AVC_LEVEL1;
- break;
- case OMX_VIDEO_AVCLevel11:
- level = AVC_LEVEL1_1;
- break;
- case OMX_VIDEO_AVCLevel12:
- level = AVC_LEVEL1_2;
- break;
- case OMX_VIDEO_AVCLevel13:
- level = AVC_LEVEL1_3;
- break;
- case OMX_VIDEO_AVCLevel2:
- level = AVC_LEVEL2;
- break;
- case OMX_VIDEO_AVCLevel21:
- level = AVC_LEVEL2_1;
- break;
- case OMX_VIDEO_AVCLevel22:
- level = AVC_LEVEL2_2;
- break;
- case OMX_VIDEO_AVCLevel3:
- level = AVC_LEVEL3;
- break;
- case OMX_VIDEO_AVCLevel31:
- level = AVC_LEVEL3_1;
- break;
- case OMX_VIDEO_AVCLevel32:
- level = AVC_LEVEL3_2;
- break;
- case OMX_VIDEO_AVCLevel4:
- level = AVC_LEVEL4;
- break;
- case OMX_VIDEO_AVCLevel41:
- level = AVC_LEVEL4_1;
- break;
- case OMX_VIDEO_AVCLevel42:
- level = AVC_LEVEL4_2;
- break;
- case OMX_VIDEO_AVCLevel5:
- level = AVC_LEVEL5;
- break;
- case OMX_VIDEO_AVCLevel51:
- level = AVC_LEVEL5_1;
- break;
- default:
- ALOGE("Unknown omx level: %d", omxLevel);
- return BAD_VALUE;
- }
- *pvLevel = level;
- return OK;
-}
-
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
- uint8_t *inyuv, uint8_t* outyuv,
- int32_t width, int32_t height) {
-
- int32_t outYsize = width * height;
- uint32_t *outy = (uint32_t *) outyuv;
- uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
- uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
- /* Y copying */
- memcpy(outy, inyuv, outYsize);
-
- /* U & V copying */
- uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
- for (int32_t i = height >> 1; i > 0; --i) {
- for (int32_t j = width >> 2; j > 0; --j) {
- uint32_t temp = *inyuv_4++;
- uint32_t tempU = temp & 0xFF;
- tempU = tempU | ((temp >> 8) & 0xFF00);
-
- uint32_t tempV = (temp >> 8) & 0xFF;
- tempV = tempV | ((temp >> 16) & 0xFF00);
-
- // Flip U and V
- *outcb++ = tempV;
- *outcr++ = tempU;
- }
- }
-}
-
-static int32_t MallocWrapper(
- void *userData, int32_t size, int32_t attrs) {
- return reinterpret_cast<int32_t>(malloc(size));
-}
-
-static void FreeWrapper(void *userData, int32_t ptr) {
- free(reinterpret_cast<void *>(ptr));
-}
-
-static int32_t DpbAllocWrapper(void *userData,
- unsigned int sizeInMbs, unsigned int numBuffers) {
- AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->allocOutputBuffers(sizeInMbs, numBuffers);
-}
-
-static int32_t BindFrameWrapper(
- void *userData, int32_t index, uint8_t **yuv) {
- AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->bindOutputBuffer(index, yuv);
-}
-
-static void UnbindFrameWrapper(void *userData, int32_t index) {
- AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
- CHECK(encoder != NULL);
- return encoder->unbindOutputBuffer(index);
-}
-
-AVCEncoder::AVCEncoder(
- const sp<MediaSource>& source,
- const sp<MetaData>& meta)
- : mSource(source),
- mMeta(meta),
- mNumInputFrames(-1),
- mPrevTimestampUs(-1),
- mStarted(false),
- mInputBuffer(NULL),
- mInputFrameData(NULL),
- mGroup(NULL) {
-
- ALOGI("Construct software AVCEncoder");
-
- mHandle = new tagAVCHandle;
- memset(mHandle, 0, sizeof(tagAVCHandle));
- mHandle->AVCObject = NULL;
- mHandle->userData = this;
- mHandle->CBAVC_DPBAlloc = DpbAllocWrapper;
- mHandle->CBAVC_FrameBind = BindFrameWrapper;
- mHandle->CBAVC_FrameUnbind = UnbindFrameWrapper;
- mHandle->CBAVC_Malloc = MallocWrapper;
- mHandle->CBAVC_Free = FreeWrapper;
-
- mInitCheck = initCheck(meta);
-}
-
-AVCEncoder::~AVCEncoder() {
- ALOGV("Destruct software AVCEncoder");
- if (mStarted) {
- stop();
- }
-
- delete mEncParams;
- delete mHandle;
-}
-
-status_t AVCEncoder::initCheck(const sp<MetaData>& meta) {
- ALOGV("initCheck");
- CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
- CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
- CHECK(meta->findInt32(kKeyFrameRate, &mVideoFrameRate));
- CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
-
- // XXX: Add more color format support
- CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
- ALOGE("Color format %d is not supported", mVideoColorFormat);
- return BAD_VALUE;
- }
- // Allocate spare buffer only when color conversion is needed.
- // Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
- mInputFrameData =
- (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
- CHECK(mInputFrameData);
- }
-
- // XXX: Remove this restriction
- if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
- ALOGE("Video frame size %dx%d must be a multiple of 16",
- mVideoWidth, mVideoHeight);
- return BAD_VALUE;
- }
-
- mEncParams = new tagAVCEncParam;
- memset(mEncParams, 0, sizeof(mEncParams));
- mEncParams->width = mVideoWidth;
- mEncParams->height = mVideoHeight;
- mEncParams->frame_rate = 1000 * mVideoFrameRate; // In frames/ms!
- mEncParams->rate_control = AVC_ON;
- mEncParams->bitrate = mVideoBitRate;
- mEncParams->initQP = 0;
- mEncParams->init_CBP_removal_delay = 1600;
- mEncParams->CPB_size = (uint32_t) (mVideoBitRate >> 1);
-
- mEncParams->intramb_refresh = 0;
- mEncParams->auto_scd = AVC_ON;
- mEncParams->out_of_band_param_set = AVC_ON;
- mEncParams->poc_type = 2;
- mEncParams->log2_max_poc_lsb_minus_4 = 12;
- mEncParams->delta_poc_zero_flag = 0;
- mEncParams->offset_poc_non_ref = 0;
- mEncParams->offset_top_bottom = 0;
- mEncParams->num_ref_in_cycle = 0;
- mEncParams->offset_poc_ref = NULL;
-
- mEncParams->num_ref_frame = 1;
- mEncParams->num_slice_group = 1;
- mEncParams->fmo_type = 0;
-
- mEncParams->db_filter = AVC_ON;
- mEncParams->disable_db_idc = 0;
-
- mEncParams->alpha_offset = 0;
- mEncParams->beta_offset = 0;
- mEncParams->constrained_intra_pred = AVC_OFF;
-
- mEncParams->data_par = AVC_OFF;
- mEncParams->fullsearch = AVC_OFF;
- mEncParams->search_range = 16;
- mEncParams->sub_pel = AVC_OFF;
- mEncParams->submb_pred = AVC_OFF;
- mEncParams->rdopt_mode = AVC_OFF;
- mEncParams->bidir_pred = AVC_OFF;
- int32_t nMacroBlocks = ((((mVideoWidth + 15) >> 4) << 4) *
- (((mVideoHeight + 15) >> 4) << 4)) >> 8;
- uint32_t *sliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
- for (int ii = 0, idx = 0; ii < nMacroBlocks; ++ii) {
- sliceGroup[ii] = idx++;
- if (idx >= mEncParams->num_slice_group) {
- idx = 0;
- }
- }
- mEncParams->slice_group = sliceGroup;
-
- mEncParams->use_overrun_buffer = AVC_OFF;
-
- // Set IDR frame refresh interval
- int32_t iFramesIntervalSec;
- CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
- if (iFramesIntervalSec < 0) {
- mEncParams->idr_period = -1;
- } else if (iFramesIntervalSec == 0) {
- mEncParams->idr_period = 1; // All I frames
- } else {
- mEncParams->idr_period =
- (iFramesIntervalSec * mVideoFrameRate);
- }
- ALOGV("idr_period: %d, I-frames interval: %d seconds, and frame rate: %d",
- mEncParams->idr_period, iFramesIntervalSec, mVideoFrameRate);
-
- // Set profile and level
- // If profile and level setting is not correct, failure
- // is reported when the encoder is initialized.
- mEncParams->profile = AVC_BASELINE;
- mEncParams->level = AVC_LEVEL3_2;
- int32_t profile, level;
- if (meta->findInt32(kKeyVideoProfile, &profile)) {
- if (OK != ConvertOmxAvcProfileToAvcSpecProfile(
- profile, &mEncParams->profile)) {
- return BAD_VALUE;
- }
- }
- if (meta->findInt32(kKeyVideoLevel, &level)) {
- if (OK != ConvertOmxAvcLevelToAvcSpecLevel(
- level, &mEncParams->level)) {
- return BAD_VALUE;
- }
- }
-
-
- mFormat = new MetaData;
- mFormat->setInt32(kKeyWidth, mVideoWidth);
- mFormat->setInt32(kKeyHeight, mVideoHeight);
- mFormat->setInt32(kKeyBitRate, mVideoBitRate);
- mFormat->setInt32(kKeyFrameRate, mVideoFrameRate);
- mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
- mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
- mFormat->setCString(kKeyDecoderComponent, "AVCEncoder");
- return OK;
-}
-
-status_t AVCEncoder::start(MetaData *params) {
- ALOGV("start");
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mStarted) {
- ALOGW("Call start() when encoder already started");
- return OK;
- }
-
- AVCEnc_Status err;
- err = PVAVCEncInitialize(mHandle, mEncParams, NULL, NULL);
- if (err != AVCENC_SUCCESS) {
- ALOGE("Failed to initialize the encoder: %d", err);
- return UNKNOWN_ERROR;
- }
-
- mGroup = new MediaBufferGroup();
- int32_t maxSize;
- if (AVCENC_SUCCESS !=
- PVAVCEncGetMaxOutputBufferSize(mHandle, &maxSize)) {
- maxSize = 31584; // Magic #
- }
- mGroup->add_buffer(new MediaBuffer(maxSize));
-
- mSource->start(params);
- mNumInputFrames = -2; // 1st two buffers contain SPS and PPS
- mStarted = true;
- mSpsPpsHeaderReceived = false;
- mReadyForNextFrame = true;
- mIsIDRFrame = 0;
-
- return OK;
-}
-
-status_t AVCEncoder::stop() {
- ALOGV("stop");
- if (!mStarted) {
- ALOGW("Call stop() when encoder has not started");
- return OK;
- }
-
- if (mInputBuffer) {
- mInputBuffer->release();
- mInputBuffer = NULL;
- }
-
- if (mGroup) {
- delete mGroup;
- mGroup = NULL;
- }
-
- if (mInputFrameData) {
- delete mInputFrameData;
- mInputFrameData = NULL;
- }
-
- PVAVCCleanUpEncoder(mHandle);
- mSource->stop();
- releaseOutputBuffers();
- mStarted = false;
-
- return OK;
-}
-
-void AVCEncoder::releaseOutputBuffers() {
- ALOGV("releaseOutputBuffers");
- for (size_t i = 0; i < mOutputBuffers.size(); ++i) {
- MediaBuffer *buffer = mOutputBuffers.editItemAt(i);
- buffer->setObserver(NULL);
- buffer->release();
- }
- mOutputBuffers.clear();
-}
-
-sp<MetaData> AVCEncoder::getFormat() {
- ALOGV("getFormat");
- return mFormat;
-}
-
-status_t AVCEncoder::read(
- MediaBuffer **out, const ReadOptions *options) {
-
- CHECK(!options);
- *out = NULL;
-
- MediaBuffer *outputBuffer;
- CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
- uint8_t *outPtr = (uint8_t *) outputBuffer->data();
- uint32_t dataLength = outputBuffer->size();
-
- if (!mSpsPpsHeaderReceived && mNumInputFrames < 0) {
- // 4 bytes are reserved for holding the start code 0x00000001
- // of the sequence parameter set at the beginning.
- outPtr += 4;
- dataLength -= 4;
- }
-
- int32_t type;
- AVCEnc_Status encoderStatus = AVCENC_SUCCESS;
-
- // Combine SPS and PPS and place them in the very first output buffer
- // SPS and PPS are separated by start code 0x00000001
- // Assume that we have exactly one SPS and exactly one PPS.
- while (!mSpsPpsHeaderReceived && mNumInputFrames <= 0) {
- encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
- if (encoderStatus == AVCENC_WRONG_STATE) {
- mSpsPpsHeaderReceived = true;
- CHECK_EQ(0, mNumInputFrames); // 1st video frame is 0
- } else {
- switch (type) {
- case AVC_NALTYPE_SPS:
- ++mNumInputFrames;
- memcpy((uint8_t *)outputBuffer->data(), "\x00\x00\x00\x01", 4);
- outputBuffer->set_range(0, dataLength + 4);
- outPtr += (dataLength + 4); // 4 bytes for next start code
- dataLength = outputBuffer->size() -
- (outputBuffer->range_length() + 4);
- break;
- case AVC_NALTYPE_PPS:
- ++mNumInputFrames;
- memcpy(((uint8_t *) outputBuffer->data()) +
- outputBuffer->range_length(),
- "\x00\x00\x00\x01", 4);
- outputBuffer->set_range(0,
- dataLength + outputBuffer->range_length() + 4);
- outputBuffer->meta_data()->setInt32(kKeyIsCodecConfig, 1);
- outputBuffer->meta_data()->setInt64(kKeyTime, 0);
- *out = outputBuffer;
- return OK;
- default:
- ALOGE("Nal type (%d) other than SPS/PPS is unexpected", type);
- return UNKNOWN_ERROR;
- }
- }
- }
-
- // Get next input video frame
- if (mReadyForNextFrame) {
- if (mInputBuffer) {
- mInputBuffer->release();
- mInputBuffer = NULL;
- }
- status_t err = mSource->read(&mInputBuffer, options);
- if (err != OK) {
- if (err != ERROR_END_OF_STREAM) {
- ALOGE("Failed to read input video frame: %d", err);
- }
- outputBuffer->release();
- return err;
- }
-
- if (mInputBuffer->size() - ((mVideoWidth * mVideoHeight * 3) >> 1) != 0) {
- outputBuffer->release();
- mInputBuffer->release();
- mInputBuffer = NULL;
- return UNKNOWN_ERROR;
- }
-
- int64_t timeUs;
- CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
- outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
-
- // When the timestamp of the current sample is the same as
- // that of the previous sample, the encoding of the sample
- // is bypassed, and the output length is set to 0.
- if (mNumInputFrames >= 1 && mPrevTimestampUs == timeUs) {
- // Frame arrives too late
- mInputBuffer->release();
- mInputBuffer = NULL;
- outputBuffer->set_range(0, 0);
- *out = outputBuffer;
- return OK;
- }
-
- // Don't accept out-of-order samples
- CHECK(mPrevTimestampUs < timeUs);
- mPrevTimestampUs = timeUs;
-
- AVCFrameIO videoInput;
- memset(&videoInput, 0, sizeof(videoInput));
- videoInput.height = ((mVideoHeight + 15) >> 4) << 4;
- videoInput.pitch = ((mVideoWidth + 15) >> 4) << 4;
- videoInput.coding_timestamp = (timeUs + 500) / 1000; // in ms
- uint8_t *inputData = (uint8_t *) mInputBuffer->data();
-
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- CHECK(mInputFrameData);
- CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
- ConvertYUV420SemiPlanarToYUV420Planar(
- inputData, mInputFrameData, mVideoWidth, mVideoHeight);
- inputData = mInputFrameData;
- }
- CHECK(inputData != NULL);
- videoInput.YCbCr[0] = inputData;
- videoInput.YCbCr[1] = videoInput.YCbCr[0] + videoInput.height * videoInput.pitch;
- videoInput.YCbCr[2] = videoInput.YCbCr[1] +
- ((videoInput.height * videoInput.pitch) >> 2);
- videoInput.disp_order = mNumInputFrames;
-
- encoderStatus = PVAVCEncSetInput(mHandle, &videoInput);
- if (encoderStatus == AVCENC_SUCCESS ||
- encoderStatus == AVCENC_NEW_IDR) {
- mReadyForNextFrame = false;
- ++mNumInputFrames;
- if (encoderStatus == AVCENC_NEW_IDR) {
- mIsIDRFrame = 1;
- }
- } else {
- if (encoderStatus < AVCENC_SUCCESS) {
- outputBuffer->release();
- return UNKNOWN_ERROR;
- } else {
- outputBuffer->set_range(0, 0);
- *out = outputBuffer;
- return OK;
- }
- }
- }
-
- // Encode an input video frame
- CHECK(encoderStatus == AVCENC_SUCCESS ||
- encoderStatus == AVCENC_NEW_IDR);
- dataLength = outputBuffer->size(); // Reset the output buffer length
- encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
- if (encoderStatus == AVCENC_SUCCESS) {
- outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
- CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
- } else if (encoderStatus == AVCENC_PICTURE_READY) {
- CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
- if (mIsIDRFrame) {
- outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
- mIsIDRFrame = 0;
- ALOGV("Output an IDR frame");
- }
- mReadyForNextFrame = true;
- AVCFrameIO recon;
- if (PVAVCEncGetRecon(mHandle, &recon) == AVCENC_SUCCESS) {
- PVAVCEncReleaseRecon(mHandle, &recon);
- }
- } else {
- dataLength = 0;
- mReadyForNextFrame = true;
- }
- if (encoderStatus < AVCENC_SUCCESS) {
- outputBuffer->release();
- return UNKNOWN_ERROR;
- }
-
- outputBuffer->set_range(0, dataLength);
- *out = outputBuffer;
- return OK;
-}
-
-int32_t AVCEncoder::allocOutputBuffers(
- unsigned int sizeInMbs, unsigned int numBuffers) {
- CHECK(mOutputBuffers.isEmpty());
- size_t frameSize = (sizeInMbs << 7) * 3;
- for (unsigned int i = 0; i < numBuffers; ++i) {
- MediaBuffer *buffer = new MediaBuffer(frameSize);
- buffer->setObserver(this);
- mOutputBuffers.push(buffer);
- }
-
- return 1;
-}
-
-void AVCEncoder::unbindOutputBuffer(int32_t index) {
- CHECK(index >= 0);
-}
-
-int32_t AVCEncoder::bindOutputBuffer(int32_t index, uint8_t **yuv) {
- CHECK(index >= 0);
- CHECK(index < (int32_t) mOutputBuffers.size());
- int64_t timeUs;
- CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
- mOutputBuffers[index]->meta_data()->setInt64(kKeyTime, timeUs);
-
- *yuv = (uint8_t *) mOutputBuffers[index]->data();
-
- return 1;
-}
-
-void AVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
-}
-
-} // namespace android
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index ee31ab2..48923cf 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -2,8 +2,6 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- AVCEncoder.cpp \
- SoftAVCEncoder.cpp \
src/avcenc_api.cpp \
src/bitstream_io.cpp \
src/block.cpp \
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index e6aa563..484180d 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -2,8 +2,6 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- M4vH263Encoder.cpp \
- SoftMPEG4Encoder.cpp \
src/bitstream_io.cpp \
src/combined_encode.cpp \
src/datapart_encode.cpp \
diff --git a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
deleted file mode 100644
index 20b0f8d..0000000
--- a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "M4vH263Encoder"
-#include <utils/Log.h>
-
-#include "M4vH263Encoder.h"
-
-#include "mp4enc_api.h"
-#include "OMX_Video.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static status_t ConvertOmxProfileLevel(
- MP4EncodingMode mode,
- int32_t omxProfile,
- int32_t omxLevel,
- ProfileLevelType* pvProfileLevel) {
- ALOGV("ConvertOmxProfileLevel: %d/%d/%d", mode, omxProfile, omxLevel);
- ProfileLevelType profileLevel;
- if (mode == H263_MODE) {
- switch (omxProfile) {
- case OMX_VIDEO_H263ProfileBaseline:
- if (omxLevel > OMX_VIDEO_H263Level45) {
- ALOGE("Unsupported level (%d) for H263", omxLevel);
- return BAD_VALUE;
- } else {
- ALOGW("PV does not support level configuration for H263");
- profileLevel = CORE_PROFILE_LEVEL2;
- break;
- }
- break;
- default:
- ALOGE("Unsupported profile (%d) for H263", omxProfile);
- return BAD_VALUE;
- }
- } else { // MPEG4
- switch (omxProfile) {
- case OMX_VIDEO_MPEG4ProfileSimple:
- switch (omxLevel) {
- case OMX_VIDEO_MPEG4Level0b:
- profileLevel = SIMPLE_PROFILE_LEVEL0;
- break;
- case OMX_VIDEO_MPEG4Level1:
- profileLevel = SIMPLE_PROFILE_LEVEL1;
- break;
- case OMX_VIDEO_MPEG4Level2:
- profileLevel = SIMPLE_PROFILE_LEVEL2;
- break;
- case OMX_VIDEO_MPEG4Level3:
- profileLevel = SIMPLE_PROFILE_LEVEL3;
- break;
- default:
- ALOGE("Unsupported level (%d) for MPEG4 simple profile",
- omxLevel);
- return BAD_VALUE;
- }
- break;
- case OMX_VIDEO_MPEG4ProfileSimpleScalable:
- switch (omxLevel) {
- case OMX_VIDEO_MPEG4Level0b:
- profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL0;
- break;
- case OMX_VIDEO_MPEG4Level1:
- profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL1;
- break;
- case OMX_VIDEO_MPEG4Level2:
- profileLevel = SIMPLE_SCALABLE_PROFILE_LEVEL2;
- break;
- default:
- ALOGE("Unsupported level (%d) for MPEG4 simple "
- "scalable profile", omxLevel);
- return BAD_VALUE;
- }
- break;
- case OMX_VIDEO_MPEG4ProfileCore:
- switch (omxLevel) {
- case OMX_VIDEO_MPEG4Level1:
- profileLevel = CORE_PROFILE_LEVEL1;
- break;
- case OMX_VIDEO_MPEG4Level2:
- profileLevel = CORE_PROFILE_LEVEL2;
- break;
- default:
- ALOGE("Unsupported level (%d) for MPEG4 core "
- "profile", omxLevel);
- return BAD_VALUE;
- }
- break;
- case OMX_VIDEO_MPEG4ProfileCoreScalable:
- switch (omxLevel) {
- case OMX_VIDEO_MPEG4Level1:
- profileLevel = CORE_SCALABLE_PROFILE_LEVEL1;
- break;
- case OMX_VIDEO_MPEG4Level2:
- profileLevel = CORE_SCALABLE_PROFILE_LEVEL2;
- break;
- case OMX_VIDEO_MPEG4Level3:
- profileLevel = CORE_SCALABLE_PROFILE_LEVEL3;
- break;
- default:
- ALOGE("Unsupported level (%d) for MPEG4 core "
- "scalable profile", omxLevel);
- return BAD_VALUE;
- }
- break;
- default:
- ALOGE("Unsupported MPEG4 profile (%d)", omxProfile);
- return BAD_VALUE;
- }
- }
-
- *pvProfileLevel = profileLevel;
- return OK;
-}
-
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
- uint8_t *inyuv, uint8_t* outyuv,
- int32_t width, int32_t height) {
-
- int32_t outYsize = width * height;
- uint32_t *outy = (uint32_t *) outyuv;
- uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
- uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
- /* Y copying */
- memcpy(outy, inyuv, outYsize);
-
- /* U & V copying */
- uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
- for (int32_t i = height >> 1; i > 0; --i) {
- for (int32_t j = width >> 2; j > 0; --j) {
- uint32_t temp = *inyuv_4++;
- uint32_t tempU = temp & 0xFF;
- tempU = tempU | ((temp >> 8) & 0xFF00);
-
- uint32_t tempV = (temp >> 8) & 0xFF;
- tempV = tempV | ((temp >> 16) & 0xFF00);
-
- // Flip U and V
- *outcb++ = tempV;
- *outcr++ = tempU;
- }
- }
-}
-
-M4vH263Encoder::M4vH263Encoder(
- const sp<MediaSource>& source,
- const sp<MetaData>& meta)
- : mSource(source),
- mMeta(meta),
- mNumInputFrames(-1),
- mNextModTimeUs(0),
- mPrevTimestampUs(-1),
- mStarted(false),
- mInputBuffer(NULL),
- mInputFrameData(NULL),
- mGroup(NULL) {
-
- ALOGI("Construct software M4vH263Encoder");
-
- mHandle = new tagvideoEncControls;
- memset(mHandle, 0, sizeof(tagvideoEncControls));
-
- mInitCheck = initCheck(meta);
-}
-
-M4vH263Encoder::~M4vH263Encoder() {
- ALOGV("Destruct software M4vH263Encoder");
- if (mStarted) {
- stop();
- }
-
- delete mEncParams;
- delete mHandle;
-}
-
-status_t M4vH263Encoder::initCheck(const sp<MetaData>& meta) {
- ALOGV("initCheck");
- CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
- CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
- CHECK(meta->findInt32(kKeyFrameRate, &mVideoFrameRate));
- CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
-
- // XXX: Add more color format support
- CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
- ALOGE("Color format %d is not supported", mVideoColorFormat);
- return BAD_VALUE;
- }
- // Allocate spare buffer only when color conversion is needed.
- // Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
- mInputFrameData =
- (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
- CHECK(mInputFrameData);
- }
-
- // XXX: Remove this restriction
- if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
- ALOGE("Video frame size %dx%d must be a multiple of 16",
- mVideoWidth, mVideoHeight);
- return BAD_VALUE;
- }
-
- mEncParams = new tagvideoEncOptions;
- memset(mEncParams, 0, sizeof(tagvideoEncOptions));
- if (!PVGetDefaultEncOption(mEncParams, 0)) {
- ALOGE("Failed to get default encoding parameters");
- return BAD_VALUE;
- }
-
- // Need to know which role the encoder is in.
- // XXX: Set the mode proper for other types of applications
- // like streaming or video conference
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
- CHECK(!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
- !strcmp(mime, MEDIA_MIMETYPE_VIDEO_H263));
- if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
- mEncParams->encMode = COMBINE_MODE_WITH_ERR_RES;
- } else {
- mEncParams->encMode = H263_MODE;
- }
- mEncParams->encWidth[0] = mVideoWidth;
- mEncParams->encHeight[0] = mVideoHeight;
- mEncParams->encFrameRate[0] = mVideoFrameRate;
- mEncParams->rcType = VBR_1;
- mEncParams->vbvDelay = (float)5.0;
-
- // Set profile and level
- // If profile and level setting is not correct, failure
- // is reported when the encoder is initialized.
- mEncParams->profile_level = CORE_PROFILE_LEVEL2;
- int32_t profile, level;
- if (meta->findInt32(kKeyVideoProfile, &profile) &&
- meta->findInt32(kKeyVideoLevel, &level)) {
- if (OK != ConvertOmxProfileLevel(
- mEncParams->encMode, profile, level,
- &mEncParams->profile_level)) {
- return BAD_VALUE;
- }
- }
-
- mEncParams->packetSize = 32;
- mEncParams->rvlcEnable = PV_OFF;
- mEncParams->numLayers = 1;
- mEncParams->timeIncRes = 1000;
- mEncParams->tickPerSrc = mEncParams->timeIncRes / mVideoFrameRate;
-
- mEncParams->bitRate[0] = mVideoBitRate;
- mEncParams->iQuant[0] = 15;
- mEncParams->pQuant[0] = 12;
- mEncParams->quantType[0] = 0;
- mEncParams->noFrameSkipped = PV_OFF;
-
- // Set IDR frame refresh interval
- int32_t iFramesIntervalSec;
- CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
- if (iFramesIntervalSec < 0) {
- mEncParams->intraPeriod = -1;
- } else if (iFramesIntervalSec == 0) {
- mEncParams->intraPeriod = 1; // All I frames
- } else {
- mEncParams->intraPeriod =
- (iFramesIntervalSec * mVideoFrameRate);
- }
-
- mEncParams->numIntraMB = 0;
- mEncParams->sceneDetect = PV_ON;
- mEncParams->searchRange = 16;
- mEncParams->mv8x8Enable = PV_OFF;
- mEncParams->gobHeaderInterval = 0;
- mEncParams->useACPred = PV_ON;
- mEncParams->intraDCVlcTh = 0;
-
- mFormat = new MetaData;
- mFormat->setInt32(kKeyWidth, mVideoWidth);
- mFormat->setInt32(kKeyHeight, mVideoHeight);
- mFormat->setInt32(kKeyBitRate, mVideoBitRate);
- mFormat->setInt32(kKeyFrameRate, mVideoFrameRate);
- mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
-
- mFormat->setCString(kKeyMIMEType, mime);
- mFormat->setCString(kKeyDecoderComponent, "M4vH263Encoder");
- return OK;
-}
-
-status_t M4vH263Encoder::start(MetaData *params) {
- ALOGV("start");
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mStarted) {
- ALOGW("Call start() when encoder already started");
- return OK;
- }
-
- if (!PVInitVideoEncoder(mHandle, mEncParams)) {
- ALOGE("Failed to initialize the encoder");
- return UNKNOWN_ERROR;
- }
-
- mGroup = new MediaBufferGroup();
- int32_t maxSize;
- if (!PVGetMaxVideoFrameSize(mHandle, &maxSize)) {
- maxSize = 256 * 1024; // Magic #
- }
- ALOGV("Max output buffer size: %d", maxSize);
- mGroup->add_buffer(new MediaBuffer(maxSize));
-
- mSource->start(params);
- mNumInputFrames = -1; // 1st frame contains codec specific data
- mStarted = true;
-
- return OK;
-}
-
-status_t M4vH263Encoder::stop() {
- ALOGV("stop");
- if (!mStarted) {
- ALOGW("Call stop() when encoder has not started");
- return OK;
- }
-
- if (mInputBuffer) {
- mInputBuffer->release();
- mInputBuffer = NULL;
- }
-
- if (mGroup) {
- delete mGroup;
- mGroup = NULL;
- }
-
- if (mInputFrameData) {
- delete mInputFrameData;
- mInputFrameData = NULL;
- }
-
- CHECK(PVCleanUpVideoEncoder(mHandle));
-
- mSource->stop();
- mStarted = false;
-
- return OK;
-}
-
-sp<MetaData> M4vH263Encoder::getFormat() {
- ALOGV("getFormat");
- return mFormat;
-}
-
-status_t M4vH263Encoder::read(
- MediaBuffer **out, const ReadOptions *options) {
-
- *out = NULL;
-
- MediaBuffer *outputBuffer;
- CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
- uint8_t *outPtr = (uint8_t *) outputBuffer->data();
- int32_t dataLength = outputBuffer->size();
-
- // Output codec specific data
- if (mNumInputFrames < 0) {
- if (!PVGetVolHeader(mHandle, outPtr, &dataLength, 0)) {
- ALOGE("Failed to get VOL header");
- return UNKNOWN_ERROR;
- }
- ALOGV("Output VOL header: %d bytes", dataLength);
- outputBuffer->meta_data()->setInt32(kKeyIsCodecConfig, 1);
- outputBuffer->set_range(0, dataLength);
- *out = outputBuffer;
- ++mNumInputFrames;
- return OK;
- }
-
- // Ready for accepting an input video frame
- status_t err = mSource->read(&mInputBuffer, options);
- if (OK != err) {
- if (err != ERROR_END_OF_STREAM) {
- ALOGE("Failed to read from data source");
- }
- outputBuffer->release();
- return err;
- }
-
- if (mInputBuffer->size() - ((mVideoWidth * mVideoHeight * 3) >> 1) != 0) {
- outputBuffer->release();
- mInputBuffer->release();
- mInputBuffer = NULL;
- return UNKNOWN_ERROR;
- }
-
- int64_t timeUs;
- CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
- // When the timestamp of the current sample is the same as that
- // of the previous sample, encoding of the current sample is
- // bypassed, and the output length of the sample is set to 0
- if (mNumInputFrames >= 1 &&
- (mNextModTimeUs > timeUs || mPrevTimestampUs == timeUs)) {
- // Frame arrives too late
- outputBuffer->set_range(0, 0);
- *out = outputBuffer;
- mInputBuffer->release();
- mInputBuffer = NULL;
- return OK;
- }
-
- // Don't accept out-of-order samples
- CHECK(mPrevTimestampUs < timeUs);
- mPrevTimestampUs = timeUs;
-
- // Color convert to OMX_COLOR_FormatYUV420Planar if necessary
- outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
- uint8_t *inPtr = (uint8_t *) mInputBuffer->data();
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- CHECK(mInputFrameData);
- CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
- ConvertYUV420SemiPlanarToYUV420Planar(
- inPtr, mInputFrameData, mVideoWidth, mVideoHeight);
- inPtr = mInputFrameData;
- }
- CHECK(inPtr != NULL);
-
- // Ready for encoding a video frame
- VideoEncFrameIO vin, vout;
- vin.height = ((mVideoHeight + 15) >> 4) << 4;
- vin.pitch = ((mVideoWidth + 15) >> 4) << 4;
- vin.timestamp = (timeUs + 500) / 1000; // in ms
- vin.yChan = inPtr;
- vin.uChan = vin.yChan + vin.height * vin.pitch;
- vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
- unsigned long modTimeMs = 0;
- int32_t nLayer = 0;
- MP4HintTrack hintTrack;
- if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
- &modTimeMs, outPtr, &dataLength, &nLayer) ||
- !PVGetHintTrack(mHandle, &hintTrack)) {
- ALOGE("Failed to encode frame or get hink track at frame %lld",
- mNumInputFrames);
- outputBuffer->release();
- mInputBuffer->release();
- mInputBuffer = NULL;
- return UNKNOWN_ERROR;
- }
- CHECK(NULL == PVGetOverrunBuffer(mHandle));
- if (hintTrack.CodeType == 0) { // I-frame serves as sync frame
- outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- }
-
- ++mNumInputFrames;
- mNextModTimeUs = modTimeMs * 1000LL;
- outputBuffer->set_range(0, dataLength);
- *out = outputBuffer;
- mInputBuffer->release();
- mInputBuffer = NULL;
- return OK;
-}
-
-void M4vH263Encoder::signalBufferReturned(MediaBuffer *buffer) {
-}
-
-} // namespace android
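
Aside (illustrative, not part of the patch): the removed encoder above rounds the frame dimensions up to multiples of 16 and derives the chroma plane pointers from the luma plane before calling PVEncodeVideoFrame(). A minimal standalone sketch of that YUV420 planar layout math, with hypothetical names:

    #include <stdint.h>

    struct Yuv420Planes {
        uint8_t *y;
        uint8_t *u;
        uint8_t *v;
    };

    // 'base' points at a contiguous YUV420 planar frame of the given dimensions.
    static Yuv420Planes layoutYuv420Planar(uint8_t *base, int32_t width, int32_t height) {
        int32_t pitch = ((width + 15) >> 4) << 4;   // round up to a multiple of 16
        int32_t rows  = ((height + 15) >> 4) << 4;
        Yuv420Planes p;
        p.y = base;
        p.u = p.y + rows * pitch;                   // U plane follows the full-size Y plane
        p.v = p.u + ((rows * pitch) >> 2);          // V plane follows the quarter-size U plane
        return p;
    }
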
diff --git a/media/libstagefright/include/AVCEncoder.h b/media/libstagefright/include/AVCEncoder.h
deleted file mode 100644
index 83e1f97..0000000
--- a/media/libstagefright/include/AVCEncoder.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AVC_ENCODER_H_
-
-#define AVC_ENCODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/Vector.h>
-
-struct tagAVCHandle;
-struct tagAVCEncParam;
-
-namespace android {
-
-struct MediaBuffer;
-struct MediaBufferGroup;
-
-struct AVCEncoder : public MediaSource,
- public MediaBufferObserver {
- AVCEncoder(const sp<MediaSource> &source,
- const sp<MetaData>& meta);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
- virtual void signalBufferReturned(MediaBuffer *buffer);
-
- // Callbacks required by the encoder
- int32_t allocOutputBuffers(unsigned int sizeInMbs, unsigned int numBuffers);
- void unbindOutputBuffer(int32_t index);
- int32_t bindOutputBuffer(int32_t index, uint8_t **yuv);
-
-protected:
- virtual ~AVCEncoder();
-
-private:
- sp<MediaSource> mSource;
- sp<MetaData> mFormat;
- sp<MetaData> mMeta;
-
- int32_t mVideoWidth;
- int32_t mVideoHeight;
- int32_t mVideoFrameRate;
- int32_t mVideoBitRate;
- int32_t mVideoColorFormat;
- int64_t mNumInputFrames;
- int64_t mPrevTimestampUs;
- status_t mInitCheck;
- bool mStarted;
- bool mSpsPpsHeaderReceived;
- bool mReadyForNextFrame;
- int32_t mIsIDRFrame; // for set kKeyIsSyncFrame
-
- tagAVCHandle *mHandle;
- tagAVCEncParam *mEncParams;
- MediaBuffer *mInputBuffer;
- uint8_t *mInputFrameData;
- MediaBufferGroup *mGroup;
- Vector<MediaBuffer *> mOutputBuffers;
-
-
- status_t initCheck(const sp<MetaData>& meta);
- void releaseOutputBuffers();
-
- AVCEncoder(const AVCEncoder &);
- AVCEncoder &operator=(const AVCEncoder &);
-};
-
-} // namespace android
-
-#endif // AVC_ENCODER_H_
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index a2e2e85..1409dc7 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -175,6 +175,7 @@
int32_t mDisplayWidth;
int32_t mDisplayHeight;
+ int32_t mVideoScalingMode;
uint32_t mFlags;
uint32_t mExtractorFlags;
@@ -318,6 +319,8 @@
Vector<TrackStat> mTracks;
} mStats;
+ status_t setVideoScalingMode(int32_t mode);
+ status_t setVideoScalingMode_l(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
// when select is true, the given track is selected.
diff --git a/media/libstagefright/include/M4vH263Encoder.h b/media/libstagefright/include/M4vH263Encoder.h
deleted file mode 100644
index dbe9fd0..0000000
--- a/media/libstagefright/include/M4vH263Encoder.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef M4V_H263_ENCODER_H_
-
-#define M4V_H263_ENCODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-
-struct tagvideoEncControls;
-struct tagvideoEncOptions;
-
-namespace android {
-
-struct MediaBuffer;
-struct MediaBufferGroup;
-
-struct M4vH263Encoder : public MediaSource,
- public MediaBufferObserver {
- M4vH263Encoder(const sp<MediaSource> &source,
- const sp<MetaData>& meta);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
- virtual void signalBufferReturned(MediaBuffer *buffer);
-
-protected:
- virtual ~M4vH263Encoder();
-
-private:
- sp<MediaSource> mSource;
- sp<MetaData> mFormat;
- sp<MetaData> mMeta;
-
- int32_t mVideoWidth;
- int32_t mVideoHeight;
- int32_t mVideoFrameRate;
- int32_t mVideoBitRate;
- int32_t mVideoColorFormat;
- int64_t mNumInputFrames;
- int64_t mNextModTimeUs;
- int64_t mPrevTimestampUs;
- status_t mInitCheck;
- bool mStarted;
-
- tagvideoEncControls *mHandle;
- tagvideoEncOptions *mEncParams;
- MediaBuffer *mInputBuffer;
- uint8_t *mInputFrameData;
- MediaBufferGroup *mGroup;
-
- status_t initCheck(const sp<MetaData>& meta);
- void releaseOutputBuffers();
-
- M4vH263Encoder(const M4vH263Encoder &);
- M4vH263Encoder &operator=(const M4vH263Encoder &);
-};
-
-} // namespace android
-
-#endif // M4V_H263_ENCODER_H_
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 7fd99a8..1cab077 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -305,10 +305,7 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
- Vector<size_t> ranges;
- Vector<size_t> frameOffsets;
- Vector<size_t> frameSizes;
- size_t auSize = 0;
+ int64_t timeUs;
size_t offset = 0;
while (offset + 7 <= mBuffer->size()) {
@@ -332,6 +329,8 @@
mFormat = MakeAACCodecSpecificData(
profile, sampling_freq_index, channel_configuration);
+ mFormat->setInt32(kKeyIsADTS, true);
+
int32_t sampleRate;
int32_t numChannels;
CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
@@ -367,10 +366,12 @@
size_t headerSize = protection_absent ? 7 : 9;
- ranges.push(aac_frame_length);
- frameOffsets.push(offset + headerSize);
- frameSizes.push(aac_frame_length - headerSize);
- auSize += aac_frame_length - headerSize;
+ int64_t tmpUs = fetchTimestamp(aac_frame_length);
+ CHECK_GE(tmpUs, 0ll);
+
+ if (offset == 0) {
+ timeUs = tmpUs;
+ }
offset += aac_frame_length;
}
@@ -379,37 +380,14 @@
return NULL;
}
- int64_t timeUs = -1;
-
- for (size_t i = 0; i < ranges.size(); ++i) {
- int64_t tmpUs = fetchTimestamp(ranges.itemAt(i));
-
- if (i == 0) {
- timeUs = tmpUs;
- }
- }
-
- sp<ABuffer> accessUnit = new ABuffer(auSize);
- size_t dstOffset = 0;
- for (size_t i = 0; i < frameOffsets.size(); ++i) {
- size_t frameOffset = frameOffsets.itemAt(i);
-
- memcpy(accessUnit->data() + dstOffset,
- mBuffer->data() + frameOffset,
- frameSizes.itemAt(i));
-
- dstOffset += frameSizes.itemAt(i);
- }
+ sp<ABuffer> accessUnit = new ABuffer(offset);
+ memcpy(accessUnit->data(), mBuffer->data(), offset);
memmove(mBuffer->data(), mBuffer->data() + offset,
mBuffer->size() - offset);
mBuffer->setRange(0, mBuffer->size() - offset);
- if (timeUs >= 0) {
- accessUnit->meta()->setInt64("timeUs", timeUs);
- } else {
- ALOGW("no time for AAC access unit");
- }
+ accessUnit->meta()->setInt64("timeUs", timeUs);
return accessUnit;
}
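
Aside (illustrative, not part of the patch): the rewritten dequeueAccessUnitAAC() above no longer strips ADTS headers into a separate buffer; it tags the format with kKeyIsADTS and emits the span of whole ADTS frames accumulated so far. A standalone sketch of that span computation, assuming well-formed ADTS input and using hypothetical names:

    #include <stdint.h>
    #include <stddef.h>

    // Returns false on lost sync; otherwise reports how many complete ADTS frames
    // fit in 'size' bytes and how many bytes they span (the access unit size).
    static bool spanAdtsFrames(const uint8_t *data, size_t size,
                               size_t *numFrames, size_t *spanBytes) {
        size_t offset = 0;
        size_t frames = 0;
        while (offset + 7 <= size) {
            const uint8_t *h = &data[offset];
            if (!(h[0] == 0xFF && (h[1] & 0xF0) == 0xF0)) {
                return false;                                   // lost ADTS sync
            }
            // aac_frame_length: 13 bits spread over header bytes 3..5, includes the header
            size_t frameLength = ((h[3] & 0x03) << 11) | (h[4] << 3) | (h[5] >> 5);
            if (frameLength < 7 || offset + frameLength > size) {
                break;                                          // partial frame, wait for more data
            }
            offset += frameLength;
            ++frames;
        }
        *numFrames = frames;
        *spanBytes = offset;
        return true;
    }
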
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f11fcd2..681e321 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -253,7 +253,11 @@
OMXNodeInstance *instance = findInstance(node);
ssize_t index = mLiveNodes.indexOfKey(instance->observer()->asBinder());
- CHECK(index >= 0);
+ if (index < 0) {
+ // This could conceivably happen if the observer dies at roughly the
+ // same time that a client attempts to free the node explicitly.
+ return OK;
+ }
mLiveNodes.removeItemsAt(index);
instance->observer()->asBinder()->unlinkToDeath(this);
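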
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
index 7b1f7f6..1f5d037 100644
--- a/media/libstagefright/timedtext/TimedTextSRTSource.cpp
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
@@ -212,6 +212,9 @@
status_t TimedTextSRTSource::getText(
const MediaSource::ReadOptions *options,
AString *text, int64_t *startTimeUs, int64_t *endTimeUs) {
+ if (mTextVector.size() == 0) {
+ return ERROR_END_OF_STREAM;
+ }
text->clear();
int64_t seekTimeUs;
MediaSource::ReadOptions::SeekMode mode;
@@ -225,31 +228,38 @@
mIndex = 0;
} else {
// binary search
- ssize_t low = 0;
- ssize_t high = mTextVector.size() - 1;
- ssize_t mid = 0;
+ size_t low = 0;
+ size_t high = mTextVector.size() - 1;
+ size_t mid = 0;
int64_t currTimeUs;
while (low <= high) {
mid = low + (high - low)/2;
currTimeUs = mTextVector.keyAt(mid);
- const int diff = currTimeUs - seekTimeUs;
+ const int64_t diffTime = currTimeUs - seekTimeUs;
- if (diff == 0) {
+ if (diffTime == 0) {
break;
- } else if (diff < 0) {
+ } else if (diffTime < 0) {
low = mid + 1;
} else {
if ((high == mid + 1)
&& (seekTimeUs < mTextVector.keyAt(high))) {
break;
}
+ if (mid < 1) {
+ break;
+ }
high = mid - 1;
}
}
mIndex = mid;
}
}
+
+ if (mIndex >= mTextVector.size()) {
+ return ERROR_END_OF_STREAM;
+ }
const TextInfo &info = mTextVector.valueAt(mIndex);
*startTimeUs = mTextVector.keyAt(mIndex);
*endTimeUs = info.endTimeUs;
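
Aside (illustrative, not part of the patch): the seek path above now uses unsigned indices, so the extra "mid < 1" guard is what keeps "high = mid - 1" from wrapping around when mid reaches zero. A standalone sketch of the same search over sorted subtitle start times, with hypothetical names:

    #include <stdint.h>
    #include <vector>

    // Returns the index chosen by the search; the caller still validates it the
    // way getText() does. 'startTimesUs' must be sorted and non-empty.
    static size_t findSubtitleIndex(const std::vector<int64_t> &startTimesUs,
                                    int64_t seekTimeUs) {
        size_t low = 0;
        size_t high = startTimesUs.size() - 1;
        size_t mid = 0;
        while (low <= high) {
            mid = low + (high - low) / 2;
            int64_t diffTime = startTimesUs[mid] - seekTimeUs;
            if (diffTime == 0) {
                break;
            } else if (diffTime < 0) {
                low = mid + 1;
            } else {
                if (high == mid + 1 && seekTimeUs < startTimesUs[high]) {
                    break;
                }
                if (mid < 1) {
                    break;          // avoid unsigned wrap-around on 'high = mid - 1'
                }
                high = mid - 1;
            }
        }
        return mid;
    }
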
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.h b/media/libstagefright/timedtext/TimedTextSRTSource.h
index e1371b8..9eeab39 100644
--- a/media/libstagefright/timedtext/TimedTextSRTSource.h
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.h
@@ -56,7 +56,7 @@
int textLen;
};
- int mIndex;
+ size_t mIndex;
KeyedVector<int64_t, TextInfo> mTextVector;
void reset();
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d123cbb..7ddaa29 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1492,13 +1492,11 @@
mMasterVolume(audioFlinger->masterVolumeSW_l()),
mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
+ mMixerStatusIgnoringFastTracks(MIXER_IDLE),
standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
- mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
- mFastTrackNewMask(0)
+ // index 0 is reserved for normal mixer's submix
+ mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
{
-#if !LOG_NDEBUG
- memset(mFastTrackNewArray, 0, sizeof(mFastTrackNewArray));
-#endif
snprintf(mName, kNameLength, "AudioOut_%X", id);
readOutputParameters();
@@ -1550,8 +1548,7 @@
snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
result.append(buffer);
- result.append(" Name Client Type Fmt Chn mask Session Frames S M F SRate L dB R dB "
- "Server User Main buf Aux Buf\n");
+ Track::appendDumpHeader(result);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (track != 0) {
@@ -1562,8 +1559,7 @@
snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
result.append(buffer);
- result.append(" Name Client Type Fmt Chn mask Session Frames S M F SRate L dB R dB "
- "Server User Main buf Aux Buf\n");
+ Track::appendDumpHeader(result);
for (size_t i = 0; i < mActiveTracks.size(); ++i) {
sp<Track> track = mActiveTracks[i].promote();
if (track != 0) {
@@ -1676,7 +1672,7 @@
if (frameCount == 0) {
frameCount = mFrameCount * 2; // FIXME * 2 is due to SRC jitter, should be computed
}
- ALOGI("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
frameCount, mFrameCount);
} else {
ALOGW("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
@@ -1866,6 +1862,7 @@
void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
{
track->mState = TrackBase::TERMINATED;
+ // active tracks are removed by threadLoop()
if (mActiveTracks.indexOf(track) < 0) {
removeTrack_l(track);
}
@@ -1875,6 +1872,16 @@
{
mTracks.remove(track);
deleteTrackName_l(track->name());
+ // redundant as track is about to be destroyed, for dumpsys only
+ track->mName = -1;
+ if (track->isFastTrack()) {
+ int index = track->mFastIndex;
+ ALOG_ASSERT(0 < index && index < FastMixerState::kMaxFastTracks);
+ ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
+ mFastTrackAvailMask |= 1 << index;
+ // redundant as track is about to be destroyed, for dumpsys only
+ track->mFastIndex = -1;
+ }
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
chain->decTrackCnt();
@@ -2436,6 +2443,7 @@
acquireWakeLock_l();
mMixerStatus = MIXER_IDLE;
+ mMixerStatusIgnoringFastTracks = MIXER_IDLE;
checkSilentMode_l();
@@ -2449,6 +2457,7 @@
}
}
+ // mMixerStatusIgnoringFastTracks is also updated internally
mMixerStatus = prepareTracks_l(&tracksToRemove);
// prevent any changes in effect chain list and in each effect chain
@@ -2541,99 +2550,9 @@
return false;
}
-// FIXME This method needs a better name.
-// It pushes a new fast mixer state and returns (via tracksToRemove) a set of tracks to remove.
+// processes the set of removed tracks (tracksToRemove); the fast mixer state push now happens in prepareTracks_l()
void AudioFlinger::MixerThread::threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove)
{
- // were any of the removed tracks also fast tracks?
- unsigned removedMask = 0;
- for (size_t i = 0; i < tracksToRemove.size(); ++i) {
- if (tracksToRemove[i]->isFastTrack()) {
- int j = tracksToRemove[i]->mFastIndex;
- ALOG_ASSERT(0 < j && j < FastMixerState::kMaxFastTracks);
- removedMask |= 1 << j;
- }
- }
- Track* newArray[FastMixerState::kMaxFastTracks];
- unsigned newMask;
- {
- AutoMutex _l(mLock);
- mFastTrackAvailMask |= removedMask;
- newMask = mFastTrackNewMask;
- if (newMask) {
- mFastTrackNewMask = 0;
- memcpy(newArray, mFastTrackNewArray, sizeof(mFastTrackNewArray));
-#if !LOG_NDEBUG
- memset(mFastTrackNewArray, 0, sizeof(mFastTrackNewArray));
-#endif
- }
- }
- unsigned changedMask = newMask | removedMask;
- // are there any newly added or removed fast tracks?
- if (changedMask) {
-
- // This assert would be incorrect because it's theoretically possible (though unlikely)
- // for a track to be created and then removed within the same normal mix cycle:
- // ALOG_ASSERT(!(newMask & removedMask));
- // The converse, of removing a track and then creating a new track at the identical slot
- // within the same normal mix cycle, is impossible because the slot isn't marked available.
-
- // prepare a new state to push
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
- while (changedMask) {
- int j = __builtin_ctz(changedMask);
- ALOG_ASSERT(0 < j && j < FastMixerState::kMaxFastTracks);
- changedMask &= ~(1 << j);
- FastTrack *fastTrack = &state->mFastTracks[j];
- // must first do new tracks, then removed tracks, in case same track in both
- if (newMask & (1 << j)) {
- ALOG_ASSERT(!(state->mTrackMask & (1 << j)));
- ALOG_ASSERT(fastTrack->mBufferProvider == NULL &&
- fastTrack->mVolumeProvider == NULL);
- Track *track = newArray[j];
- AudioBufferProvider *abp = track;
- VolumeProvider *vp = track;
- fastTrack->mBufferProvider = abp;
- fastTrack->mVolumeProvider = vp;
- fastTrack->mSampleRate = track->mSampleRate;
- fastTrack->mChannelMask = track->mChannelMask;
- state->mTrackMask |= 1 << j;
- }
- if (removedMask & (1 << j)) {
- ALOG_ASSERT(state->mTrackMask & (1 << j));
- ALOG_ASSERT(fastTrack->mBufferProvider != NULL &&
- fastTrack->mVolumeProvider != NULL);
- fastTrack->mBufferProvider = NULL;
- fastTrack->mVolumeProvider = NULL;
- fastTrack->mSampleRate = mSampleRate;
- fastTrack->mChannelMask = AUDIO_CHANNEL_OUT_STEREO;
- state->mTrackMask &= ~(1 << j);
- }
- fastTrack->mGeneration++;
- }
- state->mFastTracksGen++;
- // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
- if (kUseFastMixer == FastMixer_Dynamic &&
- state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
- state->mCommand = FastMixerState::COLD_IDLE;
- state->mColdFutexAddr = &mFastMixerFutex;
- state->mColdGen++;
- mFastMixerFutex = 0;
- mNormalSink = mOutputSink;
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
- }
- sq->end();
- // If any fast tracks were removed, we must wait for acknowledgement
- // because we're about to decrement the last sp<> on those tracks.
- // Similarly if we put it into cold idle, need to wait for acknowledgement
- // so that it stops doing I/O.
- if (removedMask) {
- block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
- }
- sq->push(block);
- }
PlaybackThread::threadLoop_removeTracks(tracksToRemove);
}
@@ -2783,7 +2702,9 @@
size_t count = mActiveTracks.size();
size_t mixedTracks = 0;
size_t tracksWithEffect = 0;
+ // counts only _active_ fast tracks
size_t fastTracks = 0;
+ uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
float masterVolume = mMasterVolume;
bool masterMute = mMasterMute;
@@ -2800,6 +2721,16 @@
chain.clear();
}
+ // prepare a new state to push
+ FastMixerStateQueue *sq = NULL;
+ FastMixerState *state = NULL;
+ bool didModify = false;
+ FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
+ if (mFastMixer != NULL) {
+ sq = mFastMixer->sq();
+ state = sq->begin();
+ }
+
for (size_t i=0 ; i<count ; i++) {
sp<Track> t = mActiveTracks[i].promote();
if (t == 0) continue;
@@ -2807,13 +2738,98 @@
// this const just means the local variable doesn't change
Track* const track = t.get();
+ // process fast tracks
if (track->isFastTrack()) {
- // cache the combined master volume and stream type volume for fast mixer;
- // this lacks any synchronization or barrier so VolumeProvider may read a stale value
- track->mCachedVolume = masterVolume * mStreamTypes[track->streamType()].volume;
- ++fastTracks;
- if (track->isTerminated()) {
- tracksToRemove->add(track);
+
+ // It's theoretically possible (though unlikely) for a fast track to be created
+ // and then removed within the same normal mix cycle. This is not a problem, as
+ // the track never becomes active so its fast mixer slot is never touched.
+ // The converse, of removing an (active) track and then creating a new track
+ // at the identical fast mixer slot within the same normal mix cycle,
+ // is impossible because the slot isn't marked available until the end of each cycle.
+ int j = track->mFastIndex;
+ FastTrack *fastTrack = &state->mFastTracks[j];
+
+ // Determine whether the track is currently in underrun condition,
+ // and whether it had a recent underrun.
+ uint32_t underruns = mFastMixerDumpState.mTracks[j].mUnderruns;
+ uint32_t recentUnderruns = (underruns - (track->mObservedUnderruns & ~1)) >> 1;
+ // don't count underruns that occur while stopping or pausing
+ if (!(track->isStopped() || track->isPausing())) {
+ track->mUnderrunCount += recentUnderruns;
+ }
+ track->mObservedUnderruns = underruns;
+
+ // This is similar to the formula for normal tracks,
+ // with a few modifications for fast tracks.
+ bool isActive;
+ if (track->isStopped()) {
+ // track stays active after stop() until first underrun
+ isActive = recentUnderruns == 0;
+ } else if (track->isPaused() || track->isTerminated()) {
+ isActive = false;
+ } else if (track->isPausing()) {
+ // ramp down is not yet implemented
+ isActive = true;
+ track->setPaused();
+ } else if (track->isResuming()) {
+ // ramp up is not yet implemented
+ isActive = true;
+ track->mState = TrackBase::ACTIVE;
+ } else {
+ // no minimum frame count for fast tracks; continual underrun is allowed,
+ // but later could implement automatic pause after several consecutive underruns,
+ // or auto-mute yet still consider the track active and continue to service it
+ isActive = true;
+ }
+
+ if (isActive) {
+ // was it previously inactive?
+ if (!(state->mTrackMask & (1 << j))) {
+ ExtendedAudioBufferProvider *eabp = track;
+ VolumeProvider *vp = track;
+ fastTrack->mBufferProvider = eabp;
+ fastTrack->mVolumeProvider = vp;
+ fastTrack->mSampleRate = track->mSampleRate;
+ fastTrack->mChannelMask = track->mChannelMask;
+ fastTrack->mGeneration++;
+ state->mTrackMask |= 1 << j;
+ didModify = true;
+ // no acknowledgement required for newly active tracks
+ }
+ // cache the combined master volume and stream type volume for fast mixer; this
+ // lacks any synchronization or barrier so VolumeProvider may read a stale value
+ track->mCachedVolume = track->isMuted() ?
+ 0 : masterVolume * mStreamTypes[track->streamType()].volume;
+ ++fastTracks;
+ } else {
+ // was it previously active?
+ if (state->mTrackMask & (1 << j)) {
+ fastTrack->mBufferProvider = NULL;
+ fastTrack->mGeneration++;
+ state->mTrackMask &= ~(1 << j);
+ didModify = true;
+ // If any fast tracks were removed, we must wait for acknowledgement
+ // because we're about to decrement the last sp<> on those tracks.
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+ }
+ // Remainder of this block is copied from similar code for normal tracks
+ if (track->isStopped()) {
+ // Can't reset directly, as fast mixer is still polling this track
+ // track->reset();
+ // So instead mark this track as needing to be reset after push with ack
+ resetMask |= 1 << i;
+ }
+ // This would be incomplete if we auto-paused on underrun
+ size_t audioHALFrames =
+ (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ size_t framesWritten =
+ mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+ if (track->presentationComplete(framesWritten, audioHALFrames)) {
+ tracksToRemove->add(track);
+ }
+ // Avoids a misleading display in dumpsys
+ track->mObservedUnderruns &= ~1;
}
continue;
}
@@ -2832,7 +2848,7 @@
// during last round
uint32_t minFrames = 1;
if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
- (mMixerStatus == MIXER_TRACKS_READY)) {
+ (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
if (t->sampleRate() == (int)mSampleRate) {
minFrames = mNormalFrameCount;
} else {
@@ -2981,7 +2997,7 @@
// If one track is ready, set the mixer ready if:
// - the mixer was not ready during previous round OR
// - no other track is not ready
- if (mMixerStatus != MIXER_TRACKS_READY ||
+ if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
mixerStatus != MIXER_TRACKS_ENABLED) {
mixerStatus = MIXER_TRACKS_READY;
}
@@ -3009,12 +3025,13 @@
if (--(track->mRetryCount) <= 0) {
ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this);
tracksToRemove->add(track);
- // indicate to client process that the track was disabled because of underrun
+ // indicate to client process that the track was disabled because of underrun;
+ // it will then automatically call start() when data is available
android_atomic_or(CBLK_DISABLED_ON, &cblk->flags);
// If one track is not ready, mark the mixer also not ready if:
// - the mixer was ready during previous round OR
// - no other track is ready
- } else if (mMixerStatus == MIXER_TRACKS_READY ||
+ } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
mixerStatus != MIXER_TRACKS_READY) {
mixerStatus = MIXER_TRACKS_ENABLED;
}
@@ -3027,7 +3044,41 @@
}
- // FIXME Here is where we would push the new FastMixer state if necessary
+ // Push the new FastMixer state if necessary
+ if (didModify) {
+ state->mFastTracksGen++;
+ // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
+ if (kUseFastMixer == FastMixer_Dynamic &&
+ state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
+ state->mCommand = FastMixerState::COLD_IDLE;
+ state->mColdFutexAddr = &mFastMixerFutex;
+ state->mColdGen++;
+ mFastMixerFutex = 0;
+ if (kUseFastMixer == FastMixer_Dynamic) {
+ mNormalSink = mOutputSink;
+ }
+ // If we go into cold idle, need to wait for acknowledgement
+ // so that fast mixer stops doing I/O.
+ block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
+ }
+ }
+ if (sq != NULL) {
+ sq->end(didModify);
+ sq->push(block);
+ }
+
+ // Now perform the deferred reset on fast tracks that have stopped
+ while (resetMask != 0) {
+ size_t i = __builtin_ctz(resetMask);
+ ALOG_ASSERT(i < count);
+ resetMask &= ~(1 << i);
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t == 0) continue;
+ Track* track = t.get();
+ ALOG_ASSERT(track->isFastTrack() && track->isStopped());
+ track->reset();
+ }
// remove all the tracks that need to be...
count = tracksToRemove->size();
@@ -3057,6 +3108,7 @@
}
// if any fast tracks, then status is ready
+ mMixerStatusIgnoringFastTracks = mixerStatus;
if (fastTracks > 0) {
mixerStatus = MIXER_TRACKS_READY;
}
@@ -3940,6 +3992,7 @@
{
buffer->raw = NULL;
mFrameCount = buffer->frameCount;
+ // FIXME See note at getNextBuffer()
(void) step(); // ignore return value of step()
buffer->frameCount = 0;
}
@@ -4021,6 +4074,8 @@
mPresentationCompleteFrames(0),
mFlags(flags),
mFastIndex(-1),
+ mObservedUnderruns(0),
+ mUnderrunCount(0),
mCachedVolume(1.0)
{
if (mCblk != NULL) {
@@ -4032,18 +4087,22 @@
ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
int i = __builtin_ctz(thread->mFastTrackAvailMask);
ALOG_ASSERT(0 < i && i < FastMixerState::kMaxFastTracks);
+ // FIXME This is too eager. We allocate a fast track index before the
+ // fast track becomes active. Since fast tracks are a scarce resource,
+ // this means we are potentially denying other more important fast tracks from
+ // being created. It would be better to allocate the index dynamically.
mFastIndex = i;
+ // Read the initial underruns because this field is never cleared by the fast mixer
+ mObservedUnderruns = thread->getFastTrackUnderruns(i) & ~1;
thread->mFastTrackAvailMask &= ~(1 << i);
- // Although we've allocated an index, we can't mutate or push a new fast track state
- // here, because that data structure can only be changed within the normal mixer
- // threadLoop(). So instead, make a note to mutate and push later.
- thread->mFastTrackNewArray[i] = this;
- thread->mFastTrackNewMask |= 1 << i;
}
// to avoid leaking a track name, do not allocate one unless there is an mCblk
mName = thread->getTrackName_l((audio_channel_mask_t)channelMask);
if (mName < 0) {
ALOGE("no more track names available");
+ // FIXME bug - if sufficient fast track indices, but insufficient normal mixer names,
+ // then we leak a fast track index. Should swap these two sections, or better yet
+ // only allocate a normal mixer name for normal tracks.
}
}
ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
@@ -4091,22 +4150,59 @@
}
}
+/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
+{
+ result.append(" Name Client Type Fmt Chn mask Session Frames S M F SRate L dB R dB "
+ " Server User Main buf Aux Buf FastUnder\n");
+
+}
+
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
uint32_t vlr = mCblk->getVolumeLR();
if (isFastTrack()) {
- strcpy(buffer, " fast");
+ sprintf(buffer, " F %2d", mFastIndex);
} else {
sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
}
- snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %1d %1d %1d %5u %5.2g %5.2g 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ track_state state = mState;
+ char stateChar;
+ switch (state) {
+ case IDLE:
+ stateChar = 'I';
+ break;
+ case TERMINATED:
+ stateChar = 'T';
+ break;
+ case STOPPED:
+ stateChar = 'S';
+ break;
+ case RESUMING:
+ stateChar = 'R';
+ break;
+ case ACTIVE:
+ stateChar = 'A';
+ break;
+ case PAUSING:
+ stateChar = 'p';
+ break;
+ case PAUSED:
+ stateChar = 'P';
+ break;
+ default:
+ stateChar = '?';
+ break;
+ }
+ bool nowInUnderrun = mObservedUnderruns & 1;
+ snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %1c %1d %1d %5u %5.2g %5.2g "
+ "0x%08x 0x%08x 0x%08x 0x%08x %9u%c\n",
(mClient == 0) ? getpid_cached : mClient->pid(),
mStreamType,
mFormat,
mChannelMask,
mSessionId,
mFrameCount,
- mState,
+ stateChar,
mMute,
mFillingUpStatus,
mCblk->sampleRate,
@@ -4115,7 +4211,9 @@
mCblk->server,
mCblk->user,
(int)mMainBuffer,
- (int)mAuxBuffer);
+ (int)mAuxBuffer,
+ mUnderrunCount,
+ nowInUnderrun ? '*' : ' ');
}
// AudioBufferProvider interface
@@ -4128,11 +4226,19 @@
// Check if last stepServer failed, try to step now
if (mStepServerFailed) {
+ // FIXME When called by fast mixer, this takes a mutex with tryLock().
+ // Since the fast mixer is higher priority than client callback thread,
+ // it does not result in priority inversion for client.
+ // But a non-blocking solution would be preferable to avoid
+ // fast mixer being unable to tryLock(), and
+ // to avoid the extra context switches if the client wakes up,
+ // discovers the mutex is locked, then has to wait for fast mixer to unlock.
if (!step()) goto getNextBuffer_exit;
ALOGV("stepServer recovered");
mStepServerFailed = false;
}
+ // FIXME Same as above
framesReady = cblk->framesReady();
if (CC_LIKELY(framesReady)) {
@@ -4161,10 +4267,19 @@
return NOT_ENOUGH_DATA;
}
-uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+// Note that framesReady() takes a mutex on the control block using tryLock().
+// This could result in priority inversion if framesReady() is called by the normal mixer,
+// as the normal mixer thread runs at lower
+// priority than the client's callback thread: there is a short window within framesReady()
+// during which the normal mixer could be preempted, and the client callback would block.
+// Another problem can occur if framesReady() is called by the fast mixer:
+// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
+// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
+size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
return mCblk->framesReady();
}
+// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
@@ -4895,7 +5010,7 @@
buffer->frameCount = 0;
}
-uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
Mutex::Autolock _l(mTimedBufferQueueLock);
return mFramesPendingInQueue;
}
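
Aside (illustrative, not part of the patch): the Track constructor and removeTrack_l() changes above manage fast mixer slots through mFastTrackAvailMask, where a set bit means the slot is free and slot 0 stays reserved for the normal mixer's submix. A standalone sketch of that allocator, with a hypothetical slot count and helper names:

    #include <stdint.h>

    static const unsigned kFastSlots = 8;               // illustrative; the real bound is
                                                        // FastMixerState::kMaxFastTracks

    // every slot free except slot 0 (the normal mixer's submix)
    static unsigned initialAvailMask() {
        return ((1u << kFastSlots) - 1) & ~1u;
    }

    // returns the allocated slot index, or -1 if no fast slot is free
    static int allocateFastIndex(unsigned &availMask) {
        if (availMask == 0) {
            return -1;
        }
        int i = __builtin_ctz(availMask);               // lowest free slot; never 0
        availMask &= ~(1u << i);
        return i;
    }

    static void freeFastIndex(unsigned &availMask, int i) {
        availMask |= 1u << i;                           // slot becomes available again
    }
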
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 9a0bbcd..f10295f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -46,6 +46,7 @@
#include <hardware/audio_policy.h>
#include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
#include "FastMixer.h"
#include "NBAIO.h"
@@ -355,7 +356,7 @@
void clearPowerManager();
// base for record and playback
- class TrackBase : public AudioBufferProvider, public RefBase {
+ class TrackBase : public ExtendedAudioBufferProvider, public RefBase {
public:
enum track_state {
@@ -396,6 +397,10 @@
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+ // ExtendedAudioBufferProvider interface is only needed for Track,
+ // but putting it in TrackBase avoids the complexity of virtual inheritance
+ virtual size_t framesReady() const { return SIZE_MAX; }
+
audio_format_t format() const {
return mFormat;
}
@@ -676,6 +681,7 @@
IAudioFlinger::track_flags_t flags);
virtual ~Track();
+ static void appendDumpHeader(String8& result);
void dump(char* buffer, size_t size);
virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
int triggerSession = 0);
@@ -699,11 +705,6 @@
int16_t *mainBuffer() const { return mMainBuffer; }
int auxEffectId() const { return mAuxEffectId; }
-#if 0
- bool isFastTrack() const
- { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
-#endif
-
// implement FastMixerState::VolumeProvider interface
virtual uint32_t getVolumeLR();
@@ -720,7 +721,7 @@
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
// releaseBuffer() not overridden
- virtual uint32_t framesReady() const;
+ virtual size_t framesReady() const;
bool isMuted() const { return mMute; }
bool isPausing() const {
@@ -729,6 +730,9 @@
bool isPaused() const {
return mState == PAUSED;
}
+ bool isResuming() const {
+ return mState == RESUMING;
+ }
bool isReady() const;
void setPaused() { mState = PAUSED; }
void reset();
@@ -756,7 +760,9 @@
const sp<IMemory> mSharedBuffer;
bool mResetDone;
const audio_stream_type_t mStreamType;
- int mName; // track name on the normal mixer
+ int mName; // track name on the normal mixer,
+ // allocated statically at track creation time,
+ // and is even allocated (though unused) for fast tracks
int16_t *mMainBuffer;
int32_t *mAuxBuffer;
int mAuxEffectId;
@@ -765,7 +771,17 @@
// when this track will be fully rendered
private:
IAudioFlinger::track_flags_t mFlags;
- int mFastIndex; // index within FastMixerState::mFastTracks[] or -1
+
+ // The following fields are only for fast tracks, and should be in a subclass
+ int mFastIndex; // index within FastMixerState::mFastTracks[];
+ // either mFastIndex == -1
+ // or 0 < mFastIndex < FastMixerState::kMaxFastTracks because
+ // index 0 is reserved for normal mixer's submix;
+ // index is allocated statically at track creation time
+ // but the slot is only used if track is active
+ uint32_t mObservedUnderruns; // Most recently observed value of
+ // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
+ uint32_t mUnderrunCount; // Counter of total number of underruns, never reset
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
@@ -800,7 +816,7 @@
// Mixer facing methods.
virtual bool isTimedTrack() const { return true; }
- virtual uint32_t framesReady() const;
+ virtual size_t framesReady() const;
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
@@ -921,9 +937,9 @@
virtual void threadLoop_standby();
virtual void threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove) { }
- // prepareTracks_l reads and writes mActiveTracks, and also returns the
- // pending set of tracks to remove via Vector 'tracksToRemove'. The caller is
- // responsible for clearing or destroying this Vector later on, when it
+ // prepareTracks_l reads and writes mActiveTracks, and returns
+ // the pending set of tracks to remove via Vector 'tracksToRemove'. The caller
+ // is responsible for clearing or destroying this Vector later on, when it
// is safe to do so. That will drop the final ref count and destroy the tracks.
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
@@ -993,7 +1009,7 @@
bool mMasterMute;
void setMasterMute_l(bool muted) { mMasterMute = muted; }
protected:
- SortedVector< wp<Track> > mActiveTracks;
+ SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<>
// Allocate a track name for a given channel mask.
// Returns name >= 0 if successful, -1 on failure.
@@ -1056,6 +1072,8 @@
// mixer status returned by prepareTracks_l()
mixer_state mMixerStatus; // current cycle
// previous cycle when in prepareTracks_l()
+ mixer_state mMixerStatusIgnoringFastTracks;
+ // FIXME or a separate ready state per track
// FIXME move these declarations into the specific sub-class that needs them
// MIXER only
@@ -1080,12 +1098,11 @@
sp<NBAIO_Sink> mNormalSink;
public:
virtual bool hasFastMixer() const = 0;
+ virtual uint32_t getFastTrackUnderruns(size_t fastIndex) const { return 0; }
protected:
// accessed by both binder threads and within threadLoop(), lock on mutex needed
unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available
- unsigned mFastTrackNewMask; // bit i set if fast track [i] just created
- Track* mFastTrackNewArray[FastMixerState::kMaxFastTracks];
};
@@ -1136,6 +1153,11 @@
public:
virtual bool hasFastMixer() const { return mFastMixer != NULL; }
+ virtual uint32_t getFastTrackUnderruns(size_t fastIndex) const {
+ ALOG_ASSERT(0 < fastIndex &&
+ fastIndex < FastMixerState::kMaxFastTracks);
+ return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
+ }
};
class DirectOutputThread : public PlaybackThread {
diff --git a/services/audioflinger/ExtendedAudioBufferProvider.h b/services/audioflinger/ExtendedAudioBufferProvider.h
new file mode 100644
index 0000000..88279b4
--- /dev/null
+++ b/services/audioflinger/ExtendedAudioBufferProvider.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
+#define ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
+
+#include "AudioBufferProvider.h"
+
+namespace android {
+
+class ExtendedAudioBufferProvider : public AudioBufferProvider {
+public:
+ virtual size_t framesReady() const = 0; // see description at AudioFlinger.h
+};
+
+} // namespace android
+
+#endif // ANDROID_EXTENDED_AUDIO_BUFFER_PROVIDER_H
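
Aside (illustrative, not AOSP code): the comments added around framesReady() in this change note that the current control-block implementation takes a tryLock, and that a non-blocking FIFO would be preferable. One way to satisfy a lock-free framesReady() contract under a single-writer/single-reader assumption is a pair of monotonic frame counters; the class name and use of C++11 atomics here are purely hypothetical:

    #include <atomic>
    #include <stddef.h>
    #include <stdint.h>

    class MonotonicCounterProvider {
    public:
        // writer side: frames appended to the underlying buffer
        void framesWritten(uint32_t n) { mWritten.fetch_add(n, std::memory_order_release); }
        // reader side: frames consumed from the underlying buffer
        void framesConsumed(uint32_t n) { mRead.fetch_add(n, std::memory_order_relaxed); }

        // lock-free equivalent of framesReady(); 32-bit wrap-around cancels out
        // in the unsigned subtraction
        size_t framesReady() const {
            return mWritten.load(std::memory_order_acquire) -
                   mRead.load(std::memory_order_relaxed);
        }

    private:
        std::atomic<uint32_t> mWritten{0};
        std::atomic<uint32_t> mRead{0};
    };
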
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 841b06a..bf264be 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -29,6 +29,7 @@
#define FAST_HOT_IDLE_NS 1000000L // 1 ms: time to sleep while hot idling
#define FAST_DEFAULT_NS 999999999L // ~1 sec: default time to sleep
+#define MAX_WARMUP_CYCLES 10 // maximum number of loop cycles to wait for warmup
namespace android {
@@ -58,8 +59,9 @@
unsigned sampleRate = 0;
int fastTracksGen = 0;
long periodNs = 0; // expected period; the time required to render one mix buffer
- long underrunNs = 0; // an underrun is likely if an actual cycle is greater than this value
- long overrunNs = 0; // an overrun is likely if an actual cycle if less than this value
+ long underrunNs = 0; // underrun likely when write cycle is greater than this value
+ long overrunNs = 0; // overrun likely when write cycle is less than this value
+ long warmupNs = 0; // warmup complete when write cycle is greater than this value
FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState;
bool ignoreNextOverrun = true; // used to ignore initial overrun and first after an underrun
#ifdef FAST_MIXER_STATISTICS
@@ -67,6 +69,9 @@
static const unsigned kMaxSamples = 1000;
#endif
unsigned coldGen = 0; // last observed mColdGen
+ bool isWarm = false; // true means ready to mix, false means wait for warmup before mixing
+ struct timespec measuredWarmupTs = {0, 0}; // how long did it take for warmup to complete
+ uint32_t warmupCycles = 0; // counter of number of loop cycles required to warmup
for (;;) {
@@ -138,6 +143,12 @@
if (old <= 0) {
__futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
}
+ // This may be overly conservative; there could be times that the normal mixer
+ // requests such a brief cold idle that it doesn't require resetting this flag.
+ isWarm = false;
+ measuredWarmupTs.tv_sec = 0;
+ measuredWarmupTs.tv_nsec = 0;
+ warmupCycles = 0;
sleepNs = -1;
coldGen = current->mColdGen;
} else {
@@ -195,6 +206,7 @@
periodNs = (frameCount * 1000000000LL) / sampleRate; // 1.00
underrunNs = (frameCount * 1750000000LL) / sampleRate; // 1.75
overrunNs = (frameCount * 250000000LL) / sampleRate; // 0.25
+ warmupNs = (frameCount * 500000000LL) / sampleRate; // 0.50
} else {
periodNs = 0;
underrunNs = 0;
@@ -226,6 +238,7 @@
i = __builtin_ctz(removedTracks);
removedTracks &= ~(1 << i);
const FastTrack* fastTrack = &current->mFastTracks[i];
+ ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
if (mixer != NULL) {
name = fastTrackNames[i];
ALOG_ASSERT(name >= 0);
@@ -234,6 +247,7 @@
#if !LOG_NDEBUG
fastTrackNames[i] = -1;
#endif
+ // don't reset track dump state, since other side is ignoring it
generations[i] = fastTrack->mGeneration;
}
@@ -313,13 +327,13 @@
}
// do work using current state here
- if ((command & FastMixerState::MIX) && (mixer != NULL)) {
+ if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
ALOG_ASSERT(mixBuffer != NULL);
- // update volumes
- unsigned volumeTracks = current->mTrackMask;
- while (volumeTracks != 0) {
- i = __builtin_ctz(volumeTracks);
- volumeTracks &= ~(1 << i);
+ // for each track, update volume and check for underrun
+ unsigned currentTrackMask = current->mTrackMask;
+ while (currentTrackMask != 0) {
+ i = __builtin_ctz(currentTrackMask);
+ currentTrackMask &= ~(1 << i);
const FastTrack* fastTrack = &current->mFastTracks[i];
int name = fastTrackNames[i];
ALOG_ASSERT(name >= 0);
@@ -330,6 +344,25 @@
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
(void *)(vlr >> 16));
}
+ // FIXME The current implementation of framesReady() for fast tracks
+ // takes a tryLock, which can block
+ // up to 1 ms. If enough active tracks all blocked in sequence, this would result
+ // in the overall fast mix cycle being delayed. Should use a non-blocking FIFO.
+ size_t framesReady = fastTrack->mBufferProvider->framesReady();
+ FastTrackDump *ftDump = &dumpState->mTracks[i];
+ uint32_t underruns = ftDump->mUnderruns;
+ if (framesReady < frameCount) {
+ ftDump->mUnderruns = (underruns + 2) | 1;
+ if (framesReady == 0) {
+ mixer->disable(name);
+ } else {
+ // allow mixing partial buffer
+ mixer->enable(name);
+ }
+ } else if (underruns & 1) {
+ ftDump->mUnderruns = underruns & ~1;
+ mixer->enable(name);
+ }
}
// process() is CPU-bound
mixer->process(AudioBufferProvider::kInvalidPTS);
@@ -337,6 +370,8 @@
} else if (mixBufferState == MIXED) {
mixBufferState = UNDEFINED;
}
+ bool attemptedWrite = false;
+ //bool didFullWrite = false; // dumpsys could display a count of partial writes
if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
if (mixBufferState == UNDEFINED) {
memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
@@ -348,10 +383,15 @@
ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
dumpState->mWriteSequence++;
if (framesWritten >= 0) {
+ ALOG_ASSERT(framesWritten <= frameCount);
dumpState->mFramesWritten += framesWritten;
+ //if ((size_t) framesWritten == frameCount) {
+ // didFullWrite = true;
+ //}
} else {
dumpState->mWriteErrors++;
}
+ attemptedWrite = true;
// FIXME count # of writes blocked excessively, CPU usage, etc. for dump
}
@@ -368,6 +408,27 @@
--sec;
nsec += 1000000000;
}
+ // To avoid an initial underrun on fast tracks after exiting standby,
+ // do not start pulling data from tracks and mixing until warmup is complete.
+ // Warmup is considered complete after the earlier of:
+ // first successful single write() that blocks for more than warmupNs
+ // MAX_WARMUP_CYCLES write() attempts.
+ // This is overly conservative, but to get better accuracy requires a new HAL API.
+ if (!isWarm && attemptedWrite) {
+ measuredWarmupTs.tv_sec += sec;
+ measuredWarmupTs.tv_nsec += nsec;
+ if (measuredWarmupTs.tv_nsec >= 1000000000) {
+ measuredWarmupTs.tv_sec++;
+ measuredWarmupTs.tv_nsec -= 1000000000;
+ }
+ ++warmupCycles;
+ if ((attemptedWrite && nsec > warmupNs) ||
+ (warmupCycles >= MAX_WARMUP_CYCLES)) {
+ isWarm = true;
+ dumpState->mMeasuredWarmupTs = measuredWarmupTs;
+ dumpState->mWarmupCycles = warmupCycles;
+ }
+ }
if (sec > 0 || nsec > underrunNs) {
// FIXME only log occasionally
ALOGV("underrun: time since last cycle %d.%03ld sec",
@@ -421,11 +482,13 @@
FastMixerDumpState::FastMixerDumpState() :
mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
- mSampleRate(0), mFrameCount(0)
+ mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0)
#ifdef FAST_MIXER_STATISTICS
, mMean(0.0), mMinimum(0.0), mMaximum(0.0), mStddev(0.0)
#endif
{
+ mMeasuredWarmupTs.tv_sec = 0;
+ mMeasuredWarmupTs.tv_nsec = 0;
}
FastMixerDumpState::~FastMixerDumpState()
@@ -462,12 +525,14 @@
snprintf(string, COMMAND_MAX, "%d", mCommand);
break;
}
+ double mMeasuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
+ (mMeasuredWarmupTs.tv_nsec / 1000000.0);
fdprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n"
" numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
- " sampleRate=%u frameCount=%u\n",
+ " sampleRate=%u frameCount=%u measuredWarmup=%.3g ms, warmupCycles=%u\n",
string, mWriteSequence, mFramesWritten,
mNumTracks, mWriteErrors, mUnderruns, mOverruns,
- mSampleRate, mFrameCount);
+ mSampleRate, mFrameCount, mMeasuredWarmupMs, mWarmupCycles);
#ifdef FAST_MIXER_STATISTICS
fdprintf(fd, " cycle time in ms: mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
mMean*1e3, mMinimum*1e3, mMaximum*1e3, mStddev*1e3);
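
Aside (illustrative, not part of the patch): a standalone sketch of the warmup accounting added above, with hypothetical names. The blocking time of each attempted write() is accumulated, and mixing is enabled once a single write blocked longer than warmupNs or after a bounded number of cycles:

    #include <time.h>
    #include <stdint.h>

    struct WarmupTracker {
        struct timespec measured = {0, 0};  // total time spent warming up
        uint32_t cycles = 0;                // number of write() attempts so far
        bool isWarm = false;
    };

    // 'sec'/'nsec' describe how long the most recent write() attempt blocked.
    static void accountWarmupCycle(WarmupTracker &w, time_t sec, long nsec,
                                   long warmupNs, uint32_t maxCycles) {
        if (w.isWarm) {
            return;
        }
        w.measured.tv_sec += sec;
        w.measured.tv_nsec += nsec;
        if (w.measured.tv_nsec >= 1000000000L) {
            w.measured.tv_sec++;
            w.measured.tv_nsec -= 1000000000L;
        }
        ++w.cycles;
        if (nsec > warmupNs || w.cycles >= maxCycles) {
            w.isWarm = true;                // safe to start pulling data from fast tracks
        }
    }
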
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 8a8fcb8..a6dd310 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -42,8 +42,23 @@
}; // class FastMixer
+// Represents the dump state of a fast track
+struct FastTrackDump {
+ FastTrackDump() : mUnderruns(0) { }
+ /*virtual*/ ~FastTrackDump() { }
+ uint32_t mUnderruns; // Underrun status, represented as follows:
+ // bit 0 == 0 means not currently in underrun
+ // bit 0 == 1 means currently in underrun
+ // bits 1 to 31 == total number of underruns
+ // Not reset to zero for new tracks or if track generation changes.
+ // This representation is used to keep the information atomic.
+};
+
// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys.
-// Since used non-atomically, only POD types are permitted, and the contents can't be trusted.
+// Each individual native word-sized field is accessed atomically. But the
+// overall structure is non-atomic, that is, there may be an inconsistency between fields.
+// No barriers or locks are used for either writing or reading.
+// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
struct FastMixerDumpState {
FastMixerDumpState();
@@ -60,6 +75,9 @@
uint32_t mOverruns; // total number of overruns
uint32_t mSampleRate;
size_t mFrameCount;
+ struct timespec mMeasuredWarmupTs; // measured warmup time
+ uint32_t mWarmupCycles; // number of loop cycles required to warmup
+ FastTrackDump mTracks[FastMixerState::kMaxFastTracks];
#ifdef FAST_MIXER_STATISTICS
// cycle times in seconds
float mMean;
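
Aside (illustrative, not part of the patch): FastTrackDump::mUnderruns above packs the "currently in underrun" flag into bit 0 and a monotonic underrun counter into bits 1..31, so the whole status fits in one atomically readable word. A standalone sketch of that encoding and of the delta prepareTracks_l() computes against its previously observed snapshot (helper names are hypothetical):

    #include <stdint.h>

    // bit 0: currently in underrun; bits 1..31: total number of underruns
    static uint32_t markUnderrun(uint32_t word)      { return (word + 2) | 1; }
    static uint32_t clearUnderrunFlag(uint32_t word) { return word & ~1u; }
    static bool     nowInUnderrun(uint32_t word)     { return (word & 1) != 0; }
    static uint32_t totalUnderruns(uint32_t word)    { return word >> 1; }

    // underruns since 'observed', an earlier snapshot of the same word
    static uint32_t recentUnderruns(uint32_t word, uint32_t observed) {
        return (word - (observed & ~1u)) >> 1;
    }
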
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 83094c8..ce0cdb5 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -18,7 +18,7 @@
#define ANDROID_AUDIO_FAST_MIXER_STATE_H
#include <system/audio.h>
-#include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
#include "NBAIO.h"
namespace android {
@@ -40,7 +40,7 @@
FastTrack();
/*virtual*/ ~FastTrack();
- AudioBufferProvider* mBufferProvider; // must not be NULL
+ ExtendedAudioBufferProvider* mBufferProvider; // must be NULL if inactive, or non-NULL if active
VolumeProvider* mVolumeProvider; // optional; if NULL then full-scale
unsigned mSampleRate; // optional; if zero then use mixer sample rate
audio_channel_mask_t mChannelMask; // AUDIO_CHANNEL_OUT_MONO or AUDIO_CHANNEL_OUT_STEREO
@@ -57,7 +57,7 @@
// all pointer fields use raw pointers; objects are owned and ref-counted by the normal mixer
FastTrack mFastTracks[kMaxFastTracks];
int mFastTracksGen; // increment when any mFastTracks[i].mGeneration is incremented
- unsigned mTrackMask; // bit i is set if and only if mFastTracks[i] != NULL
+ unsigned mTrackMask; // bit i is set if and only if mFastTracks[i] is active
NBAIO_Sink* mOutputSink; // HAL output device, must already be negotiated
int mOutputSinkGen; // increment when mOutputSink is assigned
size_t mFrameCount; // number of frames per fast mix buffer
diff --git a/services/audioflinger/SourceAudioBufferProvider.cpp b/services/audioflinger/SourceAudioBufferProvider.cpp
index e9e8c16..e9d6d2c 100644
--- a/services/audioflinger/SourceAudioBufferProvider.cpp
+++ b/services/audioflinger/SourceAudioBufferProvider.cpp
@@ -95,4 +95,10 @@
mGetCount = 0;
}
+size_t SourceAudioBufferProvider::framesReady() const
+{
+ ssize_t avail = mSource->availableToRead();
+ return avail < 0 ? 0 : (size_t) avail;
+}
+
} // namespace android
diff --git a/services/audioflinger/SourceAudioBufferProvider.h b/services/audioflinger/SourceAudioBufferProvider.h
index 3219d78..85ccbb2 100644
--- a/services/audioflinger/SourceAudioBufferProvider.h
+++ b/services/audioflinger/SourceAudioBufferProvider.h
@@ -20,11 +20,11 @@
#define ANDROID_SOURCE_AUDIO_BUFFER_PROVIDER_H
#include "NBAIO.h"
-#include "AudioBufferProvider.h"
+#include "ExtendedAudioBufferProvider.h"
namespace android {
-class SourceAudioBufferProvider : public AudioBufferProvider {
+class SourceAudioBufferProvider : public ExtendedAudioBufferProvider {
public:
SourceAudioBufferProvider(const sp<NBAIO_Source>& source);
@@ -34,6 +34,9 @@
virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
virtual void releaseBuffer(Buffer *buffer);
+ // ExtendedAudioBufferProvider interface
+ virtual size_t framesReady() const;
+
private:
const sp<NBAIO_Source> mSource; // the wrapped source
/*const*/ size_t mFrameBitShift; // log2(frame size in bytes)