Merge "SurfaceMediaSource: use the vid enc usage bit" into ics-mr1
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
index cc63356..19646b0 100644
--- a/include/media/IStreamSource.h
+++ b/include/media/IStreamSource.h
@@ -52,15 +52,20 @@
static const char *const kKeyResumeAtPTS;
// When signalling a discontinuity you can optionally
- // signal that this is a "hard" discontinuity, i.e. the format
- // or configuration of subsequent stream data differs from that
- // currently active. To do so, include a non-zero int32_t value
- // under the key "kKeyFormatChange" when issuing the DISCONTINUITY
+ // specify the type(s) of discontinuity, i.e. whether the
+ // audio format has changed, the video format has changed,
+ // time has jumped, or any combination thereof.
+ // To do so, include a non-zero int32_t value
+ // under the key "kKeyDiscontinuityMask" when issuing the DISCONTINUITY
// command.
- // The new logical stream must start with proper codec initialization
+ // If there is a change in audio/video format, the new logical stream
+ // must start with proper codec initialization
// information for playback to continue, i.e. SPS and PPS in the case
// of AVC video etc.
- static const char *const kKeyFormatChange;
+ // If this key is not present, only a time discontinuity is assumed.
+ // The value should be a bitmask of values from
+ // ATSParser::DiscontinuityType.
+ static const char *const kKeyDiscontinuityMask;
virtual void issueCommand(
Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0;
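Illustration (not part of the patch): a push-streaming client signals the new mask via the IStreamListener it received through IStreamSource::setListener(). In this sketch, "listener" and "resumePTS" are assumed to be provided by the client; only the audio format and the clock change here, so the video decoder is flushed but not torn down. A client that cannot include the internal ATSParser.h header would pass the equivalent numeric mask.

    sp<AMessage> msg = new AMessage;
    msg->setInt32(
            IStreamListener::kKeyDiscontinuityMask,
            ATSParser::DISCONTINUITY_AUDIO_FORMAT
                | ATSParser::DISCONTINUITY_TIME);
    msg->setInt64(IStreamListener::kKeyResumeAtPTS, resumePTS);  // optional
    listener->issueCommand(
            IStreamListener::DISCONTINUITY, false /* synchronous */, msg);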
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 5822877..3963d9c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -166,6 +166,8 @@
bool allYourBuffersAreBelongToUs();
+ size_t countBuffersOwnedByComponent(OMX_U32 portIndex) const;
+
void deferMessage(const sp<AMessage> &msg);
void processDeferredMessages();
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 8c1c593..446720b 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -153,6 +153,9 @@
bool mStarted;
int32_t mNumFramesEncoded;
+ // Time between capture of two frames.
+ int64_t mTimeBetweenFrameCaptureUs;
+
CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
Size videoSize, int32_t frameRate,
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 0e264c7..b060691 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -57,10 +57,6 @@
int32_t mVideoWidth;
int32_t mVideoHeight;
- // Time between capture of two frames during time lapse recording
- // Negative value indicates that timelapse is disabled.
- int64_t mTimeBetweenTimeLapseFrameCaptureUs;
-
// Time between two frames in final video (1/frameRate)
int64_t mTimeBetweenTimeLapseVideoFramesUs;
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index c21d19d..84f8282 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -336,6 +336,10 @@
int64_t retrieveDecodingTimeUs(bool isCodecSpecific);
+ status_t parseAVCCodecSpecificData(
+ const void *data, size_t size,
+ unsigned *profile, unsigned *level);
+
OMXCodec(const OMXCodec &);
OMXCodec &operator=(const OMXCodec &);
};
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index b311f35..078be94 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -30,7 +30,7 @@
const char *const IStreamListener::kKeyResumeAtPTS = "resume-at-PTS";
// static
-const char *const IStreamListener::kKeyFormatChange = "format-change";
+const char *const IStreamListener::kKeyDiscontinuityMask = "discontinuity-mask";
enum {
// IStreamSource
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 2a5c0a6..93ab704 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -54,6 +54,7 @@
mVideoEOS(false),
mScanSourcesPending(false),
mScanSourcesGeneration(0),
+ mTimeDiscontinuityPending(false),
mFlushingAudio(NONE),
mFlushingVideo(NONE),
mResetInProgress(false),
@@ -462,11 +463,24 @@
{
LOGV("kWhatReset");
+ if (mRenderer != NULL) {
+ // There's an edge case where the renderer owns all output
+ // buffers and is paused, so the decoder will not read
+ // more input data and will never encounter the matching
+ // discontinuity. To avoid this, we resume the renderer.
+
+ if (mFlushingAudio == AWAITING_DISCONTINUITY
+ || mFlushingVideo == AWAITING_DISCONTINUITY) {
+ mRenderer->resume();
+ }
+ }
+
if (mFlushingAudio != NONE || mFlushingVideo != NONE) {
// We're currently flushing, postpone the reset until that's
// completed.
- LOGV("postponing reset");
+ LOGV("postponing reset mFlushingAudio=%d, mFlushingVideo=%d",
+ mFlushingAudio, mFlushingVideo);
mResetPostponed = true;
break;
@@ -477,6 +491,8 @@
break;
}
+ mTimeDiscontinuityPending = true;
+
if (mAudioDecoder != NULL) {
flushDecoder(true /* audio */, true /* needShutdown */);
}
@@ -540,7 +556,10 @@
LOGV("both audio and video are flushed now.");
- mRenderer->signalTimeDiscontinuity();
+ if (mTimeDiscontinuityPending) {
+ mRenderer->signalTimeDiscontinuity();
+ mTimeDiscontinuityPending = false;
+ }
if (mAudioDecoder != NULL) {
mAudioDecoder->signalResume();
@@ -663,10 +682,15 @@
CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
bool formatChange =
- type == ATSParser::DISCONTINUITY_FORMATCHANGE;
+ (audio &&
+ (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT))
+ || (!audio &&
+ (type & ATSParser::DISCONTINUITY_VIDEO_FORMAT));
- LOGV("%s discontinuity (formatChange=%d)",
- audio ? "audio" : "video", formatChange);
+ bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0;
+
+ LOGI("%s discontinuity (formatChange=%d, time=%d)",
+ audio ? "audio" : "video", formatChange, timeChange);
if (audio) {
mSkipRenderingAudioUntilMediaTimeUs = -1;
@@ -674,26 +698,45 @@
mSkipRenderingVideoUntilMediaTimeUs = -1;
}
- sp<AMessage> extra;
- if (accessUnit->meta()->findMessage("extra", &extra)
- && extra != NULL) {
- int64_t resumeAtMediaTimeUs;
- if (extra->findInt64(
- "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
- LOGI("suppressing rendering of %s until %lld us",
- audio ? "audio" : "video", resumeAtMediaTimeUs);
+ if (timeChange) {
+ sp<AMessage> extra;
+ if (accessUnit->meta()->findMessage("extra", &extra)
+ && extra != NULL) {
+ int64_t resumeAtMediaTimeUs;
+ if (extra->findInt64(
+ "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
+ LOGI("suppressing rendering of %s until %lld us",
+ audio ? "audio" : "video", resumeAtMediaTimeUs);
- if (audio) {
- mSkipRenderingAudioUntilMediaTimeUs =
- resumeAtMediaTimeUs;
- } else {
- mSkipRenderingVideoUntilMediaTimeUs =
- resumeAtMediaTimeUs;
+ if (audio) {
+ mSkipRenderingAudioUntilMediaTimeUs =
+ resumeAtMediaTimeUs;
+ } else {
+ mSkipRenderingVideoUntilMediaTimeUs =
+ resumeAtMediaTimeUs;
+ }
}
}
}
- flushDecoder(audio, formatChange);
+ mTimeDiscontinuityPending =
+ mTimeDiscontinuityPending || timeChange;
+
+ if (formatChange || timeChange) {
+ flushDecoder(audio, formatChange);
+ } else {
+ // This stream is unaffected by the discontinuity
+
+ if (audio) {
+ mFlushingAudio = FLUSHED;
+ } else {
+ mFlushingVideo = FLUSHED;
+ }
+
+ finishFlushIfPossible();
+
+ return -EWOULDBLOCK;
+ }
}
reply->setInt32("err", err);
@@ -794,6 +837,11 @@
}
void NuPlayer::flushDecoder(bool audio, bool needShutdown) {
+ if ((audio && mAudioDecoder == NULL) || (!audio && mVideoDecoder == NULL)) {
+ LOGI("flushDecoder %s without decoder present",
+ audio ? "audio" : "video");
+ }
+
// Make sure we don't continue to scan sources until we finish flushing.
++mScanSourcesGeneration;
mScanSourcesPending = false;
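The per-stream rule applied above can be summarized with a hypothetical helper (sketch only, mirroring the logic in this file; streamNeedsShutdown/streamNeedsFlush do not exist in the source):

    static bool streamNeedsShutdown(bool audio, int32_t mask) {
        return audio ? (mask & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0
                     : (mask & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
    }

    static bool streamNeedsFlush(bool audio, int32_t mask) {
        return streamNeedsShutdown(audio, mask)
                || (mask & ATSParser::DISCONTINUITY_TIME) != 0;
    }

For a mask of DISCONTINUITY_AUDIO_FORMAT | DISCONTINUITY_TIME, the audio decoder is flushed and shut down while the video decoder is only flushed; for DISCONTINUITY_VIDEO_FORMAT alone, the audio side is marked FLUSHED immediately and returns -EWOULDBLOCK, leaving its decoder untouched.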
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index f23deea..ffc710e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -112,6 +112,10 @@
SHUT_DOWN,
};
+ // Once the current flush is complete, this indicates whether the
+ // notion of time has changed.
+ bool mTimeDiscontinuityPending;
+
FlushStatus mFlushingAudio;
FlushStatus mFlushingVideo;
bool mResetInProgress;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 640e9fa..0cb7f45 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -628,11 +628,16 @@
mAudioSink->pause();
}
+ LOGV("now paused audio queue has %d entries, video has %d entries",
+ mAudioQueue.size(), mVideoQueue.size());
+
mPaused = true;
}
void NuPlayer::Renderer::onResume() {
- CHECK(mPaused);
+ if (!mPaused) {
+ return;
+ }
if (mHasAudio) {
mAudioSink->start();
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index f795654..2e63b3b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -63,17 +63,22 @@
mFinalResult = ERROR_END_OF_STREAM;
break;
} else if (n == INFO_DISCONTINUITY) {
- ATSParser::DiscontinuityType type = ATSParser::DISCONTINUITY_SEEK;
+ int32_t type = ATSParser::DISCONTINUITY_SEEK;
- int32_t formatChange;
+ int32_t mask;
if (extra != NULL
&& extra->findInt32(
- IStreamListener::kKeyFormatChange, &formatChange)
- && formatChange != 0) {
- type = ATSParser::DISCONTINUITY_FORMATCHANGE;
+ IStreamListener::kKeyDiscontinuityMask, &mask)) {
+ if (mask == 0) {
+ LOGE("Client specified an illegal discontinuity type.");
+ return ERROR_UNSUPPORTED;
+ }
+
+ type = mask;
}
- mTSParser->signalDiscontinuity(type, extra);
+ mTSParser->signalDiscontinuity(
+ (ATSParser::DiscontinuityType)type, extra);
} else if (n < 0) {
CHECK_EQ(n, -EWOULDBLOCK);
break;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d947760..dbc9b7e 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -342,6 +342,7 @@
}
void ACodec::signalFlush() {
+ LOGV("[%s] signalFlush", mComponentName.c_str());
(new AMessage(kWhatFlush, id()))->post();
}
@@ -1092,6 +1093,20 @@
return OK;
}
+size_t ACodec::countBuffersOwnedByComponent(OMX_U32 portIndex) const {
+ size_t n = 0;
+
+ for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+ const BufferInfo &info = mBuffers[portIndex].itemAt(i);
+
+ if (info.mStatus == BufferInfo::OWNED_BY_COMPONENT) {
+ ++n;
+ }
+ }
+
+ return n;
+}
+
bool ACodec::allYourBuffersAreBelongToUs(
OMX_U32 portIndex) {
for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
@@ -2041,6 +2056,14 @@
case kWhatFlush:
{
+ LOGV("[%s] ExecutingState flushing now "
+ "(codec owns %d/%d input, %d/%d output).",
+ mCodec->mComponentName.c_str(),
+ mCodec->countBuffersOwnedByComponent(kPortIndexInput),
+ mCodec->mBuffers[kPortIndexInput].size(),
+ mCodec->countBuffersOwnedByComponent(kPortIndexOutput),
+ mCodec->mBuffers[kPortIndexOutput].size());
+
mActive = false;
CHECK_EQ(mCodec->mOMX->sendCommand(
@@ -2180,6 +2203,12 @@
err);
mCodec->signalError();
+
+ // This is technically not correct, since we were unable
+ // to allocate output buffers and therefore the output port
+ // remains disabled. It is necessary, however, to allow us
+ // to shut down the codec properly.
+ mCodec->changeState(mCodec->mExecutingState);
}
return true;
@@ -2408,6 +2437,9 @@
bool ACodec::FlushingState::onOMXEvent(
OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
+ LOGV("[%s] FlushingState onOMXEvent(%d,%ld)",
+ mCodec->mComponentName.c_str(), event, data1);
+
switch (event) {
case OMX_EventCmdComplete:
{
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 256f3ba..57989c5 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -33,6 +33,8 @@
namespace android {
+static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;
+
struct CameraSourceListener : public CameraListener {
CameraSourceListener(const sp<CameraSource> &source);
@@ -156,6 +158,7 @@
mLastFrameTimestampUs(0),
mStarted(false),
mNumFramesEncoded(0),
+ mTimeBetweenFrameCaptureUs(0),
mFirstFrameTimeUs(0),
mNumFramesDropped(0),
mNumGlitches(0),
@@ -644,7 +647,8 @@
releaseQueuedFrames();
while (!mFramesBeingEncoded.empty()) {
if (NO_ERROR !=
- mFrameCompleteCondition.waitRelative(mLock, 3000000000LL)) {
+ mFrameCompleteCondition.waitRelative(mLock,
+ mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
LOGW("Timed out waiting for outstanding frames being encoded: %d",
mFramesBeingEncoded.size());
}
@@ -736,7 +740,8 @@
Mutex::Autolock autoLock(mLock);
while (mStarted && mFramesReceived.empty()) {
if (NO_ERROR !=
- mFrameAvailableCondition.waitRelative(mLock, 1000000000LL)) {
+ mFrameAvailableCondition.waitRelative(mLock,
+ mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
if (mCameraRecordingProxy != 0 &&
!mCameraRecordingProxy->asBinder()->isBinderAlive()) {
LOGW("camera recording proxy is gone");
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index e4de20a..eb456f4 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -39,12 +39,12 @@
Size videoSize,
int32_t videoFrameRate,
const sp<Surface>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs) {
+ int64_t timeBetweenFrameCaptureUs) {
CameraSourceTimeLapse *source = new
CameraSourceTimeLapse(camera, proxy, cameraId,
videoSize, videoFrameRate, surface,
- timeBetweenTimeLapseFrameCaptureUs);
+ timeBetweenFrameCaptureUs);
if (source != NULL) {
if (source->initCheck() != OK) {
@@ -62,15 +62,15 @@
Size videoSize,
int32_t videoFrameRate,
const sp<Surface>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs)
+ int64_t timeBetweenFrameCaptureUs)
: CameraSource(camera, proxy, cameraId, videoSize, videoFrameRate, surface, true),
- mTimeBetweenTimeLapseFrameCaptureUs(timeBetweenTimeLapseFrameCaptureUs),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
mSkipCurrentFrame(false) {
+ mTimeBetweenFrameCaptureUs = timeBetweenFrameCaptureUs;
LOGD("starting time lapse mode: %lld us",
- mTimeBetweenTimeLapseFrameCaptureUs);
+ mTimeBetweenFrameCaptureUs);
mVideoWidth = videoSize.width;
mVideoHeight = videoSize.height;
@@ -271,14 +271,14 @@
// The first 2 output frames from the encoder are: decoder specific info and
// the compressed video frame data for the first input video frame.
if (mNumFramesEncoded >= 1 && *timestampUs <
- (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenTimeLapseFrameCaptureUs)) {
+ (mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs)) {
// Skip all frames from last encoded frame until
- // sufficient time (mTimeBetweenTimeLapseFrameCaptureUs) has passed.
+ // sufficient time (mTimeBetweenFrameCaptureUs) has passed.
// Tell the camera to release its recording frame and return.
LOGV("dataCallbackTimestamp timelapse: skipping intermediate frame");
return true;
} else {
- // Desired frame has arrived after mTimeBetweenTimeLapseFrameCaptureUs time:
+ // Desired frame has arrived after mTimeBetweenFrameCaptureUs time:
// - Reset mLastTimeLapseFrameRealTimestampUs to current time.
// - Artificially modify timestampUs to be one frame time (1/framerate) ahead
// of the last encoded frame's time stamp.
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index dfd3f4a..86b3fe4 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -520,6 +520,85 @@
return NULL;
}
+status_t OMXCodec::parseAVCCodecSpecificData(
+ const void *data, size_t size,
+ unsigned *profile, unsigned *level) {
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ // verify minimum size and configurationVersion == 1.
+ if (size < 7 || ptr[0] != 1) {
+ return ERROR_MALFORMED;
+ }
+
+ *profile = ptr[1];
+ *level = ptr[3];
+
+ // There is decodable content out there that fails the following
+ // assertion, let's be lenient for now...
+ // CHECK((ptr[4] >> 2) == 0x3f); // reserved
+
+ size_t lengthSize = 1 + (ptr[4] & 3);
+
+ // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
+ // violates it...
+ // CHECK((ptr[5] >> 5) == 7); // reserved
+
+ size_t numSeqParameterSets = ptr[5] & 31;
+
+ ptr += 6;
+ size -= 6;
+
+ for (size_t i = 0; i < numSeqParameterSets; ++i) {
+ if (size < 2) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t length = U16_AT(ptr);
+
+ ptr += 2;
+ size -= 2;
+
+ if (size < length) {
+ return ERROR_MALFORMED;
+ }
+
+ addCodecSpecificData(ptr, length);
+
+ ptr += length;
+ size -= length;
+ }
+
+ if (size < 1) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t numPictureParameterSets = *ptr;
+ ++ptr;
+ --size;
+
+ for (size_t i = 0; i < numPictureParameterSets; ++i) {
+ if (size < 2) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t length = U16_AT(ptr);
+
+ ptr += 2;
+ size -= 2;
+
+ if (size < length) {
+ return ERROR_MALFORMED;
+ }
+
+ addCodecSpecificData(ptr, length);
+
+ ptr += length;
+ size -= length;
+ }
+
+ return OK;
+}
+
status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
LOGV("configureCodec protected=%d",
(mFlags & kEnableGrallocUsageProtected) ? 1 : 0);
@@ -542,66 +621,17 @@
} else if (meta->findData(kKeyAVCC, &type, &data, &size)) {
// Parse the AVCDecoderConfigurationRecord
- const uint8_t *ptr = (const uint8_t *)data;
-
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
- uint8_t profile = ptr[1];
- uint8_t level = ptr[3];
-
- // There is decodable content out there that fails the following
- // assertion, let's be lenient for now...
- // CHECK((ptr[4] >> 2) == 0x3f); // reserved
-
- size_t lengthSize = 1 + (ptr[4] & 3);
-
- // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
- // violates it...
- // CHECK((ptr[5] >> 5) == 7); // reserved
-
- size_t numSeqParameterSets = ptr[5] & 31;
-
- ptr += 6;
- size -= 6;
-
- for (size_t i = 0; i < numSeqParameterSets; ++i) {
- CHECK(size >= 2);
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- CHECK(size >= length);
-
- addCodecSpecificData(ptr, length);
-
- ptr += length;
- size -= length;
- }
-
- CHECK(size >= 1);
- size_t numPictureParameterSets = *ptr;
- ++ptr;
- --size;
-
- for (size_t i = 0; i < numPictureParameterSets; ++i) {
- CHECK(size >= 2);
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- CHECK(size >= length);
-
- addCodecSpecificData(ptr, length);
-
- ptr += length;
- size -= length;
+ unsigned profile, level;
+ status_t err;
+ if ((err = parseAVCCodecSpecificData(
+ data, size, &profile, &level)) != OK) {
+ LOGE("Malformed AVC codec specific data.");
+ return err;
}
CODEC_LOGI(
- "AVC profile = %d (%s), level = %d",
- (int)profile, AVCProfileToString(profile), level);
+ "AVC profile = %u (%s), level = %u",
+ profile, AVCProfileToString(profile), level);
if (!strcmp(mComponentName, "OMX.TI.Video.Decoder")
&& (profile != kAVCProfileBaseline || level > 30)) {
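To make the layout parseAVCCodecSpecificData() walks concrete, here is a hypothetical AVCDecoderConfigurationRecord (illustration only; the SPS/PPS payload bytes are placeholders, not a valid bitstream):

    static const uint8_t kAVCC[] = {
        0x01,                    // configurationVersion == 1
        0x42,                    // AVCProfileIndication  -> *profile (66, Baseline)
        0x00,                    // profile_compatibility
        0x1e,                    // AVCLevelIndication    -> *level (30, i.e. 3.0)
        0xff,                    // 6 reserved bits + lengthSizeMinusOne (4-byte NAL lengths)
        0xe1,                    // 3 reserved bits + numSeqParameterSets == 1
        0x00, 0x04,              // SPS length == 4
        0x67, 0x42, 0x00, 0x1e,  // SPS NAL unit (placeholder bytes)
        0x01,                    // numPictureParameterSets == 1
        0x00, 0x02,              // PPS length == 2
        0x68, 0xce,              // PPS NAL unit (placeholder bytes)
    };

    // parseAVCCodecSpecificData(kAVCC, sizeof(kAVCC), &profile, &level) returns OK,
    // sets profile = 0x42 and level = 0x1e, and calls addCodecSpecificData() once
    // for the SPS and once for the PPS. Truncating the blob below 7 bytes, or
    // shortening an SPS/PPS below its declared length, now yields ERROR_MALFORMED
    // instead of hitting a fatal CHECK.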
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 72f1282..6cec63a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -123,6 +123,9 @@
void extractAACFrames(const sp<ABuffer> &buffer);
+ bool isAudio() const;
+ bool isVideo() const;
+
DISALLOW_EVIL_CONSTRUCTORS(Stream);
};
@@ -401,7 +404,7 @@
case STREAMTYPE_H264:
mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::H264);
break;
- case STREAMTYPE_MPEG2_AUDIO_ATDS:
+ case STREAMTYPE_MPEG2_AUDIO_ADTS:
mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::AAC);
break;
case STREAMTYPE_MPEG1_AUDIO:
@@ -486,6 +489,31 @@
return OK;
}
+bool ATSParser::Stream::isVideo() const {
+ switch (mStreamType) {
+ case STREAMTYPE_H264:
+ case STREAMTYPE_MPEG1_VIDEO:
+ case STREAMTYPE_MPEG2_VIDEO:
+ case STREAMTYPE_MPEG4_VIDEO:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool ATSParser::Stream::isAudio() const {
+ switch (mStreamType) {
+ case STREAMTYPE_MPEG1_AUDIO:
+ case STREAMTYPE_MPEG2_AUDIO:
+ case STREAMTYPE_MPEG2_AUDIO_ADTS:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
void ATSParser::Stream::signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra) {
if (mQueue == NULL) {
@@ -495,34 +523,34 @@
mPayloadStarted = false;
mBuffer->setRange(0, 0);
- switch (type) {
- case DISCONTINUITY_SEEK:
- case DISCONTINUITY_FORMATCHANGE:
- {
- bool isASeek = (type == DISCONTINUITY_SEEK);
-
- mQueue->clear(!isASeek);
-
- uint64_t resumeAtPTS;
- if (extra != NULL
- && extra->findInt64(
- IStreamListener::kKeyResumeAtPTS,
- (int64_t *)&resumeAtPTS)) {
- int64_t resumeAtMediaTimeUs =
- mProgram->convertPTSToTimestamp(resumeAtPTS);
-
- extra->setInt64("resume-at-mediatimeUs", resumeAtMediaTimeUs);
- }
-
- if (mSource != NULL) {
- mSource->queueDiscontinuity(type, extra);
- }
- break;
+ bool clearFormat = false;
+ if (isAudio()) {
+ if (type & DISCONTINUITY_AUDIO_FORMAT) {
+ clearFormat = true;
}
+ } else {
+ if (type & DISCONTINUITY_VIDEO_FORMAT) {
+ clearFormat = true;
+ }
+ }
- default:
- TRESPASS();
- break;
+ mQueue->clear(clearFormat);
+
+ if (type & DISCONTINUITY_TIME) {
+ uint64_t resumeAtPTS;
+ if (extra != NULL
+ && extra->findInt64(
+ IStreamListener::kKeyResumeAtPTS,
+ (int64_t *)&resumeAtPTS)) {
+ int64_t resumeAtMediaTimeUs =
+ mProgram->convertPTSToTimestamp(resumeAtPTS);
+
+ extra->setInt64("resume-at-mediatimeUs", resumeAtMediaTimeUs);
+ }
+ }
+
+ if (mSource != NULL) {
+ mSource->queueDiscontinuity(type, extra);
}
}
@@ -764,10 +792,7 @@
switch (type) {
case VIDEO:
{
- if (mStreamType == STREAMTYPE_H264
- || mStreamType == STREAMTYPE_MPEG1_VIDEO
- || mStreamType == STREAMTYPE_MPEG2_VIDEO
- || mStreamType == STREAMTYPE_MPEG4_VIDEO) {
+ if (isVideo()) {
return mSource;
}
break;
@@ -775,9 +800,7 @@
case AUDIO:
{
- if (mStreamType == STREAMTYPE_MPEG1_AUDIO
- || mStreamType == STREAMTYPE_MPEG2_AUDIO
- || mStreamType == STREAMTYPE_MPEG2_AUDIO_ATDS) {
+ if (isAudio()) {
return mSource;
}
break;
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 878e534..c8038d1 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -33,9 +33,18 @@
struct ATSParser : public RefBase {
enum DiscontinuityType {
- DISCONTINUITY_NONE,
- DISCONTINUITY_SEEK,
- DISCONTINUITY_FORMATCHANGE
+ DISCONTINUITY_NONE = 0,
+ DISCONTINUITY_TIME = 1,
+ DISCONTINUITY_AUDIO_FORMAT = 2,
+ DISCONTINUITY_VIDEO_FORMAT = 4,
+
+ DISCONTINUITY_SEEK = DISCONTINUITY_TIME,
+
+ // For legacy reasons, this also implies a time discontinuity.
+ DISCONTINUITY_FORMATCHANGE =
+ DISCONTINUITY_AUDIO_FORMAT
+ | DISCONTINUITY_VIDEO_FORMAT
+ | DISCONTINUITY_TIME,
};
enum Flags {
@@ -71,7 +80,7 @@
STREAMTYPE_MPEG2_VIDEO = 0x02,
STREAMTYPE_MPEG1_AUDIO = 0x03,
STREAMTYPE_MPEG2_AUDIO = 0x04,
- STREAMTYPE_MPEG2_AUDIO_ATDS = 0x0f,
+ STREAMTYPE_MPEG2_AUDIO_ADTS = 0x0f,
STREAMTYPE_MPEG4_VIDEO = 0x10,
STREAMTYPE_H264 = 0x1b,
};
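Since the values are now bit flags, the legacy names keep their old meaning (illustration only; the mask below is an example):

    // DISCONTINUITY_SEEK         == DISCONTINUITY_TIME                    (0x1)
    // DISCONTINUITY_FORMATCHANGE == TIME | AUDIO_FORMAT | VIDEO_FORMAT    (0x7)
    int32_t mask = ATSParser::DISCONTINUITY_VIDEO_FORMAT
            | ATSParser::DISCONTINUITY_TIME;                               // 0x5
    bool videoFormatChanged =
            (mask & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;           // true
    bool audioFormatChanged =
            (mask & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;           // false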
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index ce07e32..f782ce5 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -29,8 +29,17 @@
namespace android {
AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
- : mFormat(meta),
+ : mIsAudio(false),
+ mFormat(meta),
mEOSResult(OK) {
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if (!strncasecmp("audio/", mime, 6)) {
+ mIsAudio = true;
+ } else {
+ CHECK(!strncasecmp("video/", mime, 6));
+ }
}
void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
@@ -67,8 +76,7 @@
int32_t discontinuity;
if ((*buffer)->meta()->findInt32("discontinuity", &discontinuity)) {
-
- if (discontinuity == ATSParser::DISCONTINUITY_FORMATCHANGE) {
+ if (wasFormatChange(discontinuity)) {
mFormat.clear();
}
@@ -96,7 +104,7 @@
int32_t discontinuity;
if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
- if (discontinuity == ATSParser::DISCONTINUITY_FORMATCHANGE) {
+ if (wasFormatChange(discontinuity)) {
mFormat.clear();
}
@@ -117,6 +125,15 @@
return mEOSResult;
}
+bool AnotherPacketSource::wasFormatChange(
+ int32_t discontinuityType) const {
+ if (mIsAudio) {
+ return (discontinuityType & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;
+ }
+
+ return (discontinuityType & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
+}
+
void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {
int32_t damaged;
if (buffer->meta()->findInt32("damaged", &damaged) && damaged) {
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 439c785..c99f7f2 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -61,10 +61,13 @@
Mutex mLock;
Condition mCondition;
+ bool mIsAudio;
sp<MetaData> mFormat;
List<sp<ABuffer> > mBuffers;
status_t mEOSResult;
+ bool wasFormatChange(int32_t discontinuityType) const;
+
DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource);
};
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index f55be6e..a089dbf 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -543,7 +543,7 @@
case ATSParser::STREAMTYPE_H264:
mode = ElementaryStreamQueue::H264;
break;
- case ATSParser::STREAMTYPE_MPEG2_AUDIO_ATDS:
+ case ATSParser::STREAMTYPE_MPEG2_AUDIO_ADTS:
mode = ElementaryStreamQueue::AAC;
break;
case ATSParser::STREAMTYPE_MPEG1_AUDIO:
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 780c0d2..aea31a8 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -89,6 +89,12 @@
static const nsecs_t kSetParametersTimeout = seconds(2);
+// minimum sleep time for the mixer thread loop when tracks are active but in underrun
+static const uint32_t kMinThreadSleepTimeUs = 5000;
+// maximum divider applied to the active sleep time in the mixer thread loop
+static const uint32_t kMaxThreadSleepTimeShift = 2;
+
+
// ----------------------------------------------------------------------------
static bool recordingAllowed() {
@@ -1846,6 +1852,7 @@
uint32_t activeSleepTime = activeSleepTimeUs();
uint32_t idleSleepTime = idleSleepTimeUs();
uint32_t sleepTime = idleSleepTime;
+ uint32_t sleepTimeShift = 0;
Vector< sp<EffectChain> > effectChains;
#ifdef DEBUG_CPU_USAGE
ThreadCpuUsage cpu;
@@ -1937,6 +1944,7 @@
standbyTime = systemTime() + kStandbyTimeInNsecs;
sleepTime = idleSleepTime;
+ sleepTimeShift = 0;
continue;
}
}
@@ -1953,6 +1961,10 @@
// mix buffers...
mAudioMixer->process();
sleepTime = 0;
+ // increase sleep time progressively when application underrun condition clears
+ if (sleepTimeShift > 0) {
+ sleepTimeShift--;
+ }
standbyTime = systemTime() + kStandbyTimeInNsecs;
//TODO: delay standby when effects have a tail
} else {
@@ -1960,7 +1972,17 @@
// buffer size, then write 0s to the output
if (sleepTime == 0) {
if (mixerStatus == MIXER_TRACKS_ENABLED) {
- sleepTime = activeSleepTime;
+ sleepTime = activeSleepTime >> sleepTimeShift;
+ if (sleepTime < kMinThreadSleepTimeUs) {
+ sleepTime = kMinThreadSleepTimeUs;
+ }
+ // reduce sleep time in case of consecutive application underruns to avoid
+ // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+ // duration, we would end up writing less data than needed by the audio HAL if
+ // the condition persists.
+ if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+ sleepTimeShift++;
+ }
} else {
sleepTime = idleSleepTime;
}
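Net effect of the two new constants (illustration; assumes activeSleepTimeUs() returns 20 ms): on consecutive underruns the mixer halves its sleep each pass down to the 5 ms floor, then restores it one step per pass once mixing resumes.

    uint32_t activeSleepTime = 20000;  // example: activeSleepTimeUs() == 20 ms
    uint32_t sleepTimeShift = 0;
    // underrun pass 1: sleepTime = 20000 >> 0 = 20000 us, shift -> 1
    // underrun pass 2: sleepTime = 20000 >> 1 = 10000 us, shift -> 2 (kMaxThreadSleepTimeShift)
    // underrun pass 3: sleepTime = 20000 >> 2 =  5000 us (kMinThreadSleepTimeUs floor)
    // first pass that mixes data again: sleepTime = 0, shift -> 1, and so on back to 0.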