Merge "Fix Widevine classic playback errors with NuPlayer" into lmp-dev
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index b87a09e..bca78b9 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -268,7 +268,7 @@
static void PostReplyWithError(int32_t replyID, int32_t err);
- status_t init(const char *name, bool nameIsType, bool encoder);
+ status_t init(const AString &name, bool nameIsType, bool encoder);
void setState(State newState);
void returnBuffersToCodec();
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libmediaplayerservice/VideoFrameScheduler.cpp
index 4251c4e..1a5f3e0 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.cpp
+++ b/media/libmediaplayerservice/VideoFrameScheduler.cpp
@@ -152,7 +152,7 @@
#endif
-void VideoFrameScheduler::PLL::fit(
+bool VideoFrameScheduler::PLL::fit(
nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
int64_t *a, int64_t *b, int64_t *err) {
if (numSamplesToUse > mNumSamples) {
@@ -187,6 +187,10 @@
}
int64_t div = numSamplesToUse * sumXX - sumX * sumX;
+ if (div == 0) {
+ return false;
+ }
+
int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY;
int64_t b_nom = sumXX * sumY - sumX * sumXY;
*a = divRound(a_nom, div);
@@ -198,6 +202,7 @@
(long long)*a, (*a / (float)(1 << kPrecision)),
(long long)*b, (*b / (float)(1 << kPrecision)),
(long long)*err, (*err / (float)(1 << (kPrecision * 2))));
+ return true;
}
void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) {
@@ -226,7 +231,7 @@
deltas.sort(compare<nsecs_t>);
size_t numDeltas = deltas.size();
if (numDeltas > 1) {
- nsecs_t deltaMinLimit = min(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
+ nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
if (deltas[i] > deltaMaxLimit) {
@@ -314,8 +319,15 @@
if (doFit) {
int64_t a, b, err;
+ if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) {
+ // samples are not suitable for fitting. this means they are
+ // also not suitable for priming.
+ ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod);
+ return mPeriod;
+ }
+
mRefitAt = time + kRefitRefreshPeriod;
- fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err);
+
mPhase += (mPeriod * b) >> kPrecision;
mPeriod = (mPeriod * a) >> kPrecision;
ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/media/libmediaplayerservice/VideoFrameScheduler.h
index 19f0787..84b27b4 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.h
+++ b/media/libmediaplayerservice/VideoFrameScheduler.h
@@ -71,7 +71,8 @@
nsecs_t mTimes[kHistorySize];
void test();
- void fit(nsecs_t phase, nsecs_t period, size_t numSamples,
+ // returns whether fit was successful
+ bool fit(nsecs_t phase, nsecs_t period, size_t numSamples,
int64_t *a, int64_t *b, int64_t *err);
void prime(size_t numSamples);
};
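
The fit() change above guards a plain least-squares line fit: the denominator numSamplesToUse * sumXX - sumX * sumX is zero exactly when all sample x values coincide, so the new early return keeps the previous period instead of dividing by zero. A minimal standalone sketch of the same guard, using double precision rather than the fixed-point kPrecision arithmetic of the real PLL (illustrative code, not the NuPlayer implementation):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Minimal sketch of a guarded least-squares line fit (y ~ a*x + b).
    // Returns false when the denominator is zero, i.e. all x values are equal,
    // mirroring the new early return in VideoFrameScheduler::PLL::fit().
    static bool fitLine(const std::vector<int64_t> &x, const std::vector<int64_t> &y,
                        double *a, double *b) {
        const size_t n = x.size();
        if (n < 2 || y.size() != n) {
            return false;
        }
        double sumX = 0, sumY = 0, sumXX = 0, sumXY = 0;
        for (size_t i = 0; i < n; ++i) {
            sumX  += x[i];
            sumY  += y[i];
            sumXX += (double)x[i] * x[i];
            sumXY += (double)x[i] * y[i];
        }
        const double div = n * sumXX - sumX * sumX;
        if (div == 0) {
            return false;  // degenerate sample set: caller keeps its previous estimate
        }
        *a = (n * sumXY - sumX * sumY) / div;
        *b = (sumXX * sumY - sumX * sumXY) / div;
        return true;
    }

    int main() {
        std::vector<int64_t> x = {0, 16, 33, 50}, y = {0, 17, 33, 49};
        double a, b;
        if (fitLine(x, y, &a, &b)) {
            printf("slope=%f intercept=%f\n", a, b);
        }
        return 0;
    }
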
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index dad480d..ceedb40 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -64,16 +64,18 @@
};
struct NuPlayer::SeekAction : public Action {
- SeekAction(int64_t seekTimeUs)
- : mSeekTimeUs(seekTimeUs) {
+ SeekAction(int64_t seekTimeUs, bool needNotify)
+ : mSeekTimeUs(seekTimeUs),
+ mNeedNotify(needNotify) {
}
virtual void execute(NuPlayer *player) {
- player->performSeek(mSeekTimeUs);
+ player->performSeek(mSeekTimeUs, mNeedNotify);
}
private:
int64_t mSeekTimeUs;
+ bool mNeedNotify;
DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
};
@@ -155,6 +157,7 @@
mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
mAudioDecoderGeneration(0),
mVideoDecoderGeneration(0),
+ mRendererGeneration(0),
mAudioEOS(false),
mVideoEOS(false),
mScanSourcesPending(false),
@@ -323,9 +326,10 @@
(new AMessage(kWhatReset, id()))->post();
}
-void NuPlayer::seekToAsync(int64_t seekTimeUs) {
+void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) {
sp<AMessage> msg = new AMessage(kWhatSeek, id());
msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("needNotify", needNotify);
msg->post();
}
@@ -560,7 +564,8 @@
// the extractor may not yet be started and will assert.
// If the video decoder is not set (perhaps audio only in this case)
// do not perform a seek as it is not needed.
- mDeferredActions.push_back(new SeekAction(mCurrentPositionUs));
+ mDeferredActions.push_back(
+ new SeekAction(mCurrentPositionUs, false /* needNotify */));
}
// If there is a new surface texture, instantiate decoders
@@ -633,10 +638,10 @@
flags |= Renderer::FLAG_OFFLOAD_AUDIO;
}
- mRenderer = new Renderer(
- mAudioSink,
- new AMessage(kWhatRendererNotify, id()),
- flags);
+ sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
+ ++mRendererGeneration;
+ notify->setInt32("generation", mRendererGeneration);
+ mRenderer = new Renderer(mAudioSink, notify, flags);
mRendererLooper = new ALooper;
mRendererLooper->setName("NuPlayerRenderer");
@@ -813,11 +818,13 @@
ALOGV("%s shutdown completed", audio ? "audio" : "video");
if (audio) {
mAudioDecoder.clear();
+ ++mAudioDecoderGeneration;
CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
mFlushingAudio = SHUT_DOWN;
} else {
mVideoDecoder.clear();
+ ++mVideoDecoderGeneration;
CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
mFlushingVideo = SHUT_DOWN;
@@ -835,9 +842,11 @@
mRenderer->queueEOS(audio, err);
if (audio && mFlushingAudio != NONE) {
mAudioDecoder.clear();
+ ++mAudioDecoderGeneration;
mFlushingAudio = SHUT_DOWN;
} else if (!audio && mFlushingVideo != NONE){
mVideoDecoder.clear();
+ ++mVideoDecoderGeneration;
mFlushingVideo = SHUT_DOWN;
}
finishFlushIfPossible();
@@ -857,6 +866,14 @@
case kWhatRendererNotify:
{
+ int32_t requesterGeneration = mRendererGeneration - 1;
+ CHECK(msg->findInt32("generation", &requesterGeneration));
+ if (requesterGeneration != mRendererGeneration) {
+ ALOGV("got message from old renderer, generation(%d:%d)",
+ requesterGeneration, mRendererGeneration);
+ return;
+ }
+
int32_t what;
CHECK(msg->findInt32("what", &what));
@@ -919,6 +936,7 @@
CHECK(msg->findInt64("positionUs", &positionUs));
closeAudioSink();
mAudioDecoder.clear();
+ ++mAudioDecoderGeneration;
mRenderer->flush(true /* audio */);
if (mVideoDecoder != NULL) {
mRenderer->flush(false /* audio */);
@@ -926,7 +944,7 @@
mRenderer->signalDisableOffloadAudio();
mOffloadAudio = false;
- performSeek(positionUs);
+ performSeek(positionUs, false /* needNotify */);
instantiateDecoder(true /* audio */, &mAudioDecoder);
}
break;
@@ -955,14 +973,18 @@
case kWhatSeek:
{
int64_t seekTimeUs;
+ int32_t needNotify;
CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("needNotify", &needNotify));
- ALOGV("kWhatSeek seekTimeUs=%lld us", seekTimeUs);
+ ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
+ seekTimeUs, needNotify);
mDeferredActions.push_back(
new SimpleAction(&NuPlayer::performDecoderFlush));
- mDeferredActions.push_back(new SeekAction(seekTimeUs));
+ mDeferredActions.push_back(
+ new SeekAction(seekTimeUs, needNotify));
processDeferredActions();
break;
@@ -970,17 +992,31 @@
case kWhatPause:
{
- CHECK(mRenderer != NULL);
- mSource->pause();
- mRenderer->pause();
+ if (mSource != NULL) {
+ mSource->pause();
+ } else {
+ ALOGW("pause called when source is gone or not set");
+ }
+ if (mRenderer != NULL) {
+ mRenderer->pause();
+ } else {
+ ALOGW("pause called when renderer is gone or not set");
+ }
break;
}
case kWhatResume:
{
- CHECK(mRenderer != NULL);
- mSource->resume();
- mRenderer->resume();
+ if (mSource != NULL) {
+ mSource->resume();
+ } else {
+ ALOGW("resume called when source is gone or not set");
+ }
+ if (mRenderer != NULL) {
+ mRenderer->resume();
+ } else {
+ ALOGW("resume called when renderer is gone or not set");
+ }
break;
}
@@ -1774,10 +1810,11 @@
}
}
-void NuPlayer::performSeek(int64_t seekTimeUs) {
- ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)",
+void NuPlayer::performSeek(int64_t seekTimeUs, bool needNotify) {
+ ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), needNotify(%d)",
seekTimeUs,
- seekTimeUs / 1E6);
+ seekTimeUs / 1E6,
+ needNotify);
if (mSource == NULL) {
// This happens when reset occurs right before the loop mode
@@ -1794,7 +1831,9 @@
sp<NuPlayerDriver> driver = mDriver.promote();
if (driver != NULL) {
driver->notifyPosition(seekTimeUs);
- driver->notifySeekComplete();
+ if (needNotify) {
+ driver->notifySeekComplete();
+ }
}
}
@@ -1849,9 +1888,6 @@
++mScanSourcesGeneration;
mScanSourcesPending = false;
- ++mAudioDecoderGeneration;
- ++mVideoDecoderGeneration;
-
if (mRendererLooper != NULL) {
if (mRenderer != NULL) {
mRendererLooper->unregisterHandler(mRenderer->id());
@@ -1860,6 +1896,7 @@
mRendererLooper.clear();
}
mRenderer.clear();
+ ++mRendererGeneration;
if (mSource != NULL) {
mSource->stop();
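
The mRendererGeneration changes above follow the generation-counter pattern already used for the decoder and scan-sources messages: every notify message created for a renderer carries the generation that was current when that renderer was built, and kWhatRendererNotify drops anything tagged with an older generation, so callbacks from a renderer torn down during reset cannot act on the new state. A standalone sketch of the pattern (class and field names here are illustrative, not the NuPlayer API):

    #include <cstdio>

    // Illustrative sketch of generation-tagged notifications: messages created by an
    // old renderer carry a stale generation and are ignored after the renderer is
    // recreated (compare the kWhatRendererNotify check added above).
    struct Notification {
        int generation;
        const char *what;
    };

    class Player {
    public:
        Notification makeNotification(const char *what) const {
            return Notification{ mRendererGeneration, what };
        }
        void recreateRenderer() {
            ++mRendererGeneration;  // invalidates all in-flight notifications
        }
        void onNotification(const Notification &n) {
            if (n.generation != mRendererGeneration) {
                printf("dropping stale message '%s' (%d:%d)\n",
                       n.what, n.generation, mRendererGeneration);
                return;
            }
            printf("handling '%s'\n", n.what);
        }
    private:
        int mRendererGeneration = 0;
    };

    int main() {
        Player player;
        Notification n = player.makeNotification("position update");
        player.recreateRenderer();   // e.g. during a reset
        player.onNotification(n);    // dropped: generation mismatch
        return 0;
    }
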
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 7197e5f..8fa7269 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -59,8 +59,9 @@
// Will notify the driver through "notifyResetComplete" once finished.
void resetAsync();
- // Will notify the driver through "notifySeekComplete" once finished.
- void seekToAsync(int64_t seekTimeUs);
+ // Will notify the driver through "notifySeekComplete" once finished
+ // and needNotify is true.
+ void seekToAsync(int64_t seekTimeUs, bool needNotify = false);
status_t setVideoScalingMode(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
@@ -137,6 +138,7 @@
sp<ALooper> mRendererLooper;
int32_t mAudioDecoderGeneration;
int32_t mVideoDecoderGeneration;
+ int32_t mRendererGeneration;
List<sp<Action> > mDeferredActions;
@@ -214,7 +216,7 @@
void processDeferredActions();
- void performSeek(int64_t seekTimeUs);
+ void performSeek(int64_t seekTimeUs, bool needNotify);
void performDecoderFlush();
void performDecoderShutdown(bool audio, bool video);
void performReset();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index cdb860c..1b1b1c8 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -53,6 +53,10 @@
}
NuPlayer::Decoder::~Decoder() {
+ mDecoderLooper->unregisterHandler(id());
+ mDecoderLooper->stop();
+
+ releaseAndResetMediaBuffers();
}
static
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 7ec9876..a9bca49 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -240,9 +240,7 @@
mPlayer->start();
if (mStartupSeekTimeUs >= 0) {
- if (mStartupSeekTimeUs == 0) {
- notifySeekComplete_l();
- } else {
+ if (mStartupSeekTimeUs > 0) {
mPlayer->seekToAsync(mStartupSeekTimeUs);
}
@@ -369,7 +367,7 @@
mAtEOS = false;
// seeks can take a while, so we essentially paused
notifyListener_l(MEDIA_PAUSED);
- mPlayer->seekToAsync(seekTimeUs);
+ mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
break;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a8c8818..73ac057 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -32,6 +32,10 @@
namespace android {
+// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
+// is closed to allow the audio DSP to power down.
+static const int64_t kOffloadPauseMaxUs = 60000000ll;
+
// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
@@ -61,7 +65,9 @@
mVideoRenderingStartGeneration(0),
mAudioRenderingStartGeneration(0),
mLastPositionUpdateUs(-1ll),
- mVideoLateByUs(0ll) {
+ mVideoLateByUs(0ll),
+ mAudioOffloadPauseTimeoutGeneration(0),
+ mAudioOffloadTornDown(false) {
}
NuPlayer::Renderer::~Renderer() {
@@ -259,6 +265,17 @@
break;
}
+ case kWhatAudioOffloadPauseTimeout:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation != mAudioOffloadPauseTimeoutGeneration) {
+ break;
+ }
+ onAudioOffloadTearDown();
+ break;
+ }
+
default:
TRESPASS();
break;
@@ -951,6 +968,7 @@
if (mHasAudio) {
mAudioSink->pause();
+ startAudioOffloadPauseTimeout();
}
ALOGV("now paused audio queue has %d entries, video has %d entries",
@@ -963,6 +981,7 @@
}
if (mHasAudio) {
+ cancelAudioOffloadPauseTimeout();
mAudioSink->start();
}
@@ -1051,6 +1070,11 @@
}
void NuPlayer::Renderer::onAudioOffloadTearDown() {
+ if (mAudioOffloadTornDown) {
+ return;
+ }
+ mAudioOffloadTornDown = true;
+
int64_t firstAudioTimeUs;
{
Mutex::Autolock autoLock(mLock);
@@ -1069,5 +1093,19 @@
notify->post();
}
+void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
+ if (offloadingAudio()) {
+ sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, id());
+ msg->setInt32("generation", mAudioOffloadPauseTimeoutGeneration);
+ msg->post(kOffloadPauseMaxUs);
+ }
+}
+
+void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
+ if (offloadingAudio()) {
+ ++mAudioOffloadPauseTimeoutGeneration;
+ }
+}
+
} // namespace android
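
startAudioOffloadPauseTimeout()/cancelAudioOffloadPauseTimeout() above reuse the same generation trick to "cancel" a delayed message: pausing posts kWhatAudioOffloadPauseTimeout tagged with the current generation and delayed by kOffloadPauseMaxUs, and resuming simply bumps the generation so the timeout becomes a no-op when it eventually fires. A minimal sketch of that shape with a std::thread standing in for the delayed AMessage (the class and the 50 ms demo delay are illustrative; only the generation idea and the 60-second kOffloadPauseMaxUs come from the diff):

    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Sketch of the pause-timeout pattern: a delayed "timeout" checks the generation
    // it was posted with; resume() bumps the generation so a stale timeout is ignored.
    class OffloadRenderer {
    public:
        void pause() {
            int gen;
            {
                std::lock_guard<std::mutex> lock(mLock);
                gen = mPauseTimeoutGeneration;
            }
            // Delayed-message stand-in; the real code posts kWhatAudioOffloadPauseTimeout
            // with kOffloadPauseMaxUs (60 s). 50 ms here keeps the demo short.
            std::thread([this, gen] {
                std::this_thread::sleep_for(std::chrono::milliseconds(50));
                onPauseTimeout(gen);
            }).detach();
        }
        void resume() {
            std::lock_guard<std::mutex> lock(mLock);
            ++mPauseTimeoutGeneration;  // "cancels" any pending timeout
        }
    private:
        void onPauseTimeout(int gen) {
            std::lock_guard<std::mutex> lock(mLock);
            if (gen != mPauseTimeoutGeneration) {
                printf("timeout ignored (resumed in time)\n");
                return;
            }
            printf("tearing down audio offload after a long pause\n");
        }
        std::mutex mLock;
        int mPauseTimeoutGeneration = 0;
    };

    int main() {
        OffloadRenderer r;
        r.pause();
        r.resume();  // arrives before the timeout fires
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        return 0;
    }
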
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index e28071f..8e6112b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -66,6 +66,7 @@
kWhatVideoRenderingStart = 'vdrd',
kWhatMediaRenderingStart = 'mdrd',
kWhatAudioOffloadTearDown = 'aOTD',
+ kWhatAudioOffloadPauseTimeout = 'aOPT',
};
protected:
@@ -132,6 +133,9 @@
int64_t mLastPositionUpdateUs;
int64_t mVideoLateByUs;
+ int32_t mAudioOffloadPauseTimeoutGeneration;
+ bool mAudioOffloadTornDown;
+
size_t fillAudioBuffer(void *buffer, size_t size);
bool onDrainAudioQueue();
@@ -168,6 +172,9 @@
bool offloadingAudio() const { return (mFlags & FLAG_OFFLOAD_AUDIO) != 0; }
+ void startAudioOffloadPauseTimeout();
+ void cancelAudioOffloadPauseTimeout();
+
DISALLOW_EVIL_CONSTRUCTORS(Renderer);
};
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3c04859..4589ed1 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2933,13 +2933,6 @@
image.mNumPlanes = 0;
const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
- // we need stride and slice-height to be non-zero
- if (params.nStride == 0 || params.nSliceHeight == 0) {
- ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
- fmt, fmt, params.nStride, params.nSliceHeight);
- return false;
- }
-
image.mWidth = params.nFrameWidth;
image.mHeight = params.nFrameHeight;
@@ -2952,6 +2945,20 @@
return false;
}
+ // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
+ if (params.nStride != 0 && params.nSliceHeight == 0) {
+ ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
+ params.nFrameHeight);
+ params.nSliceHeight = params.nFrameHeight;
+ }
+
+ // we need stride and slice-height to be non-zero
+ if (params.nStride == 0 || params.nSliceHeight == 0) {
+ ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+ fmt, fmt, params.nStride, params.nSliceHeight);
+ return false;
+ }
+
// set-up YUV format
image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
image.mNumPlanes = 3;
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 9d6fd78..a72cbd5 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -199,7 +199,18 @@
} else if (!strncasecmp("http://", uri, 7)
|| !strncasecmp("https://", uri, 8)
|| isWidevine) {
- sp<HTTPBase> httpSource = new MediaHTTP(httpService->makeHTTPConnection());
+ if (httpService == NULL) {
+ ALOGE("Invalid http service!");
+ return NULL;
+ }
+
+ sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
+ return NULL;
+ }
+
+ sp<HTTPBase> httpSource = new MediaHTTP(conn);
String8 tmp;
if (isWidevine) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 0bfc6e4..b56819c 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -179,7 +179,7 @@
response->postReply(replyID);
}
-status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) {
+status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
// save init parameters for reset
mInitName = name;
mInitNameIsType = nameIsType;
@@ -191,7 +191,7 @@
// queue.
mCodec = new ACodec;
bool needDedicatedLooper = false;
- if (nameIsType && !strncasecmp(name, "video/", 6)) {
+ if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
needDedicatedLooper = true;
} else {
AString tmp = name;
@@ -357,7 +357,7 @@
mHaveInputSurface = false;
if (err == OK) {
- err = init(mInitName.c_str(), mInitNameIsType, mInitIsEncoder);
+ err = init(mInitName, mInitNameIsType, mInitIsEncoder);
}
return err;
}
@@ -589,7 +589,12 @@
if (index < buffers->size()) {
const BufferInfo &info = buffers->itemAt(index);
if (info.mOwnedByClient) {
- *buffer = info.mData;
+ // by the time buffers array is initialized, crypto is set
+ if (portIndex == kPortIndexInput && mCrypto != NULL) {
+ *buffer = info.mEncryptedData;
+ } else {
+ *buffer = info.mData;
+ }
*format = info.mFormat;
}
}
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index be2a873..f469d4d 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -254,6 +254,10 @@
// set mDisconnecting to true; if a fetch returns after
// this, the source will be marked as EOS.
mDisconnecting = true;
+
+ // explicitly signal mCondition so that the pending readAt()
+ // will immediately return
+ mCondition.signal();
}
// explicitly disconnect from the source, to allow any
@@ -325,7 +329,11 @@
Mutex::Autolock autoLock(mLock);
- if (err == ERROR_UNSUPPORTED || err == -EPIPE) {
+ if (mDisconnecting) {
+ mNumRetriesLeft = 0;
+ mFinalStatus = ERROR_END_OF_STREAM;
+ return;
+ } else if (err == ERROR_UNSUPPORTED || err == -EPIPE) {
// These are errors that are not likely to go away even if we
// retry, i.e. the server doesn't support range requests or similar.
mNumRetriesLeft = 0;
@@ -515,10 +523,14 @@
CHECK(mAsyncResult == NULL);
msg->post();
- while (mAsyncResult == NULL) {
+ while (mAsyncResult == NULL && !mDisconnecting) {
mCondition.wait(mLock);
}
+ if (mDisconnecting) {
+ return ERROR_END_OF_STREAM;
+ }
+
int32_t result;
CHECK(mAsyncResult->findInt32("result", &result));
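
The NuCachedSource2 change pairs the existing mDisconnecting flag with an explicit mCondition.signal() and an extra wait condition so that a reader blocked waiting for an async fetch result wakes up immediately on disconnect and returns ERROR_END_OF_STREAM instead of hanging. The same shape with standard primitives (a sketch; the -1 error value stands in for ERROR_END_OF_STREAM):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Sketch of the wake-on-disconnect pattern: the blocked reader waits for a
    // result OR a disconnect, and disconnect() signals the condition so the wait
    // returns promptly with an end-of-stream error.
    class CachedSource {
    public:
        int blockingRead() {
            std::unique_lock<std::mutex> lock(mLock);
            mCondition.wait(lock, [this] { return mHaveResult || mDisconnecting; });
            if (mDisconnecting) {
                return -1;  // ERROR_END_OF_STREAM in the real code
            }
            return mResult;
        }
        void disconnect() {
            std::lock_guard<std::mutex> lock(mLock);
            mDisconnecting = true;
            mCondition.notify_all();  // wake any pending read immediately
        }
    private:
        std::mutex mLock;
        std::condition_variable mCondition;
        bool mHaveResult = false;
        bool mDisconnecting = false;
        int mResult = 0;
    };

    int main() {
        CachedSource source;
        std::thread reader([&] { printf("read returned %d\n", source.blockingRead()); });
        source.disconnect();
        reader.join();
        return 0;
    }
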
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index b0d0827..f4cba54 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -67,22 +67,16 @@
: SoftVideoDecoderOMXComponent(name, componentName, codingType,
kProfileLevels, ARRAY_SIZE(kProfileLevels),
320 /* width */, 240 /* height */, callbacks,
- appData, component) {
+ appData, component),
+ mMemRecords(NULL),
+ mFlushOutBuffer(NULL),
+ mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
+ mIvColorFormat(IV_YUV_420P),
+ mNewWidth(mWidth),
+ mNewHeight(mHeight),
+ mChangingResolution(false) {
initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers,
CODEC_MIME_TYPE);
-
- mOmxColorFormat = OMX_COLOR_FormatYUV420Planar;
- mStride = mWidth;
-
- if (OMX_COLOR_FormatYUV420Planar == mOmxColorFormat) {
- mIvColorFormat = IV_YUV_420P;
- } else if (OMX_COLOR_FormatYUV420SemiPlanar == mOmxColorFormat) {
- mIvColorFormat = IV_YUV_420SP_UV;
- }
-
- mInitWidth = mWidth;
- mInitHeight = mHeight;
-
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -144,7 +138,7 @@
s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
- ALOGD("Set the run-time (dynamic) parameters");
+ ALOGV("Set the run-time (dynamic) parameters stride = %u", stride);
status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip,
(void *)&s_ctl_op);
@@ -188,7 +182,7 @@
}
/* Set the run-time (dynamic) parameters */
- setParams(0);
+ setParams(outputBufferWidth());
/* Set number of cores/threads to be used by the codec */
setNumCores();
@@ -250,23 +244,25 @@
WORD32 i4_level;
mNumCores = GetCPUCoreCount();
- mMemRecords = NULL;
- mFlushOutBuffer = NULL;
/* Initialize number of ref and reorder modes (for HEVC) */
u4_num_reorder_frames = 16;
u4_num_ref_frames = 16;
u4_share_disp_buf = 0;
- if ((mWidth * mHeight) > (1920 * 1088)) {
+ uint32_t displayStride = outputBufferWidth();
+ uint32_t displayHeight = outputBufferHeight();
+ uint32_t displaySizeY = displayStride * displayHeight;
+
+ if (displaySizeY > (1920 * 1088)) {
i4_level = 50;
- } else if ((mWidth * mHeight) > (1280 * 720)) {
+ } else if (displaySizeY > (1280 * 720)) {
i4_level = 40;
- } else if ((mWidth * mHeight) > (960 * 540)) {
+ } else if (displaySizeY > (960 * 540)) {
i4_level = 31;
- } else if ((mWidth * mHeight) > (640 * 360)) {
+ } else if (displaySizeY > (640 * 360)) {
i4_level = 30;
- } else if ((mWidth * mHeight) > (352 * 288)) {
+ } else if (displaySizeY > (352 * 288)) {
i4_level = 21;
} else {
i4_level = 20;
@@ -317,8 +313,8 @@
s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.pv_mem_rec_location = mMemRecords;
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = mWidth;
- s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = mHeight;
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = displayStride;
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = displayHeight;
s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_size =
sizeof(ivdext_fill_mem_rec_op_t);
@@ -363,8 +359,8 @@
s_init_ip.s_ivd_init_ip_t.u4_size = sizeof(ivdext_init_ip_t);
s_init_ip.s_ivd_init_ip_t.e_cmd = (IVD_API_COMMAND_TYPE_T)IV_CMD_INIT;
s_init_ip.s_ivd_init_ip_t.pv_mem_rec_location = mMemRecords;
- s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = mWidth;
- s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = mHeight;
+ s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = displayStride;
+ s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = displayHeight;
s_init_ip.i4_level = i4_level;
s_init_ip.u4_num_reorder_frames = u4_num_reorder_frames;
@@ -395,7 +391,7 @@
resetPlugin();
/* Set the run time (dynamic) parameters */
- setParams(0);
+ setParams(displayStride);
/* Set number of cores/threads to be used by the codec */
setNumCores();
@@ -404,12 +400,15 @@
logVersion();
/* Allocate internal picture buffer */
- mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, mStride * mHeight * 3 / 2);
+ uint32_t bufferSize = displaySizeY * 3 / 2;
+ mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
if (NULL == mFlushOutBuffer) {
- ALOGE("Could not allocate flushOutputBuffer of size %zu", mStride * mHeight * 3 / 2);
+ ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
return NO_MEMORY;
}
+ mInitNeeded = false;
+ mFlushNeeded = false;
return OK;
}
@@ -428,11 +427,17 @@
ps_mem_rec++;
}
ivd_aligned_free(mMemRecords);
+ mMemRecords = NULL;
}
if(mFlushOutBuffer) {
ivd_aligned_free(mFlushOutBuffer);
+ mFlushOutBuffer = NULL;
}
+
+ mInitNeeded = true;
+ mChangingResolution = false;
+
return OK;
}
@@ -449,6 +454,7 @@
}
return OK;
}
+
void SoftHEVC::onReset() {
ALOGD("onReset called");
SoftVideoDecoderOMXComponent::onReset();
@@ -457,12 +463,22 @@
resetPlugin();
}
+OMX_ERRORTYPE SoftHEVC::internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params) {
+ const uint32_t oldWidth = mWidth;
+ const uint32_t oldHeight = mHeight;
+ OMX_ERRORTYPE ret = SoftVideoDecoderOMXComponent::internalSetParameter(index, params);
+ if (mWidth != oldWidth || mHeight != oldHeight) {
+ reInitDecoder();
+ }
+ return ret;
+}
+
void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
OMX_BUFFERHEADERTYPE *outHeader,
- size_t sizeY,
size_t timeStampIx) {
+ size_t sizeY = outputBufferWidth() * outputBufferHeight();
size_t sizeUV;
uint8_t *pBuf;
@@ -502,8 +518,6 @@
return;
}
void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) {
- ALOGD("onPortFlushCompleted on port %d", portIndex);
-
/* Once the output buffers are flushed, ignore any buffers that are held in decoder */
if (kOutputPortIndex == portIndex) {
setFlushMode();
@@ -514,7 +528,7 @@
IV_API_CALL_STATUS_T status;
size_t sizeY, sizeUV;
- setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, mStride * mHeight, 0);
+ setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, 0);
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip,
(void *)&s_dec_op);
@@ -527,8 +541,6 @@
}
void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
- IV_API_CALL_STATUS_T status;
-
UNUSED(portIndex);
if (mOutputPortSettingsChange != NONE) {
@@ -548,7 +560,7 @@
setFlushMode();
}
- while (outQueue.size() == kNumBuffers) {
+ while (!outQueue.empty()) {
BufferInfo *inInfo;
OMX_BUFFERHEADERTYPE *inHeader;
@@ -586,6 +598,16 @@
}
}
+ // When there is an init required and the decoder is not in flush mode,
+ // update output port's definition and reinitialize decoder.
+ if (mInitNeeded && !mIsInFlush) {
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
+
+ CHECK_EQ(reInitDecoder(), (status_t)OK);
+ return;
+ }
+
/* Get a free slot in timestamp array to hold input timestamp */
{
size_t i;
@@ -608,68 +630,91 @@
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader,
- mStride * mHeight, timeStampIx);
+ setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
GETTIME(&mTimeStart, NULL);
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
- status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip,
- (void *)&s_dec_op);
+ IV_API_CALL_STATUS_T status;
+ status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ // FIXME: Comparing |status| to IHEVCD_UNSUPPORTED_DIMENSIONS, which is not one of the
+ // IV_API_CALL_STATUS_T values, seems to be wrong. But this is what the decoder returns
+ // right now. The decoder should be fixed so that |u4_error_code|, not |status|, returns
+ // IHEVCD_UNSUPPORTED_DIMENSIONS.
+ bool unsupportedDimensions =
+ ((IHEVCD_UNSUPPORTED_DIMENSIONS == status)
+ || (IHEVCD_UNSUPPORTED_DIMENSIONS == s_dec_op.u4_error_code));
+ bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
GETTIME(&mTimeEnd, NULL);
/* Compute time taken for decode() */
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
- ALOGD("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
- s_dec_op.u4_num_bytes_consumed);
-
- /* If width and height are greater than the
- * the dimensions used during codec create, then
- * delete the current instance and recreate an instance with
- * new dimensions */
-
- if(IHEVCD_UNSUPPORTED_DIMENSIONS == s_dec_op.u4_error_code) {
- mInitWidth = s_dec_op.u4_pic_wd;
- mInitHeight = s_dec_op.u4_pic_ht;
- mStride = mInitWidth;
- CHECK_EQ(reInitDecoder(), (status_t)OK);
-
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader,
- mStride * mHeight, timeStampIx);
-
- status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip,
- (void *)&s_dec_op);
+ ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
+ s_dec_op.u4_num_bytes_consumed);
+ if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
+ mFlushNeeded = true;
}
+
if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) {
/* If the input did not contain picture data, then ignore
* the associated timestamp */
mTimeStampsValid[timeStampIx] = false;
}
- /* If valid height and width are decoded,
- * then look at change in resolution */
+ // This is needed to handle CTS DecoderTest testCodecResetsHEVCWithoutSurface,
+ // which does not send SPS/PPS to the codec after port reconfiguration and flush.
+ if (unsupportedDimensions && !mFlushNeeded) {
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht);
+
+ CHECK_EQ(reInitDecoder(), (status_t)OK);
+
+ setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+
+ ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ return;
+ }
+
+ // If the decoder is in the changing resolution mode and there is no output present,
+ // that means the switching is done and it's ready to reset the decoder and the plugin.
+ if (mChangingResolution && !s_dec_op.u4_output_present) {
+ mChangingResolution = false;
+ resetDecoder();
+ resetPlugin();
+ continue;
+ }
+
+ if (unsupportedDimensions || resChanged) {
+ mChangingResolution = true;
+ if (mFlushNeeded) {
+ setFlushMode();
+ }
+
+ if (unsupportedDimensions) {
+ mNewWidth = s_dec_op.u4_pic_wd;
+ mNewHeight = s_dec_op.u4_pic_ht;
+ mInitNeeded = true;
+ }
+ continue;
+ }
+
if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
uint32_t width = s_dec_op.u4_pic_wd;
uint32_t height = s_dec_op.u4_pic_ht;
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, width, height);
- if ((width != mWidth) || (height != mHeight)) {
- mWidth = width;
- mHeight = height;
- mStride = mWidth;
-
- updatePortDefinitions();
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
+ if (portWillReset) {
+ resetDecoder();
return;
}
}
if (s_dec_op.u4_output_present) {
- outHeader->nFilledLen = (mStride * mHeight * 3) / 2;
+ outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
mTimeStampsValid[s_dec_op.u4_ts] = false;
@@ -711,7 +756,7 @@
}
}
-} // namespace android
+} // namespace android
android::SoftOMXComponent *createSoftOMXComponent(const char *name,
const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
index 233db0c..a91f528 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -62,6 +62,7 @@
virtual void onQueueFilled(OMX_U32 portIndex);
virtual void onPortFlushCompleted(OMX_U32 portIndex);
virtual void onReset();
+ virtual OMX_ERRORTYPE internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params);
private:
// Number of input and output buffers
enum {
@@ -72,12 +73,6 @@
iv_mem_rec_t *mMemRecords; // Memory records requested by the codec
size_t mNumMemRecords; // Number of memory records requested by the codec
- uint32_t mNewWidth; // New width after change in resolution
- uint32_t mNewHeight; // New height after change in resolution
- uint32_t mInitWidth; // Width used during codec creation
- uint32_t mInitHeight; // Height used during codec creation
- size_t mStride; // Stride to be used for display buffers
-
size_t mNumCores; // Number of cores to be used by the codec
struct timeval mTimeStart; // Time at the start of decode()
@@ -98,7 +93,13 @@
bool mIsInFlush; // codec is flush mode
bool mReceivedEOS; // EOS is received on input port
- bool mIsAdapting; // plugin in middle of change in resolution
+ bool mInitNeeded;
+ uint32_t mNewWidth;
+ uint32_t mNewHeight;
+ // The input stream has changed to a different resolution, which is still supported by the
+ // codec. So the codec is switching to decode the new resolution.
+ bool mChangingResolution;
+ bool mFlushNeeded;
status_t initDecoder();
status_t deInitDecoder();
@@ -114,7 +115,6 @@
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
OMX_BUFFERHEADERTYPE *outHeader,
- size_t sizeY,
size_t timeStampIx);
DISALLOW_EVIL_CONSTRUCTORS (SoftHEVC);
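
Taken together, the SoftHEVC changes turn resolution handling into a small state machine: a decode call that reports unsupported dimensions or a resolution change sets mChangingResolution (and, for unsupported dimensions, mInitNeeded plus the new size), the decoder is put into flush mode until it stops producing output for the old size, and only then is it reset or reinitialized. A condensed sketch of those two transitions (field names mirror the diff; the DecodeResult struct and the prints are illustrative):

    #include <cstdio>

    // Condensed sketch of the resolution-change handling added to SoftHEVC::onQueueFilled().
    struct DecodeResult {
        bool unsupportedDimensions;  // stream larger than the buffers we allocated
        bool resChanged;             // new resolution that still fits the current buffers
        bool outputPresent;          // decoder still has frames queued for the old size
        int picWidth, picHeight;
    };

    struct HevcState {
        bool changingResolution = false;
        bool initNeeded = false;
        bool flushNeeded = true;     // at least one frame was decoded before the change
        int newWidth = 0, newHeight = 0;

        void onDecoded(const DecodeResult &r) {
            if (changingResolution && !r.outputPresent) {
                // switching done: reset decoder and plugin, resume with the new size
                changingResolution = false;
                printf("reset decoder/plugin\n");
                return;
            }
            if (r.unsupportedDimensions || r.resChanged) {
                changingResolution = true;
                if (flushNeeded) {
                    printf("enter flush mode to drain old frames\n");
                }
                if (r.unsupportedDimensions) {
                    // buffers too small: remember the new size and reinit on a later pass
                    initNeeded = true;
                    newWidth = r.picWidth;
                    newHeight = r.picHeight;
                }
            }
        }
    };

    int main() {
        HevcState s;
        s.onDecoded({true, false, true, 3840, 2160});   // change detected, start flushing
        s.onDecoded({false, false, false, 3840, 2160}); // drained: reset and reinit
        printf("initNeeded=%d newWidth=%d newHeight=%d\n", s.initNeeded, s.newWidth, s.newHeight);
        return 0;
    }
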
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 5b2ab84..d98fa80 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -295,19 +295,23 @@
ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d",
disp_width, disp_height, buf_width, buf_height);
- bool cropChanged = false;
- if (mCropWidth != disp_width || mCropHeight != disp_height) {
- mCropLeft = 0;
- mCropTop = 0;
- mCropWidth = disp_width;
- mCropHeight = disp_height;
- cropChanged = true;
+ CropSettingsMode cropSettingsMode = kCropUnSet;
+ if (disp_width != buf_width || disp_height != buf_height) {
+ cropSettingsMode = kCropSet;
+
+ if (mCropWidth != disp_width || mCropHeight != disp_height) {
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = disp_width;
+ mCropHeight = disp_height;
+ cropSettingsMode = kCropChanged;
+ }
}
bool portWillReset = false;
const bool fakeStride = true;
SoftVideoDecoderOMXComponent::handlePortSettingsChange(
- &portWillReset, buf_width, buf_height, cropChanged, fakeStride);
+ &portWillReset, buf_width, buf_height, cropSettingsMode, fakeStride);
if (portWillReset) {
if (mMode == MODE_H263) {
PVCleanUpVideoDecoder(mHandle);
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index cf3c3e3..168208f 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -160,10 +160,11 @@
H264SwDecInfo decoderInfo;
CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK);
- bool cropChanged = handleCropChange(decoderInfo);
+ SoftVideoDecoderOMXComponent::CropSettingsMode cropSettingsMode =
+ handleCropParams(decoderInfo);
handlePortSettingsChange(
&portWillReset, decoderInfo.picWidth, decoderInfo.picHeight,
- cropChanged);
+ cropSettingsMode);
}
} else {
if (portWillReset) {
@@ -209,9 +210,10 @@
}
}
-bool SoftAVC::handleCropChange(const H264SwDecInfo& decInfo) {
+SoftVideoDecoderOMXComponent::CropSettingsMode SoftAVC::handleCropParams(
+ const H264SwDecInfo& decInfo) {
if (!decInfo.croppingFlag) {
- return false;
+ return kCropUnSet;
}
const CropParams& crop = decInfo.cropParams;
@@ -219,14 +221,14 @@
mCropTop == crop.cropTopOffset &&
mCropWidth == crop.cropOutWidth &&
mCropHeight == crop.cropOutHeight) {
- return false;
+ return kCropSet;
}
mCropLeft = crop.cropLeftOffset;
mCropTop = crop.cropTopOffset;
mCropWidth = crop.cropOutWidth;
mCropHeight = crop.cropOutHeight;
- return true;
+ return kCropChanged;
}
void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) {
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index 253a406..069107d 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -73,7 +73,7 @@
void drainAllOutputBuffers(bool eos);
void drainOneOutputBuffer(int32_t picId, uint8_t *data);
void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
- bool handleCropChange(const H264SwDecInfo& decInfo);
+ CropSettingsMode handleCropParams(const H264SwDecInfo& decInfo);
DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
};
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index c97be28..1cbef39 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -73,22 +73,22 @@
<Encoders>
<MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
<!-- profiles and levels: ProfileBaseline : Level45 -->
- <Limit name="size" min="2x2" max="176x144" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="size" min="16x16" max="176x144" />
+ <Limit name="alignment" value="16x16" />
<Limit name="bitrate" range="1-128000" />
</MediaCodec>
<MediaCodec name="OMX.google.h264.encoder" type="video/avc">
<!-- profiles and levels: ProfileBaseline : Level2 -->
- <Limit name="size" min="2x2" max="896x896" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="size" min="16x16" max="896x896" />
+ <Limit name="alignment" value="16x16" />
<Limit name="block-size" value="16x16" />
<Limit name="blocks-per-second" range="1-11880" />
<Limit name="bitrate" range="1-2000000" />
</MediaCodec>
<MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
<!-- profiles and levels: ProfileCore : Level2 -->
- <Limit name="size" min="2x2" max="176x144" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="size" min="16x16" max="176x144" />
+ <Limit name="alignment" value="16x16" />
<Limit name="block-size" value="16x16" />
<Limit name="blocks-per-second" range="12-1485" />
<Limit name="bitrate" range="1-64000" />
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index a289637..7a9a1a3 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -353,10 +353,6 @@
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
- uint32_t replyID;
- CHECK(response == mSeekReply && 0 != mSeekReplyID);
- mSeekReply.clear();
- mSeekReplyID = 0;
return err;
}
@@ -382,12 +378,16 @@
case kWhatSeek:
{
- CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
+ uint32_t seekReplyID;
+ CHECK(msg->senderAwaitsResponse(&seekReplyID));
+ mSeekReplyID = seekReplyID;
+ mSeekReply = new AMessage;
status_t err = onSeek(msg);
- mSeekReply = new AMessage;
- mSeekReply->setInt32("err", err);
+ if (err != OK) {
+ msg->post(50000);
+ }
break;
}
@@ -422,7 +422,10 @@
if (mSeekReplyID != 0) {
CHECK(mSeekReply != NULL);
+ mSeekReply->setInt32("err", OK);
mSeekReply->postReply(mSeekReplyID);
+ mSeekReplyID = 0;
+ mSeekReply.clear();
}
}
}
@@ -1094,10 +1097,11 @@
CHECK(msg->findInt64("timeUs", &timeUs));
if (!mReconfigurationInProgress) {
- changeConfiguration(timeUs, getBandwidthIndex());
+ changeConfiguration(timeUs, mCurBandwidthIndex);
+ return OK;
+ } else {
+ return -EWOULDBLOCK;
}
-
- return OK;
}
status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -1254,7 +1258,10 @@
if (mSeekReplyID != 0) {
CHECK(mSeekReply != NULL);
+ mSeekReply->setInt32("err", OK);
mSeekReply->postReply(mSeekReplyID);
+ mSeekReplyID = 0;
+ mSeekReply.clear();
}
}
}
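
The LiveSession change stops replying to kWhatSeek immediately: onSeek() now returns -EWOULDBLOCK while a reconfiguration is already in flight, in which case the seek message is re-posted after 50 ms, and the stored mSeekReply is only completed once the configuration change actually finishes. A compact sketch of that retry-until-ready flow (the 50 ms repost and -EWOULDBLOCK come from the diff; the Session struct is illustrative):

    #include <cstdio>
    #include <errno.h>

    // Sketch of "retry the seek until reconfiguration finishes, reply once".
    struct Session {
        bool reconfigurationInProgress = true;
        bool seekReplyPending = false;

        int onSeek() {
            if (reconfigurationInProgress) {
                return -EWOULDBLOCK;  // caller re-posts the seek message later
            }
            // changeConfiguration(timeUs, mCurBandwidthIndex) would start here
            return 0;
        }
        void onConfigurationChanged() {
            reconfigurationInProgress = false;
            if (seekReplyPending) {
                printf("posting deferred seek reply (err=OK)\n");
                seekReplyPending = false;
            }
        }
    };

    int main() {
        Session s;
        s.seekReplyPending = true;                // kWhatSeek stored mSeekReplyID/mSeekReply
        while (s.onSeek() == -EWOULDBLOCK) {      // real code re-posts the message after 50 ms
            printf("reconfiguration in progress, seek deferred\n");
            s.reconfigurationInProgress = false;  // the previous reconfiguration completes
        }
        s.onConfigurationChanged();               // the seek's own reconfiguration finishes
        return 0;
    }
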
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index 8cb8ed7..9e97ebd 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -65,9 +65,17 @@
virtual void updatePortDefinitions(bool updateCrop = true);
+ uint32_t outputBufferWidth();
+ uint32_t outputBufferHeight();
+
+ enum CropSettingsMode {
+ kCropUnSet = 0,
+ kCropSet,
+ kCropChanged,
+ };
void handlePortSettingsChange(
bool *portWillReset, uint32_t width, uint32_t height,
- bool cropChanged = false, bool fakeStride = false);
+ CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false);
void copyYV12FrameToOutputBuffer(
uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 1cb1859..3d20a79 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -133,8 +133,8 @@
def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
def = &editPortInfo(kOutputPortIndex)->mDef;
- def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
- def->format.video.nFrameHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+ def->format.video.nFrameWidth = outputBufferWidth();
+ def->format.video.nFrameHeight = outputBufferHeight();
def->format.video.nStride = def->format.video.nFrameWidth;
def->format.video.nSliceHeight = def->format.video.nFrameHeight;
@@ -150,16 +150,27 @@
}
}
+
+uint32_t SoftVideoDecoderOMXComponent::outputBufferWidth() {
+ return mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+}
+
+uint32_t SoftVideoDecoderOMXComponent::outputBufferHeight() {
+ return mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+}
+
void SoftVideoDecoderOMXComponent::handlePortSettingsChange(
- bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged, bool fakeStride) {
+ bool *portWillReset, uint32_t width, uint32_t height,
+ CropSettingsMode cropSettingsMode, bool fakeStride) {
*portWillReset = false;
bool sizeChanged = (width != mWidth || height != mHeight);
+ bool updateCrop = (cropSettingsMode == kCropUnSet);
+ bool cropChanged = (cropSettingsMode == kCropChanged);
if (sizeChanged || cropChanged) {
mWidth = width;
mHeight = height;
- bool updateCrop = !cropChanged;
if ((sizeChanged && !mIsAdaptive)
|| width > mAdaptiveMaxWidth
|| height > mAdaptiveMaxHeight) {
@@ -199,9 +210,9 @@
void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
size_t srcYStride, size_t srcUStride, size_t srcVStride) {
- size_t dstYStride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+ size_t dstYStride = outputBufferWidth();
size_t dstUVStride = dstYStride / 2;
- size_t dstHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+ size_t dstHeight = outputBufferHeight();
uint8_t *dstStart = dst;
for (size_t i = 0; i < mHeight; ++i) {
@@ -334,6 +345,40 @@
return OMX_ErrorNone;
}
+ case OMX_IndexParamPortDefinition:
+ {
+ OMX_PARAM_PORTDEFINITIONTYPE *newParams =
+ (OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &newParams->format.video;
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(newParams->nPortIndex)->mDef;
+
+ uint32_t oldWidth = def->format.video.nFrameWidth;
+ uint32_t oldHeight = def->format.video.nFrameHeight;
+ uint32_t newWidth = video_def->nFrameWidth;
+ uint32_t newHeight = video_def->nFrameHeight;
+ if (newWidth != oldWidth || newHeight != oldHeight) {
+ bool outputPort = (newParams->nPortIndex == kOutputPortIndex);
+ def->format.video.nFrameWidth =
+ (mIsAdaptive && outputPort) ? mAdaptiveMaxWidth : newWidth;
+ def->format.video.nFrameHeight =
+ (mIsAdaptive && outputPort) ? mAdaptiveMaxHeight : newHeight;
+ def->format.video.nStride = def->format.video.nFrameWidth;
+ def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+ def->nBufferSize =
+ def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
+ if (outputPort) {
+ mWidth = newWidth;
+ mHeight = newHeight;
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = newWidth;
+ mCropHeight = newHeight;
+ }
+ newParams->nBufferSize = def->nBufferSize;
+ }
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
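
The new CropSettingsMode replaces the old cropChanged boolean so callers can distinguish three cases: no crop information at all (kCropUnSet, keep updating the default crop), crop reported but identical to the cached values (kCropSet), and crop actually changed (kCropChanged, which forces the port update). A small sketch of how a decoder plugin maps its output onto the enum, in the spirit of the SoftAVC::handleCropParams() change above (the helper and the CropRect struct are illustrative):

    #include <cstdio>

    // Mirrors the enum added to SoftVideoDecoderOMXComponent.h.
    enum CropSettingsMode {
        kCropUnSet = 0,   // decoder reported no crop rectangle
        kCropSet,         // crop reported, identical to what is already cached
        kCropChanged,     // crop reported and different: port definition must update
    };

    struct CropRect { int left, top, width, height; };

    // Illustrative helper: compare the decoder-reported crop with the cached one,
    // the way SoftAVC::handleCropParams() does after this change.
    static CropSettingsMode classifyCrop(bool hasCrop, const CropRect &reported,
                                         CropRect *cached) {
        if (!hasCrop) {
            return kCropUnSet;
        }
        if (reported.left == cached->left && reported.top == cached->top &&
            reported.width == cached->width && reported.height == cached->height) {
            return kCropSet;
        }
        *cached = reported;
        return kCropChanged;
    }

    int main() {
        CropRect cached = {0, 0, 1920, 1080};
        CropSettingsMode mode = classifyCrop(true, {0, 0, 1920, 1072}, &cached);
        printf("mode=%d (2 means crop changed)\n", mode);
        return 0;
    }
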
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index f3dfc59..423a420 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -254,7 +254,9 @@
static void addSDES(int s, const sp<ABuffer> &buffer) {
struct sockaddr_in addr;
socklen_t addrSize = sizeof(addr);
- CHECK_EQ(0, getsockname(s, (sockaddr *)&addr, &addrSize));
+ if (getsockname(s, (sockaddr *)&addr, &addrSize) != 0) {
+ inet_aton("0.0.0.0", &(addr.sin_addr));
+ }
uint8_t *data = buffer->data() + buffer->size();
data[0] = 0x80 | 1;
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 95ac070..d51ee8e 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -112,7 +112,7 @@
STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
};
-const StringToEnum sFlagNameToEnumTable[] = {
+const StringToEnum sOutputFlagNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
@@ -122,6 +122,11 @@
STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
};
+const StringToEnum sInputFlagNameToEnumTable[] = {
+ STRING_TO_ENUM(AUDIO_INPUT_FLAG_FAST),
+ STRING_TO_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+};
+
const StringToEnum sFormatNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
@@ -1292,16 +1297,41 @@
break;
}
+ audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+ bool isSoundTrigger = false;
+ audio_source_t halInputSource = inputSource;
+ if (inputSource == AUDIO_SOURCE_HOTWORD) {
+ ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+ if (index >= 0) {
+ input = mSoundTriggerSessions.valueFor(session);
+ isSoundTrigger = true;
+ flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
+ ALOGV("SoundTrigger capture on session %d input %d", session, input);
+ } else {
+ halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
+ }
+ }
+
sp<IOProfile> profile = getInputProfile(device,
samplingRate,
format,
channelMask,
flags);
if (profile == 0) {
- ALOGW("getInput() could not find profile for device 0x%X, samplingRate %u, format %#x, "
- "channelMask 0x%X, flags %#x",
- device, samplingRate, format, channelMask, flags);
- return AUDIO_IO_HANDLE_NONE;
+ //retry without flags
+ audio_input_flags_t log_flags = flags;
+ flags = AUDIO_INPUT_FLAG_NONE;
+ profile = getInputProfile(device,
+ samplingRate,
+ format,
+ channelMask,
+ flags);
+ if (profile == 0) {
+ ALOGW("getInput() could not find profile for device 0x%X, samplingRate %u, format %#x, "
+ "channelMask 0x%X, flags %#x",
+ device, samplingRate, format, channelMask, log_flags);
+ return AUDIO_IO_HANDLE_NONE;
+ }
}
if (profile->mModule->mHandle == 0) {
@@ -1313,20 +1343,7 @@
config.sample_rate = samplingRate;
config.channel_mask = channelMask;
config.format = format;
- audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- bool isSoundTrigger = false;
- audio_source_t halInputSource = inputSource;
- if (inputSource == AUDIO_SOURCE_HOTWORD) {
- ssize_t index = mSoundTriggerSessions.indexOfKey(session);
- if (index >= 0) {
- input = mSoundTriggerSessions.valueFor(session);
- isSoundTrigger = true;
- ALOGV("SoundTrigger capture on session %d input %d", session, input);
- } else {
- halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
- }
- }
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
&input,
&config,
@@ -5220,7 +5237,7 @@
mStrategyMutedByDevice[i] = false;
}
if (profile != NULL) {
- mFlags = profile->mFlags;
+ mFlags = (audio_output_flags_t)profile->mFlags;
mSamplingRate = profile->pickSamplingRate();
mFormat = profile->pickFormat();
mChannelMask = profile->pickChannelMask();
@@ -5568,6 +5585,8 @@
} else if (strcmp(node->name, DEVICES_TAG) == 0) {
profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
mDeclaredDevices);
+ } else if (strcmp(node->name, FLAGS_TAG) == 0) {
+ profile->mFlags = parseInputFlagNames((char *)node->value);
} else if (strcmp(node->name, GAINS_TAG) == 0) {
profile->loadGains(node);
}
@@ -5613,7 +5632,7 @@
profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
mDeclaredDevices);
} else if (strcmp(node->name, FLAGS_TAG) == 0) {
- profile->mFlags = parseFlagNames((char *)node->value);
+ profile->mFlags = parseOutputFlagNames((char *)node->value);
} else if (strcmp(node->name, GAINS_TAG) == 0) {
profile->loadGains(node);
}
@@ -5728,7 +5747,7 @@
AudioPolicyManager::AudioPort::AudioPort(const String8& name, audio_port_type_t type,
audio_port_role_t role, const sp<HwModule>& module) :
- mName(name), mType(type), mRole(role), mModule(module), mFlags((audio_output_flags_t)0)
+ mName(name), mType(type), mRole(role), mModule(module), mFlags(0)
{
mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) ||
((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK));
@@ -6560,7 +6579,7 @@
uint32_t *updatedSamplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- audio_output_flags_t flags) const
+ uint32_t flags) const
{
const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
@@ -6602,7 +6621,7 @@
// An existing fast stream is compatible with a normal track request.
// An existing normal stream is compatible with a fast track request,
// but the fast request will be denied by AudioFlinger and converted to normal track.
- if (isRecordThread && (((audio_input_flags_t) mFlags ^ (audio_input_flags_t) flags) &
+ if (isRecordThread && ((mFlags ^ flags) &
~AUDIO_INPUT_FLAG_FAST)) {
return false;
}
@@ -6958,7 +6977,7 @@
// --- audio_policy.conf file parsing
-audio_output_flags_t AudioPolicyManager::parseFlagNames(char *name)
+uint32_t AudioPolicyManager::parseOutputFlagNames(char *name)
{
uint32_t flag = 0;
@@ -6967,8 +6986,8 @@
char *flagName = strtok(name, "|");
while (flagName != NULL) {
if (strlen(flagName) != 0) {
- flag |= stringToEnum(sFlagNameToEnumTable,
- ARRAY_SIZE(sFlagNameToEnumTable),
+ flag |= stringToEnum(sOutputFlagNameToEnumTable,
+ ARRAY_SIZE(sOutputFlagNameToEnumTable),
flagName);
}
flagName = strtok(NULL, "|");
@@ -6980,7 +6999,25 @@
flag |= AUDIO_OUTPUT_FLAG_DIRECT;
}
- return (audio_output_flags_t)flag;
+ return flag;
+}
+
+uint32_t AudioPolicyManager::parseInputFlagNames(char *name)
+{
+ uint32_t flag = 0;
+
+ // it is OK to cast name to non const here as we are not going to use it after
+ // strtok() modifies it
+ char *flagName = strtok(name, "|");
+ while (flagName != NULL) {
+ if (strlen(flagName) != 0) {
+ flag |= stringToEnum(sInputFlagNameToEnumTable,
+ ARRAY_SIZE(sInputFlagNameToEnumTable),
+ flagName);
+ }
+ flagName = strtok(NULL, "|");
+ }
+ return flag;
}
audio_devices_t AudioPolicyManager::parseDeviceNames(char *name)
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index da0d95d..0ea7b97 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -283,8 +283,8 @@
Vector <audio_format_t> mFormats; // supported audio formats
Vector < sp<AudioGain> > mGains; // gain controllers
sp<HwModule> mModule; // audio HW module exposing this I/O stream
- audio_output_flags_t mFlags; // attribute flags (e.g primary output,
- // direct output...). For outputs only.
+ uint32_t mFlags; // attribute flags (e.g primary output,
+ // direct output...).
};
class AudioPortConfig: public virtual RefBase
@@ -387,7 +387,7 @@
uint32_t *updatedSamplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- audio_output_flags_t flags) const;
+ uint32_t flags) const;
void dump(int fd);
void log();
@@ -754,7 +754,8 @@
size_t size,
uint32_t value);
static bool stringToBool(const char *value);
- static audio_output_flags_t parseFlagNames(char *name);
+ static uint32_t parseOutputFlagNames(char *name);
+ static uint32_t parseInputFlagNames(char *name);
static audio_devices_t parseDeviceNames(char *name);
void loadHwModule(cnode *root);
void loadHwModules(cnode *root);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index fe2f299..48ec730 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1217,6 +1217,8 @@
{
SharedParameters::Lock l(mParameters);
if (l.mParameters.state < Parameters::PREVIEW) {
+ ALOGE("%s: Camera %d: Call autoFocus when preview is inactive (state = %d).",
+ __FUNCTION__, mCameraId, l.mParameters.state);
return INVALID_OPERATION;
}