Merge "camera2: fix an error reading a NULL string" into lmp-mr1-dev
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index f3b7fbb..1614525 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -90,7 +90,7 @@
static void setErrorCallback(audio_error_callback cb);
// helper function to obtain AudioFlinger service handle
- static const sp<IAudioFlinger>& get_audio_flinger();
+ static const sp<IAudioFlinger> get_audio_flinger();
static float linearToLog(int volume);
static int logToLinear(float volume);
@@ -270,7 +270,7 @@
// and output configuration cache (gOutputs)
static void clearAudioConfigCache();
- static const sp<IAudioPolicyService>& get_audio_policy_service();
+ static const sp<IAudioPolicyService> get_audio_policy_service();
// helpers for android.media.AudioManager.getProperty(), see description there for meaning
static uint32_t getPrimaryOutputSamplingRate();
@@ -373,10 +373,11 @@
friend class AudioFlingerClient;
friend class AudioPolicyServiceClient;
- static Mutex gLock; // protects all members except gAudioPolicyService,
- // gAudioPolicyServiceClient, and gAudioPortCallback
- static Mutex gLockAPS; // protects gAudioPolicyService and gAudioPolicyServiceClient
- static Mutex gLockAPC; // protects gAudioPortCallback
+ static Mutex gLock; // protects gAudioFlinger and gAudioErrorCallback
+ static Mutex gLockCache; // protects gOutputs, gPrevInSamplingRate, gPrevInFormat,
+ // gPrevInChannelMask and gInBuffSize
+ static Mutex gLockAPS; // protects gAudioPolicyService and gAudioPolicyServiceClient
+ static Mutex gLockAPC; // protects gAudioPortCallback
static sp<IAudioFlinger> gAudioFlinger;
static audio_error_callback gAudioErrorCallback;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index fa1b20a..31dff36 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -450,7 +450,14 @@
ssize_t pollPosition(); // poll for state queue update, and return current position
StaticAudioTrackSingleStateQueue::Observer mObserver;
size_t mPosition; // server's current play position in frames, relative to 0
- size_t mEnd; // cached value computed from mState, safe for asynchronous read
+
+ size_t mFramesReadySafe; // Assuming size_t reads and writes are atomic on 32 / 64 bit
+ // processors, this is a thread-safe version of
+ // mFramesReady.
+ int64_t mFramesReady; // The number of frames ready in the static buffer
+ // including loops. This is 64 bits since loop mode
+ // can cause a track to appear to have a large number
+ // of frames. INT64_MAX means an infinite loop.
bool mFramesReadyIsCalledByMultipleThreads;
StaticAudioTrackState mState;
};
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index fce4389..1f8e9b6 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -32,6 +32,7 @@
// client singleton for AudioFlinger binder interface
Mutex AudioSystem::gLock;
+Mutex AudioSystem::gLockCache;
Mutex AudioSystem::gLockAPS;
Mutex AudioSystem::gLockAPC;
sp<IAudioFlinger> AudioSystem::gAudioFlinger;
@@ -50,33 +51,40 @@
sp<AudioSystem::AudioPortCallback> AudioSystem::gAudioPortCallback;
// establish binder interface to AudioFlinger service
-const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
+const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
{
- Mutex::Autolock _l(gLock);
- if (gAudioFlinger == 0) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder;
- do {
- binder = sm->getService(String16("media.audio_flinger"));
- if (binder != 0)
- break;
- ALOGW("AudioFlinger not published, waiting...");
- usleep(500000); // 0.5 s
- } while (true);
- if (gAudioFlingerClient == NULL) {
- gAudioFlingerClient = new AudioFlingerClient();
- } else {
- if (gAudioErrorCallback) {
- gAudioErrorCallback(NO_ERROR);
+ sp<IAudioFlinger> af;
+ sp<AudioFlingerClient> afc;
+ {
+ Mutex::Autolock _l(gLock);
+ if (gAudioFlinger == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_flinger"));
+ if (binder != 0)
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while (true);
+ if (gAudioFlingerClient == NULL) {
+ gAudioFlingerClient = new AudioFlingerClient();
+ } else {
+ if (gAudioErrorCallback) {
+ gAudioErrorCallback(NO_ERROR);
+ }
}
+ binder->linkToDeath(gAudioFlingerClient);
+ gAudioFlinger = interface_cast<IAudioFlinger>(binder);
+ LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
+ afc = gAudioFlingerClient;
}
- binder->linkToDeath(gAudioFlingerClient);
- gAudioFlinger = interface_cast<IAudioFlinger>(binder);
- LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
- gAudioFlinger->registerClient(gAudioFlingerClient);
+ af = gAudioFlinger;
}
-
- return gAudioFlinger;
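+ // registerClient() is a binder call, so make it only after gLock has been released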
+ if (afc != 0) {
+ af->registerClient(afc);
+ }
+ return af;
}
/* static */ status_t AudioSystem::checkAudioFlinger()
@@ -250,20 +258,20 @@
status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
uint32_t* samplingRate)
{
- OutputDescriptor *outputDesc;
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
- gLock.lock();
- outputDesc = AudioSystem::gOutputs.valueFor(output);
+ Mutex::Autolock _l(gLockCache);
+
+ OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
- gLock.unlock();
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
+ gLockCache.unlock();
*samplingRate = af->sampleRate(output);
+ gLockCache.lock();
} else {
ALOGV("getOutputSamplingRate() reading from output desc");
*samplingRate = outputDesc->samplingRate;
- gLock.unlock();
}
if (*samplingRate == 0) {
ALOGE("AudioSystem::getSamplingRate failed for output %d", output);
@@ -294,18 +302,18 @@
status_t AudioSystem::getFrameCount(audio_io_handle_t output,
size_t* frameCount)
{
- OutputDescriptor *outputDesc;
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
- gLock.lock();
- outputDesc = AudioSystem::gOutputs.valueFor(output);
+ Mutex::Autolock _l(gLockCache);
+
+ OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
- gLock.unlock();
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
+ gLockCache.unlock();
*frameCount = af->frameCount(output);
+ gLockCache.lock();
} else {
*frameCount = outputDesc->frameCount;
- gLock.unlock();
}
if (*frameCount == 0) {
ALOGE("AudioSystem::getFrameCount failed for output %d", output);
@@ -336,18 +344,18 @@
status_t AudioSystem::getLatency(audio_io_handle_t output,
uint32_t* latency)
{
- OutputDescriptor *outputDesc;
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
- gLock.lock();
- outputDesc = AudioSystem::gOutputs.valueFor(output);
+ Mutex::Autolock _l(gLockCache);
+
+ OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
- gLock.unlock();
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) return PERMISSION_DENIED;
+ gLockCache.unlock();
*latency = af->latency(output);
+ gLockCache.lock();
} else {
*latency = outputDesc->latency;
- gLock.unlock();
}
ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -358,24 +366,24 @@
status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask, size_t* buffSize)
{
- gLock.lock();
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ return PERMISSION_DENIED;
+ }
+ Mutex::Autolock _l(gLockCache);
// Do we have a stale gInBuffSize or are we requesting the input buffer size for new values?
size_t inBuffSize = gInBuffSize;
if ((inBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
|| (channelMask != gPrevInChannelMask)) {
- gLock.unlock();
- const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
+ gLockCache.unlock();
inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
+ gLockCache.lock();
if (inBuffSize == 0) {
ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
sampleRate, format, channelMask);
return BAD_VALUE;
}
// A benign race is possible here: we could overwrite a fresher cache entry
- gLock.lock();
// save the request params
gPrevInSamplingRate = sampleRate;
gPrevInFormat = format;
@@ -383,7 +391,6 @@
gInBuffSize = inBuffSize;
}
- gLock.unlock();
*buffSize = inBuffSize;
return NO_ERROR;
@@ -450,14 +457,21 @@
void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
{
- Mutex::Autolock _l(AudioSystem::gLock);
+ audio_error_callback cb = NULL;
+ {
+ Mutex::Autolock _l(AudioSystem::gLock);
+ AudioSystem::gAudioFlinger.clear();
+ cb = gAudioErrorCallback;
+ }
- AudioSystem::gAudioFlinger.clear();
- // clear output handles and stream to output map caches
- AudioSystem::gOutputs.clear();
+ {
+ // clear output handles and stream to output map caches
+ Mutex::Autolock _l(gLockCache);
+ AudioSystem::gOutputs.clear();
+ }
- if (gAudioErrorCallback) {
- gAudioErrorCallback(DEAD_OBJECT);
+ if (cb) {
+ cb(DEAD_OBJECT);
}
ALOGW("AudioFlinger server died!");
}
@@ -470,7 +484,7 @@
if (ioHandle == AUDIO_IO_HANDLE_NONE) return;
- Mutex::Autolock _l(AudioSystem::gLock);
+ Mutex::Autolock _l(AudioSystem::gLockCache);
switch (event) {
case STREAM_CONFIG_CHANGED:
@@ -539,29 +553,37 @@
// establish binder interface to AudioPolicy service
-const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
+const sp<IAudioPolicyService> AudioSystem::get_audio_policy_service()
{
- Mutex::Autolock _l(gLockAPS);
- if (gAudioPolicyService == 0) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder;
- do {
- binder = sm->getService(String16("media.audio_policy"));
- if (binder != 0)
- break;
- ALOGW("AudioPolicyService not published, waiting...");
- usleep(500000); // 0.5 s
- } while (true);
- if (gAudioPolicyServiceClient == NULL) {
- gAudioPolicyServiceClient = new AudioPolicyServiceClient();
+ sp<IAudioPolicyService> ap;
+ sp<AudioPolicyServiceClient> apc;
+ {
+ Mutex::Autolock _l(gLockAPS);
+ if (gAudioPolicyService == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_policy"));
+ if (binder != 0)
+ break;
+ ALOGW("AudioPolicyService not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while (true);
+ if (gAudioPolicyServiceClient == NULL) {
+ gAudioPolicyServiceClient = new AudioPolicyServiceClient();
+ }
+ binder->linkToDeath(gAudioPolicyServiceClient);
+ gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
+ LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
+ apc = gAudioPolicyServiceClient;
}
- binder->linkToDeath(gAudioPolicyServiceClient);
- gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
- LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
- gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
+ ap = gAudioPolicyService;
+ }
+ if (apc != 0) {
+ ap->registerClient(apc);
}
- return gAudioPolicyService;
+ return ap;
}
// ---------------------------------------------------------------------------
@@ -829,8 +851,11 @@
// called by restoreTrack_l(), which needs new IAudioFlinger and IAudioPolicyService instances
ALOGV("clearAudioConfigCache()");
{
- Mutex::Autolock _l(gLock);
+ Mutex::Autolock _l(gLockCache);
gOutputs.clear();
+ }
+ {
+ Mutex::Autolock _l(gLock);
gAudioFlinger.clear();
}
{
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 2f57b9d..c11050e 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2149,6 +2149,11 @@
mStreamType = AUDIO_STREAM_ALARM;
break;
}
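+ // during a call or VoIP session, use the voice-call stream instead of falling through to media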
+ audio_mode_t phoneState = AudioSystem::getPhoneState();
+ if (phoneState == AUDIO_MODE_IN_CALL || phoneState == AUDIO_MODE_IN_COMMUNICATION) {
+ mStreamType = AUDIO_STREAM_VOICE_CALL;
+ break;
+ }
} // FALL THROUGH
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 561cb24..62362da 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -25,6 +25,12 @@
namespace android {
+// used to clamp a value to size_t. TODO: move to another file.
+template <typename T>
+size_t clampToSize(T x) {
+ return x > SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
+}
+
audio_track_cblk_t::audio_track_cblk_t()
: mServer(0), mFutex(0), mMinimum(0),
mVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY), mSampleRate(0), mSendLevel(0), mFlags(0)
@@ -728,7 +734,8 @@
size_t frameCount, size_t frameSize)
: AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),
- mEnd(frameCount), mFramesReadyIsCalledByMultipleThreads(false)
+ mFramesReadySafe(frameCount), mFramesReady(frameCount),
+ mFramesReadyIsCalledByMultipleThreads(false)
{
mState.mLoopStart = 0;
mState.mLoopEnd = 0;
@@ -742,20 +749,11 @@
size_t StaticAudioTrackServerProxy::framesReady()
{
- // FIXME
- // This is racy if called by normal mixer thread,
- // as we're reading 2 independent variables without a lock.
- // Can't call mObserver.poll(), as we might be called from wrong thread.
- // If looping is enabled, should return a higher number (since includes non-contiguous).
- size_t position = mPosition;
+ // Can't call pollPosition() from multiple threads.
if (!mFramesReadyIsCalledByMultipleThreads) {
- ssize_t positionOrStatus = pollPosition();
- if (positionOrStatus >= 0) {
- position = (size_t) positionOrStatus;
- }
+ (void) pollPosition();
}
- size_t end = mEnd;
- return position < end ? end - position : 0;
+ return mFramesReadySafe;
}
ssize_t StaticAudioTrackServerProxy::pollPosition()
@@ -772,25 +770,35 @@
}
// ignore loopEnd
mPosition = position = loopStart;
- mEnd = mFrameCount;
+ mFramesReady = mFrameCount - mPosition;
mState.mLoopCount = 0;
valid = true;
- } else {
+ } else if (state.mLoopCount >= -1) {
if (loopStart < loopEnd && loopEnd <= mFrameCount &&
loopEnd - loopStart >= MIN_LOOP) {
if (!(loopStart <= position && position < loopEnd)) {
mPosition = position = loopStart;
}
- mEnd = loopEnd;
+ if (state.mLoopCount == -1) {
+ mFramesReady = INT64_MAX;
+ } else {
+ // mFramesReady is 64 bits to handle the effective number of frames
+ // that the static audio track contains, including loops.
+ // TODO: Later consider fixing overflow, but does not seem needed now
+ // as will not overflow if loopStart and loopEnd are Java "ints".
+ mFramesReady = int64_t(state.mLoopCount) * (loopEnd - loopStart)
+ + mFrameCount - mPosition;
+ }
mState = state;
valid = true;
}
}
- if (!valid) {
+ if (!valid || mPosition > mFrameCount) {
ALOGE("%s client pushed an invalid state, shutting down", __func__);
mIsShutdown = true;
return (ssize_t) NO_INIT;
}
+ mFramesReadySafe = clampToSize(mFramesReady);
// This may overflow, but client is not supposed to rely on it
mCblk->u.mStatic.mBufferPosition = (uint32_t) position;
}
@@ -815,9 +823,10 @@
return (status_t) positionOrStatus;
}
size_t position = (size_t) positionOrStatus;
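+ // the contiguous region ends at the loop end while looping, otherwise at the end of the buffer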
+ size_t end = mState.mLoopCount != 0 ? mState.mLoopEnd : mFrameCount;
size_t avail;
- if (position < mEnd) {
- avail = mEnd - position;
+ if (position < end) {
+ avail = end - position;
size_t wanted = buffer->mFrameCount;
if (avail < wanted) {
buffer->mFrameCount = avail;
@@ -830,7 +839,10 @@
buffer->mFrameCount = 0;
buffer->mRaw = NULL;
}
- buffer->mNonContig = 0; // FIXME should be > 0 for looping
+ // As mFramesReady is the total remaining frames in the static audio track,
+ // it is always larger or equal to avail.
+ LOG_ALWAYS_FATAL_IF(mFramesReady < avail);
+ buffer->mNonContig = mFramesReady == INT64_MAX ? SIZE_MAX : clampToSize(mFramesReady - avail);
mUnreleased = avail;
return NO_ERROR;
}
@@ -838,6 +850,7 @@
void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
{
size_t stepCount = buffer->mFrameCount;
+ LOG_ALWAYS_FATAL_IF(!(stepCount <= mFramesReady));
LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
if (stepCount == 0) {
// prevent accidental re-use of buffer
@@ -854,11 +867,10 @@
ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount);
newPosition = mFrameCount;
} else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
+ newPosition = mState.mLoopStart;
if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
- newPosition = mState.mLoopStart;
setFlags = CBLK_LOOP_CYCLE;
} else {
- mEnd = mFrameCount; // this is what allows playback to continue after the loop
setFlags = CBLK_LOOP_FINAL;
}
}
@@ -866,6 +878,10 @@
setFlags |= CBLK_BUFFER_END;
}
mPosition = newPosition;
+ if (mFramesReady != INT64_MAX) {
+ mFramesReady -= stepCount;
+ }
+ mFramesReadySafe = clampToSize(mFramesReady);
cblk->mServer += stepCount;
// This may overflow, but client is not supposed to rely on it
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index 2a8b2c6..81dad41 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -674,10 +674,14 @@
void Drm::binderDied(const wp<IBinder> &the_late_who)
{
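+ // detach the listener first, under mEventLock, so no further events are delivered while the plugin is torn down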
+ mEventLock.lock();
+ mListener.clear();
+ mEventLock.unlock();
+
+ Mutex::Autolock autoLock(mLock);
delete mPlugin;
mPlugin = NULL;
closeFactory();
- mListener.clear();
}
} // namespace android
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index c120898..d461af3 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2125,6 +2125,7 @@
// immutable with respect to future writes.
//
// It is thus safe for another thread to read the AudioCache.
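+ // take mLock while setting mCommandComplete and signaling, so a waiter cannot miss the signal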
+ Mutex::Autolock lock(mLock);
mCommandComplete = true;
mSignal.signal();
}
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index d446cec..e7a26b6 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -56,6 +56,7 @@
mIsWidevine(false),
mUIDValid(uidValid),
mUID(uid),
+ mFd(-1),
mDrmManagerClient(NULL),
mMetaDataSize(-1ll),
mBitrate(-1ll),
@@ -70,7 +71,10 @@
mHttpSource.clear();
mUri.clear();
mUriHeaders.clear();
- mFd = -1;
+ if (mFd >= 0) {
+ close(mFd);
+ mFd = -1;
+ }
mOffset = 0;
mLength = 0;
setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
@@ -270,6 +274,7 @@
mLooper->unregisterHandler(id());
mLooper->stop();
}
+ resetDataSource();
}
void NuPlayer::GenericSource::prepareAsync() {
@@ -312,6 +317,7 @@
mIsWidevine = false;
mDataSource = new FileSource(mFd, mOffset, mLength);
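+ // FileSource now owns the fd; forget it so resetDataSource() does not close it a second time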
+ mFd = -1;
}
if (mDataSource == NULL) {
@@ -999,11 +1005,12 @@
return -1;
}
-status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
+status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
msg->setInt32("trackIndex", trackIndex);
msg->setInt32("select", select);
+ msg->setInt64("timeUs", timeUs);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -1016,11 +1023,13 @@
void NuPlayer::GenericSource::onSelectTrack(sp<AMessage> msg) {
int32_t trackIndex, select;
+ int64_t timeUs;
CHECK(msg->findInt32("trackIndex", &trackIndex));
CHECK(msg->findInt32("select", &select));
+ CHECK(msg->findInt64("timeUs", &timeUs));
sp<AMessage> response = new AMessage;
- status_t err = doSelectTrack(trackIndex, select);
+ status_t err = doSelectTrack(trackIndex, select, timeUs);
response->setInt32("err", err);
uint32_t replyID;
@@ -1028,7 +1037,7 @@
response->postReply(replyID);
}
-status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select) {
+status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select, int64_t timeUs) {
if (trackIndex >= mSources.size()) {
return BAD_INDEX;
}
@@ -1081,6 +1090,23 @@
mFetchTimedTextDataGeneration++;
}
+ status_t eosResult; // ignored
+ if (mSubtitleTrack.mSource != NULL
+ && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchSubtitleDataGeneration);
+ msg->post();
+ }
+
+ if (mTimedTextTrack.mSource != NULL
+ && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, id());
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchTimedTextDataGeneration);
+ msg->post();
+ }
+
return OK;
} else if (!strncasecmp(mime, "audio/", 6) || !strncasecmp(mime, "video/", 6)) {
bool audio = !strncasecmp(mime, "audio/", 6);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 7a03df0..f2528a9 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -67,7 +67,7 @@
virtual size_t getTrackCount() const;
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual ssize_t getSelectedTrack(media_track_type type) const;
- virtual status_t selectTrack(size_t trackIndex, bool select);
+ virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
virtual status_t seekTo(int64_t seekTimeUs);
virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
@@ -164,7 +164,7 @@
ssize_t doGetSelectedTrack(media_track_type type) const;
void onSelectTrack(sp<AMessage> msg);
- status_t doSelectTrack(size_t trackIndex, bool select);
+ status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
void onSeek(sp<AMessage> msg);
status_t doSeek(int64_t seekTimeUs);
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 02e9caf..a26ef9e 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -139,7 +139,15 @@
return mLiveSession->getTrackInfo(trackIndex);
}
-status_t NuPlayer::HTTPLiveSource::selectTrack(size_t trackIndex, bool select) {
+ssize_t NuPlayer::HTTPLiveSource::getSelectedTrack(media_track_type type) const {
+ if (mLiveSession == NULL) {
+ return -1;
+ } else {
+ return mLiveSession->getSelectedTrack(type);
+ }
+}
+
+status_t NuPlayer::HTTPLiveSource::selectTrack(size_t trackIndex, bool select, int64_t /*timeUs*/) {
status_t err = mLiveSession->selectTrack(trackIndex, select);
if (err == OK) {
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 6b5f6af..bbb8981 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -42,7 +42,8 @@
virtual status_t getDuration(int64_t *durationUs);
virtual size_t getTrackCount() const;
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
- virtual status_t selectTrack(size_t trackIndex, bool select);
+ virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
+ virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
virtual status_t seekTo(int64_t seekTimeUs);
protected:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 47bd989..c01f16a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -33,6 +33,8 @@
#include "ATSParser.h"
+#include <cutils/properties.h>
+
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -165,8 +167,6 @@
mTimeDiscontinuityPending(false),
mFlushingAudio(NONE),
mFlushingVideo(NONE),
- mSkipRenderingAudioUntilMediaTimeUs(-1ll),
- mSkipRenderingVideoUntilMediaTimeUs(-1ll),
mNumFramesTotal(0ll),
mNumFramesDropped(0ll),
mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
@@ -455,8 +455,10 @@
size_t trackIndex;
int32_t select;
+ int64_t timeUs;
CHECK(msg->findSize("trackIndex", &trackIndex));
CHECK(msg->findInt32("select", &select));
+ CHECK(msg->findInt64("timeUs", &timeUs));
status_t err = INVALID_OPERATION;
@@ -470,7 +472,7 @@
}
if (trackIndex < inbandTracks) {
- err = mSource->selectTrack(trackIndex, select);
+ err = mSource->selectTrack(trackIndex, select, timeUs);
if (!select && err == OK) {
int32_t type;
@@ -606,8 +608,17 @@
instantiateDecoder(false, &mVideoDecoder);
}
- if (mAudioSink != NULL) {
- if (mOffloadAudio) {
+ // Don't try to re-open audio sink if there's an existing decoder.
+ if (mAudioSink != NULL && mAudioDecoder == NULL) {
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+ audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
+ bool canOffload = canOffloadStream(audioMeta, (videoFormat != NULL),
+ true /* is_streaming */, streamType);
+ if (canOffload) {
+ if (!mOffloadAudio) {
+ mRenderer->signalEnableOffloadAudio();
+ }
// open audio sink early under offload mode.
sp<AMessage> format = mSource->getFormat(true /*audio*/);
openAudioSink(format, true /*offloadOnly*/);
@@ -701,19 +712,14 @@
handleFlushComplete(audio, true /* isDecoder */);
finishFlushIfPossible();
- } else if (what == Decoder::kWhatOutputFormatChanged) {
+ } else if (what == Decoder::kWhatVideoSizeChanged) {
sp<AMessage> format;
CHECK(msg->findMessage("format", &format));
- if (audio) {
- openAudioSink(format, false /*offloadOnly*/);
- } else {
- // video
- sp<AMessage> inputFormat =
- mSource->getFormat(false /* audio */);
+ sp<AMessage> inputFormat =
+ mSource->getFormat(false /* audio */);
- updateVideoSize(inputFormat, format);
- }
+ updateVideoSize(inputFormat, format);
} else if (what == Decoder::kWhatShutdownCompleted) {
ALOGV("%s shutdown completed", audio ? "audio" : "video");
if (audio) {
@@ -779,7 +785,7 @@
break; // Finish anyways.
}
notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
- } else if (what == Decoder::kWhatDrainThisBuffer) {
+ } else if (what == Decoder::kWhatRenderBufferTime) {
renderBuffer(audio, msg);
} else {
ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
@@ -846,7 +852,7 @@
ALOGV("media rendering started");
notifyListener(MEDIA_STARTED, 0, 0);
} else if (what == Renderer::kWhatAudioOffloadTearDown) {
- ALOGV("Tear down audio offload, fall back to s/w path");
+ ALOGV("Tear down audio offload, fall back to s/w path if due to error.");
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
int32_t reason;
@@ -858,11 +864,11 @@
if (mVideoDecoder != NULL) {
mRenderer->flush(false /* audio */);
}
- mRenderer->signalDisableOffloadAudio();
- mOffloadAudio = false;
performSeek(positionUs, false /* needNotify */);
if (reason == Renderer::kDueToError) {
+ mRenderer->signalDisableOffloadAudio();
+ mOffloadAudio = false;
instantiateDecoder(true /* audio */, &mAudioDecoder);
}
}
@@ -967,8 +973,6 @@
mOffloadAudio = false;
mAudioEOS = false;
mVideoEOS = false;
- mSkipRenderingAudioUntilMediaTimeUs = -1;
- mSkipRenderingVideoUntilMediaTimeUs = -1;
mNumFramesTotal = 0;
mNumFramesDropped = 0;
mStarted = true;
@@ -1024,6 +1028,13 @@
mRenderer->setVideoFrameRate(rate);
}
+ if (mVideoDecoder != NULL) {
+ mVideoDecoder->setRenderer(mRenderer);
+ }
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->setRenderer(mRenderer);
+ }
+
postScanSources();
}
@@ -1182,16 +1193,27 @@
notify->setInt32("generation", mAudioDecoderGeneration);
if (mOffloadAudio) {
- *decoder = new DecoderPassThrough(notify);
+ *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
} else {
- *decoder = new Decoder(notify);
+ *decoder = new Decoder(notify, mSource, mRenderer);
}
} else {
sp<AMessage> notify = new AMessage(kWhatVideoNotify, id());
++mVideoDecoderGeneration;
notify->setInt32("generation", mVideoDecoderGeneration);
- *decoder = new Decoder(notify, mNativeWindow);
+ *decoder = new Decoder(notify, mSource, mRenderer, mNativeWindow);
+
+ // enable FRC if high-quality AV sync is requested, even if not
+ // queuing to native window, as this will even improve textureview
+ // playback.
+ {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("persist.sys.media.avsync", value, NULL) &&
+ (!strcmp("1", value) || !strcasecmp("true", value))) {
+ format->setInt32("auto-frc", 1);
+ }
+ }
}
(*decoder)->init();
(*decoder)->configure(format);
@@ -1280,33 +1302,6 @@
ALOGI("%s discontinuity (formatChange=%d, time=%d)",
audio ? "audio" : "video", formatChange, timeChange);
- if (audio) {
- mSkipRenderingAudioUntilMediaTimeUs = -1;
- } else {
- mSkipRenderingVideoUntilMediaTimeUs = -1;
- }
-
- if (timeChange) {
- sp<AMessage> extra;
- if (accessUnit->meta()->findMessage("extra", &extra)
- && extra != NULL) {
- int64_t resumeAtMediaTimeUs;
- if (extra->findInt64(
- "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
- ALOGI("suppressing rendering of %s until %lld us",
- audio ? "audio" : "video", resumeAtMediaTimeUs);
-
- if (audio) {
- mSkipRenderingAudioUntilMediaTimeUs =
- resumeAtMediaTimeUs;
- } else {
- mSkipRenderingVideoUntilMediaTimeUs =
- resumeAtMediaTimeUs;
- }
- }
- }
- }
-
mTimeDiscontinuityPending =
mTimeDiscontinuityPending || timeChange;
@@ -1350,6 +1345,12 @@
// This stream is unaffected by the discontinuity
return -EWOULDBLOCK;
}
+ } else if (err == ERROR_END_OF_STREAM
+ && doBufferAggregation && (mAggregateBuffer != NULL)) {
+ // send out the last bit of aggregated data
+ reply->setBuffer("buffer", mAggregateBuffer);
+ mAggregateBuffer.clear();
+ err = OK;
}
reply->setInt32("err", err);
@@ -1447,9 +1448,6 @@
void NuPlayer::renderBuffer(bool audio, const sp<AMessage> &msg) {
// ALOGV("renderBuffer %s", audio ? "audio" : "video");
- sp<AMessage> reply;
- CHECK(msg->findMessage("reply", &reply));
-
if ((audio && mFlushingAudio != NONE)
|| (!audio && mFlushingVideo != NONE)) {
// We're currently attempting to flush the decoder, in order
@@ -1460,40 +1458,15 @@
ALOGV("we're still flushing the %s decoder, sending its output buffer"
" right back.", audio ? "audio" : "video");
- reply->post();
return;
}
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
-
int64_t mediaTimeUs;
- CHECK(buffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
- int64_t &skipUntilMediaTimeUs =
- audio
- ? mSkipRenderingAudioUntilMediaTimeUs
- : mSkipRenderingVideoUntilMediaTimeUs;
-
- if (skipUntilMediaTimeUs >= 0) {
-
- if (mediaTimeUs < skipUntilMediaTimeUs) {
- ALOGV("dropping %s buffer at time %lld as requested.",
- audio ? "audio" : "video",
- mediaTimeUs);
-
- reply->post();
- return;
- }
-
- skipUntilMediaTimeUs = -1;
- }
+ CHECK(msg->findInt64("timeUs", &mediaTimeUs));
if (!audio && mCCDecoder->isSelected()) {
mCCDecoder->display(mediaTimeUs);
}
-
- mRenderer->queueBuffer(audio, buffer, reply);
}
void NuPlayer::updateVideoSize(
@@ -1593,7 +1566,6 @@
mScanSourcesPending = false;
decoder->signalFlush(newFormat);
- mRenderer->flush(audio);
FlushStatus newStatus =
needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER;
@@ -1682,10 +1654,11 @@
return err;
}
-status_t NuPlayer::selectTrack(size_t trackIndex, bool select) {
+status_t NuPlayer::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
msg->setSize("trackIndex", trackIndex);
msg->setInt32("select", select);
+ msg->setInt64("timeUs", timeUs);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 121f7dd..901cfbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -65,7 +65,7 @@
status_t setVideoScalingMode(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
status_t getSelectedTrack(int32_t type, Parcel* reply) const;
- status_t selectTrack(size_t trackIndex, bool select);
+ status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
status_t getCurrentPosition(int64_t *mediaUs);
void getStats(int64_t *mNumFramesTotal, int64_t *mNumFramesDropped);
@@ -181,9 +181,6 @@
FlushStatus mFlushingAudio;
FlushStatus mFlushingVideo;
- int64_t mSkipRenderingAudioUntilMediaTimeUs;
- int64_t mSkipRenderingVideoUntilMediaTimeUs;
-
int64_t mNumFramesTotal, mNumFramesDropped;
int32_t mVideoScalingMode;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 27f6131..e695c43 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -21,6 +21,9 @@
#include "NuPlayerDecoder.h"
+#include "NuPlayerRenderer.h"
+#include "NuPlayerSource.h"
+
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -35,9 +38,14 @@
NuPlayer::Decoder::Decoder(
const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer,
const sp<NativeWindowWrapper> &nativeWindow)
: mNotify(notify),
mNativeWindow(nativeWindow),
+ mSource(source),
+ mRenderer(renderer),
+ mSkipRenderingUntilMediaTimeUs(-1ll),
mBufferGeneration(0),
mPaused(true),
mComponentName("decoder") {
@@ -169,7 +177,9 @@
mInputBuffers.size(),
mOutputBuffers.size());
- requestCodecNotification();
+ if (mRenderer != NULL) {
+ requestCodecNotification();
+ }
mPaused = false;
}
@@ -191,6 +201,7 @@
}
mPendingInputMessages.clear();
+ mSkipRenderingUntilMediaTimeUs = -1;
}
void NuPlayer::Decoder::requestCodecNotification() {
@@ -217,6 +228,12 @@
msg->post();
}
+void NuPlayer::Decoder::setRenderer(const sp<Renderer> &renderer) {
+ sp<AMessage> msg = new AMessage(kWhatSetRenderer, id());
+ msg->setObject("renderer", renderer);
+ msg->post();
+}
+
void NuPlayer::Decoder::signalUpdateFormat(const sp<AMessage> &format) {
sp<AMessage> msg = new AMessage(kWhatUpdateFormat, id());
msg->setMessage("format", format);
@@ -342,8 +359,6 @@
}
}
-
-
if (buffer == NULL /* includes !hasBuffer */) {
int32_t streamErr = ERROR_END_OF_STREAM;
CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
@@ -375,6 +390,17 @@
handleError(streamErr);
}
} else {
+ sp<AMessage> extra;
+ if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
+ int64_t resumeAtMediaTimeUs;
+ if (extra->findInt64(
+ "resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
+ ALOGI("[%s] suppressing rendering until %lld us",
+ mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
+ mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
+ }
+ }
+
int64_t timeUs = 0;
uint32_t flags = 0;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
@@ -454,10 +480,27 @@
return false;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
- notify->setMessage("format", format);
- notify->post();
+ if (isVideo()) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatVideoSizeChanged);
+ notify->setMessage("format", format);
+ notify->post();
+ } else if (mRenderer != NULL) {
+ uint32_t flags;
+ int64_t durationUs;
+ bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
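+ // long audio-only content can use a deep buffer (larger, more power-efficient sink)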
+ if (!hasVideo &&
+ mSource->getDuration(&durationUs) == OK &&
+ durationUs
+ > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ } else {
+ flags = AUDIO_OUTPUT_FLAG_NONE;
+ }
+
+ mRenderer->openAudioSink(
+ format, false /* offloadOnly */, hasVideo, flags);
+ }
return true;
} else if (res == INFO_DISCONTINUITY) {
// nothing to do
@@ -485,21 +528,26 @@
reply->setSize("buffer-ix", bufferIx);
reply->setInt32("generation", mBufferGeneration);
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatDrainThisBuffer);
- notify->setBuffer("buffer", buffer);
- notify->setMessage("reply", reply);
- notify->post();
+ if (mSkipRenderingUntilMediaTimeUs >= 0) {
+ if (timeUs < mSkipRenderingUntilMediaTimeUs) {
+ ALOGV("[%s] dropping buffer at time %lld as requested.",
+ mComponentName.c_str(), (long long)timeUs);
- // FIXME: This should be handled after rendering is complete,
- // but Renderer needs it now
- if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- ALOGV("queueing eos [%s]", mComponentName.c_str());
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatEOS);
- notify->setInt32("err", ERROR_END_OF_STREAM);
- notify->post();
+ reply->post();
+ return true;
+ }
+
+ mSkipRenderingUntilMediaTimeUs = -1;
}
+
+ if (mRenderer != NULL) {
+ // send the buffer to renderer.
+ mRenderer->queueBuffer(!isVideo(), buffer, reply);
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ mRenderer->queueEOS(!isVideo(), ERROR_END_OF_STREAM);
+ }
+ }
+
return true;
}
@@ -508,6 +556,17 @@
int32_t render;
size_t bufferIx;
CHECK(msg->findSize("buffer-ix", &bufferIx));
+
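+ // for video, report the timestamp that is about to be rendered so NuPlayer can, e.g., drive closed-caption display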
+ if (isVideo()) {
+ int64_t timeUs;
+ sp<ABuffer> buffer = mOutputBuffers[bufferIx];
+ buffer->meta()->findInt64("timeUs", &timeUs);
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatRenderBufferTime);
+ notify->setInt64("timeUs", timeUs);
+ notify->post();
+ }
+
if (msg->findInt32("render", &render) && render) {
int64_t timestampNs;
CHECK(msg->findInt64("timestampNs", &timestampNs));
@@ -523,6 +582,10 @@
}
void NuPlayer::Decoder::onFlush() {
+ if (mRenderer != NULL) {
+ mRenderer->flush(!isVideo());
+ }
+
status_t err = OK;
if (mCodec != NULL) {
err = mCodec->flush();
@@ -594,6 +657,18 @@
break;
}
+ case kWhatSetRenderer:
+ {
+ bool hadNoRenderer = (mRenderer == NULL);
+ sp<RefBase> obj;
+ CHECK(msg->findObject("renderer", &obj));
+ mRenderer = static_cast<Renderer *>(obj.get());
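+ // codec notifications were deferred while there was no renderer; start them now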
+ if (hadNoRenderer && mRenderer != NULL) {
+ requestCodecNotification();
+ }
+ break;
+ }
+
case kWhatUpdateFormat:
{
sp<AMessage> format;
@@ -772,6 +847,10 @@
return seamless;
}
+bool NuPlayer::Decoder::isVideo() {
+ return mNativeWindow != NULL;
+}
+
struct CCData {
CCData(uint8_t type, uint8_t data1, uint8_t data2)
: mType(type), mData1(data1), mData2(data2) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index dba3eee..c6ceb4e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -30,11 +30,15 @@
struct NuPlayer::Decoder : public AHandler {
Decoder(const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer = NULL,
const sp<NativeWindowWrapper> &nativeWindow = NULL);
virtual void configure(const sp<AMessage> &format);
virtual void init();
+ virtual void setRenderer(const sp<Renderer> &renderer);
+
status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
virtual void signalFlush(const sp<AMessage> &format = NULL);
virtual void signalUpdateFormat(const sp<AMessage> &format);
@@ -45,8 +49,8 @@
enum {
kWhatFillThisBuffer = 'flTB',
- kWhatDrainThisBuffer = 'drTB',
- kWhatOutputFormatChanged = 'fmtC',
+ kWhatRenderBufferTime = 'rnBT',
+ kWhatVideoSizeChanged = 'viSC',
kWhatFlushCompleted = 'flsC',
kWhatShutdownCompleted = 'shDC',
kWhatEOS = 'eos ',
@@ -59,10 +63,10 @@
virtual void onMessageReceived(const sp<AMessage> &msg);
-private:
enum {
kWhatCodecNotify = 'cdcN',
kWhatConfigure = 'conf',
+ kWhatSetRenderer = 'setR',
kWhatGetInputBuffers = 'gInB',
kWhatInputBufferFilled = 'inpF',
kWhatRenderBuffer = 'rndr',
@@ -71,9 +75,13 @@
kWhatUpdateFormat = 'uFmt',
};
+private:
sp<AMessage> mNotify;
sp<NativeWindowWrapper> mNativeWindow;
+ sp<Source> mSource;
+ sp<Renderer> mRenderer;
+
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
sp<MediaCodec> mCodec;
@@ -89,6 +97,8 @@
Vector<bool> mInputBufferIsDequeued;
Vector<MediaBuffer *> mMediaBuffers;
+ int64_t mSkipRenderingUntilMediaTimeUs;
+
void handleError(int32_t err);
bool handleAnInputBuffer();
bool handleAnOutputBuffer();
@@ -110,6 +120,7 @@
bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
void rememberCodecSpecificData(const sp<AMessage> &format);
+ bool isVideo();
DISALLOW_EVIL_CONSTRUCTORS(Decoder);
};
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index f7aacdd..d2721ed 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -21,6 +21,9 @@
#include "NuPlayerDecoderPassThrough.h"
+#include "NuPlayerRenderer.h"
+#include "NuPlayerSource.h"
+
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -36,15 +39,21 @@
static const size_t kMaxPendingBuffers = 1 + (kMaxCachedBytes / NuPlayer::kAggregateBufferSizeBytes);
NuPlayer::DecoderPassThrough::DecoderPassThrough(
- const sp<AMessage> &notify)
- : Decoder(notify),
+ const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer)
+ : Decoder(notify, source),
mNotify(notify),
+ mSource(source),
+ mRenderer(renderer),
+ mSkipRenderingUntilMediaTimeUs(-1ll),
mBufferGeneration(0),
mReachedEOS(true),
mPendingBuffersToFill(0),
mPendingBuffersToDrain(0),
mCachedBytes(0),
mComponentName("pass through decoder") {
+ ALOGW_IF(renderer == NULL, "expect a non-NULL renderer");
mDecoderLooper = new ALooper;
mDecoderLooper->setName("NuPlayerDecoderPassThrough");
mDecoderLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
@@ -90,10 +99,17 @@
requestMaxBuffers();
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
- notify->setMessage("format", format);
- notify->post();
+ uint32_t flags;
+ int64_t durationUs;
+ if (mSource->getDuration(&durationUs) == OK &&
+ durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ } else {
+ flags = AUDIO_OUTPUT_FLAG_NONE;
+ }
+
+ mRenderer->openAudioSink(
+ format, true /* offloadOnly */, false /* hasVideo */, flags);
}
bool NuPlayer::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
@@ -138,25 +154,52 @@
msg->findBuffer("buffer", &buffer);
if (buffer == NULL) {
mReachedEOS = true;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatEOS);
- notify->setInt32("err", ERROR_END_OF_STREAM);
- notify->post();
+ if (mRenderer != NULL) {
+ mRenderer->queueEOS(true /* audio */, ERROR_END_OF_STREAM);
+ }
return;
}
- mCachedBytes += buffer->size();
+ sp<AMessage> extra;
+ if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
+ int64_t resumeAtMediaTimeUs;
+ if (extra->findInt64(
+ "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
+ ALOGI("[%s] suppressing rendering until %lld us",
+ mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
+ mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
+ }
+ }
+
+ int32_t bufferSize = buffer->size();
+ mCachedBytes += bufferSize;
+
+ if (mSkipRenderingUntilMediaTimeUs >= 0) {
+ int64_t timeUs = 0;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ if (timeUs < mSkipRenderingUntilMediaTimeUs) {
+ ALOGV("[%s] dropping buffer at time %lld as requested.",
+ mComponentName.c_str(), (long long)timeUs);
+
+ onBufferConsumed(bufferSize);
+ return;
+ }
+
+ mSkipRenderingUntilMediaTimeUs = -1;
+ }
+
+ if (mRenderer == NULL) {
+ onBufferConsumed(bufferSize);
+ return;
+ }
sp<AMessage> reply = new AMessage(kWhatBufferConsumed, id());
reply->setInt32("generation", mBufferGeneration);
- reply->setInt32("size", buffer->size());
+ reply->setInt32("size", bufferSize);
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatDrainThisBuffer);
- notify->setBuffer("buffer", buffer);
- notify->setMessage("reply", reply);
- notify->post();
+ mRenderer->queueBuffer(true /* audio */, buffer, reply);
+
++mPendingBuffersToDrain;
ALOGV("onInputBufferFilled: #ToFill = %zu, #ToDrain = %zu, cachedBytes = %zu",
mPendingBuffersToFill, mPendingBuffersToDrain, mCachedBytes);
@@ -172,6 +215,11 @@
void NuPlayer::DecoderPassThrough::onFlush() {
++mBufferGeneration;
+ mSkipRenderingUntilMediaTimeUs = -1;
+
+ if (mRenderer != NULL) {
+ mRenderer->flush(true /* audio */);
+ }
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatFlushCompleted);
@@ -192,6 +240,7 @@
void NuPlayer::DecoderPassThrough::onShutdown() {
++mBufferGeneration;
+ mSkipRenderingUntilMediaTimeUs = -1;
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatShutdownCompleted);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index fb20257..7742d30 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -25,7 +25,9 @@
namespace android {
struct NuPlayer::DecoderPassThrough : public Decoder {
- DecoderPassThrough(const sp<AMessage> &notify);
+ DecoderPassThrough(const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer);
virtual void configure(const sp<AMessage> &format);
virtual void init();
@@ -45,16 +47,15 @@
private:
enum {
kWhatRequestABuffer = 'reqB',
- kWhatConfigure = 'conf',
- kWhatInputBufferFilled = 'inpF',
kWhatBufferConsumed = 'bufC',
- kWhatFlush = 'flus',
- kWhatShutdown = 'shuD',
};
sp<AMessage> mNotify;
sp<ALooper> mDecoderLooper;
+ sp<Source> mSource;
+ sp<Renderer> mRenderer;
+
/** Returns true if a buffer was requested.
* Returns false if at EOS or cache already full.
*/
@@ -68,6 +69,8 @@
void requestMaxBuffers();
void onShutdown();
+ int64_t mSkipRenderingUntilMediaTimeUs;
+
int32_t mBufferGeneration;
bool mReachedEOS;
// TODO mPendingBuffersToFill and mPendingBuffersToDrain are only for
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index b42b480..e09567a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -480,13 +480,16 @@
case INVOKE_ID_SELECT_TRACK:
{
int trackIndex = request.readInt32();
- return mPlayer->selectTrack(trackIndex, true /* select */);
+ int msec = 0;
+ // getCurrentPosition should always return OK
+ getCurrentPosition(&msec);
+ return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
}
case INVOKE_ID_UNSELECT_TRACK:
{
int trackIndex = request.readInt32();
- return mPlayer->selectTrack(trackIndex, false /* select */);
+ return mPlayer->selectTrack(trackIndex, false /* select */, 0xdeadbeef /* not used */);
}
case INVOKE_ID_GET_SELECTED_TRACK:
@@ -625,6 +628,16 @@
case MEDIA_PLAYBACK_COMPLETE:
{
if (mState != STATE_RESET_IN_PROGRESS) {
+ if (mAutoLoop) {
+ audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+ if (mAudioSink != NULL) {
+ streamType = mAudioSink->getAudioStreamType();
+ }
+ if (streamType == AUDIO_STREAM_NOTIFICATION) {
+ ALOGW("disabling auto-loop for notification");
+ mAutoLoop = false;
+ }
+ }
if (mLooping || (mAutoLoop
&& (mAudioSink == NULL || mAudioSink->realtime()))) {
mPlayer->seekToAsync(0);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 73bc829..42288a3 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -144,6 +144,10 @@
(new AMessage(kWhatDisableOffloadAudio, id()))->post();
}
+void NuPlayer::Renderer::signalEnableOffloadAudio() {
+ (new AMessage(kWhatEnableOffloadAudio, id()))->post();
+}
+
void NuPlayer::Renderer::pause() {
(new AMessage(kWhatPause, id()))->post();
}
@@ -407,6 +411,12 @@
break;
}
+ case kWhatEnableOffloadAudio:
+ {
+ onEnableOffloadAudio();
+ break;
+ }
+
case kWhatPause:
{
onPause();
@@ -1133,6 +1143,12 @@
++mAudioQueueGeneration;
}
+void NuPlayer::Renderer::onEnableOffloadAudio() {
+ Mutex::Autolock autoLock(mLock);
+ mFlags |= FLAG_OFFLOAD_AUDIO;
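+ // invalidate any audio buffers still queued under the previous (non-offload) setting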
+ ++mAudioQueueGeneration;
+}
+
void NuPlayer::Renderer::onPause() {
if (mPaused) {
ALOGW("Renderer::onPause() called while already paused!");
@@ -1416,6 +1432,9 @@
if (audioSinkChanged) {
onAudioSinkChanged();
}
+ if (offloadingAudio()) {
+ mAudioOffloadTornDown = false;
+ }
return offloadingAudio();
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 7b46a59..985ec49 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -53,6 +53,7 @@
void signalAudioSinkChanged();
void signalDisableOffloadAudio();
+ void signalEnableOffloadAudio();
void pause();
void resume();
@@ -114,6 +115,7 @@
kWhatCloseAudioSink = 'clsA',
kWhatStopAudioSink = 'stpA',
kWhatDisableOffloadAudio = 'noOA',
+ kWhatEnableOffloadAudio = 'enOA',
kWhatSetVideoFrameRate = 'sVFR',
};
@@ -200,6 +202,7 @@
void onFlush(const sp<AMessage> &msg);
void onAudioSinkChanged();
void onDisableOffloadAudio();
+ void onEnableOffloadAudio();
void onPause();
void onResume();
void onSetVideoFrameRate(float fps);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 2f06c31..2b0ac47 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -97,7 +97,7 @@
return INVALID_OPERATION;
}
- virtual status_t selectTrack(size_t /* trackIndex */, bool /* select */) {
+ virtual status_t selectTrack(size_t /* trackIndex */, bool /* select */, int64_t /* timeUs*/) {
return INVALID_OPERATION;
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 0e9d734..1413635 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1268,6 +1268,24 @@
static_cast<NativeWindowWrapper *>(obj.get()));
sp<ANativeWindow> nativeWindow = windowWrapper->getNativeWindow();
+ // START of temporary support for automatic FRC - THIS WILL BE REMOVED
+ int32_t autoFrc;
+ if (msg->findInt32("auto-frc", &autoFrc)) {
+ bool enabled = autoFrc;
+ OMX_CONFIG_BOOLEANTYPE config;
+ InitOMXParams(&config);
+ config.bEnabled = (OMX_BOOL)enabled;
+ status_t temp = mOMX->setConfig(
+ mNode, (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
+ &config, sizeof(config));
+ if (temp == OK) {
+ outputFormat->setInt32("auto-frc", enabled);
+ } else if (enabled) {
+ ALOGI("codec does not support requested auto-frc (err %d)", temp);
+ }
+ }
+ // END of temporary support for automatic FRC
+
int32_t tunneled;
if (msg->findInt32("feature-tunneled-playback", &tunneled) &&
tunneled != 0) {
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 6a56729..007c090 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -878,6 +878,16 @@
return;
}
+ if (mFlags & AUTO_LOOPING) {
+ audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+ if (mAudioSink != NULL) {
+ streamType = mAudioSink->getAudioStreamType();
+ }
+ if (streamType == AUDIO_STREAM_NOTIFICATION) {
+ ALOGW("disabling auto-loop for notification");
+ modifyFlags(AUTO_LOOPING, CLEAR);
+ }
+ }
if ((mFlags & LOOPING)
|| ((mFlags & AUTO_LOOPING)
&& (mAudioSink == NULL || mAudioSink->realtime()))) {
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 874c118..5eb4652 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1164,6 +1164,14 @@
return err;
}
+ssize_t LiveSession::getSelectedTrack(media_track_type type) const {
+ if (mPlaylist == NULL) {
+ return -1;
+ } else {
+ return mPlaylist->getSelectedTrack(type);
+ }
+}
+
bool LiveSession::canSwitchUp() {
// Allow upwards bandwidth switch when a stream has buffered at least 10 seconds.
status_t err = OK;
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 7aacca6..896a8fc 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -19,6 +19,7 @@
#define LIVE_SESSION_H_
#include <media/stagefright/foundation/AHandler.h>
+#include <media/mediaplayer.h>
#include <utils/String8.h>
@@ -73,6 +74,7 @@
size_t getTrackCount() const;
sp<AMessage> getTrackInfo(size_t trackIndex) const;
status_t selectTrack(size_t index, bool select);
+ ssize_t getSelectedTrack(media_track_type /* type */) const;
bool isSeekable() const;
bool hasDynamicDuration() const;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 1651dee..eb62c7a 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -66,6 +66,9 @@
virtual ~MediaGroup();
private:
+
+ friend struct M3UParser;
+
struct Media {
AString mName;
AString mURI;
@@ -356,6 +359,38 @@
return mSelectedIndex;
}
+ssize_t M3UParser::getSelectedTrack(media_track_type type) const {
+ MediaGroup::Type groupType;
+ switch (type) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ groupType = MediaGroup::TYPE_VIDEO;
+ break;
+
+ case MEDIA_TRACK_TYPE_AUDIO:
+ groupType = MediaGroup::TYPE_AUDIO;
+ break;
+
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ groupType = MediaGroup::TYPE_SUBS;
+ break;
+
+ default:
+ return -1;
+ }
+
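+ // ii accumulates the flattened track index contributed by groups of other types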
+ for (size_t i = 0, ii = 0; i < mMediaGroups.size(); ++i) {
+ sp<MediaGroup> group = mMediaGroups.valueAt(i);
+ size_t tracks = group->countTracks();
+ if (groupType != group->mType) {
+ ii += tracks;
+ } else if (group->mSelectedIndex >= 0) {
+ return ii + group->mSelectedIndex;
+ }
+ }
+
+ return -1;
+}
+
bool M3UParser::getTypeURI(size_t index, const char *key, AString *uri) const {
if (!mIsVariantPlaylist) {
*uri = mBaseURI;
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index d588afe..1cad060 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -21,6 +21,7 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/mediaplayer.h>
#include <utils/Vector.h>
namespace android {
@@ -46,6 +47,7 @@
size_t getTrackCount() const;
sp<AMessage> getTrackInfo(size_t index) const;
ssize_t getSelectedIndex() const;
+ ssize_t getSelectedTrack(media_track_type /* type */) const;
bool getTypeURI(size_t index, const char *key, AString *uri) const;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index e4ea034..d8eed5b 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -700,8 +700,7 @@
mRefreshState = (RefreshState)(mRefreshState + 1);
}
} else {
- ALOGE("failed to load playlist at url '%s'", mURI.c_str());
- notifyError(ERROR_IO);
+ ALOGE("failed to load playlist at url '%s'", uriDebugString(mURI).c_str());
return ERROR_IO;
}
} else {
@@ -724,26 +723,25 @@
}
void PlaylistFetcher::onDownloadNext() {
- if (refreshPlaylist() != OK) {
- return;
- }
-
- int32_t firstSeqNumberInPlaylist;
- if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
- "media-sequence", &firstSeqNumberInPlaylist)) {
- firstSeqNumberInPlaylist = 0;
- }
-
+ status_t err = refreshPlaylist();
+ int32_t firstSeqNumberInPlaylist = 0;
+ int32_t lastSeqNumberInPlaylist = 0;
bool discontinuity = false;
- const int32_t lastSeqNumberInPlaylist =
- firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+ if (mPlaylist != NULL) {
+ if (mPlaylist->meta() != NULL) {
+ mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist);
+ }
- if (mDiscontinuitySeq < 0) {
- mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+ lastSeqNumberInPlaylist =
+ firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+ if (mDiscontinuitySeq < 0) {
+ mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+ }
}
- if (mSeqNumber < 0) {
+ if (mPlaylist != NULL && mSeqNumber < 0) {
CHECK_GE(mStartTimeUs, 0ll);
if (mSegmentStartTimeUs < 0) {
@@ -785,19 +783,26 @@
}
}
+ // if mPlaylist is NULL then err must be non-OK; but the other way around might not be true
if (mSeqNumber < firstSeqNumberInPlaylist
- || mSeqNumber > lastSeqNumberInPlaylist) {
- if (!mPlaylist->isComplete() && mNumRetries < kMaxNumRetries) {
+ || mSeqNumber > lastSeqNumberInPlaylist
+ || err != OK) {
+ if ((err != OK || !mPlaylist->isComplete()) && mNumRetries < kMaxNumRetries) {
++mNumRetries;
- if (mSeqNumber > lastSeqNumberInPlaylist) {
+ if (mSeqNumber > lastSeqNumberInPlaylist || err != OK) {
+ // make sure we reach this retry logic on refresh failures
+ // by adding an err != OK clause to all enclosing if's.
+
// refresh in increasing fraction (1/2, 1/3, ...) of the
// playlist's target duration or 3 seconds, whichever is less
- int32_t targetDurationSecs;
- CHECK(mPlaylist->meta()->findInt32(
- "target-duration", &targetDurationSecs));
- int64_t delayUs = mPlaylist->size() * targetDurationSecs *
- 1000000ll / (1 + mNumRetries);
+ int64_t delayUs = kMaxMonitorDelayUs;
+ if (mPlaylist != NULL && mPlaylist->meta() != NULL) {
+ int32_t targetDurationSecs;
+ CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+ delayUs = mPlaylist->size() * targetDurationSecs *
+ 1000000ll / (1 + mNumRetries);
+ }
if (delayUs > kMaxMonitorDelayUs) {
delayUs = kMaxMonitorDelayUs;
}
@@ -809,13 +814,30 @@
return;
}
- // we've missed the boat, let's start from the lowest sequence
+ if (err != OK) {
+ notifyError(err);
+ return;
+ }
+
+ // we've missed the boat, let's start 3 segments prior to the latest sequence
// number available and signal a discontinuity.
ALOGI("We've missed the boat, restarting playback."
" mStartup=%d, was looking for %d in %d-%d",
mStartup, mSeqNumber, firstSeqNumberInPlaylist,
lastSeqNumberInPlaylist);
+ if (mStopParams != NULL) {
+ // we should have kept on fetching until we hit the boundaries in mStopParams,
+ // but since the segments we are supposed to fetch have already rolled off
+ // the playlist, i.e. we have already missed the boat, we inevitably have to
+ // skip.
+ for (size_t i = 0; i < mPacketSources.size(); i++) {
+ sp<ABuffer> formatChange = mSession->createFormatChangeBuffer();
+ mPacketSources.valueAt(i)->queueAccessUnit(formatChange);
+ }
+ stopAsync(/* clear = */ false);
+ return;
+ }
mSeqNumber = lastSeqNumberInPlaylist - 3;
if (mSeqNumber < firstSeqNumberInPlaylist) {
mSeqNumber = firstSeqNumberInPlaylist;
@@ -996,7 +1018,7 @@
return;
}
- status_t err = OK;
+ err = OK;
if (tsBuffer != NULL) {
AString method;
CHECK(buffer->meta()->findString("cipher-method", &method));
@@ -1256,6 +1278,11 @@
CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
if (mStartTimeUsNotify != NULL && timeUs > mStartTimeUs) {
+ int32_t firstSeqNumberInPlaylist;
+ if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+ "media-sequence", &firstSeqNumberInPlaylist)) {
+ firstSeqNumberInPlaylist = 0;
+ }
int32_t targetDurationSecs;
CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
@@ -1266,6 +1293,8 @@
// mStartTimeUs.
// mSegmentStartTimeUs >= 0
// mSegmentStartTimeUs is non-negative when adapting or switching tracks
+ // mSeqNumber > firstSeqNumberInPlaylist
+ // don't decrement mSeqNumber if it already points to the 1st segment
// timeUs - mStartTimeUs > targetDurationUs:
// This and the 2 above conditions should only happen when adapting in a live
// stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher
@@ -1275,6 +1304,7 @@
// stop as early as possible. The definition of being "too far ahead" is
// arbitrary; here we use targetDurationUs as threshold.
if (mStartup && mSegmentStartTimeUs >= 0
+ && mSeqNumber > firstSeqNumberInPlaylist
&& timeUs - mStartTimeUs > targetDurationUs) {
// we just guessed a starting timestamp that is too high when adapting in a
// live stream; re-adjust based on the actual timestamp extracted from the
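
The retry branch above computes its back-off as playlist size times target duration divided by (1 + retries), clamped to kMaxMonitorDelayUs, and defaults to the cap outright when refreshPlaylist() failed and no playlist is available. A rough stand-alone sketch of that formula (not the AOSP class; the 3 s cap is an assumption matching the comment in the hunk):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const int64_t kMaxMonitorDelayUs = 3000000ll;  // assumed 3 s cap

int64_t retryDelayUs(size_t playlistSize, int32_t targetDurationSecs,
                     int32_t numRetries, bool havePlaylist) {
    int64_t delayUs = kMaxMonitorDelayUs;       // default when refresh failed
    if (havePlaylist) {
        delayUs = (int64_t)playlistSize * targetDurationSecs *
                1000000ll / (1 + numRetries);
    }
    return std::min(delayUs, kMaxMonitorDelayUs);
}

int main() {
    for (int32_t retries = 1; retries <= 4; ++retries) {
        // 1-segment playlist with a 4 s target duration: 2.0 s, 1.33 s, 1.0 s, 0.8 s
        printf("retry %d -> %lld us\n", retries,
               (long long)retryDelayUs(1, 4, retries, true));
    }
    return 0;
}
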
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index c1dc0f9..482ccff 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -679,7 +679,7 @@
int64_t resumeAtMediaTimeUs =
mProgram->convertPTSToTimestamp(resumeAtPTS);
- extra->setInt64("resume-at-mediatimeUs", resumeAtMediaTimeUs);
+ extra->setInt64("resume-at-mediaTimeUs", resumeAtMediaTimeUs);
}
}
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index b9308fa..037c73b 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -611,15 +611,16 @@
// ExtendedAudioBufferProvider interface
-// Note that framesReady() takes a mutex on the control block using tryLock().
-// This could result in priority inversion if framesReady() is called by the normal mixer,
-// as the normal mixer thread runs at lower
-// priority than the client's callback thread: there is a short window within framesReady()
-// during which the normal mixer could be preempted, and the client callback would block.
-// Another problem can occur if framesReady() is called by the fast mixer:
-// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
-// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
+// framesReady() may return an approximation of the number of frames if called
+// from a different thread than the one calling Proxy->obtainBuffer() and
+// Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
+// AudioTrackServerProxy, so be especially careful when calling this with FastTracks.
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+ if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
+ // Static tracks return zero frames immediately upon stopping (for FastTracks).
+ // The remainder of the buffer is not drained.
+ return 0;
+ }
return mAudioTrackServerProxy->framesReady();
}
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 584e170..20a72b0 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -3563,7 +3563,8 @@
// check if one opened input is not needed any more after disconnecting one device
for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
desc = mInputs.valueAt(input_index);
- if (!(desc->mProfile->mSupportedDevices.types() & mAvailableInputDevices.types())) {
+ if (!(desc->mProfile->mSupportedDevices.types() & mAvailableInputDevices.types() &
+ ~AUDIO_DEVICE_BIT_IN)) {
ALOGV("checkInputsForDevice(): disconnecting adding input %d",
mInputs.keyAt(input_index));
inputs.add(mInputs.keyAt(input_index));
@@ -3578,7 +3579,7 @@
profile_index < mHwModules[module_index]->mInputProfiles.size();
profile_index++) {
sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
- if (profile->mSupportedDevices.types() & device) {
+ if (profile->mSupportedDevices.types() & device & ~AUDIO_DEVICE_BIT_IN) {
ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
profile_index, module_index);
if (profile->mSamplingRates[0] == 0) {
@@ -3795,7 +3796,9 @@
}
bool isScoConnected =
- (mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) != 0;
+ ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
+ ~AUDIO_DEVICE_BIT_IN) != 0) ||
+ ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
// suspend A2DP output if:
// (NOT already suspended) &&
// ((SCO device is connected &&
@@ -3980,9 +3983,17 @@
// usage to strategy mapping
switch (attr->usage) {
+ case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ if (isStreamActive(AUDIO_STREAM_RING) || isStreamActive(AUDIO_STREAM_ALARM)) {
+ return (uint32_t) STRATEGY_SONIFICATION;
+ }
+ if (isInCall()) {
+ return (uint32_t) STRATEGY_PHONE;
+ }
+ // FALL THROUGH
+
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
return (uint32_t) STRATEGY_MEDIA;
@@ -4182,7 +4193,7 @@
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- (getA2dpOutput() != 0) && !mA2dpSuspended) {
+ (getA2dpOutput() != 0)) {
device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
if (device) break;
device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
@@ -4217,7 +4228,7 @@
// A2DP speaker when forcing to speaker output
if (!isInCall() &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- (getA2dpOutput() != 0) && !mA2dpSuspended) {
+ (getA2dpOutput() != 0)) {
device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
if (device) break;
}
@@ -4279,7 +4290,7 @@
}
if ((device2 == AUDIO_DEVICE_NONE) &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- (getA2dpOutput() != 0) && !mA2dpSuspended) {
+ (getA2dpOutput() != 0)) {
device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
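
The earlier AudioPolicyManager hunks add "& ~AUDIO_DEVICE_BIT_IN" because every audio_devices_t input type carries that marker bit, so a raw AND of two input-device sets is non-zero even when they share no real device. A small stand-alone sketch (constants mirror system/audio.h values, assumed here for illustration):

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t AUDIO_DEVICE_BIT_IN           = 0x80000000u;
    const uint32_t AUDIO_DEVICE_IN_BUILTIN_MIC   = AUDIO_DEVICE_BIT_IN | 0x4u;
    const uint32_t AUDIO_DEVICE_IN_WIRED_HEADSET = AUDIO_DEVICE_BIT_IN | 0x10u;

    uint32_t supported = AUDIO_DEVICE_IN_BUILTIN_MIC;    // what a profile supports
    uint32_t available = AUDIO_DEVICE_IN_WIRED_HEADSET;  // what is currently attached

    // Without the mask the test is always non-zero for input devices,
    // because both operands share AUDIO_DEVICE_BIT_IN.
    assert((supported & available) != 0);

    // Masking the marker bit out compares only the real device bits.
    assert((supported & available & ~AUDIO_DEVICE_BIT_IN) == 0);
    return 0;
}
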
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index dd4067f..6a4a669 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -149,7 +149,7 @@
void AudioPolicyService::registerClient(const sp<IAudioPolicyServiceClient>& client)
{
- Mutex::Autolock _l(mLock);
+ Mutex::Autolock _l(mNotificationClientsLock);
uid_t uid = IPCThreadState::self()->getCallingUid();
if (mNotificationClients.indexOfKey(uid) < 0) {
@@ -168,14 +168,17 @@
// removeNotificationClient() is called when the client process dies.
void AudioPolicyService::removeNotificationClient(uid_t uid)
{
- Mutex::Autolock _l(mLock);
-
- mNotificationClients.removeItem(uid);
-
+ {
+ Mutex::Autolock _l(mNotificationClientsLock);
+ mNotificationClients.removeItem(uid);
+ }
#ifndef USE_LEGACY_AUDIO_POLICY
+ {
+ Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
mAudioPolicyManager->clearAudioPatches(uid);
}
+ }
#endif
}
@@ -186,7 +189,7 @@
void AudioPolicyService::doOnAudioPortListUpdate()
{
- Mutex::Autolock _l(mLock);
+ Mutex::Autolock _l(mNotificationClientsLock);
for (size_t i = 0; i < mNotificationClients.size(); i++) {
mNotificationClients.valueAt(i)->onAudioPortListUpdate();
}
@@ -212,7 +215,7 @@
void AudioPolicyService::doOnAudioPatchListUpdate()
{
- Mutex::Autolock _l(mLock);
+ Mutex::Autolock _l(mNotificationClientsLock);
for (size_t i = 0; i < mNotificationClients.size(); i++) {
mNotificationClients.valueAt(i)->onAudioPatchListUpdate();
}
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 4e68ab1..f1db309 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -495,7 +495,7 @@
AudioPolicyClient *mAudioPolicyClient;
DefaultKeyedVector< uid_t, sp<NotificationClient> > mNotificationClients;
-
+ Mutex mNotificationClientsLock; // protects mNotificationClients
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
audio_mode_t mPhoneState;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index f3a88a1..6e7824e 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -929,13 +929,6 @@
"stop preview: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
}
- {
- // Ideally we should recover the override after recording stopped, but
- // right now recording stream will live until here, so we are forced to
- // recover here. TODO: find a better way to handle that (b/17495165)
- SharedParameters::Lock l(mParameters);
- l.mParameters.recoverOverriddenJpegSize();
- }
// no break
case Parameters::WAITING_FOR_PREVIEW_WINDOW: {
SharedParameters::Lock l(mParameters);
@@ -1206,6 +1199,28 @@
mCameraService->playSound(CameraService::SOUND_RECORDING);
+ // Remove recording stream to prevent it from slowing down takePicture later
+ if (!l.mParameters.recordingHint && l.mParameters.isJpegSizeOverridden()) {
+ res = stopStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ // Clean up recording stream
+ res = mStreamingProcessor->deleteRecordingStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete recording stream before "
+ "stop preview: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ l.mParameters.recoverOverriddenJpegSize();
+ }
+
res = startPreviewL(l.mParameters, true);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to return to preview",
@@ -1388,6 +1403,34 @@
return res;
}
l.mParameters.state = Parameters::STILL_CAPTURE;
+
+ // Remove recording stream to prevent video snapshot jpeg logic kicking in
+ if (l.mParameters.isJpegSizeOverridden() &&
+ mStreamingProcessor->getRecordingStreamId() != NO_STREAM) {
+ res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ // Clean up recording stream
+ res = mStreamingProcessor->deleteRecordingStream();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete recording stream before "
+ "stop preview: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ l.mParameters.recoverOverriddenJpegSize();
+ }
break;
case Parameters::RECORD:
// Good to go for video snapshot
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 312a78c..40d53b3 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -168,6 +168,19 @@
faceIds = entry.data.i32;
}
+ entry = frame.find(ANDROID_SCALER_CROP_REGION);
+ if (entry.count < 4) {
+ ALOGE("%s: Camera %d: Unable to read crop region (count = %d)",
+ __FUNCTION__, client->getCameraId(), entry.count);
+ return res;
+ }
+
+ Parameters::CropRegion scalerCrop = {
+ static_cast<float>(entry.data.i32[0]),
+ static_cast<float>(entry.data.i32[1]),
+ static_cast<float>(entry.data.i32[2]),
+ static_cast<float>(entry.data.i32[3])};
+
faces.setCapacity(metadata.number_of_faces);
size_t maxFaces = metadata.number_of_faces;
@@ -183,26 +196,30 @@
camera_face_t face;
- face.rect[0] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 0]);
- face.rect[1] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 1]);
- face.rect[2] = l.mParameters.arrayXToNormalized(faceRects[i*4 + 2]);
- face.rect[3] = l.mParameters.arrayYToNormalized(faceRects[i*4 + 3]);
+ face.rect[0] = l.mParameters.arrayXToNormalizedWithCrop(
+ faceRects[i*4 + 0], scalerCrop);
+ face.rect[1] = l.mParameters.arrayYToNormalizedWithCrop(
+ faceRects[i*4 + 1], scalerCrop);
+ face.rect[2] = l.mParameters.arrayXToNormalizedWithCrop(
+ faceRects[i*4 + 2], scalerCrop);
+ face.rect[3] = l.mParameters.arrayYToNormalizedWithCrop(
+ faceRects[i*4 + 3], scalerCrop);
face.score = faceScores[i];
if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
face.id = faceIds[i];
- face.left_eye[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 0]);
- face.left_eye[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 1]);
- face.right_eye[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 2]);
- face.right_eye[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 3]);
- face.mouth[0] =
- l.mParameters.arrayXToNormalized(faceLandmarks[i*6 + 4]);
- face.mouth[1] =
- l.mParameters.arrayYToNormalized(faceLandmarks[i*6 + 5]);
+ face.left_eye[0] = l.mParameters.arrayXToNormalizedWithCrop(
+ faceLandmarks[i*6 + 0], scalerCrop);
+ face.left_eye[1] = l.mParameters.arrayYToNormalizedWithCrop(
+ faceLandmarks[i*6 + 1], scalerCrop);
+ face.right_eye[0] = l.mParameters.arrayXToNormalizedWithCrop(
+ faceLandmarks[i*6 + 2], scalerCrop);
+ face.right_eye[1] = l.mParameters.arrayYToNormalizedWithCrop(
+ faceLandmarks[i*6 + 3], scalerCrop);
+ face.mouth[0] = l.mParameters.arrayXToNormalizedWithCrop(
+ faceLandmarks[i*6 + 4], scalerCrop);
+ face.mouth[1] = l.mParameters.arrayYToNormalizedWithCrop(
+ faceLandmarks[i*6 + 5], scalerCrop);
} else {
face.id = 0;
face.left_eye[0] = face.left_eye[1] = -2000;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 42a5507..234247b 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2203,6 +2203,10 @@
return OK;
}
+bool Parameters::isJpegSizeOverridden() {
+ return pictureSizeOverriden;
+}
+
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
switch(state) {
@@ -2619,58 +2623,6 @@
return (y + 1000) * (previewCrop.height - 1) / 2000;
}
-int Parameters::arrayXToCrop(int x) const {
- CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
- return x - previewCrop.left;
-}
-
-int Parameters::arrayYToCrop(int y) const {
- CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
- return y - previewCrop.top;
-}
-
-int Parameters::cropXToNormalized(int x) const {
- CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
- return x * 2000 / (previewCrop.width - 1) - 1000;
-}
-
-int Parameters::cropYToNormalized(int y) const {
- CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
- return y * 2000 / (previewCrop.height - 1) - 1000;
-}
-
-int Parameters::arrayXToNormalized(int width) const {
- int ret = cropXToNormalized(arrayXToCrop(width));
-
- ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of "
- "lower bounds %d", ret);
- ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of "
- "upper bounds %d", ret);
-
- // Work-around for HAL pre-scaling the coordinates themselves
- if (quirks.meteringCropRegion) {
- return width * 2000 / (fastInfo.arrayWidth - 1) - 1000;
- }
-
- return ret;
-}
-
-int Parameters::arrayYToNormalized(int height) const {
- int ret = cropYToNormalized(arrayYToCrop(height));
-
- ALOG_ASSERT(ret >= -1000, "Calculated normalized value out of lower bounds"
- " %d", ret);
- ALOG_ASSERT(ret <= 1000, "Calculated normalized value out of upper bounds"
- " %d", ret);
-
- // Work-around for HAL pre-scaling the coordinates themselves
- if (quirks.meteringCropRegion) {
- return height * 2000 / (fastInfo.arrayHeight - 1) - 1000;
- }
-
- return ret;
-}
-
int Parameters::normalizedXToArray(int x) const {
// Work-around for HAL pre-scaling the coordinates themselves
@@ -2690,6 +2642,54 @@
return cropYToArray(normalizedYToCrop(y));
}
+
+Parameters::CropRegion Parameters::calculatePreviewCrop(
+ const CropRegion &scalerCrop) const {
+ float left, top, width, height;
+ float previewAspect = static_cast<float>(previewWidth) / previewHeight;
+ float cropAspect = scalerCrop.width / scalerCrop.height;
+
+ if (previewAspect > cropAspect) {
+ width = scalerCrop.width;
+ height = cropAspect * scalerCrop.height / previewAspect;
+
+ left = scalerCrop.left;
+ top = scalerCrop.top + (scalerCrop.height - height) / 2;
+ } else {
+ width = previewAspect * scalerCrop.width / cropAspect;
+ height = scalerCrop.height;
+
+ left = scalerCrop.left + (scalerCrop.width - width) / 2;
+ top = scalerCrop.top;
+ }
+
+ CropRegion previewCrop = {left, top, width, height};
+
+ return previewCrop;
+}
+
+int Parameters::arrayXToNormalizedWithCrop(int x,
+ const CropRegion &scalerCrop) const {
+ // Work-around for HAL pre-scaling the coordinates themselves
+ if (quirks.meteringCropRegion) {
+ return x * 2000 / (fastInfo.arrayWidth - 1) - 1000;
+ } else {
+ CropRegion previewCrop = calculatePreviewCrop(scalerCrop);
+ return (x - previewCrop.left) * 2000 / (previewCrop.width - 1) - 1000;
+ }
+}
+
+int Parameters::arrayYToNormalizedWithCrop(int y,
+ const CropRegion &scalerCrop) const {
+ // Work-around for HAL pre-scaling the coordinates themselves
+ if (quirks.meteringCropRegion) {
+ return y * 2000 / (fastInfo.arrayHeight - 1) - 1000;
+ } else {
+ CropRegion previewCrop = calculatePreviewCrop(scalerCrop);
+ return (y - previewCrop.top) * 2000 / (previewCrop.height - 1) - 1000;
+ }
+}
+
status_t Parameters::getFilteredSizes(Size limit, Vector<Size> *sizes) {
if (info == NULL) {
ALOGE("%s: Static metadata is not initialized", __FUNCTION__);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 815cc55..7e5be84 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -266,6 +266,8 @@
status_t overrideJpegSizeByVideoSize();
// Recover overridden jpeg size. Called during stopRecording.
status_t recoverOverriddenJpegSize();
+ // Whether the video snapshot jpeg size is currently overridden
+ bool isJpegSizeOverridden();
// Calculate the crop region rectangle based on current stream sizes
struct CropRegion {
@@ -325,13 +327,17 @@
// Note that this doesn't apply to the (deprecated) single FPS value.
static const int kFpsToApiScale = 1000;
- // Transform between (-1000,-1000)-(1000,1000) normalized coords from camera
- // API and HAL2 (0,0)-(activePixelArray.width/height) coordinates
- int arrayXToNormalized(int width) const;
- int arrayYToNormalized(int height) const;
+ // Transform from (-1000,-1000)-(1000,1000) normalized coords from camera
+ // API to HAL2 (0,0)-(activePixelArray.width/height) coordinates
int normalizedXToArray(int x) const;
int normalizedYToArray(int y) const;
+ // Transform from HAL3 (0,0)-(activePixelArray.width/height) coordinates to
+ // (-1000,-1000)-(1000,1000) normalized coordinates given a scaler crop
+ // region.
+ int arrayXToNormalizedWithCrop(int x, const CropRegion &scalerCrop) const;
+ int arrayYToNormalizedWithCrop(int y, const CropRegion &scalerCrop) const;
+
struct Range {
int min;
int max;
@@ -341,20 +347,20 @@
private:
- // Convert between HAL2 sensor array coordinates and
- // viewfinder crop-region relative array coordinates
+ // Convert from viewfinder crop-region relative array coordinates
+ // to HAL2 sensor array coordinates
int cropXToArray(int x) const;
int cropYToArray(int y) const;
- int arrayXToCrop(int x) const;
- int arrayYToCrop(int y) const;
- // Convert between viewfinder crop-region relative array coordinates
- // and camera API (-1000,1000)-(1000,1000) normalized coords
- int cropXToNormalized(int x) const;
- int cropYToNormalized(int y) const;
+ // Convert from camera API (-1000,-1000)-(1000,1000) normalized coords
+ // to viewfinder crop-region relative array coordinates
int normalizedXToCrop(int x) const;
int normalizedYToCrop(int y) const;
+ // Given a scaler crop region, calculate preview crop region based on
+ // preview aspect ratio.
+ CropRegion calculatePreviewCrop(const CropRegion &scalerCrop) const;
+
Vector<Size> availablePreviewSizes;
Vector<Size> availableVideoSizes;
// Get size list (that are no larger than limit) from static metadata.