Merge "camera2: Fix video snapshot for HAL 2.* devices." into lmp-dev
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 253c557..f061d22 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -54,7 +54,8 @@
CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
- CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2004,
+ CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
+ CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
};
/**
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 2442219..9cc208e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -278,6 +278,7 @@
bool mPrepareSync;
status_t mPrepareStatus;
audio_stream_type_t mStreamType;
+ Parcel* mAudioAttributesParcel;
bool mLoop;
float mLeftVolume;
float mRightVolume;
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 7540e07..2e663ec 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -58,20 +58,22 @@
// drm/drm_framework_common.h
DRM_ERROR_BASE = -2000,
- ERROR_DRM_UNKNOWN = DRM_ERROR_BASE,
- ERROR_DRM_NO_LICENSE = DRM_ERROR_BASE - 1,
- ERROR_DRM_LICENSE_EXPIRED = DRM_ERROR_BASE - 2,
- ERROR_DRM_SESSION_NOT_OPENED = DRM_ERROR_BASE - 3,
- ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 4,
- ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5,
- ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6,
- ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7,
- ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8,
- ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9,
- ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10,
+ ERROR_DRM_UNKNOWN = DRM_ERROR_BASE,
+ ERROR_DRM_NO_LICENSE = DRM_ERROR_BASE - 1,
+ ERROR_DRM_LICENSE_EXPIRED = DRM_ERROR_BASE - 2,
+ ERROR_DRM_SESSION_NOT_OPENED = DRM_ERROR_BASE - 3,
+ ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 4,
+ ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5,
+ ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6,
+ ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7,
+ ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8,
+ ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9,
+ ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10,
+ ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION = DRM_ERROR_BASE - 11,
+ ERROR_DRM_LAST_USED_ERRORCODE = DRM_ERROR_BASE - 11,
- ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500,
- ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999,
+ ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500,
+ ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999,
// Heartbeat Error Codes
HEARTBEAT_ERROR_BASE = -3000,
@@ -100,7 +102,7 @@
// returns true if err is a recognized DRM error code
static inline bool isCryptoError(status_t err) {
- return (ERROR_DRM_RESOURCE_BUSY <= err && err <= ERROR_DRM_UNKNOWN)
+ return (ERROR_DRM_LAST_USED_ERRORCODE <= err && err <= ERROR_DRM_UNKNOWN)
|| (ERROR_DRM_VENDOR_MIN <= err && err <= ERROR_DRM_VENDOR_MAX);
}
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 3486d21..1742fbe 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -582,9 +582,13 @@
}
binder->linkToDeath(gAudioPolicyServiceClient);
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
- gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
gLock.unlock();
+ // Registering the client takes the AudioPolicyService lock.
+ // Don't hold the AudioSystem lock at the same time.
+ gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
} else {
+ // There exists a benign race condition where gAudioPolicyService
+ // is set, but gAudioPolicyServiceClient is not yet registered.
gLock.unlock();
}
return gAudioPolicyService;
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index d2e181b..e2e6042 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -87,6 +87,7 @@
{"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P},
{"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
{"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
+ {"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
};
#if LOG_NDEBUG
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 6cd377a..9611ac7 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -50,6 +50,7 @@
mListener = NULL;
mCookie = NULL;
mStreamType = AUDIO_STREAM_MUSIC;
+ mAudioAttributesParcel = NULL;
mCurrentPosition = -1;
mSeekPosition = -1;
mCurrentState = MEDIA_PLAYER_IDLE;
@@ -68,6 +69,10 @@
MediaPlayer::~MediaPlayer()
{
ALOGV("destructor");
+ if (mAudioAttributesParcel != NULL) {
+ delete mAudioAttributesParcel;
+ mAudioAttributesParcel = NULL;
+ }
AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
disconnect();
IPCThreadState::self()->flushCommands();
@@ -237,6 +242,9 @@
{
if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
mPlayer->setAudioStreamType(mStreamType);
+ if (mAudioAttributesParcel != NULL) {
+ mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
+ }
mCurrentState = MEDIA_PLAYER_PREPARING;
return mPlayer->prepareAsync();
}
@@ -662,8 +670,17 @@
if (mPlayer != NULL) {
return mPlayer->setParameter(key, request);
}
- ALOGV("setParameter: no active player");
- return INVALID_OPERATION;
+ switch (key) {
+ case KEY_PARAMETER_AUDIO_ATTRIBUTES:
+ // no player, save the marshalled audio attributes
+ if (mAudioAttributesParcel != NULL) { delete mAudioAttributesParcel; }
+ mAudioAttributesParcel = new Parcel();
+ mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
+ return OK;
+ default:
+ ALOGV("setParameter: no active player");
+ return INVALID_OPERATION;
+ }
}
status_t MediaPlayer::getParameter(int key, Parcel *reply)
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index dacb144..3e0fc0d 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -60,7 +60,7 @@
return OK;
}
-player_type MediaPlayerFactory::getDefaultPlayerType() {
+static player_type getDefaultPlayerType() {
char value[PROPERTY_VALUE_MAX];
if (property_get("media.stagefright.use-awesome", value, NULL)
&& (!strcmp("1", value) || !strcasecmp("true", value))) {
@@ -181,16 +181,19 @@
int64_t offset,
int64_t /*length*/,
float /*curScore*/) {
- char buf[20];
- lseek(fd, offset, SEEK_SET);
- read(fd, buf, sizeof(buf));
- lseek(fd, offset, SEEK_SET);
+ if (getDefaultPlayerType()
+ == STAGEFRIGHT_PLAYER) {
+ char buf[20];
+ lseek(fd, offset, SEEK_SET);
+ read(fd, buf, sizeof(buf));
+ lseek(fd, offset, SEEK_SET);
- uint32_t ident = *((uint32_t*)buf);
+ uint32_t ident = *((uint32_t*)buf);
- // Ogg vorbis?
- if (ident == 0x5367674f) // 'OggS'
- return 1.0;
+ // Ogg vorbis?
+ if (ident == 0x5367674f) // 'OggS'
+ return 1.0;
+ }
return 0.0;
}
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index 5ddde19..55ff918 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -71,7 +71,6 @@
static status_t registerFactory_l(IFactory* factory,
player_type type);
- static player_type getDefaultPlayerType();
static Mutex sLock;
static tFactoryMap sFactoryMap;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f257ef3..8e1987a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
#include "../../libstagefright/include/WVMExtractor.h"
@@ -318,7 +319,14 @@
}
if (mVideoTrack.mSource != NULL) {
- notifyVideoSizeChanged(getFormat(false /* audio */));
+ sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+ sp<AMessage> msg = new AMessage;
+ err = convertMetaDataToMessage(meta, &msg);
+ if (err != OK) {
+ notifyPreparedAndCleanup(err);
+ return;
+ }
+ notifyVideoSizeChanged(msg);
}
notifyFlagsChanged(
@@ -422,7 +430,7 @@
mAudioTrack.mPackets =
new AnotherPacketSource(mAudioTrack.mSource->getFormat());
- readBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
}
if (mVideoTrack.mSource != NULL) {
@@ -430,7 +438,7 @@
mVideoTrack.mPackets =
new AnotherPacketSource(mVideoTrack.mSource->getFormat());
- readBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
@@ -459,6 +467,8 @@
if (mDecryptHandle != NULL) {
mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
}
+ mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);
+ mTimedTextTrack.mPackets = new AnotherPacketSource(NULL);
}
status_t NuPlayer::GenericSource::feedMoreTSData() {
@@ -615,6 +625,37 @@
}
break;
}
+
+ case kWhatGetFormat:
+ {
+ onGetFormatMeta(msg);
+ break;
+ }
+
+ case kWhatGetSelectedTrack:
+ {
+ onGetSelectedTrack(msg);
+ break;
+ }
+
+ case kWhatSelectTrack:
+ {
+ onSelectTrack(msg);
+ break;
+ }
+
+ case kWhatSeek:
+ {
+ onSeek(msg);
+ break;
+ }
+
+ case kWhatReadBuffer:
+ {
+ onReadBuffer(msg);
+ break;
+ }
+
default:
Source::onMessageReceived(msg);
break;
@@ -690,6 +731,34 @@
}
sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
+ sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+ msg->setInt32("audio", audio);
+
+ sp<AMessage> response;
+ void *format;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findPointer("format", &format));
+ return (MetaData *)format;
+ } else {
+ return NULL;
+ }
+}
+
+void NuPlayer::GenericSource::onGetFormatMeta(sp<AMessage> msg) const {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ sp<AMessage> response = new AMessage;
+ sp<MetaData> format = doGetFormatMeta(audio);
+ response->setPointer("format", format.get());
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
if (source == NULL) {
@@ -709,7 +778,7 @@
if (mIsWidevine && !audio) {
// try to read a buffer as we may not have been able to the last time
- readBuffer(MEDIA_TRACK_TYPE_VIDEO, -1ll);
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
status_t finalResult;
@@ -720,18 +789,7 @@
status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
if (!track->mPackets->hasBufferAvailable(&finalResult)) {
- readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
- }
-
- if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
- return result;
- }
-
- if (mSubtitleTrack.mSource != NULL) {
- CHECK(mSubtitleTrack.mPackets != NULL);
- }
- if (mTimedTextTrack.mSource != NULL) {
- CHECK(mTimedTextTrack.mPackets != NULL);
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
}
if (result != OK) {
@@ -825,6 +883,35 @@
}
ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+ sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+ msg->setInt32("type", type);
+
+ sp<AMessage> response;
+ int32_t index;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("index", &index));
+ return index;
+ } else {
+ return -1;
+ }
+}
+
+void NuPlayer::GenericSource::onGetSelectedTrack(sp<AMessage> msg) const {
+ int32_t tmpType;
+ CHECK(msg->findInt32("type", &tmpType));
+ media_track_type type = (media_track_type)tmpType;
+
+ sp<AMessage> response = new AMessage;
+ ssize_t index = doGetSelectedTrack(type);
+ response->setInt32("index", index);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
const Track *track = NULL;
switch (type) {
case MEDIA_TRACK_TYPE_VIDEO:
@@ -852,6 +939,34 @@
status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+ sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+ msg->setInt32("trackIndex", trackIndex);
+ msg->setInt32("select", select);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer::GenericSource::onSelectTrack(sp<AMessage> msg) {
+ int32_t trackIndex, select;
+ CHECK(msg->findInt32("trackIndex", &trackIndex));
+ CHECK(msg->findInt32("select", &select));
+
+ sp<AMessage> response = new AMessage;
+ status_t err = doSelectTrack(trackIndex, select);
+ response->setInt32("err", err);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select) {
if (trackIndex >= mSources.size()) {
return BAD_INDEX;
}
@@ -922,6 +1037,32 @@
}
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+ sp<AMessage> msg = new AMessage(kWhatSeek, id());
+ msg->setInt64("seekTimeUs", seekTimeUs);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer::GenericSource::onSeek(sp<AMessage> msg) {
+ int64_t seekTimeUs;
+ CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+
+ sp<AMessage> response = new AMessage;
+ status_t err = doSeek(seekTimeUs);
+ response->setInt32("err", err);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
if (mVideoTrack.mSource != NULL) {
int64_t actualTimeUs;
readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
@@ -1006,6 +1147,19 @@
return ab;
}
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
+ sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+ msg->setInt32("trackType", trackType);
+ msg->post();
+}
+
+void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
+ int32_t tmpType;
+ CHECK(msg->findInt32("trackType", &tmpType));
+ media_track_type trackType = (media_track_type)tmpType;
+ readBuffer(trackType);
+}
+
void NuPlayer::GenericSource::readBuffer(
media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
Track *track;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 1f13120..50ff98a 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -84,6 +84,11 @@
kWhatSendTimedTextData,
kWhatChangeAVSource,
kWhatPollBuffering,
+ kWhatGetFormat,
+ kWhatGetSelectedTrack,
+ kWhatSelectTrack,
+ kWhatSeek,
+ kWhatReadBuffer,
};
Vector<sp<MediaSource> > mSources;
@@ -140,6 +145,18 @@
void notifyPreparedAndCleanup(status_t err);
+ void onGetFormatMeta(sp<AMessage> msg) const;
+ sp<MetaData> doGetFormatMeta(bool audio) const;
+
+ void onGetSelectedTrack(sp<AMessage> msg) const;
+ ssize_t doGetSelectedTrack(media_track_type type) const;
+
+ void onSelectTrack(sp<AMessage> msg);
+ status_t doSelectTrack(size_t trackIndex, bool select);
+
+ void onSeek(sp<AMessage> msg);
+ status_t doSeek(int64_t seekTimeUs);
+
void onPrepareAsync();
void fetchTextData(
@@ -155,6 +172,8 @@
media_track_type trackType,
int64_t *actualTimeUs = NULL);
+ void postReadBuffer(media_track_type trackType);
+ void onReadBuffer(sp<AMessage> msg);
void readBuffer(
media_track_type trackType,
int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 1020cb3..df3e992 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -541,7 +541,13 @@
static_cast<NativeWindowWrapper *>(obj.get())));
if (obj != NULL) {
- mDeferredActions.push_back(new SeekAction(mCurrentPositionUs));
+ if (mStarted && mVideoDecoder != NULL) {
+ // Issue a seek to refresh the video screen only if started; otherwise
+ // the extractor may not yet be started and will assert.
+ // If the video decoder is not set (perhaps audio only in this case)
+ // do not perform a seek as it is not needed.
+ mDeferredActions.push_back(new SeekAction(mCurrentPositionUs));
+ }
// If there is a new surface texture, instantiate decoders
// again if possible.
@@ -988,6 +994,8 @@
ALOGV("both audio and video are flushed now.");
+ mPendingAudioAccessUnit.clear();
+
if (mTimeDiscontinuityPending) {
mRenderer->signalTimeDiscontinuity();
mTimeDiscontinuityPending = false;
@@ -1236,7 +1244,8 @@
CHECK(msg->findMessage("reply", &reply));
if ((audio && mFlushingAudio != NONE)
- || (!audio && mFlushingVideo != NONE)) {
+ || (!audio && mFlushingVideo != NONE)
+ || mSource == NULL) {
reply->setInt32("err", INFO_DISCONTINUITY);
reply->post();
return OK;
@@ -1244,14 +1253,47 @@
sp<ABuffer> accessUnit;
+ // Aggregate smaller buffers into a larger buffer.
+ // The goal is to reduce power consumption.
+ // Unfortunately this does not work with the software AAC decoder.
+ // TODO optimize buffer size for power consumption
+ // The offload read buffer size is 32 KB but 24 KB uses less power.
+ const int kAudioBigBufferSizeBytes = 24 * 1024;
+ bool doBufferAggregation = (audio && mOffloadAudio);
+ sp<ABuffer> biggerBuffer;
+ bool needMoreData = false;
+ int numSmallBuffers = 0;
+ bool gotTime = false;
+
bool dropAccessUnit;
do {
- status_t err = mSource->dequeueAccessUnit(audio, &accessUnit);
+ status_t err;
+ // Did we save an accessUnit earlier because of a discontinuity?
+ if (audio && (mPendingAudioAccessUnit != NULL)) {
+ accessUnit = mPendingAudioAccessUnit;
+ mPendingAudioAccessUnit.clear();
+ err = mPendingAudioErr;
+ ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit");
+ } else {
+ err = mSource->dequeueAccessUnit(audio, &accessUnit);
+ }
if (err == -EWOULDBLOCK) {
- return err;
+ if (biggerBuffer == NULL) {
+ return err;
+ } else {
+ break; // Reply with data that we already have.
+ }
} else if (err != OK) {
if (err == INFO_DISCONTINUITY) {
+ if (biggerBuffer != NULL) {
+ // We already have some data so save this for later.
+ mPendingAudioErr = err;
+ mPendingAudioAccessUnit = accessUnit;
+ accessUnit.clear();
+ ALOGD("feedDecoderInputData() save discontinuity for later");
+ break;
+ }
int32_t type;
CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
@@ -1356,7 +1398,52 @@
dropAccessUnit = true;
++mNumFramesDropped;
}
- } while (dropAccessUnit);
+
+ size_t smallSize = accessUnit->size();
+ needMoreData = false;
+ if (doBufferAggregation && (biggerBuffer == NULL)
+ // Don't bother if only room for a few small buffers.
+ && (smallSize < (kAudioBigBufferSizeBytes / 3))) {
+ // Create a larger buffer for combining smaller buffers from the extractor.
+ biggerBuffer = new ABuffer(kAudioBigBufferSizeBytes);
+ biggerBuffer->setRange(0, 0); // start empty
+ }
+
+ if (biggerBuffer != NULL) {
+ int64_t timeUs;
+ bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
+ // Will the smaller buffer fit?
+ size_t bigSize = biggerBuffer->size();
+ size_t roomLeft = biggerBuffer->capacity() - bigSize;
+ // Should we save this small buffer for the next big buffer?
+ // If the first small buffer did not have a timestamp then save
+ // any buffer that does have a timestamp until the next big buffer.
+ if ((smallSize > roomLeft)
+ || (!gotTime && (numSmallBuffers > 0) && smallTimestampValid)) {
+ mPendingAudioErr = err;
+ mPendingAudioAccessUnit = accessUnit;
+ accessUnit.clear();
+ } else {
+ // Append small buffer to the bigger buffer.
+ memcpy(biggerBuffer->base() + bigSize, accessUnit->data(), smallSize);
+ bigSize += smallSize;
+ biggerBuffer->setRange(0, bigSize);
+
+ // Keep looping until we run out of room in the biggerBuffer.
+ needMoreData = true;
+
+ // Grab time from first small buffer if available.
+ if ((numSmallBuffers == 0) && smallTimestampValid) {
+ biggerBuffer->meta()->setInt64("timeUs", timeUs);
+ gotTime = true;
+ }
+
+ ALOGV("feedDecoderInputData() #%d, smallSize = %zu, bigSize = %zu, capacity = %zu",
+ numSmallBuffers, smallSize, bigSize, biggerBuffer->capacity());
+ numSmallBuffers++;
+ }
+ }
+ } while (dropAccessUnit || needMoreData);
// ALOGV("returned a valid buffer of %s data", audio ? "audio" : "video");
@@ -1372,7 +1459,13 @@
mCCDecoder->decode(accessUnit);
}
- reply->setBuffer("buffer", accessUnit);
+ if (biggerBuffer != NULL) {
+ ALOGV("feedDecoderInputData() reply with aggregated buffer, %d", numSmallBuffers);
+ reply->setBuffer("buffer", biggerBuffer);
+ } else {
+ reply->setBuffer("buffer", accessUnit);
+ }
+
reply->post();
return OK;
@@ -1739,6 +1832,9 @@
++mScanSourcesGeneration;
mScanSourcesPending = false;
+ ++mAudioDecoderGeneration;
+ ++mVideoDecoderGeneration;
+
if (mRendererLooper != NULL) {
if (mRenderer != NULL) {
mRendererLooper->unregisterHandler(mRenderer->id());
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 0c7f531..89ae11c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -158,6 +158,9 @@
// notion of time has changed.
bool mTimeDiscontinuityPending;
+ sp<ABuffer> mPendingAudioAccessUnit;
+ status_t mPendingAudioErr;
+
FlushStatus mFlushingAudio;
FlushStatus mFlushingVideo;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 09324ae..35cd514 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -627,11 +627,11 @@
case MEDIA_PLAYBACK_COMPLETE:
{
if (mLooping && mState != STATE_RESET_IN_PROGRESS) {
- mLock.unlock();
mPlayer->seekToAsync(0);
- mLock.lock();
break;
}
+ mPlayer->pause();
+ mState = STATE_PAUSED;
// fall through
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index aad6e93..067784b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -893,8 +893,10 @@
}
void NuPlayer::Renderer::onPause() {
- CHECK(!mPaused);
-
+ if (mPaused) {
+ ALOGW("Renderer::onPause() called while already paused!");
+ return;
+ }
{
Mutex::Autolock autoLock(mLock);
++mAudioQueueGeneration;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 19a5908..9b03b71 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2313,7 +2313,6 @@
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval;
- CHECK(ret > 1);
return ret;
}
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0064293..1729f93 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2810,7 +2810,6 @@
{
if (objectType == AOT_SBR || objectType == AOT_PS) {
- const int32_t extensionSamplingFrequency = br.getBits(4);
objectType = br.getBits(5);
if (objectType == AOT_ESCAPE) {
@@ -2828,9 +2827,30 @@
const int32_t coreCoderDelay = br.getBits(14);
}
- const int32_t extensionFlag = br.getBits(1);
+ int32_t extensionFlag = -1;
+ if (br.numBitsLeft() > 0) {
+ extensionFlag = br.getBits(1);
+ } else {
+ switch (objectType) {
+ // 14496-3 4.5.1.1 extensionFlag
+ case AOT_AAC_LC:
+ extensionFlag = 0;
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_SCAL:
+ case AOT_ER_BSAC:
+ case AOT_ER_AAC_LD:
+ extensionFlag = 1;
+ break;
+ default:
+ TRESPASS();
+ break;
+ }
+ ALOGW("csd missing extension flag; assuming %d for object type %u.",
+ extensionFlag, objectType);
+ }
- if (numChannels == 0 ) {
+ if (numChannels == 0) {
int32_t channelsEffectiveNum = 0;
int32_t channelsNum = 0;
const int32_t ElementInstanceTag = br.getBits(4);
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 78758da..a8806c8 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -994,7 +994,6 @@
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval - 1;
- CHECK(ret > 1);
return ret;
}
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index da50c56..1fdb244 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -338,7 +338,7 @@
status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
String16("TimedEventQueue"),
- String16("media"));
+ String16("media")); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
if (status == NO_ERROR) {
mWakeLockToken = binder;
@@ -363,7 +363,7 @@
CHECK(mWakeLockToken != 0);
if (mPowerManager != 0) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
}
mWakeLockToken.clear();
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 8b4dd6f..6dd9b92 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#define LOG_TAG "SoftAAC2"
//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAAC2"
#include <utils/Log.h>
#include "SoftAAC2.h"
@@ -68,7 +68,6 @@
mOutputBufferCount(0),
mSignalledError(false),
mLastInHeader(NULL),
- mCurrentInputTime(0),
mOutputPortSettingsChange(NONE) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
@@ -610,9 +609,24 @@
notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
return;
}
+
+ // insert buffer size and time stamp
+ mBufferSizes.add(inBufferLength[0]);
+ if (mLastInHeader != inHeader) {
+ mBufferTimestamps.add(inHeader->nTimeStamp);
+ mLastInHeader = inHeader;
+ } else {
+ int64_t currentTime = mBufferTimestamps.top();
+ currentTime += mStreamInfo->aacSamplesPerFrame *
+ 1000000ll / mStreamInfo->sampleRate;
+ mBufferTimestamps.add(currentTime);
+ }
} else {
inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
inBufferLength[0] = inHeader->nFilledLen;
+ mLastInHeader = inHeader;
+ mBufferTimestamps.add(inHeader->nTimeStamp);
+ mBufferSizes.add(inHeader->nFilledLen);
}
// Fill and decode
@@ -621,136 +635,130 @@
INT prevSampleRate = mStreamInfo->sampleRate;
INT prevNumChannels = mStreamInfo->numChannels;
- if (inHeader != mLastInHeader) {
- mLastInHeader = inHeader;
- mCurrentInputTime = inHeader->nTimeStamp;
- } else {
- if (mStreamInfo->sampleRate) {
- mCurrentInputTime += mStreamInfo->aacSamplesPerFrame *
- 1000000ll / mStreamInfo->sampleRate;
- } else {
- ALOGW("no sample rate yet");
- }
- }
- mAnchorTimes.add(mCurrentInputTime);
aacDecoder_Fill(mAACDecoder,
inBuffer,
inBufferLength,
bytesValid);
- // run DRC check
- mDrcWrap.submitStreamData(mStreamInfo);
- mDrcWrap.update();
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
- AAC_DECODER_ERROR decoderErr =
- aacDecoder_DecodeFrame(mAACDecoder,
- tmpOutBuffer,
- 2048 * MAX_CHANNEL_COUNT,
- 0 /* flags */);
+ UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
+ inHeader->nFilledLen -= inBufferUsedLength;
+ inHeader->nOffset += inBufferUsedLength;
- if (decoderErr != AAC_DEC_OK) {
- ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
- }
+ AAC_DECODER_ERROR decoderErr;
+ do {
+ int numconsumed = mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes;
+ decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ 0 /* flags */);
- if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
- ALOGE("AAC_DEC_NOT_ENOUGH_BITS should never happen");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
- }
-
- if (bytesValid[0] != 0) {
- ALOGE("bytesValid[0] != 0 should never happen");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
- }
-
- size_t numOutBytes =
- mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
-
- if (decoderErr == AAC_DEC_OK) {
- if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
- mStreamInfo->frameSize * mStreamInfo->numChannels)) {
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- return;
+ numconsumed = (mStreamInfo->numTotalBytes + mStreamInfo->numBadBytes) - numconsumed;
+ if (numconsumed != 0) {
+ mDecodedSizes.add(numconsumed);
}
- UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
- inHeader->nFilledLen -= inBufferUsedLength;
- inHeader->nOffset += inBufferUsedLength;
- } else {
- ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
- memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
+ if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+ break;
+ }
- if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
- mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
+
+ if (bytesValid[0] != 0) {
+ ALOGE("bytesValid[0] != 0 should never happen");
mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
return;
}
- // Discard input buffer.
- inHeader->nFilledLen = 0;
+ size_t numOutBytes =
+ mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
- aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
-
- // fall through
- }
-
- /*
- * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
- * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
- * rate system and the sampling rate in the final output is actually
- * doubled compared with the core AAC decoder sampling rate.
- *
- * Explicit signalling is done by explicitly defining SBR audio object
- * type in the bitstream. Implicit signalling is done by embedding
- * SBR content in AAC extension payload specific to SBR, and hence
- * requires an AAC decoder to perform pre-checks on actual audio frames.
- *
- * Thus, we could not say for sure whether a stream is
- * AAC+/eAAC+ until the first data frame is decoded.
- */
- if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1
- if (mStreamInfo->sampleRate != prevSampleRate ||
- mStreamInfo->numChannels != prevNumChannels) {
- ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
- prevSampleRate, mStreamInfo->sampleRate,
- prevNumChannels, mStreamInfo->numChannels);
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
-
- if (inHeader->nFilledLen == 0) {
- inInfo->mOwnedByUs = false;
- mInputBufferCount++;
- inQueue.erase(inQueue.begin());
- mLastInHeader = NULL;
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (decoderErr == AAC_DEC_OK) {
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
}
+ } else {
+ ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
+
+ memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
+
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+
+ // Discard input buffer.
+ inHeader->nFilledLen = 0;
+
+ aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+
+ // fall through
+ }
+
+ /*
+ * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
+ * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
+ * rate system and the sampling rate in the final output is actually
+ * doubled compared with the core AAC decoder sampling rate.
+ *
+ * Explicit signalling is done by explicitly defining SBR audio object
+ * type in the bitstream. Implicit signalling is done by embedding
+ * SBR content in AAC extension payload specific to SBR, and hence
+ * requires an AAC decoder to perform pre-checks on actual audio frames.
+ *
+ * Thus, we could not say for sure whether a stream is
+ * AAC+/eAAC+ until the first data frame is decoded.
+ */
+ if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1
+ if (mStreamInfo->sampleRate != prevSampleRate ||
+ mStreamInfo->numChannels != prevNumChannels) {
+ ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
+ prevSampleRate, mStreamInfo->sampleRate,
+ prevNumChannels, mStreamInfo->numChannels);
+
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ mInputBufferCount++;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
+ return;
+ }
+ } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
+ ALOGW("Invalid AAC stream");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
return;
}
- } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
- ALOGW("Invalid AAC stream");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- return;
- }
- if (inHeader->nFilledLen == 0) {
- inInfo->mOwnedByUs = false;
- mInputBufferCount++;
- inQueue.erase(inQueue.begin());
- mLastInHeader = NULL;
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
- } else {
- ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
- }
+ if (inHeader && inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ mInputBufferCount++;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ } else {
+ ALOGV("inHeader->nFilledLen = %d", inHeader ? inHeader->nFilledLen : 0);
+ }
+ } while (decoderErr == AAC_DEC_OK);
}
int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels;
@@ -809,8 +817,9 @@
INT_PCM *outBuffer =
reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
+ int samplesize = mStreamInfo->numChannels * sizeof(int16_t);
if (outHeader->nOffset
- + mStreamInfo->frameSize * mStreamInfo->numChannels * sizeof(int16_t)
+ + mStreamInfo->frameSize * samplesize
> outHeader->nAllocLen) {
ALOGE("buffer overflow");
mSignalledError = true;
@@ -818,17 +827,67 @@
return;
}
- int32_t ns = outputDelayRingBufferGetSamples(outBuffer,
- mStreamInfo->frameSize * mStreamInfo->numChannels); // TODO: check for overflow
- if (ns != mStreamInfo->frameSize * mStreamInfo->numChannels) {
- ALOGE("not a complete frame of samples available");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
+
+ int available = outputDelayRingBufferSamplesAvailable();
+ int numSamples = outHeader->nAllocLen / samplesize;
+ if (numSamples > available) {
+ numSamples = available;
+ }
+ int64_t currentTime = 0;
+ if (available) {
+
+ int numFrames = numSamples / (mStreamInfo->frameSize * mStreamInfo->numChannels);
+ numSamples = numFrames * (mStreamInfo->frameSize * mStreamInfo->numChannels);
+
+ ALOGV("%d samples available (%d), or %d frames",
+ numSamples, available, numFrames);
+ int64_t *nextTimeStamp = &mBufferTimestamps.editItemAt(0);
+ currentTime = *nextTimeStamp;
+ int32_t *currentBufLeft = &mBufferSizes.editItemAt(0);
+ for (int i = 0; i < numFrames; i++) {
+ int32_t decodedSize = mDecodedSizes.itemAt(0);
+ mDecodedSizes.removeAt(0);
+ ALOGV("decoded %d of %d", decodedSize, *currentBufLeft);
+ if (*currentBufLeft > decodedSize) {
+ // adjust/interpolate next time stamp
+ *currentBufLeft -= decodedSize;
+ *nextTimeStamp += mStreamInfo->aacSamplesPerFrame *
+ 1000000ll / mStreamInfo->sampleRate;
+ ALOGV("adjusted nextTimeStamp/size to %lld/%d",
+ *nextTimeStamp, *currentBufLeft);
+ } else {
+ // move to next timestamp in list
+ if (mBufferTimestamps.size() > 0) {
+ mBufferTimestamps.removeAt(0);
+ nextTimeStamp = &mBufferTimestamps.editItemAt(0);
+ mBufferSizes.removeAt(0);
+ currentBufLeft = &mBufferSizes.editItemAt(0);
+ ALOGV("moved to next time/size: %lld/%d",
+ *nextTimeStamp, *currentBufLeft);
+ }
+ // try to limit output buffer size to match input buffers
+ // (e.g when an input buffer contained 4 "sub" frames, output
+ // at most 4 decoded units in the corresponding output buffer)
+ // This is optional. Remove the next three lines to fill the output
+ // buffer with as many units as available.
+ numFrames = i + 1;
+ numSamples = numFrames * mStreamInfo->frameSize * mStreamInfo->numChannels;
+ break;
+ }
+ }
+
+ ALOGV("getting %d from ringbuffer", numSamples);
+ int32_t ns = outputDelayRingBufferGetSamples(outBuffer, numSamples);
+ if (ns != numSamples) {
+ ALOGE("not a complete frame of samples available");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
}
- outHeader->nFilledLen = mStreamInfo->frameSize * mStreamInfo->numChannels
- * sizeof(int16_t);
+ outHeader->nFilledLen = numSamples * sizeof(int16_t);
+
if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) {
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
mEndOfOutput = true;
@@ -836,13 +895,13 @@
outHeader->nFlags = 0;
}
- outHeader->nTimeStamp = mAnchorTimes.isEmpty() ? 0 : mAnchorTimes.itemAt(0);
- mAnchorTimes.removeAt(0);
+ outHeader->nTimeStamp = currentTime;
mOutputBufferCount++;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
+ ALOGV("out timestamp %lld / %d", outHeader->nTimeStamp, outHeader->nFilledLen);
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
@@ -877,8 +936,10 @@
outHeader->nFilledLen = 0;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- outHeader->nTimeStamp = mAnchorTimes.itemAt(0);
- mAnchorTimes.removeAt(0);
+ outHeader->nTimeStamp = mBufferTimestamps.itemAt(0);
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
mOutputBufferCount++;
outInfo->mOwnedByUs = false;
@@ -899,7 +960,9 @@
// depend on fragments from the last one decoded.
// drain all existing data
drainDecoder();
- mAnchorTimes.clear();
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
mLastInHeader = NULL;
} else {
while (outputDelayRingBufferSamplesAvailable() > 0) {
@@ -955,7 +1018,9 @@
mOutputDelayRingBufferReadPos = 0;
mEndOfInput = false;
mEndOfOutput = false;
- mAnchorTimes.clear();
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
mLastInHeader = NULL;
// To make the codec behave the same before and after a reset, we need to invalidate the
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 865bd15..9fcb598 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -59,8 +59,9 @@
size_t mOutputBufferCount;
bool mSignalledError;
OMX_BUFFERHEADERTYPE *mLastInHeader;
- int64_t mCurrentInputTime;
- Vector<int64_t> mAnchorTimes;
+ Vector<int32_t> mBufferSizes;
+ Vector<int32_t> mDecodedSizes;
+ Vector<int64_t> mBufferTimestamps;
CDrcPresModeWrapper mDrcWrap;
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 2d0a25f..7544052 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -593,10 +593,10 @@
status = BAD_VALUE;
break;
}
- status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->release_audio_patch(hwDevice, mPatches[index]->mHalHandle);
+ status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
}
} else {
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
@@ -632,7 +632,7 @@
}
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
AudioParameter param;
param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 942bff6..97b1753 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -662,12 +662,14 @@
binder,
getWakeLockTag(),
String16("media"),
- uid);
+ uid,
+ true /* FIXME force oneway contrary to .aidl */);
} else {
status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
getWakeLockTag(),
- String16("media"));
+ String16("media"),
+ true /* FIXME force oneway contrary to .aidl */);
}
if (status == NO_ERROR) {
mWakeLockToken = binder;
@@ -687,7 +689,8 @@
if (mWakeLockToken != 0) {
ALOGV("releaseWakeLock_l() %s", mName);
if (mPowerManager != 0) {
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0,
+ true /* FIXME force oneway contrary to .aidl */);
}
mWakeLockToken.clear();
}
@@ -723,7 +726,8 @@
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
status_t status;
- status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
+ status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
+ true /* FIXME force oneway contrary to .aidl */);
ALOGV("acquireWakeLock_l() %s status %d", mName, status);
}
}
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index a805923..6adcde4 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -485,7 +485,9 @@
// request to reuse existing output stream if one is already opened to reach the RX device
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(rxDevice, mOutputs);
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
@@ -524,7 +526,9 @@
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
// request to reuse existing output stream if one is already opened to reach the TX
// path output device
if (output != AUDIO_IO_HANDLE_NONE) {
@@ -1016,7 +1020,9 @@
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
- output = selectOutput(outputs, flags);
+ // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
+ flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+ output = selectOutput(outputs, flags, format);
}
ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
"format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
@@ -1027,7 +1033,8 @@
}
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
- audio_output_flags_t flags)
+ audio_output_flags_t flags,
+ audio_format_t format)
{
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously build by getOutputsForDevice()).
@@ -1050,6 +1057,17 @@
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
if (!outputDesc->isDuplicated()) {
+ // if a valid format is specified, skip output if not compatible
+ if (format != AUDIO_FORMAT_INVALID) {
+ if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if (format != outputDesc->mFormat) {
+ continue;
+ }
+ } else if (!audio_is_linear_pcm(format)) {
+ continue;
+ }
+ }
+
int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
if (commonFlags > maxCommonFlags) {
outputFlags = outputs[i];
@@ -2307,7 +2325,9 @@
mOutputs);
// if the sink device is reachable via an opened output stream, request to go via
// this output stream by adding a second source to the patch description
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isDuplicated()) {
@@ -4634,8 +4654,15 @@
// - one A2DP device + another device: happens with duplicated output. In this case
// retain the device on the A2DP output as the other must not correspond to an active
// selection if not the speaker.
+ // - HDMI-CEC system audio mode only output: give priority to the first available device in this order.
if (device & AUDIO_DEVICE_OUT_SPEAKER) {
device = AUDIO_DEVICE_OUT_SPEAKER;
+ } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+ device = AUDIO_DEVICE_OUT_HDMI_ARC;
+ } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+ device = AUDIO_DEVICE_OUT_AUX_LINE;
+ } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+ device = AUDIO_DEVICE_OUT_SPDIF;
} else {
device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
}
@@ -6032,14 +6059,26 @@
return 0;
}
+ // For direct outputs, pick minimum sampling rate: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink
+ if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+ uint32_t samplingRate = UINT_MAX;
+ for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+ if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
+ samplingRate = mSamplingRates[i];
+ }
+ }
+ return (samplingRate == UINT_MAX) ? 0 : samplingRate;
+ }
+
uint32_t samplingRate = 0;
uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
// For mixed output and inputs, use max mixer sampling rates. Do not
// limit sampling rate otherwise
- if ((mType != AUDIO_PORT_TYPE_MIX) ||
- ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+ if (mType != AUDIO_PORT_TYPE_MIX) {
maxRate = UINT_MAX;
}
for (size_t i = 0; i < mSamplingRates.size(); i ++) {
@@ -6056,16 +6095,35 @@
if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
return AUDIO_CHANNEL_NONE;
}
-
audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
+
+ // For direct outputs, pick minimum channel count: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink
+ if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+ uint32_t channelCount = UINT_MAX;
+ for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+ uint32_t cnlCount;
+ if (mUseInChannelMask) {
+ cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+ } else {
+ cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+ }
+ if ((cnlCount < channelCount) && (cnlCount > 0)) {
+ channelMask = mChannelMasks[i];
+ channelCount = cnlCount;
+ }
+ }
+ return channelMask;
+ }
+
uint32_t channelCount = 0;
uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
// For mixed output and inputs, use max mixer channel count. Do not
// limit channel count otherwise
- if ((mType != AUDIO_PORT_TYPE_MIX) ||
- ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+ if (mType != AUDIO_PORT_TYPE_MIX) {
maxCount = UINT_MAX;
}
for (size_t i = 0; i < mChannelMasks.size(); i ++) {
@@ -6077,6 +6135,7 @@
}
if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
channelMask = mChannelMasks[i];
+ channelCount = cnlCount;
}
}
return channelMask;
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 57e015e..da0d95d 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -713,7 +713,8 @@
uint32_t delayMs);
audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
- audio_output_flags_t flags);
+ audio_output_flags_t flags,
+ audio_format_t format);
// samplingRate parameter is an in/out and so may be modified
sp<IOProfile> getInputProfile(audio_devices_t device,
uint32_t& samplingRate,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 40c8e5d..36a93b2 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1091,6 +1091,22 @@
res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
outputStreams);
+ // try to reconfigure jpeg to video size if configureStreams failed
+ if (res == BAD_VALUE) {
+
+ ALOGV("%s: Camera %d: configure still size to video size before recording"
+ , __FUNCTION__, mCameraId);
+ params.overrideJpegSizeByVideoSize();
+ res = updateProcessorStream(mJpegProcessor, params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't configure still image size to video size: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+ res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+ outputStreams);
+ }
+
if (res != OK) {
ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1130,6 +1146,7 @@
mCameraService->playSound(CameraService::SOUND_RECORDING);
+ l.mParameters.recoverOverriddenJpegSize();
res = startPreviewL(l.mParameters, true);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to return to preview",
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index cb9aca6..9849f4d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -445,11 +445,18 @@
if (mNewAEState) {
if (!mAeInPrecapture) {
// Waiting to see PRECAPTURE state
- if (mAETriggerId == mTriggerId &&
- mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
- ALOGV("%s: Got precapture start", __FUNCTION__);
- mAeInPrecapture = true;
- mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ if (mAETriggerId == mTriggerId) {
+ if (mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ ALOGV("%s: Got precapture start", __FUNCTION__);
+ mAeInPrecapture = true;
+ mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ } else if (mAEState == ANDROID_CONTROL_AE_STATE_CONVERGED ||
+ mAEState == ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED) {
+ // It is legal to transition to CONVERGED or FLASH_REQUIRED
+ // directly after a trigger.
+ ALOGV("%s: AE is already in good state, start capture", __FUNCTION__);
+ return STANDARD_CAPTURE;
+ }
}
} else {
// Waiting to see PRECAPTURE state end
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index e7f9a78..8d00590 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -249,6 +249,9 @@
// TODO: Pick maximum
pictureWidth = availableJpegSizes[0].width;
pictureHeight = availableJpegSizes[0].height;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureSizeOverriden = false;
params.setPictureSize(pictureWidth,
pictureHeight);
@@ -1381,8 +1384,8 @@
// PICTURE_SIZE
newParams.getPictureSize(&validatedParams.pictureWidth,
&validatedParams.pictureHeight);
- if (validatedParams.pictureWidth == pictureWidth ||
- validatedParams.pictureHeight == pictureHeight) {
+ if (validatedParams.pictureWidth != pictureWidth ||
+ validatedParams.pictureHeight != pictureHeight) {
Vector<Size> availablePictureSizes = getAvailableJpegSizes();
for (i = 0; i < availablePictureSizes.size(); i++) {
if ((availablePictureSizes[i].width ==
@@ -1798,6 +1801,7 @@
/** Update internal parameters */
*this = validatedParams;
+ updateOverriddenJpegSize();
/** Update external parameters calculated from the internal ones */
@@ -2115,6 +2119,52 @@
return OK;
}
+status_t Parameters::overrideJpegSizeByVideoSize() {
+ if (pictureSizeOverriden) {
+ ALOGV("Picture size has been overridden. Skip overriding");
+ return OK;
+ }
+
+ pictureSizeOverriden = true;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+ // This change of picture size is invisible to app layer.
+ // Do not update app visible params
+ return OK;
+}
+
+status_t Parameters::updateOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip checking");
+ return OK;
+ }
+
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+
+ if (pictureWidth <= videoWidth && pictureHeight <= videoHeight) {
+ // Picture size is now smaller than video size. No need to override anymore
+ return recoverOverriddenJpegSize();
+ }
+
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+
+ return OK;
+}
+
+status_t Parameters::recoverOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip recovering");
+ return OK;
+ }
+ pictureSizeOverriden = false;
+ pictureWidth = pictureWidthLastSet;
+ pictureHeight = pictureHeightLastSet;
+ return OK;
+}
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index d9d33c4..5e6e6ab 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -52,6 +52,9 @@
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
int pictureWidth, pictureHeight;
+ // Store the picture size before it is overridden by video snapshot
+ int pictureWidthLastSet, pictureHeightLastSet;
+ bool pictureSizeOverriden;
int32_t jpegThumbSize[2];
uint8_t jpegQuality, jpegThumbQuality;
@@ -253,6 +256,12 @@
// Add/update JPEG entries in metadata
status_t updateRequestJpeg(CameraMetadata *request) const;
+ /* Helper functions to override jpeg size for video snapshot */
+ // Override jpeg size by video size. Called during startRecording.
+ status_t overrideJpegSizeByVideoSize();
+ // Recover overridden jpeg size. Called during stopRecording.
+ status_t recoverOverriddenJpegSize();
+
// Calculate the crop region rectangle based on current stream sizes
struct CropRegion {
float left;
@@ -348,6 +357,12 @@
// Get max size (from the size array) that matches the given aspect ratio.
Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
+ // Helper function for overriding jpeg size for video snapshot
+ // Check if overridden jpeg size needs to be updated after Parameters::set.
+ // The behavior of this function is tailored to the implementation of Parameters::set.
+ // Do not use this function for other purposes.
+ status_t updateOverriddenJpegSize();
+
struct StreamConfiguration {
int32_t format;
int32_t width;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f78db5..fafe349 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -601,10 +601,18 @@
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
res = configureStreamsLocked();
+ // Stream configuration failed due to unsupported configuration.
+ // Device is back to the unconfigured state. Client might try other configurations.
+ if (res == BAD_VALUE && mStatus == STATUS_UNCONFIGURED) {
+ CLOGE("No streams configured");
+ return NULL;
+ }
+ // Stream configuration failed for another reason. Fatal.
if (res != OK) {
SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res);
return NULL;
}
+ // Stream configuration successfully configured to an empty stream configuration.
if (mStatus == STATUS_UNCONFIGURED) {
CLOGE("No streams configured");
return NULL;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 29ce38c..3c0e908 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -233,8 +233,7 @@
camera3_stream::usage = oldUsage;
camera3_stream::max_buffers = oldMaxBuffers;
- mState = STATE_CONSTRUCTED;
-
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
return OK;
}