Merge "AudioFlinger: fix repeated underruns for compressed audio" into mnc-dev
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index ffe974b..d2dc200 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -48,7 +48,8 @@
GET_CAMERA_INFO,
WAIT_UNTIL_IDLE,
FLUSH,
- PREPARE
+ PREPARE,
+ TEAR_DOWN
};
namespace {
@@ -365,6 +366,20 @@
return reply.readInt32();
}
+ virtual status_t tearDown(int streamId)
+ {
+ ALOGV("tearDown");
+ Parcel data, reply;
+
+ data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+ data.writeInt32(streamId);
+
+ remote()->transact(TEAR_DOWN, data, &reply);
+
+ reply.readExceptionCode();
+ return reply.readInt32();
+ }
+
private:
@@ -570,6 +585,13 @@
reply->writeInt32(prepare(streamId));
return NO_ERROR;
} break;
+ case TEAR_DOWN: {
+ CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+ int streamId = data.readInt32();
+ reply->writeNoException();
+ reply->writeInt32(tearDown(streamId));
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index b3dd140..a7bf8ab 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -138,6 +138,12 @@
* Preallocate buffers for a given output stream asynchronously.
*/
virtual status_t prepare(int streamId) = 0;
+
+ /**
+ * Free all unused buffers for a given output stream.
+ */
+ virtual status_t tearDown(int streamId) = 0;
+
};
// ----------------------------------------------------------------------------
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 50cf371..3074910 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -37,7 +37,8 @@
audio_source_t inputSource,
const String16 &opPackageName,
uint32_t sampleRate,
- uint32_t channels = 1);
+ uint32_t channels,
+ uint32_t outSampleRate = 0);
status_t initCheck() const;
@@ -78,11 +79,13 @@
status_t mInitCheck;
bool mStarted;
int32_t mSampleRate;
+ int32_t mOutSampleRate;
bool mTrackMaxAmplitude;
int64_t mStartTimeUs;
int16_t mMaxAmplitude;
int64_t mPrevSampleTimeUs;
+ int64_t mFirstSampleTimeUs;
int64_t mInitialReadTimeUs;
int64_t mNumFramesReceived;
int64_t mNumClientOwnedBuffers;
diff --git a/include/media/stagefright/MediaSync.h b/include/media/stagefright/MediaSync.h
index 1eef211..4b5cd05 100644
--- a/include/media/stagefright/MediaSync.h
+++ b/include/media/stagefright/MediaSync.h
@@ -37,6 +37,7 @@
class IGraphicBufferConsumer;
class IGraphicBufferProducer;
struct MediaClock;
+struct VideoFrameScheduler;
// MediaSync manages media playback and its synchronization to a media clock
// source. It can be also used for video-only playback.
@@ -103,6 +104,9 @@
// MediaClock::getMediaTime() and MediaClock::getRealTimeFor().
sp<const MediaClock> getMediaClock();
+ // Flush mediasync
+ void flush();
+
// Set the video frame rate hint - this is used by the video FrameScheduler
status_t setVideoFrameRateHint(float rate);
@@ -131,11 +135,10 @@
private:
enum {
- kWhatDrainVideo = 'dVid',
+ kWhatDrainVideo = 'dVid',
+ kWhatCheckFrameAvailable = 'cFrA',
};
- static const int MAX_OUTSTANDING_BUFFERS = 2;
-
// This is a thin wrapper class that lets us listen to
// IConsumerListener::onFrameAvailable from mInput.
class InputListener : public BnConsumerListener,
@@ -194,6 +197,8 @@
sp<IGraphicBufferConsumer> mInput;
sp<IGraphicBufferProducer> mOutput;
int mUsageFlagsFromOutput;
+ uint32_t mMaxAcquiredBufferCount; // max acquired buffer count
+ bool mReturnPendingInputFrame; // when set, return the next acquired input frame to the input instead of queuing it
sp<AudioTrack> mAudioTrack;
uint32_t mNativeSampleRateInHz;
@@ -202,6 +207,7 @@
int64_t mNextBufferItemMediaUs;
List<BufferItem> mBufferItems;
+ sp<VideoFrameScheduler> mFrameScheduler;
// Keep track of buffers received from |mInput|. This is needed because
// it's possible the consumer of |mOutput| could return a different
@@ -242,8 +248,9 @@
// onBufferReleasedByOutput releases a buffer back to the input.
void onFrameAvailableFromInput();
- // Send |bufferItem| to the output for rendering.
- void renderOneBufferItem_l(const BufferItem &bufferItem);
+ // Send |bufferItem| to the output for rendering. If this is not the only
+ // buffer sent for rendering, check for any dropped frames in |checkInUs| us.
+ void renderOneBufferItem_l(const BufferItem &bufferItem, int64_t checkInUs);
// This implements the onBufferReleased callback from IProducerListener.
// It gets called from an OutputListener.
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/include/media/stagefright/VideoFrameScheduler.h
similarity index 93%
rename from media/libmediaplayerservice/VideoFrameScheduler.h
rename to include/media/stagefright/VideoFrameScheduler.h
index b1765c9..9d97dfd 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.h
+++ b/include/media/stagefright/VideoFrameScheduler.h
@@ -39,6 +39,9 @@
// returns the vsync period for the main display
nsecs_t getVsyncPeriod();
+ // returns the current frames-per-second, or 0.f if not primed
+ float getFrameRate();
+
void release();
static const size_t kHistorySize = 8;
@@ -54,8 +57,9 @@
void reset(float fps = -1);
// keep current estimate, but restart phase
void restart();
- // returns period
+ // returns period or 0 if not yet primed
nsecs_t addSample(nsecs_t time);
+ nsecs_t getPeriod() const;
private:
nsecs_t mPeriod;
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 1d7aed2..6a51a76 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -374,6 +374,9 @@
size_t increment = mFrameCountP2 << 1;
size_t mask = increment - 1;
audio_track_cblk_t* cblk = mCblk;
+ // mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
+ // Should newFlush = cblk->u.mStreaming.mRear? Only problem is
+ // if you want to flush twice to the same rear location after a 32 bit wrap.
int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
((cblk->u.mStreaming.mFlush & ~mask) + increment);
android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
@@ -613,9 +616,18 @@
front = cblk->u.mStreaming.mFront;
if (flush != mFlush) {
// effectively obtain then release whatever is in the buffer
- size_t mask = (mFrameCountP2 << 1) - 1;
+ const size_t overflowBit = mFrameCountP2 << 1;
+ const size_t mask = overflowBit - 1;
int32_t newFront = (front & ~mask) | (flush & mask);
ssize_t filled = rear - newFront;
+ if (filled >= (ssize_t)overflowBit) {
+ // front and rear offsets span the overflow bit of the p2 mask
+ // so rebasing newFront on the front offset is off by the overflow bit.
+ // adjust newFront to match rear offset.
+ ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+ newFront += overflowBit;
+ filled -= overflowBit;
+ }
// Rather than shutting down on a corrupt flush, just treat it as a full flush
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 7f0cca2..4d1b587 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -21,7 +21,6 @@
StagefrightPlayer.cpp \
StagefrightRecorder.cpp \
TestPlayerStub.cpp \
- VideoFrameScheduler.cpp \
LOCAL_SHARED_LIBRARIES := \
libbinder \
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 98abe9c..e521fae 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -75,8 +75,6 @@
mOutputFd(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mCaptureTimeLapse(false),
- mCaptureFps(0.0f),
mStarted(false) {
ALOGV("Constructor");
@@ -567,32 +565,32 @@
return OK;
}
-status_t StagefrightRecorder::setParamTimeLapseEnable(int32_t timeLapseEnable) {
- ALOGV("setParamTimeLapseEnable: %d", timeLapseEnable);
+status_t StagefrightRecorder::setParamCaptureFpsEnable(int32_t captureFpsEnable) {
+ ALOGV("setParamCaptureFpsEnable: %d", captureFpsEnable);
- if(timeLapseEnable == 0) {
- mCaptureTimeLapse = false;
- } else if (timeLapseEnable == 1) {
- mCaptureTimeLapse = true;
+ if(captureFpsEnable == 0) {
+ mCaptureFpsEnable = false;
+ } else if (captureFpsEnable == 1) {
+ mCaptureFpsEnable = true;
} else {
return BAD_VALUE;
}
return OK;
}
-status_t StagefrightRecorder::setParamTimeLapseFps(float fps) {
- ALOGV("setParamTimeLapseFps: %.2f", fps);
+status_t StagefrightRecorder::setParamCaptureFps(float fps) {
+ ALOGV("setParamCaptureFps: %.2f", fps);
int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
// Not allowing time more than a day
if (timeUs <= 0 || timeUs > 86400*1E6) {
- ALOGE("Time between time lapse frame capture (%lld) is out of range [0, 1 Day]", timeUs);
+ ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", timeUs);
return BAD_VALUE;
}
mCaptureFps = fps;
- mTimeBetweenTimeLapseFrameCaptureUs = timeUs;
+ mTimeBetweenCaptureUs = timeUs;
return OK;
}
@@ -715,14 +713,14 @@
return setParamVideoTimeScale(timeScale);
}
} else if (key == "time-lapse-enable") {
- int32_t timeLapseEnable;
- if (safe_strtoi32(value.string(), &timeLapseEnable)) {
- return setParamTimeLapseEnable(timeLapseEnable);
+ int32_t captureFpsEnable;
+ if (safe_strtoi32(value.string(), &captureFpsEnable)) {
+ return setParamCaptureFpsEnable(captureFpsEnable);
}
} else if (key == "time-lapse-fps") {
float fps;
if (safe_strtof(value.string(), &fps)) {
- return setParamTimeLapseFps(fps);
+ return setParamCaptureFps(fps);
}
} else {
ALOGE("setParameter: failed to find key %s", key.string());
@@ -910,12 +908,32 @@
}
sp<MediaSource> StagefrightRecorder::createAudioSource() {
+ int32_t sourceSampleRate = mSampleRate;
+
+ if (mCaptureFpsEnable && mCaptureFps >= mFrameRate) {
+ // Upscale the sample rate for slow motion recording.
+ // Fail audio source creation if source sample rate is too high, as it could
+ // cause out-of-memory due to a large input buffer size. Audio recording
+ // probably doesn't make sense in that scenario anyway, since the slow-down
+ // factor is huge (e.g. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
+ const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+ sourceSampleRate =
+ (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
+ if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+ ALOGE("source sample rate out of range! "
+ "(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d",
+ mSampleRate, mCaptureFps, mFrameRate);
+ return NULL;
+ }
+ }
+
sp<AudioSource> audioSource =
new AudioSource(
mAudioSource,
mOpPackageName,
- mSampleRate,
- mAudioChannels);
+ sourceSampleRate,
+ mAudioChannels,
+ mSampleRate);
status_t err = audioSource->initCheck();
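
As a rough check of the rounding above (a hypothetical helper mirroring the expression, not the recorder's API): a 48 kHz microphone feeding 120 fps capture played back at 30 fps needs a 192 kHz source, which just fits the assumed 192 kHz cap, while the degenerate case mentioned in the comment is rejected.

    #include <cstdint>
    #include <cstdio>

    // Mirrors (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate with local names.
    static int32_t slowMotionSourceRate(int32_t sampleRate, float captureFps, int32_t frameRate) {
        return (int32_t)((sampleRate * captureFps + frameRate / 2) / frameRate);
    }

    int main() {
        printf("%d\n", slowMotionSourceRate(48000, 120.f, 30)); // 192000: accepted, equals SAMPLE_RATE_HZ_MAX
        printf("%d\n", slowMotionSourceRate(48000, 240.f, 1));  // 11520000: rejected as out of range
        return 0;
    }
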
@@ -1207,7 +1225,7 @@
mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
false /* decoder */, true /* hwCodec */, &codecs);
- if (!mCaptureTimeLapse) {
+ if (!mCaptureFpsEnable) {
// Don't clip for time lapse capture, as the encoder will have enough
// time to encode because of the slow capture rate of time lapse.
clipVideoBitRate();
@@ -1420,17 +1438,17 @@
Size videoSize;
videoSize.width = mVideoWidth;
videoSize.height = mVideoHeight;
- if (mCaptureTimeLapse) {
- if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
+ if (mCaptureFpsEnable) {
+ if (mTimeBetweenCaptureUs < 0) {
ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
- mTimeBetweenTimeLapseFrameCaptureUs);
+ mTimeBetweenCaptureUs);
return BAD_VALUE;
}
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenTimeLapseFrameCaptureUs);
+ mTimeBetweenCaptureUs);
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
@@ -1521,14 +1539,13 @@
format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
// set up time lapse/slow motion for surface source
- if (mCaptureTimeLapse) {
- if (mTimeBetweenTimeLapseFrameCaptureUs <= 0) {
- ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
- mTimeBetweenTimeLapseFrameCaptureUs);
+ if (mCaptureFpsEnable) {
+ if (mTimeBetweenCaptureUs <= 0) {
+ ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
+ mTimeBetweenCaptureUs);
return BAD_VALUE;
}
- format->setInt64("time-lapse",
- mTimeBetweenTimeLapseFrameCaptureUs);
+ format->setInt64("time-lapse", mTimeBetweenCaptureUs);
}
}
@@ -1547,7 +1564,7 @@
}
format->setInt32("priority", 0 /* realtime */);
- if (mCaptureTimeLapse) {
+ if (mCaptureFpsEnable) {
format->setFloat("operating-rate", mCaptureFps);
}
@@ -1647,13 +1664,15 @@
// This helps make sure that the "recording" sound is suppressed for
// camcorder applications in the recorded files.
// TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
- if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
+ // disable audio for time lapse recording
+ bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
+ if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
err = setupAudioEncoder(writer);
if (err != OK) return err;
mTotalBitRate += mAudioBitRate;
}
- if (mCaptureTimeLapse) {
+ if (mCaptureFpsEnable) {
mp4writer->setCaptureRate(mCaptureFps);
}
@@ -1734,7 +1753,7 @@
ALOGV("stop");
status_t err = OK;
- if (mCaptureTimeLapse && mCameraSourceTimeLapse != NULL) {
+ if (mCaptureFpsEnable && mCameraSourceTimeLapse != NULL) {
mCameraSourceTimeLapse->startQuickReadReturns();
mCameraSourceTimeLapse = NULL;
}
@@ -1809,8 +1828,9 @@
mMaxFileDurationUs = 0;
mMaxFileSizeBytes = 0;
mTrackEveryTimeDurationUs = 0;
- mCaptureTimeLapse = false;
- mTimeBetweenTimeLapseFrameCaptureUs = -1;
+ mCaptureFpsEnable = false;
+ mCaptureFps = 0.0f;
+ mTimeBetweenCaptureUs = -1;
mCameraSourceTimeLapse = NULL;
mIsMetaDataStoredInVideoBuffers = false;
mEncoderProfiles = MediaProfiles::getInstance();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 8af9278..da00bc7 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -111,12 +111,11 @@
int32_t mStartTimeOffsetMs;
int32_t mTotalBitRate;
- bool mCaptureTimeLapse;
+ bool mCaptureFpsEnable;
float mCaptureFps;
- int64_t mTimeBetweenTimeLapseFrameCaptureUs;
+ int64_t mTimeBetweenCaptureUs;
sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
-
String8 mParams;
bool mIsMetaDataStoredInVideoBuffers;
@@ -157,8 +156,8 @@
status_t setParamAudioNumberOfChannels(int32_t channels);
status_t setParamAudioSamplingRate(int32_t sampleRate);
status_t setParamAudioTimeScale(int32_t timeScale);
- status_t setParamTimeLapseEnable(int32_t timeLapseEnable);
- status_t setParamTimeLapseFps(float fps);
+ status_t setParamCaptureFpsEnable(int32_t timeLapseEnable);
+ status_t setParamCaptureFps(float fps);
status_t setParamVideoEncodingBitRate(int32_t bitRate);
status_t setParamVideoIFramesInterval(int32_t seconds);
status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 13a7d94..767417b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -29,8 +29,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
-
-#include <VideoFrameScheduler.h>
+#include <media/stagefright/VideoFrameScheduler.h>
#include <inttypes.h>
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 5210fc8..58ff113 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -138,7 +138,9 @@
}
void NuPlayer::RTSPSource::resume() {
- mHandler->resume();
+ if (mHandler != NULL) {
+ mHandler->resume();
+ }
}
status_t NuPlayer::RTSPSource::feedMoreTSData() {
@@ -295,13 +297,19 @@
sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
msg->setInt32("generation", ++mSeekGeneration);
msg->setInt64("timeUs", seekTimeUs);
- msg->post(200000ll);
- return OK;
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
}
void NuPlayer::RTSPSource::performSeek(int64_t seekTimeUs) {
if (mState != CONNECTED) {
+ finishSeek(INVALID_OPERATION);
return;
}
@@ -320,9 +328,11 @@
} else if (msg->what() == kWhatPerformSeek) {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
+ CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
if (generation != mSeekGeneration) {
// obsolete.
+ finishSeek(OK);
return;
}
@@ -368,6 +378,37 @@
case MyHandler::kWhatSeekDone:
{
mState = CONNECTED;
+ if (mSeekReplyID != NULL) {
+ // Unblock seekTo here in case we attempted to seek in a live stream
+ finishSeek(OK);
+ }
+ break;
+ }
+
+ case MyHandler::kWhatSeekPaused:
+ {
+ sp<AnotherPacketSource> source = getSource(true /* audio */);
+ if (source != NULL) {
+ source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+ /* extra */ NULL,
+ /* discard */ true);
+ }
+ source = getSource(false /* video */);
+ if (source != NULL) {
+ source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+ /* extra */ NULL,
+ /* discard */ true);
+ };
+
+ status_t err = OK;
+ msg->findInt32("err", &err);
+ finishSeek(err);
+
+ if (err == OK) {
+ int64_t timeUs;
+ CHECK(msg->findInt64("time", &timeUs));
+ mHandler->continueSeekAfterPause(timeUs);
+ }
break;
}
@@ -700,5 +741,12 @@
return true;
}
+void NuPlayer::RTSPSource::finishSeek(status_t err) {
+ CHECK(mSeekReplyID != NULL);
+ sp<AMessage> seekReply = new AMessage;
+ seekReply->setInt32("err", err);
+ seekReply->postReply(mSeekReplyID);
+ mSeekReplyID = NULL;
+}
} // namespace android
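
The seek path above moves from a fire-and-forget post() to the blocking request/reply pattern of AMessage. A condensed sketch of that pattern with a hypothetical handler (the ALooper registration and error paths are omitted, so this is illustrative rather than drop-in code):

    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    struct SeekHandler : public AHandler {
        virtual void onMessageReceived(const sp<AMessage> &msg) {
            if (msg->what() == 'seek') {
                sp<AReplyToken> replyID;
                CHECK(msg->senderAwaitsResponse(&replyID));  // caller is blocked in postAndAwaitResponse()
                sp<AMessage> reply = new AMessage;
                reply->setInt32("err", OK);
                reply->postReply(replyID);                   // unblocks the caller with the result
            }
        }
    };

    // Caller side, as in seekTo() above:
    //   sp<AMessage> msg = new AMessage('seek', handler);
    //   sp<AMessage> response;
    //   status_t err = msg->postAndAwaitResponse(&response);
    //   if (err == OK && response != NULL) CHECK(response->findInt32("err", &err));
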
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 5f2cf33..6438a1e 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -116,6 +116,8 @@
int64_t mEOSTimeoutAudio;
int64_t mEOSTimeoutVideo;
+ sp<AReplyToken> mSeekReplyID;
+
sp<AnotherPacketSource> getSource(bool audio);
void onConnected();
@@ -131,6 +133,7 @@
void setError(status_t err);
void startBufferingIfNecessary();
bool stopBufferingIfNecessary();
+ void finishSeek(status_t err);
DISALLOW_EVIL_CONSTRUCTORS(RTSPSource);
};
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 69128bd..b86c749 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -64,6 +64,7 @@
TimedEventQueue.cpp \
Utils.cpp \
VBRISeeker.cpp \
+ VideoFrameScheduler.cpp \
WAVExtractor.cpp \
WVMExtractor.cpp \
XINGSeeker.cpp \
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 34f0148..3505844 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -50,15 +50,19 @@
}
AudioSource::AudioSource(
- audio_source_t inputSource, const String16 &opPackageName, uint32_t sampleRate,
- uint32_t channelCount)
+ audio_source_t inputSource, const String16 &opPackageName,
+ uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate)
: mStarted(false),
mSampleRate(sampleRate),
+ mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
mPrevSampleTimeUs(0),
+ mFirstSampleTimeUs(-1ll),
mNumFramesReceived(0),
mNumClientOwnedBuffers(0) {
- ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
+ ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
+ sampleRate, outSampleRate, channelCount);
CHECK(channelCount == 1 || channelCount == 2);
+ CHECK(sampleRate > 0);
size_t minFrameCount;
status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
@@ -261,6 +265,15 @@
(int16_t *) buffer->data(), buffer->range_length() >> 1);
}
+ if (mSampleRate != mOutSampleRate) {
+ if (mFirstSampleTimeUs < 0) {
+ mFirstSampleTimeUs = timeUs;
+ }
+ timeUs = mFirstSampleTimeUs + (timeUs - mFirstSampleTimeUs)
+ * (int64_t)mSampleRate / (int64_t)mOutSampleRate;
+ buffer->meta_data()->setInt64(kKeyTime, timeUs);
+ }
+
*out = buffer;
return OK;
}
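
A hypothetical illustration of the restamping above (local values, not the AudioSource members): capturing at 192 kHz but writing a 48 kHz track, a buffer recorded 100 ms after the first sample is stamped 400 ms after it, stretching the audio 4x to match the slowed-down video.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t sampleRate = 192000;        // rate the microphone is actually captured at
        const int64_t outSampleRate = 48000;      // rate the track is written and played back at
        const int64_t firstSampleTimeUs = 1000000;

        int64_t timeUs = firstSampleTimeUs + 100000;  // captured 100 ms after the first sample
        timeUs = firstSampleTimeUs
                + (timeUs - firstSampleTimeUs) * sampleRate / outSampleRate;
        printf("%lld\n", (long long)timeUs);          // 1400000: stamped 400 ms after the first sample
        return 0;
    }
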
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 52077a7..0df3ec9 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -25,6 +25,7 @@
#include <media/AudioTrack.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaSync.h>
+#include <media/stagefright/VideoFrameScheduler.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -50,6 +51,8 @@
mReleaseCondition(),
mNumOutstandingBuffers(0),
mUsageFlagsFromOutput(0),
+ mMaxAcquiredBufferCount(1),
+ mReturnPendingInputFrame(false),
mNativeSampleRateInHz(0),
mNumFramesWritten(0),
mHasAudio(false),
@@ -121,6 +124,11 @@
ALOGE("setSurface: failed to connect (%d)", status);
return status;
}
+
+ if (mFrameScheduler == NULL) {
+ mFrameScheduler = new VideoFrameScheduler();
+ mFrameScheduler->init();
+ }
}
if (mOutput != NULL) {
@@ -209,6 +217,12 @@
bufferConsumer->setConsumerUsageBits(mUsageFlagsFromOutput);
*outBufferProducer = bufferProducer;
mInput = bufferConsumer;
+
+ // set undequeued buffer count
+ int minUndequeuedBuffers;
+ mOutput->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
+ mMaxAcquiredBufferCount = minUndequeuedBuffers;
+ bufferConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
}
return status;
}
@@ -232,6 +246,7 @@
mNextBufferItemMediaUs = -1;
}
mPlaybackRate = rate;
+ // TODO: update frame scheduler with this info
mMediaClock->setPlaybackRate(rate);
onDrainVideo_l();
}
@@ -325,13 +340,44 @@
mInput->setConsumerName(String8(name.c_str()));
}
+void MediaSync::flush() {
+ Mutex::Autolock lock(mMutex);
+ if (mFrameScheduler != NULL) {
+ mFrameScheduler->restart();
+ }
+ while (!mBufferItems.empty()) {
+ BufferItem *bufferItem = &*mBufferItems.begin();
+ returnBufferToInput_l(bufferItem->mGraphicBuffer, bufferItem->mFence);
+ mBufferItems.erase(mBufferItems.begin());
+ }
+ mNextBufferItemMediaUs = -1;
+ mNumFramesWritten = 0;
+ mReturnPendingInputFrame = true;
+ mReleaseCondition.signal();
+ mMediaClock->clearAnchor();
+}
+
status_t MediaSync::setVideoFrameRateHint(float rate) {
- // ignored until we add the FrameScheduler
- return rate >= 0.f ? OK : BAD_VALUE;
+ Mutex::Autolock lock(mMutex);
+ if (rate < 0.f) {
+ return BAD_VALUE;
+ }
+ if (mFrameScheduler != NULL) {
+ mFrameScheduler->init(rate);
+ }
+ return OK;
}
float MediaSync::getVideoFrameRate() {
- // we don't know the frame rate
+ Mutex::Autolock lock(mMutex);
+ if (mFrameScheduler != NULL) {
+ float fps = mFrameScheduler->getFrameRate();
+ if (fps > 0.f) {
+ return fps;
+ }
+ }
+
+ // we don't have or know the frame rate
return -1.f;
}
@@ -470,7 +516,7 @@
CHECK_EQ(res, (status_t)OK);
numFramesPlayedAt = nowUs;
numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
- //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt);
+ //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
}
//can't be negative until 12.4 hrs, test.
@@ -510,18 +556,30 @@
int64_t itemMediaUs = bufferItem->mTimestamp / 1000;
int64_t itemRealUs = getRealTime(itemMediaUs, nowUs);
- if (itemRealUs <= nowUs) {
+ // adjust video frame PTS based on vsync
+ itemRealUs = mFrameScheduler->schedule(itemRealUs * 1000) / 1000;
+ int64_t oneVsyncUs = (mFrameScheduler->getVsyncPeriod() / 1000);
+ int64_t twoVsyncsUs = oneVsyncUs * 2;
+
+ // post 2 display refreshes before rendering is due
+ if (itemRealUs <= nowUs + twoVsyncsUs) {
+ ALOGV("adjusting PTS from %lld to %lld",
+ (long long)bufferItem->mTimestamp / 1000, (long long)itemRealUs);
+ bufferItem->mTimestamp = itemRealUs * 1000;
+ bufferItem->mIsAutoTimestamp = false;
+
if (mHasAudio) {
if (nowUs - itemRealUs <= kMaxAllowedVideoLateTimeUs) {
- renderOneBufferItem_l(*bufferItem);
+ renderOneBufferItem_l(*bufferItem, nowUs + oneVsyncUs - itemRealUs);
} else {
// too late.
returnBufferToInput_l(
bufferItem->mGraphicBuffer, bufferItem->mFence);
+ mFrameScheduler->restart();
}
} else {
// always render video buffer in video-only mode.
- renderOneBufferItem_l(*bufferItem);
+ renderOneBufferItem_l(*bufferItem, nowUs + oneVsyncUs - itemRealUs);
// smooth out videos >= 10fps
mMediaClock->updateAnchor(
@@ -534,7 +592,7 @@
if (mNextBufferItemMediaUs == -1
|| mNextBufferItemMediaUs > itemMediaUs) {
sp<AMessage> msg = new AMessage(kWhatDrainVideo, this);
- msg->post(itemRealUs - nowUs);
+ msg->post(itemRealUs - nowUs - twoVsyncsUs);
mNextBufferItemMediaUs = itemMediaUs;
}
break;
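
A hypothetical timing example for the two-vsync lead used above (standalone numbers, not MediaSync state): on a 60 Hz display, a frame due 100 ms from now is not queued yet; the drain is re-posted so it fires roughly two refreshes before the frame's render time.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t oneVsyncUs = 16667;        // 60 Hz display refresh period
        const int64_t twoVsyncsUs = 2 * oneVsyncUs;
        const int64_t nowUs = 0;
        const int64_t itemRealUs = 100000;       // frame is due 100 ms from now

        if (itemRealUs <= nowUs + twoVsyncsUs) {
            printf("within two refreshes: queue the frame now\n");
        } else {
            // not yet within two refreshes of its render time: post the next drain
            // so it fires two vsyncs before the frame is due
            printf("post drain in %lld us\n", (long long)(itemRealUs - nowUs - twoVsyncsUs)); // 66666
        }
        return 0;
    }
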
@@ -545,10 +603,18 @@
void MediaSync::onFrameAvailableFromInput() {
Mutex::Autolock lock(mMutex);
+ const static nsecs_t kAcquireWaitTimeout = 2000000000; // 2 seconds
+
+ mReturnPendingInputFrame = false;
+
// If there are too many outstanding buffers, wait until a buffer is
// released back to the input in onBufferReleased.
- while (mNumOutstandingBuffers >= MAX_OUTSTANDING_BUFFERS) {
- mReleaseCondition.wait(mMutex);
+ // NOTE: BufferQueue allows dequeuing maxAcquiredBufferCount + 1 buffers
+ while (mNumOutstandingBuffers > mMaxAcquiredBufferCount
+ && !mIsAbandoned && !mReturnPendingInputFrame) {
+ if (mReleaseCondition.waitRelative(mMutex, kAcquireWaitTimeout) != OK) {
+ ALOGI("still waiting to release a buffer before acquire");
+ }
// If the sync is abandoned while we are waiting, the release
// condition variable will be broadcast, and we should just return
@@ -582,12 +648,21 @@
if (mBuffersFromInput.indexOfKey(bufferItem.mGraphicBuffer->getId()) >= 0) {
// Something is wrong since this buffer should be at our hands, bail.
+ ALOGE("received buffer multiple times from input");
mInput->consumerDisconnect();
onAbandoned_l(true /* isInput */);
return;
}
mBuffersFromInput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
+ // If flush happened while waiting for a buffer to be released, simply return it
+ // TRICKY: do it here after it is detached so that we don't have to cache mGraphicBuffer.
+ if (mReturnPendingInputFrame) {
+ mReturnPendingInputFrame = false;
+ returnBufferToInput_l(bufferItem.mGraphicBuffer, bufferItem.mFence);
+ return;
+ }
+
mBufferItems.push_back(bufferItem);
if (mBufferItems.size() == 1) {
@@ -595,7 +670,7 @@
}
}
-void MediaSync::renderOneBufferItem_l( const BufferItem &bufferItem) {
+void MediaSync::renderOneBufferItem_l(const BufferItem &bufferItem, int64_t checkInUs) {
IGraphicBufferProducer::QueueBufferInput queueInput(
bufferItem.mTimestamp,
bufferItem.mIsAutoTimestamp,
@@ -635,6 +710,12 @@
mBuffersSentToOutput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
ALOGV("queued buffer %#llx to output", (long long)bufferItem.mGraphicBuffer->getId());
+
+ // If we have already queued more than one buffer, check for any free buffers in case
+ // one of them was dropped - as BQ does not signal onBufferReleased in that case.
+ if (mBuffersSentToOutput.size() > 1) {
+ (new AMessage(kWhatCheckFrameAvailable, this))->post(checkInUs);
+ }
}
void MediaSync::onBufferReleasedByOutput(sp<IGraphicBufferProducer> &output) {
@@ -646,32 +727,38 @@
sp<GraphicBuffer> buffer;
sp<Fence> fence;
- status_t status = mOutput->detachNextBuffer(&buffer, &fence);
- ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
+ status_t status;
+ // NOTE: This is a workaround for a BufferQueue bug where onBufferReleased is
+ // called only for released buffers, but not for buffers that were dropped during
+ // acquire. Dropped buffers can still be detached as they are on the free list.
+ // TODO: remove if released callback happens also for dropped buffers
+ while ((status = mOutput->detachNextBuffer(&buffer, &fence)) != NO_MEMORY) {
+ ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
- if (status == NO_INIT) {
- // If the output has been abandoned, we can't do anything else,
- // since buffer is invalid.
- onAbandoned_l(false /* isInput */);
- return;
+ if (status == NO_INIT) {
+ // If the output has been abandoned, we can't do anything else,
+ // since buffer is invalid.
+ onAbandoned_l(false /* isInput */);
+ return;
+ }
+
+ ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
+
+ // If we've been abandoned, we can't return the buffer to the input, so just
+ // move on.
+ if (mIsAbandoned) {
+ return;
+ }
+
+ ssize_t ix = mBuffersSentToOutput.indexOfKey(buffer->getId());
+ if (ix < 0) {
+ // The buffer is unknown, maybe leftover, ignore.
+ return;
+ }
+ mBuffersSentToOutput.removeItemsAt(ix);
+
+ returnBufferToInput_l(buffer, fence);
}
-
- ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
-
- // If we've been abandoned, we can't return the buffer to the input, so just
- // move on.
- if (mIsAbandoned) {
- return;
- }
-
- ssize_t ix = mBuffersSentToOutput.indexOfKey(buffer->getId());
- if (ix < 0) {
- // The buffer is unknown, maybe leftover, ignore.
- return;
- }
- mBuffersSentToOutput.removeItemsAt(ix);
-
- returnBufferToInput_l(buffer, fence);
}
void MediaSync::returnBufferToInput_l(
@@ -679,6 +766,7 @@
ssize_t ix = mBuffersFromInput.indexOfKey(buffer->getId());
if (ix < 0) {
// The buffer is unknown, something is wrong, bail.
+ ALOGE("output returned unknown buffer");
mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
onAbandoned_l(false /* isInput */);
return;
@@ -741,6 +829,12 @@
break;
}
+ case kWhatCheckFrameAvailable:
+ {
+ onBufferReleasedByOutput(mOutput);
+ break;
+ }
+
default:
TRESPASS();
break;
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
similarity index 96%
rename from media/libmediaplayerservice/VideoFrameScheduler.cpp
rename to media/libstagefright/VideoFrameScheduler.cpp
index ce5f5fe..5fe9bf9 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -28,8 +28,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AUtils.h>
-
-#include "VideoFrameScheduler.h"
+#include <media/stagefright/VideoFrameScheduler.h>
namespace android {
@@ -56,7 +55,7 @@
static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
static const size_t kPrecision = 12;
-static const size_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
static const int64_t kMultiplesThresholdDiv = 4; // 25%
static const int64_t kReFitThresholdDiv = 100; // 1%
static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s; // 1 sec
@@ -258,7 +257,8 @@
mPhase = firstTime;
}
}
- ALOGV("priming[%zu] phase:%lld period:%lld", numSamplesToUse, mPhase, mPeriod);
+ ALOGV("priming[%zu] phase:%lld period:%lld",
+ numSamplesToUse, (long long)mPhase, (long long)mPeriod);
}
nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
@@ -316,6 +316,10 @@
return mPeriod;
}
+nsecs_t VideoFrameScheduler::PLL::getPeriod() const {
+ return mPrimed ? mPeriod : 0;
+}
+
/* ======================================================================= */
/* Frame Scheduler */
/* ======================================================================= */
@@ -382,6 +386,14 @@
return kDefaultVsyncPeriod;
}
+float VideoFrameScheduler::getFrameRate() {
+ nsecs_t videoPeriod = mPll.getPeriod();
+ if (videoPeriod > 0) {
+ return 1e9 / videoPeriod;
+ }
+ return 0.f;
+}
+
nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
nsecs_t origRenderTime = renderTime;
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index e64a7a1..0d0baf3 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -98,6 +98,7 @@
enum {
kWhatConnected = 'conn',
kWhatDisconnected = 'disc',
+ kWhatSeekPaused = 'spau',
kWhatSeekDone = 'sdon',
kWhatAccessUnit = 'accU',
@@ -220,6 +221,12 @@
msg->post();
}
+ void continueSeekAfterPause(int64_t timeUs) {
+ sp<AMessage> msg = new AMessage('see1', this);
+ msg->setInt64("time", timeUs);
+ msg->post();
+ }
+
bool isSeekable() const {
return mSeekable;
}
@@ -1180,7 +1187,7 @@
mCheckPending = true;
++mCheckGeneration;
- sp<AMessage> reply = new AMessage('see1', this);
+ sp<AMessage> reply = new AMessage('see0', this);
reply->setInt64("time", timeUs);
if (mPausing) {
@@ -1203,9 +1210,26 @@
break;
}
- case 'see1':
+ case 'see0':
{
// Session is paused now.
+ status_t err = OK;
+ msg->findInt32("result", &err);
+
+ int64_t timeUs;
+ CHECK(msg->findInt64("time", &timeUs));
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatSeekPaused);
+ notify->setInt32("err", err);
+ notify->setInt64("time", timeUs);
+ notify->post();
+ break;
+
+ }
+
+ case 'see1':
+ {
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index e109595..36e99dd 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -764,16 +764,22 @@
// first capture latency on HAL3 devices, and potentially on some HAL2
// devices. So create it unconditionally at preview start. As a drawback,
// this increases gralloc memory consumption for applications that don't
- // ever take a picture.
+ // ever take a picture. Do not enter this mode when jpeg stream will slow
+ // down preview.
// TODO: Find a better compromise, though this likely would involve HAL
// changes.
int lastJpegStreamId = mJpegProcessor->getStreamId();
- res = updateProcessorStream(mJpegProcessor, params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't pre-configure still image "
- "stream: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
+ // If the jpeg stream would slow down preview, make sure we remove it before starting preview
+ if (params.slowJpegMode) {
+ mJpegProcessor->deleteStream();
+ } else {
+ res = updateProcessorStream(mJpegProcessor, params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pre-configure still image "
+ "stream: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
}
bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
@@ -1453,9 +1459,12 @@
}
ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
-
int lastJpegStreamId = mJpegProcessor->getStreamId();
- res = updateProcessorStream(mJpegProcessor, l.mParameters);
+ // slowJpegMode will create jpeg stream in CaptureSequencer before capturing
+ if (!l.mParameters.slowJpegMode) {
+ res = updateProcessorStream(mJpegProcessor, l.mParameters);
+ }
+
// If video snapshot fail to configureStream, try override video snapshot size to
// video size
if (res == BAD_VALUE && l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
@@ -1943,6 +1952,39 @@
return mStreamingProcessor->stopStream();
}
+status_t Camera2Client::createJpegStreamL(Parameters ¶ms) {
+ status_t res = OK;
+ int lastJpegStreamId = mJpegProcessor->getStreamId();
+ if (lastJpegStreamId != NO_STREAM) {
+ return INVALID_OPERATION;
+ }
+
+ res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ res = mDevice->flush();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable flush device: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+
+ // Ideally we don't need this, but current camera device
+ // status tracking mechanism demands it.
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting device drain failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+
+ res = updateProcessorStream(mJpegProcessor, params);
+ return res;
+}
+
const int32_t Camera2Client::kPreviewRequestIdStart;
const int32_t Camera2Client::kPreviewRequestIdEnd;
const int32_t Camera2Client::kRecordingRequestIdStart;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index c288313..d50bf63 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -129,6 +129,9 @@
status_t stopStream();
+ // Used by slowJpegMode to create the jpeg stream when the precapture sequence is done
+ status_t createJpegStreamL(camera2::Parameters ¶ms);
+
static size_t calculateBufferSize(int width, int height,
int format, int stride);
@@ -145,6 +148,9 @@
static const char* kAutofocusLabel;
static const char* kTakepictureLabel;
+ // Used with stream IDs
+ static const int NO_STREAM = -1;
+
private:
/** ICamera interface-related private members */
typedef camera2::Parameters Parameters;
@@ -177,9 +183,6 @@
void setPreviewCallbackFlagL(Parameters ¶ms, int flag);
status_t updateRequests(Parameters ¶ms);
- // Used with stream IDs
- static const int NO_STREAM = -1;
-
template <typename ProcessorT>
status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
template <typename ProcessorT,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index d847e0f..5f7fd74 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -504,6 +504,17 @@
* - recording (if recording enabled)
*/
outputStreams.push(client->getPreviewStreamId());
+
+ int captureStreamId = client->getCaptureStreamId();
+ if (captureStreamId == Camera2Client::NO_STREAM) {
+ res = client->createJpegStreamL(l.mParameters);
+ if (res != OK || client->getCaptureStreamId() == Camera2Client::NO_STREAM) {
+ ALOGE("%s: Camera %d: cannot create jpeg stream for slowJpeg mode: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+ }
+
outputStreams.push(client->getCaptureStreamId());
if (l.mParameters.previewCallbackFlags &
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index c3a6842..442eb75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -889,15 +889,30 @@
previewCallbackOneShot = false;
previewCallbackSurface = false;
+ Size maxJpegSize = getMaxSize(getAvailableJpegSizes());
+ int64_t minFrameDurationNs = getJpegStreamMinFrameDurationNs(maxJpegSize);
+
+ slowJpegMode = false;
+ if (minFrameDurationNs > kSlowJpegModeThreshold) {
+ slowJpegMode = true;
+ // Slow jpeg devices do not support video snapshot without
+ // slowing down preview.
+ // TODO: support video size video snapshot only?
+ params.set(CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED,
+ CameraParameters::FALSE);
+ }
+
char value[PROPERTY_VALUE_MAX];
property_get("camera.disable_zsl_mode", value, "0");
- if (!strcmp(value,"1")) {
+ if (!strcmp(value,"1") || slowJpegMode) {
ALOGI("Camera %d: Disabling ZSL mode", cameraId);
zslMode = false;
} else {
zslMode = true;
}
+ ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
+
lightFx = LIGHTFX_NONE;
state = STOPPED;
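
A back-of-the-envelope check of the threshold applied above (hypothetical durations, not real camera metadata): kSlowJpegModeThreshold of 33.4 ms corresponds to roughly 29.9 fps, so a device whose largest-JPEG minimum frame duration is 50 ms (20 fps) enters slowJpegMode, while one at 33.3 ms (30 fps) does not.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t kSlowJpegModeThreshold = 33400000LL;   // 33.4 ms, as defined in Parameters.h below
        printf("threshold ~ %.2f fps\n", 1e9 / kSlowJpegModeThreshold);                      // ~29.94 fps
        printf("50.0 ms jpeg: %s\n", 50000000LL > kSlowJpegModeThreshold ? "slow" : "fast"); // slow
        printf("33.3 ms jpeg: %s\n", 33333333LL > kSlowJpegModeThreshold ? "slow" : "fast"); // fast
        return 0;
    }
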
@@ -2778,6 +2793,17 @@
return maxSize;
}
+Parameters::Size Parameters::getMaxSize(const Vector<Parameters::Size> &sizes) {
+ Size maxSize = {-1, -1};
+ for (size_t i = 0; i < sizes.size(); i++) {
+ if (sizes[i].width > maxSize.width ||
+ (sizes[i].width == maxSize.width && sizes[i].height > maxSize.height)) {
+ maxSize = sizes[i];
+ }
+ }
+ return maxSize;
+}
+
Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() {
const int STREAM_CONFIGURATION_SIZE = 4;
const int STREAM_FORMAT_OFFSET = 0;
@@ -2792,7 +2818,7 @@
camera_metadata_ro_entry_t availableStreamConfigs =
staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+ for (size_t i = 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
@@ -2803,11 +2829,52 @@
return scs;
}
+int64_t Parameters::getJpegStreamMinFrameDurationNs(Parameters::Size size) {
+ if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+ const int STREAM_DURATION_SIZE = 4;
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_DURATION_OFFSET = 3;
+ camera_metadata_ro_entry_t availableStreamMinDurations =
+ staticInfo(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+ for (size_t i = 0; i < availableStreamMinDurations.count; i+= STREAM_DURATION_SIZE) {
+ int64_t format = availableStreamMinDurations.data.i64[i + STREAM_FORMAT_OFFSET];
+ int64_t width = availableStreamMinDurations.data.i64[i + STREAM_WIDTH_OFFSET];
+ int64_t height = availableStreamMinDurations.data.i64[i + STREAM_HEIGHT_OFFSET];
+ int64_t duration = availableStreamMinDurations.data.i64[i + STREAM_DURATION_OFFSET];
+ if (format == HAL_PIXEL_FORMAT_BLOB && width == size.width && height == size.height) {
+ return duration;
+ }
+ }
+ } else {
+ Vector<Size> availableJpegSizes = getAvailableJpegSizes();
+ size_t streamIdx = availableJpegSizes.size();
+ for (size_t i = 0; i < availableJpegSizes.size(); i++) {
+ if (availableJpegSizes[i].width == size.width &&
+ availableJpegSizes[i].height == size.height) {
+ streamIdx = i;
+ break;
+ }
+ }
+ if (streamIdx != availableJpegSizes.size()) {
+ camera_metadata_ro_entry_t jpegMinDurations =
+ staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS);
+ if (streamIdx < jpegMinDurations.count) {
+ return jpegMinDurations.data.i64[streamIdx];
+ }
+ }
+ }
+ ALOGE("%s: cannot find min frame duration for jpeg size %dx%d",
+ __FUNCTION__, size.width, size.height);
+ return -1;
+}
+
SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
SortedVector<int32_t> outputFormats; // Non-duplicated output formats
if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
Vector<StreamConfiguration> scs = getStreamConfigurations();
- for (size_t i=0; i < scs.size(); i++) {
+ for (size_t i = 0; i < scs.size(); i++) {
const StreamConfiguration &sc = scs[i];
if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
outputFormats.add(sc.format);
@@ -2815,7 +2882,7 @@
}
} else {
camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
- for (size_t i=0; i < availableFormats.count; i++) {
+ for (size_t i = 0; i < availableFormats.count; i++) {
outputFormats.add(availableFormats.data.i32[i]);
}
}
@@ -2826,7 +2893,7 @@
Vector<Parameters::Size> jpegSizes;
if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
Vector<StreamConfiguration> scs = getStreamConfigurations();
- for (size_t i=0; i < scs.size(); i++) {
+ for (size_t i = 0; i < scs.size(); i++) {
const StreamConfiguration &sc = scs[i];
if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
sc.format == HAL_PIXEL_FORMAT_BLOB) {
@@ -2840,7 +2907,7 @@
const int HEIGHT_OFFSET = 1;
camera_metadata_ro_entry_t availableJpegSizes =
staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
- for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
+ for (size_t i = 0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET];
int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET];
Size sz = {width, height};
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 46d48bc..972d007 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -166,6 +166,9 @@
bool previewCallbackSurface;
bool zslMode;
+ // Whether the jpeg stream is slower than 30FPS and can slow down preview.
+ // When slowJpegMode is true, zslMode must be false to avoid slowing down preview.
+ bool slowJpegMode;
// Overall camera state
enum State {
@@ -190,6 +193,8 @@
static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
// Aspect ratio tolerance
static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
+ // Threshold for slow jpeg mode
+ static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
// Full static camera info, object owned by someone else, such as
// Camera2Device.
@@ -377,15 +382,23 @@
int32_t height;
int32_t isInput;
};
+
// Helper function extract available stream configuration
// Only valid since device HAL version 3.2
// returns an empty Vector if the device HAL version does not support it
Vector<StreamConfiguration> getStreamConfigurations();
+ // Helper function to get minimum frame duration for a jpeg size
+ // return -1 if input jpeg size cannot be found in supported size list
+ int64_t getJpegStreamMinFrameDurationNs(Parameters::Size size);
+
// Helper function to get non-duplicated available output formats
SortedVector<int32_t> getAvailableOutputFormats();
// Helper function to get available output jpeg sizes
Vector<Size> getAvailableJpegSizes();
+ // Helper function to get the maximum size in the input Size vector.
+ // The maximum size is defined by comparing width first and, when widths tie, comparing height.
+ Size getMaxSize(const Vector<Size>& sizes);
int mDeviceVersion;
};
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 3b83f63..c717a56 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -719,6 +719,38 @@
return res;
}
+status_t CameraDeviceClient::tearDown(int streamId) {
+ ATRACE_CALL();
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = OK;
+ if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ // Guard against trying to tear down non-created streams
+ ssize_t index = NAME_NOT_FOUND;
+ for (size_t i = 0; i < mStreamMap.size(); ++i) {
+ if (streamId == mStreamMap.valueAt(i)) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index == NAME_NOT_FOUND) {
+ ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+ "created yet", __FUNCTION__, mCameraId, streamId);
+ return BAD_VALUE;
+ }
+
+ // Also returns BAD_VALUE if stream ID was not valid or if the stream is in
+ // use
+ res = mDevice->tearDown(streamId);
+
+ return res;
+}
+
+
status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
String8 result;
result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 0f485ca..1f8b39d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -111,6 +111,9 @@
// Prepare stream by preallocating its buffers
virtual status_t prepare(int streamId);
+ // Tear down stream resources by freeing its unused buffers
+ virtual status_t tearDown(int streamId);
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 06177e3..cd25949 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -289,6 +289,11 @@
virtual status_t prepare(int streamId) = 0;
/**
+ * Free stream resources by dumping its unused gralloc buffers.
+ */
+ virtual status_t tearDown(int streamId) = 0;
+
+ /**
* Get the HAL device version.
*/
virtual uint32_t getDeviceVersion() = 0;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index dfe5565..c9c990c 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -626,6 +626,12 @@
return NO_INIT;
}
+status_t Camera2Device::tearDown(int streamId) {
+ ATRACE_CALL();
+ ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
+ return NO_INIT;
+}
+
uint32_t Camera2Device::getDeviceVersion() {
ATRACE_CALL();
return mDeviceVersion;
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index c9f3a2c..34c1ded 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -85,8 +85,9 @@
buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
// Flush implemented as just a wait
virtual status_t flush(int64_t *lastFrameNumber = NULL);
- // Prepare is a no-op
+ // Prepare and tearDown are no-ops
virtual status_t prepare(int streamId);
+ virtual status_t tearDown(int streamId);
virtual uint32_t getDeviceVersion();
virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9e73b5c..3afbd89 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1384,6 +1384,37 @@
return mPreparerThread->prepare(stream);
}
+status_t Camera3Device::tearDown(int streamId) {
+ ATRACE_CALL();
+ ALOGV("%s: Camera %d: Tearing down stream %d", __FUNCTION__, mId, streamId);
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ // Teardown can only be accomplished on devices that don't require register_stream_buffers,
+ // since we cannot call register_stream_buffers except right after configure_streams.
+ if (mHal3Device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
+ ALOGE("%s: Unable to tear down streams on device HAL v%x",
+ __FUNCTION__, mHal3Device->common.version);
+ return NO_INIT;
+ }
+
+ sp<Camera3StreamInterface> stream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ CLOGE("Stream %d does not exist", streamId);
+ return BAD_VALUE;
+ }
+
+ stream = mOutputStreams.editValueAt(outputStreamIdx);
+
+ if (stream->hasOutstandingBuffers() || mRequestThread->isStreamPending(stream)) {
+ CLOGE("Stream %d is a target of a in-progress request", streamId);
+ return BAD_VALUE;
+ }
+
+ return stream->tearDown();
+}
+
uint32_t Camera3Device::getDeviceVersion() {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 31b6132..140da98 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -141,6 +141,8 @@
virtual status_t prepare(int streamId);
+ virtual status_t tearDown(int streamId);
+
virtual uint32_t getDeviceVersion();
virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 4c40bb6..2527fd6 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -364,6 +364,61 @@
return res;
}
+status_t Camera3Stream::tearDown() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+
+ status_t res = OK;
+
+ // This function should be only called when the stream is configured.
+ if (mState != STATE_CONFIGURED) {
+ ALOGE("%s: Stream %d: Can't tear down stream if stream is not in "
+ "CONFIGURED state %d", __FUNCTION__, mId, mState);
+ return INVALID_OPERATION;
+ }
+
+ // If any buffers have been handed to the HAL, the stream cannot be torn down.
+ if (getHandoutOutputBufferCountLocked() > 0) {
+ ALOGE("%s: Stream %d: Can't tear down a stream that has outstanding buffers",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ // Free buffers by disconnecting and then reconnecting to the buffer queue
+ // Only unused buffers will be dropped immediately; buffers that have been filled
+ // and are waiting to be acquired by the consumer and buffers that are currently
+ // acquired will be freed once they are released by the consumer.
+
+ res = disconnectLocked();
+ if (res != OK) {
+ if (res == -ENOTCONN) {
+ // queue has been disconnected, nothing left to do, so exit with success
+ return OK;
+ }
+ ALOGE("%s: Stream %d: Unable to disconnect to tear down buffers: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+
+ mState = STATE_IN_CONFIG;
+
+ res = configureQueueLocked();
+ if (res != OK) {
+ ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ mState = STATE_ERROR;
+ return res;
+ }
+
+ // Reset prepared state, since we've reconnected to the queue and can prepare again.
+ mPrepared = false;
+ mStreamUnpreparable = false;
+
+ mState = STATE_CONFIGURED;
+
+ return OK;
+}
+
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 0543c66..bab2177 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -247,6 +247,20 @@
status_t cancelPrepare();
/**
+ * Tear down memory for this stream. This frees all unused gralloc buffers
+ * allocated for this stream, but leaves it ready for operation afterward.
+ *
+ * May only be called in the CONFIGURED state, and keeps the stream in
+ * the CONFIGURED state.
+ *
+ * Returns:
+ * OK if teardown succeeded.
+ * INVALID_OPERATION if not in the CONFIGURED state
+ * NO_INIT in case of a serious error from the HAL device
+ */
+ status_t tearDown();
+
+ /**
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 6c87a45..c086eaf 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -152,6 +152,20 @@
virtual status_t cancelPrepare() = 0;
/**
+ * Tear down memory for this stream. This frees all unused gralloc buffers
+ * allocated for this stream, but leaves it ready for operation afterward.
+ *
+ * May only be called in the CONFIGURED state, and keeps the stream in
+ * the CONFIGURED state.
+ *
+ * Returns:
+ * OK if teardown succeeded.
+ * INVALID_OPERATION if not in the CONFIGURED state
+ * NO_INIT in case of a serious error from the HAL device
+ */
+ virtual status_t tearDown() = 0;
+
+ /**
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*