Merge "Add adaptive playback support to VPX decoder." into lmp-dev
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
index f059b3d..670480b 100644
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -42,9 +42,13 @@
* Error codes for CAMERA_MSG_ERROR
*/
enum CameraErrorCode {
+ ERROR_CAMERA_INVALID_ERROR = -1, // To indicate all invalid error codes
ERROR_CAMERA_DISCONNECTED = 0,
ERROR_CAMERA_DEVICE = 1,
- ERROR_CAMERA_SERVICE = 2
+ ERROR_CAMERA_SERVICE = 2,
+ ERROR_CAMERA_REQUEST = 3,
+ ERROR_CAMERA_RESULT = 4,
+ ERROR_CAMERA_BUFFER = 5,
};
// One way
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index eb31c77..da4c20c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -234,7 +234,7 @@
status_t setComponentRole(bool isEncoder, const char *mime);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
- status_t configureTunneledVideoPlayback(int64_t audioHwSync,
+ status_t configureTunneledVideoPlayback(int32_t audioHwSync,
const sp<ANativeWindow> &nativeWindow);
status_t setVideoPortFormatType(
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3be0651..37bc418 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,7 +70,7 @@
LOCAL_STATIC_LIBRARIES += libinstantssq
-LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE:= libmedia
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 0c7e590c..adc066d 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -28,6 +28,7 @@
libcamera_client \
libcrypto \
libcutils \
+ libdrmframework \
liblog \
libdl \
libgui \
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index cdb7e69..f257ef3 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
#include "../../libstagefright/include/WVMExtractor.h"
@@ -49,6 +50,7 @@
mIsWidevine(false),
mUIDValid(uidValid),
mUID(uid),
+ mDrmManagerClient(NULL),
mMetaDataSize(-1ll),
mBitrate(-1ll),
mPollBufferingGeneration(0) {
@@ -57,12 +59,18 @@
}
void NuPlayer::GenericSource::resetDataSource() {
+ mAudioTimeUs = 0;
+ mVideoTimeUs = 0;
mHTTPService.clear();
mUri.clear();
mUriHeaders.clear();
mFd = -1;
mOffset = 0;
mLength = 0;
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mDecryptHandle = NULL;
+ mDrmManagerClient = NULL;
+ mStarted = false;
}
status_t NuPlayer::GenericSource::setDataSource(
@@ -130,6 +138,10 @@
return UNKNOWN_ERROR;
}
+ if (extractor->getDrmFlag()) {
+ checkDrmStatus(mDataSource);
+ }
+
sp<MetaData> fileMeta = extractor->getMetaData();
if (fileMeta != NULL) {
int64_t duration;
@@ -141,13 +153,17 @@
int32_t totalBitrate = 0;
for (size_t i = 0; i < extractor->countTracks(); ++i) {
+ sp<MediaSource> track = extractor->getTrack(i);
+
sp<MetaData> meta = extractor->getTrackMetaData(i);
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
- sp<MediaSource> track = extractor->getTrack(i);
-
+ // Do the string compare immediately with "mime",
+ // we can't assume "mime" would stay valid after another
+ // extractor operation, some extractors might modify meta
+ // during getTrack() and make it invalid.
if (!strncasecmp(mime, "audio/", 6)) {
if (mAudioTrack.mSource == NULL) {
mAudioTrack.mIndex = i;
@@ -199,6 +215,28 @@
return OK;
}
+void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
+ dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
+ if (mDecryptHandle != NULL) {
+ CHECK(mDrmManagerClient);
+ if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
+ sp<AMessage> msg = dupNotify();
+ msg->setInt32("what", kWhatDrmNoLicense);
+ msg->post();
+ }
+ }
+}
+
+int64_t NuPlayer::GenericSource::getLastReadPosition() {
+ if (mAudioTrack.mSource != NULL) {
+ return mAudioTimeUs;
+ } else if (mVideoTrack.mSource != NULL) {
+ return mVideoTimeUs;
+ } else {
+ return 0;
+ }
+}
+
status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
if (mIsWidevine && !audio) {
@@ -394,6 +432,33 @@
readBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
+
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::stop() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::pause() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::resume() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
+ if (mDecryptHandle != NULL) {
+ mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
+ }
}
status_t NuPlayer::GenericSource::feedMoreTSData() {
@@ -868,6 +933,10 @@
readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
}
+ setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
+ if (!mStarted) {
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ }
return OK;
}
@@ -985,6 +1054,14 @@
options.clearSeekTo();
if (err == OK) {
+ int64_t timeUs;
+ CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ mAudioTimeUs = timeUs;
+ } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ mVideoTimeUs = timeUs;
+ }
+
// formatChange && seeking: track whose source is changed during selection
// formatChange && !seeking: track whose source is not changed during selection
// !formatChange: normal seek
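The GenericSource changes above report DRM playback status on start/stop/pause/resume and on seek, converting the last read position from microseconds to milliseconds and skipping the call entirely for clear content. Below is a minimal stand-alone sketch of that guard-and-convert pattern; FakeDrmClient, SourceSketch and the Playback constants are illustrative stand-ins, not the AOSP classes.

    // Sketch of the "only report playback status when DRM is active" pattern from
    // GenericSource::setDrmPlaybackStatusIfNeeded(). Types here are simplified stand-ins.
    #include <cstdint>
    #include <cstdio>
    #include <memory>

    namespace Playback { enum { STOP = 0, START = 1, PAUSE = 2 }; }

    struct FakeDrmClient {
        void setPlaybackStatus(int status, int64_t positionMs) {
            std::printf("DRM playback status=%d position=%lld ms\n",
                        status, static_cast<long long>(positionMs));
        }
    };

    struct SourceSketch {
        std::shared_ptr<FakeDrmClient> drmClient;  // null for clear (non-DRM) content
        int64_t lastReadPositionUs = 0;

        void setDrmPlaybackStatusIfNeeded(int status, int64_t positionMs) {
            if (drmClient) {                       // guard: no-op without a decrypt handle
                drmClient->setPlaybackStatus(status, positionMs);
            }
        }

        void start() { setDrmPlaybackStatusIfNeeded(Playback::START, lastReadPositionUs / 1000); }
        void pause() { setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0); }
        void stop()  { setDrmPlaybackStatusIfNeeded(Playback::STOP, 0); }
    };

    int main() {
        SourceSketch s;
        s.start();                                 // silent: no DRM client attached
        s.drmClient = std::make_shared<FakeDrmClient>();
        s.lastReadPositionUs = 2500000;            // 2.5 s, in microseconds
        s.start();                                 // reports the position in milliseconds
        s.pause();
        return 0;
    }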
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 663bfae..1f13120 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -27,6 +27,8 @@
namespace android {
+class DecryptHandle;
+class DrmManagerClient;
struct AnotherPacketSource;
struct ARTSPController;
struct DataSource;
@@ -49,6 +51,9 @@
virtual void prepareAsync();
virtual void start();
+ virtual void stop();
+ virtual void pause();
+ virtual void resume();
virtual status_t feedMoreTSData();
@@ -90,7 +95,9 @@
};
Track mAudioTrack;
+ int64_t mAudioTimeUs;
Track mVideoTrack;
+ int64_t mVideoTimeUs;
Track mSubtitleTrack;
Track mTimedTextTrack;
@@ -111,6 +118,9 @@
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<WVMExtractor> mWVMExtractor;
+ DrmManagerClient *mDrmManagerClient;
+ sp<DecryptHandle> mDecryptHandle;
+ bool mStarted;
String8 mContentType;
AString mSniffedMIME;
off64_t mMetaDataSize;
@@ -122,6 +132,9 @@
void resetDataSource();
status_t initFromDataSource();
+ void checkDrmStatus(const sp<DataSource>& dataSource);
+ int64_t getLastReadPosition();
+ void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
status_t prefillCacheIfNecessary();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index f4cd02c..76d25de 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -280,7 +280,7 @@
msg->setObject(
"native-window",
new NativeWindowWrapper(
- new Surface(bufferProducer)));
+ new Surface(bufferProducer, true /* controlledByApp */)));
}
msg->post();
@@ -1915,6 +1915,12 @@
break;
}
+ case Source::kWhatDrmNoLicense:
+ {
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
+ break;
+ }
+
default:
TRESPASS();
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index d1aac50..5aaf48c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -220,6 +220,8 @@
void NuPlayer::Decoder::handleError(int32_t err)
{
+ mCodec->release();
+
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatError);
notify->setInt32("err", err);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 2f60072..2423f5f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -287,8 +287,9 @@
// fall through
case STATE_PAUSED:
+ mState = STATE_STOPPED;
notifyListener_l(MEDIA_STOPPED);
- // fall through
+ break;
case STATE_PREPARED:
case STATE_STOPPED:
@@ -314,6 +315,8 @@
return OK;
case STATE_RUNNING:
+ setPauseStartedTimeIfNeeded();
+ mState = STATE_PAUSED;
notifyListener_l(MEDIA_PAUSED);
mPlayer->pause();
break;
@@ -322,9 +325,6 @@
return INVALID_OPERATION;
}
- setPauseStartedTimeIfNeeded();
- mState = STATE_PAUSED;
-
return OK;
}
@@ -560,8 +560,10 @@
void NuPlayerDriver::notifyPosition(int64_t positionUs) {
Mutex::Autolock autoLock(mLock);
- mPositionUs = positionUs;
- mNotifyTimeRealUs = ALooper::GetNowUs();
+ if (isPlaying()) {
+ mPositionUs = positionUs;
+ mNotifyTimeRealUs = ALooper::GetNowUs();
+ }
}
void NuPlayerDriver::notifySeekComplete() {
@@ -675,15 +677,17 @@
mAsyncResult = err;
if (err == OK) {
+ // update state before notifying client, so that if client calls back into NuPlayerDriver
+ // in response, NuPlayerDriver has the right state
+ mState = STATE_PREPARED;
if (mIsAsyncPrepare) {
notifyListener_l(MEDIA_PREPARED);
}
- mState = STATE_PREPARED;
} else {
+ mState = STATE_UNPREPARED;
if (mIsAsyncPrepare) {
notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
}
- mState = STATE_UNPREPARED;
}
mCondition.broadcast();
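The NuPlayerDriver hunks above move the mState updates ahead of notifyListener_l(), so that a client calling back into the driver from inside the callback observes the new state. The following hedged sketch shows the reentrancy hazard being avoided; PlayerSketch and startIfPrepared are invented names, not the real interfaces.

    // Why state must be updated before notifying a listener that may immediately
    // call back into the object. All names are illustrative.
    #include <cstdio>
    #include <functional>

    class PlayerSketch {
    public:
        enum State { PREPARING, PREPARED };

        void setListener(std::function<void(PlayerSketch&)> l) { mListener = std::move(l); }

        void onPrepareDone() {
            mState = PREPARED;                // update state first...
            if (mListener) mListener(*this);  // ...so a re-entrant call sees PREPARED
        }

        bool startIfPrepared() {
            if (mState != PREPARED) {
                std::puts("start rejected: not prepared");
                return false;
            }
            std::puts("started");
            return true;
        }

    private:
        State mState = PREPARING;
        std::function<void(PlayerSketch&)> mListener;
    };

    int main() {
        PlayerSketch p;
        // The listener immediately calls back into the player, as a real client might.
        p.setListener([](PlayerSketch& player) { player.startIfPrepared(); });
        p.onPrepareDone();  // prints "started"; with the old ordering it would be rejected
        return 0;
    }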
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a3c976d..49941f8 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -315,7 +315,7 @@
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
Mutex::Autolock autoLock(mLock);
- if (!offloadingAudio()) {
+ if (!offloadingAudio() || mPaused) {
return 0;
}
@@ -410,8 +410,11 @@
if (entry->mBuffer == NULL) {
// EOS
-
- notifyEOS(true /* audio */, entry->mFinalResult);
+ int64_t postEOSDelayUs = 0;
+ if (mAudioSink->needsTrailingPadding()) {
+ postEOSDelayUs = getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency();
+ }
+ notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
mAudioQueue.erase(mAudioQueue.begin());
entry = NULL;
@@ -421,26 +424,11 @@
if (entry->mOffset == 0) {
int64_t mediaTimeUs;
CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-
mAnchorTimeMediaUs = mediaTimeUs;
- uint32_t numFramesPlayed;
- CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
-
- uint32_t numFramesPendingPlayout =
- mNumFramesWritten - numFramesPlayed;
-
- int64_t realTimeOffsetUs =
- (mAudioSink->latency() / 2 /* XXX */
- + numFramesPendingPlayout
- * mAudioSink->msecsPerFrame()) * 1000ll;
-
- // ALOGI("realTimeOffsetUs = %lld us", realTimeOffsetUs);
-
- mAnchorTimeRealUs =
- ALooper::GetNowUs() + realTimeOffsetUs;
+ mAnchorTimeRealUs = ALooper::GetNowUs()
+ + getAudioPendingPlayoutUs() + 1000 * mAudioSink->latency() / 2;
}
size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -494,6 +482,14 @@
return !mAudioQueue.empty();
}
+int64_t NuPlayer::Renderer::getAudioPendingPlayoutUs() {
+ uint32_t numFramesPlayed;
+ CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), (status_t)OK);
+
+ uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+ return numFramesPendingPlayout * mAudioSink->msecsPerFrame() * 1000;
+}
+
void NuPlayer::Renderer::postDrainVideoQueue() {
if (mDrainVideoQueuePending || mSyncQueues || mPaused) {
return;
@@ -607,12 +603,12 @@
notify->post();
}
-void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult) {
+void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatEOS);
notify->setInt32("audio", static_cast<int32_t>(audio));
notify->setInt32("finalResult", finalResult);
- notify->post();
+ notify->post(delayUs);
}
void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
@@ -891,6 +887,7 @@
++mAudioQueueGeneration;
++mVideoQueueGeneration;
prepareForMediaRenderingStart();
+ mPaused = true;
}
mDrainAudioQueuePending = false;
@@ -902,8 +899,6 @@
ALOGV("now paused audio queue has %d entries, video has %d entries",
mAudioQueue.size(), mVideoQueue.size());
-
- mPaused = true;
}
void NuPlayer::Renderer::onResume() {
@@ -915,9 +910,9 @@
mAudioSink->start();
}
+ Mutex::Autolock autoLock(mLock);
mPaused = false;
- Mutex::Autolock autoLock(mLock);
if (!mAudioQueue.empty()) {
postDrainAudioQueue_l();
}
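The new getAudioPendingPlayoutUs() helper centralizes the "frames written minus frames played" arithmetic that now feeds both the real-time anchor and the delayed EOS notification. A self-contained sketch of that calculation follows; the function name and the sink numbers are made up for illustration.

    // Pending playout: frames written to the audio sink but not yet played,
    // converted to microseconds using the sink's per-frame duration.
    #include <cstdint>
    #include <cstdio>

    int64_t pendingPlayoutUs(uint32_t numFramesWritten,
                             uint32_t numFramesPlayed,
                             float msecsPerFrame) {
        uint32_t pendingFrames = numFramesWritten - numFramesPlayed;
        return static_cast<int64_t>(pendingFrames * msecsPerFrame * 1000);
    }

    int main() {
        // Example: a 48 kHz sink has ~0.0208 ms per frame; 4800 frames are still queued.
        float msecsPerFrame = 1000.0f / 48000.0f;
        int64_t us = pendingPlayoutUs(/*written*/ 96000, /*played*/ 91200, msecsPerFrame);
        std::printf("pending playout: %lld us (~%.0f ms)\n",
                    static_cast<long long>(us), us / 1000.0);
        // The renderer above then delays the EOS notification by this amount plus sink latency.
        return 0;
    }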
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 1cba1a0..8da6458 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -129,6 +129,7 @@
size_t fillAudioBuffer(void *buffer, size_t size);
bool onDrainAudioQueue();
+ int64_t getAudioPendingPlayoutUs();
void postDrainAudioQueue_l(int64_t delayUs = 0);
void onDrainVideoQueue();
@@ -146,7 +147,7 @@
void onResume();
void onAudioOffloadTearDown();
- void notifyEOS(bool audio, status_t finalResult);
+ void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
void notifyFlushComplete(bool audio);
void notifyPosition();
void notifyVideoLateBy(int64_t lateByUs);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 45657c2..7ccf3b1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -51,6 +51,7 @@
kWhatSubtitleData,
kWhatTimedTextData,
kWhatQueueDecoderShutdown,
+ kWhatDrmNoLicense,
};
// The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e4e463a..19a5908 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1245,13 +1245,13 @@
tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
- int64_t audioHwSync = 0;
- if (!msg->findInt64("audio-hw-sync", &audioHwSync)) {
+ int32_t audioHwSync = 0;
+ if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
ALOGW("No Audio HW Sync provided for video tunnel");
}
err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
if (err != OK) {
- ALOGE("configureTunneledVideoPlayback(%" PRId64 ",%p) failed!",
+ ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
audioHwSync, nativeWindow.get());
return err;
}
@@ -1898,7 +1898,7 @@
}
status_t ACodec::configureTunneledVideoPlayback(
- int64_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+ int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
native_handle_t* sidebandHandle;
status_t err = mOMX->configureVideoTunnelMode(
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 76f730f..fc2dd30 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -733,13 +733,15 @@
case CONFIGURING:
{
- setState(INITIALIZED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : INITIALIZED);
break;
}
case STARTING:
{
- setState(CONFIGURED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : CONFIGURED);
break;
}
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 9b930bc..c97be28 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,18 +16,89 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
- <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" />
- <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
- <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-983040" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-139264" />
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
</Decoders>
<Encoders>
- <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
- <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+ <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level2 -->
+ <Limit name="size" min="2x2" max="896x896" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-11880" />
+ <Limit name="bitrate" range="1-2000000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 0c181ff..0f44b52 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -72,15 +72,27 @@
}
void ALooperRoster::unregisterStaleHandlers() {
- Mutex::Autolock autoLock(mLock);
- for (size_t i = mHandlers.size(); i-- > 0;) {
- const HandlerInfo &info = mHandlers.valueAt(i);
+ Vector<sp<ALooper> > activeLoopers;
+ {
+ Mutex::Autolock autoLock(mLock);
- sp<ALooper> looper = info.mLooper.promote();
- if (looper == NULL) {
- ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
- mHandlers.removeItemsAt(i);
+ for (size_t i = mHandlers.size(); i-- > 0;) {
+ const HandlerInfo &info = mHandlers.valueAt(i);
+
+ sp<ALooper> looper = info.mLooper.promote();
+ if (looper == NULL) {
+ ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
+ mHandlers.removeItemsAt(i);
+ } else {
+ // At this point 'looper' might be the only sp<> keeping
+ // the object alive. To prevent it from going out of scope
+ // and having ~ALooper call this method again recursively
+ // and then deadlocking because of the Autolock above, add
+ // it to a Vector which will go out of scope after the lock
+ // has been released.
+ activeLoopers.add(looper);
+ }
}
}
}
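The comment added in the ALooperRoster hunk explains the key idea: strong references promoted under the lock are parked in a vector that is destroyed only after the mutex has been released, so a destructor that re-enters this method cannot deadlock. Below is a generic sketch of the same pattern using standard-library types rather than the AOSP classes.

    // "Release strong references outside the lock", as in unregisterStaleHandlers().
    #include <memory>
    #include <mutex>
    #include <vector>

    struct Handler {
        // Imagine ~Handler() calls back into Registry::unregisterStale().
    };

    class Registry {
    public:
        void add(const std::shared_ptr<Handler>& h) {
            std::lock_guard<std::mutex> lock(mLock);
            mHandlers.push_back(h);
        }

        void unregisterStale() {
            std::vector<std::shared_ptr<Handler>> keepAlive;  // destroyed after unlock
            {
                std::lock_guard<std::mutex> lock(mLock);
                for (size_t i = mHandlers.size(); i-- > 0;) {
                    std::shared_ptr<Handler> h = mHandlers[i].lock();
                    if (!h) {
                        mHandlers.erase(mHandlers.begin() + i);  // stale entry, prune it
                    } else {
                        // Parking the strong reference here means the last reference can
                        // only drop (and run ~Handler) after mLock has been released.
                        keepAlive.push_back(std::move(h));
                    }
                }
            }
            // keepAlive goes out of scope here, outside the critical section.
        }

    private:
        std::mutex mLock;
        std::vector<std::weak_ptr<Handler>> mHandlers;
    };

    int main() {
        Registry r;
        auto h = std::make_shared<Handler>();
        r.add(h);
        h.reset();            // the handler becomes stale
        r.unregisterStale();  // prunes it without risking a re-entrant deadlock
        return 0;
    }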
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 80cb2d0..4d5d79e 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -754,6 +754,9 @@
if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
// If this is a live session, start 3 segments from the end on connect
mSeqNumber = lastSeqNumberInPlaylist - 3;
+ if (mSeqNumber < firstSeqNumberInPlaylist) {
+ mSeqNumber = firstSeqNumberInPlaylist;
+ }
} else {
mSeqNumber = getSeqNumberForTime(mStartTimeUs);
mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7ac2c0c..fd28ea1 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1068,6 +1068,9 @@
// compute everything we need...
int countActiveTracks = 0;
+ // TODO: fix all16BitsStereoNoResample logic to
+ // either properly handle muted tracks (it should ignore them)
+ // or remove altogether as an obsolete optimization.
bool all16BitsStereoNoResample = true;
bool resampling = false;
bool volumeRamp = false;
@@ -1152,8 +1155,15 @@
if (countActiveTracks == 1) {
const int i = 31 - __builtin_clz(state->enabledTracks);
track_t& t = state->tracks[i];
- state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ if ((t.needs & NEEDS_MUTE) == 0) {
+ // The check prevents a muted track from acquiring a process hook.
+ //
+ // This is dangerous if the track is MONO as that requires
+ // special case handling due to implicit channel duplication.
+ // Stereo or Multichannel should actually be fine here.
+ state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ }
}
}
}
@@ -1188,6 +1198,7 @@
if (countActiveTracks == 1) {
const int i = 31 - __builtin_clz(state->enabledTracks);
track_t& t = state->tracks[i];
+ // Muted single tracks are handled by the allMuted case above.
state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
}
@@ -1745,9 +1756,10 @@
if (in == NULL || (((uintptr_t)in) & 3)) {
memset(out, 0, numFrames
* t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
- ALOGE_IF((((uintptr_t)in) & 3), "process stereo track: input buffer alignment pb: "
- "buffer %p track %d, channels %d, needs %08x",
- in, i, t.channelCount, t.needs);
+ ALOGE_IF((((uintptr_t)in) & 3),
+ "process__OneTrack16BitsStereoNoResampling: misaligned buffer"
+ " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+ in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]);
return;
}
size_t outFrames = b.frameCount;
@@ -2173,6 +2185,10 @@
/* Returns the proper process hook for mixing tracks. Currently works only for
* PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO. This should be
+ * prevented by the caller.
*/
AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index cc0e965..c45acd0 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -98,8 +98,12 @@
inputDesc = new EffectVector(audioSession);
mInputs.add(input, inputDesc);
} else {
+ // The EffectVector already exists; just increase its ref count
inputDesc = mInputs.valueAt(idx);
}
+ inputDesc->mRefCount++;
+
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -133,10 +137,14 @@
return status;
}
EffectVector *inputDesc = mInputs.valueAt(index);
- setProcessorEnabled(inputDesc, false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
- ALOGV("releaseInputEffects(): all effects released");
+ inputDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
+ if (inputDesc->mRefCount == 0) {
+ setProcessorEnabled(inputDesc, false);
+ delete inputDesc;
+ mInputs.removeItemsAt(index);
+ ALOGV("releaseInputEffects(): all effects released");
+ }
return status;
}
@@ -223,8 +231,12 @@
procDesc = new EffectVector(audioSession);
mOutputSessions.add(audioSession, procDesc);
} else {
+ // The EffectVector already exists; just increase its ref count
procDesc = mOutputSessions.valueAt(idx);
}
+ procDesc->mRefCount++;
+
+ ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -262,12 +274,16 @@
}
EffectVector *procDesc = mOutputSessions.valueAt(index);
- setProcessorEnabled(procDesc, false);
- procDesc->mEffects.clear();
- delete procDesc;
- mOutputSessions.removeItemsAt(index);
- ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
- audioSession);
+ procDesc->mRefCount--;
+ ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+ if (procDesc->mRefCount == 0) {
+ setProcessorEnabled(procDesc, false);
+ procDesc->mEffects.clear();
+ delete procDesc;
+ mOutputSessions.removeItemsAt(index);
+ ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
+ audioSession);
+ }
return status;
}
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
index 351cb1a..dbe0d0e 100644
--- a/services/audiopolicy/AudioPolicyEffects.h
+++ b/services/audiopolicy/AudioPolicyEffects.h
@@ -131,9 +131,11 @@
// class to store a vector of AudioEffects
class EffectVector {
public:
- EffectVector(int session) : mSessionId(session) {}
+ EffectVector(int session) : mSessionId(session), mRefCount(0) {}
/*virtual*/ ~EffectVector() {}
const int mSessionId;
+ // AudioPolicyManager holds mLock, so no separate lock is needed for the reference count here
+ int mRefCount;
Vector< sp<AudioEffect> >mEffects;
};
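The mRefCount field added to EffectVector lets several inputs or output sessions share one set of effects, and teardown now happens only when the last user releases it. A simplified sketch of that acquire/release discipline follows; SessionEffects and EffectsSketch are stand-ins, not the real AudioPolicyEffects classes.

    // Reference-counted session effects: created on first use, destroyed on last release.
    #include <cstdio>
    #include <map>
    #include <vector>

    struct SessionEffects {
        int refCount = 0;
        std::vector<int> effects;  // stand-in for Vector< sp<AudioEffect> >
    };

    class EffectsSketch {
    public:
        void addSession(int session) {
            SessionEffects*& desc = mSessions[session];
            if (desc == nullptr) {
                desc = new SessionEffects();   // the first user creates the effects
                desc->effects = {1, 2, 3};
            }
            desc->refCount++;
            std::printf("add: session %d refCount %d\n", session, desc->refCount);
        }

        void releaseSession(int session) {
            auto it = mSessions.find(session);
            if (it == mSessions.end()) return;
            SessionEffects* desc = it->second;
            if (--desc->refCount == 0) {       // the last user tears everything down
                std::printf("release: session %d, all effects released\n", session);
                delete desc;
                mSessions.erase(it);
            } else {
                std::printf("release: session %d refCount %d\n", session, desc->refCount);
            }
        }

    private:
        std::map<int, SessionEffects*> mSessions;
    };

    int main() {
        EffectsSketch e;
        e.addSession(7);
        e.addSession(7);      // a second client on the same session reuses the effects
        e.releaseSession(7);  // one user still left, nothing is torn down
        e.releaseSession(7);  // now the effects are actually released
        return 0;
    }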
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 14fdec5..a805923 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -271,7 +271,13 @@
return INVALID_OPERATION;
}
- ALOGV("setDeviceConnectionState() disconnecting device %x", device);
+ ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
+
+ // Send the disconnect parameter to the HALs
+ AudioParameter param = AudioParameter(address);
+ param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+ mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
// remove device from available output devices
mAvailableOutputDevices.remove(devDesc);
@@ -368,8 +374,17 @@
ALOGW("setDeviceConnectionState() device not connected: %d", device);
return INVALID_OPERATION;
}
+
+ ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
+
+ // Send the disconnect parameter to the HALs
+ AudioParameter param = AudioParameter(address);
+ param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+ mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
checkInputsForDevice(device, state, inputs, address);
mAvailableInputDevices.remove(devDesc);
+
} break;
default:
@@ -1282,21 +1297,23 @@
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
bool isSoundTrigger = false;
+ audio_source_t halInputSource = inputSource;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
ALOGV("SoundTrigger capture on session %d input %d", session, input);
+ } else {
+ halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
-
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
&input,
&config,
&device,
String8(""),
- inputSource,
+ halInputSource,
flags);
// only accept input with the exact requested set of parameters
@@ -1454,19 +1471,31 @@
return;
}
- mpClientInterface->closeInput(input);
- mInputs.removeItem(input);
- nextAudioPortGeneration();
+ closeInput(input);
mpClientInterface->onAudioPortListUpdate();
ALOGV("releaseInput() exit");
}
void AudioPolicyManager::closeAllInputs() {
+ bool patchRemoved = false;
+
for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+ sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
+ ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ if (patch_index >= 0) {
+ sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
+ status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ mAudioPatches.removeItemsAt(patch_index);
+ patchRemoved = true;
+ }
mpClientInterface->closeInput(mInputs.keyAt(input_index));
}
mInputs.clear();
nextAudioPortGeneration();
+
+ if (patchRemoved) {
+ mpClientInterface->onAudioPatchListUpdate();
+ }
}
void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
@@ -3497,6 +3526,16 @@
}
}
+ nextAudioPortGeneration();
+
+ ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+ if (index >= 0) {
+ sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+ status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ mAudioPatches.removeItemsAt(index);
+ mpClientInterface->onAudioPatchListUpdate();
+ }
+
AudioParameter param;
param.add(String8("closing"), String8("true"));
mpClientInterface->setParameters(output, param.toString());
@@ -3504,7 +3543,30 @@
mpClientInterface->closeOutput(output);
mOutputs.removeItem(output);
mPreviousOutputs = mOutputs;
+}
+
+void AudioPolicyManager::closeInput(audio_io_handle_t input)
+{
+ ALOGV("closeInput(%d)", input);
+
+ sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+ if (inputDesc == NULL) {
+ ALOGW("closeInput() unknown input %d", input);
+ return;
+ }
+
nextAudioPortGeneration();
+
+ ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ if (index >= 0) {
+ sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+ status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ mAudioPatches.removeItemsAt(index);
+ mpClientInterface->onAudioPatchListUpdate();
+ }
+
+ mpClientInterface->closeInput(input);
+ mInputs.removeItem(input);
}
SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device,
@@ -3874,7 +3936,7 @@
if (((mAvailableInputDevices.types() &
AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
(((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- (hwOutputDesc->mAudioPort->mModule->mHalVersion <
+ (hwOutputDesc->getAudioPort()->mModule->mHalVersion <
AUDIO_DEVICE_API_VERSION_3_0))) {
availableOutputDeviceTypes = availablePrimaryOutputDevices();
}
@@ -4257,6 +4319,20 @@
mpClientInterface->onAudioPatchListUpdate();
}
}
+
+ // inform all inputs as well
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
+ if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+ AudioParameter inputCmd = AudioParameter();
+ ALOGV("%s: inform input %d of device:%d", __func__,
+ inputDescriptor->mIoHandle, device);
+ inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+ mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+ inputCmd.toString(),
+ delayMs);
+ }
+ }
}
// update stream volumes according to new device
@@ -5070,7 +5146,6 @@
mStrategyMutedByDevice[i] = false;
}
if (profile != NULL) {
- mAudioPort = profile;
mFlags = profile->mFlags;
mSamplingRate = profile->pickSamplingRate();
mFormat = profile->pickFormat();
@@ -5253,7 +5328,6 @@
mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
{
if (profile != NULL) {
- mAudioPort = profile;
mSamplingRate = profile->pickSamplingRate();
mFormat = profile->pickFormat();
mChannelMask = profile->pickChannelMask();
@@ -6273,33 +6347,34 @@
localBackupConfig.config_mask = config->config_mask;
toAudioPortConfig(&localBackupConfig);
- if (mAudioPort == 0) {
+ sp<AudioPort> audioport = getAudioPort();
+ if (audioport == 0) {
status = NO_INIT;
goto exit;
}
if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- status = mAudioPort->checkExactSamplingRate(config->sample_rate);
+ status = audioport->checkExactSamplingRate(config->sample_rate);
if (status != NO_ERROR) {
goto exit;
}
mSamplingRate = config->sample_rate;
}
if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- status = mAudioPort->checkExactChannelMask(config->channel_mask);
+ status = audioport->checkExactChannelMask(config->channel_mask);
if (status != NO_ERROR) {
goto exit;
}
mChannelMask = config->channel_mask;
}
if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- status = mAudioPort->checkFormat(config->format);
+ status = audioport->checkFormat(config->format);
if (status != NO_ERROR) {
goto exit;
}
mFormat = config->format;
}
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
- status = mAudioPort->checkGain(&config->gain, config->gain.index);
+ status = audioport->checkGain(&config->gain, config->gain.index);
if (status != NO_ERROR) {
goto exit;
}
@@ -6486,7 +6561,6 @@
NULL),
mDeviceType(type), mAddress(""), mId(0)
{
- mAudioPort = this;
if (mGains.size() > 0) {
mGains[0]->getDefaultConfig(&mGain);
}
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index e3e3172..57e015e 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -297,7 +297,7 @@
struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const = 0;
- sp<AudioPort> mAudioPort;
+ virtual sp<AudioPort> getAudioPort() const = 0;
uint32_t mSamplingRate;
audio_format_t mFormat;
audio_channel_mask_t mChannelMask;
@@ -330,6 +330,7 @@
bool equals(const sp<DeviceDescriptor>& other) const;
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
+ virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
virtual void toAudioPort(struct audio_port *port) const;
@@ -462,6 +463,7 @@
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
+ virtual sp<AudioPort> getAudioPort() const { return mProfile; }
void toAudioPort(struct audio_port *port) const;
audio_port_handle_t mId;
@@ -506,6 +508,7 @@
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
+ virtual sp<AudioPort> getAudioPort() const { return mProfile; }
void toAudioPort(struct audio_port *port) const;
};
@@ -646,6 +649,9 @@
// close an output and its companion duplicating output.
void closeOutput(audio_io_handle_t output);
+ // close an input.
+ void closeInput(audio_io_handle_t input);
+
// checks and if necessary changes outputs used for all strategies.
// must be called every time a condition that affects the output choice for a given strategy
// changes: connected device, phone state, force use...
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 2f485b9..9d6ab23 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -47,6 +47,7 @@
device3/Camera3InputStream.cpp \
device3/Camera3OutputStream.cpp \
device3/Camera3ZslStream.cpp \
+ device3/Camera3DummyStream.cpp \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index b388079..2d31275 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -78,8 +78,12 @@
ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%d)",
__FUNCTION__, pipelineMaxDepth);
- mBufferQueueDepth = pipelineMaxDepth + 1;
- mFrameListDepth = pipelineMaxDepth + 1;
+ // Keep the buffer queue longer than the metadata queue because a buffer sometimes
+ // arrives earlier than its metadata, which would cause the buffer corresponding to
+ // the oldest metadata to be removed.
+ mFrameListDepth = pipelineMaxDepth;
+ mBufferQueueDepth = mFrameListDepth + 1;
+
mZslQueue.insertAt(0, mBufferQueueDepth);
mFrameList.insertAt(0, mFrameListDepth);
@@ -554,13 +558,15 @@
}
void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
- Mutex::Autolock l(mInputMutex);
// ignore output buffers
if (bufferInfo.mOutput) {
return;
}
+ // Lock the mutex only once we know an input buffer was returned, to avoid
+ // a potential deadlock
+ Mutex::Autolock l(mInputMutex);
// TODO: Verify that the buffer is in our queue by looking at timestamp
// theoretically unnecessary unless we change the following assumptions:
// -- only 1 buffer reprocessed at a time (which is the case now)
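The ZslProcessor3 change narrows the lock scope: output-buffer callbacks return before mInputMutex is ever taken, which is what avoids the potential deadlock mentioned in the comment. A minimal sketch of that check-then-lock shape, with illustrative names only:

    // Filter out the uninteresting case before taking the mutex, so the common
    // (output-buffer) path never touches the lock at all.
    #include <cstdio>
    #include <mutex>

    struct BufferInfo { bool isOutput; int id; };

    class ProcessorSketch {
    public:
        void onBufferReleased(const BufferInfo& info) {
            if (info.isOutput) {
                return;                                     // no state to update, no lock needed
            }
            std::lock_guard<std::mutex> lock(mInputMutex);  // taken only for input buffers
            mLastInputBuffer = info.id;
            std::printf("recorded input buffer %d\n", info.id);
        }

    private:
        std::mutex mInputMutex;
        int mLastInputBuffer = -1;
    };

    int main() {
        ProcessorSketch p;
        p.onBufferReleased({/*isOutput=*/true, 1});   // fast path, lock never taken
        p.onBufferReleased({/*isOutput=*/false, 2});  // input path, lock held briefly
        return 0;
    }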
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 0d33406..6f78db5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
+#include "device3/Camera3DummyStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -181,6 +182,7 @@
mHal3Device = device;
mStatus = STATUS_UNCONFIGURED;
mNextStreamId = 0;
+ mDummyStreamId = NO_STREAM;
mNeedConfig = true;
mPauseStateNotify = false;
@@ -1145,6 +1147,7 @@
ALOGW("%s: Replacing old callback listener", __FUNCTION__);
}
mListener = listener;
+ mRequestThread->setNotifyCallback(listener);
return OK;
}
@@ -1268,9 +1271,15 @@
ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
Mutex::Autolock il(mInterfaceLock);
+ NotificationListener* listener;
+ {
+ Mutex::Autolock l(mOutputLock);
+ listener = mListener;
+ }
+
{
Mutex::Autolock l(mLock);
- mRequestThread->clear(/*out*/frameNumber);
+ mRequestThread->clear(listener, /*out*/frameNumber);
}
status_t res;
@@ -1411,6 +1420,15 @@
return OK;
}
+ // Workaround for a spec bug in device HAL v3.2 or older: zero streams are not
+ // allowed, so add a dummy stream instead.
+ // TODO: Bug: 17321404 for fixing the HAL spec and removing this workaround.
+ if (mOutputStreams.size() == 0) {
+ addDummyStreamLocked();
+ } else {
+ tryRemoveDummyStreamLocked();
+ }
+
// Start configuring the streams
ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
@@ -1458,7 +1476,42 @@
res = mHal3Device->ops->configure_streams(mHal3Device, &config);
ATRACE_END();
- if (res != OK) {
+ if (res == BAD_VALUE) {
+ // HAL rejected this set of streams as unsupported, clean up config
+ // attempt and return to unconfigured state
+ if (mInputStream != NULL && mInputStream->isConfiguring()) {
+ res = mInputStream->cancelConfiguration();
+ if (res != OK) {
+ SET_ERR_L("Can't cancel configuring input stream %d: %s (%d)",
+ mInputStream->getId(), strerror(-res), res);
+ return res;
+ }
+ }
+
+ for (size_t i = 0; i < mOutputStreams.size(); i++) {
+ sp<Camera3OutputStreamInterface> outputStream =
+ mOutputStreams.editValueAt(i);
+ if (outputStream->isConfiguring()) {
+ res = outputStream->cancelConfiguration();
+ if (res != OK) {
+ SET_ERR_L(
+ "Can't cancel configuring output stream %d: %s (%d)",
+ outputStream->getId(), strerror(-res), res);
+ return res;
+ }
+ }
+ }
+
+ // Return the state to what it was at the start of the call, so that future
+ // configure attempts properly clean things up
+ mStatus = STATUS_UNCONFIGURED;
+ mNeedConfig = true;
+
+ ALOGV("%s: Camera %d: Stream configuration failed", __FUNCTION__, mId);
+ return BAD_VALUE;
+ } else if (res != OK) {
+ // Some other kind of error from configure_streams - this is not
+ // expected
SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
strerror(-res), res);
return res;
@@ -1498,7 +1551,7 @@
mNeedConfig = false;
- if (config.num_streams > 0) {
+ if (mDummyStreamId == NO_STREAM) {
mStatus = STATUS_CONFIGURED;
} else {
mStatus = STATUS_UNCONFIGURED;
@@ -1512,6 +1565,69 @@
return OK;
}
+status_t Camera3Device::addDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId != NO_STREAM) {
+ // Should never be adding a second dummy stream when one is already
+ // active
+ SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+
+ sp<Camera3OutputStreamInterface> dummyStream =
+ new Camera3DummyStream(mNextStreamId);
+
+ res = mOutputStreams.add(mNextStreamId, dummyStream);
+ if (res < 0) {
+ SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ mDummyStreamId = mNextStreamId;
+ mNextStreamId++;
+
+ return OK;
+}
+
+status_t Camera3Device::tryRemoveDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId == NO_STREAM) return OK;
+ if (mOutputStreams.size() == 1) return OK;
+
+ ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+
+ // Ok, have a dummy stream and there's at least one other output stream,
+ // so remove the dummy
+
+ sp<Camera3StreamInterface> deletedStream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+ return INVALID_OPERATION;
+ }
+
+ deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
+ mOutputStreams.removeItemsAt(outputStreamIdx);
+
+ // Free up the stream endpoint so that it can be used by some other stream
+ res = deletedStream->disconnect();
+ if (res != OK) {
+ SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+ // fall through since we still want to list the stream as deleted.
+ }
+ mDeletedStreams.add(deletedStream);
+ mDummyStreamId = NO_STREAM;
+
+ return res;
+}
+
void Camera3Device::setErrorState(const char *fmt, ...) {
Mutex::Autolock l(mLock);
va_list args;
@@ -1544,14 +1660,20 @@
// But only do error state transition steps for the first error
if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
- // Save stack trace. View by dumping it later.
- CameraTraces::saveTrace();
- // TODO: consider adding errorCause and client pid/procname
-
mErrorCause = errorCause;
mRequestThread->setPaused(true);
mStatus = STATUS_ERROR;
+
+ // Notify upstream about a device error
+ if (mListener != NULL) {
+ mListener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+ CaptureResultExtras());
+ }
+
+ // Save stack trace. View by dumping it later.
+ CameraTraces::saveTrace();
+ // TODO: consider adding errorCause and client pid/procname
}
/**
@@ -2022,84 +2144,11 @@
switch (msg->type) {
case CAMERA3_MSG_ERROR: {
- int streamId = 0;
- if (msg->message.error.error_stream != NULL) {
- Camera3Stream *stream =
- Camera3Stream::cast(
- msg->message.error.error_stream);
- streamId = stream->getId();
- }
- ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
- mId, __FUNCTION__, msg->message.error.frame_number,
- streamId, msg->message.error.error_code);
-
- CaptureResultExtras resultExtras;
- // Set request error status for the request in the in-flight tracking
- {
- Mutex::Autolock l(mInFlightLock);
- ssize_t idx = mInFlightMap.indexOfKey(msg->message.error.frame_number);
- if (idx >= 0) {
- InFlightRequest &r = mInFlightMap.editValueAt(idx);
- r.requestStatus = msg->message.error.error_code;
- resultExtras = r.resultExtras;
- } else {
- resultExtras.frameNumber = msg->message.error.frame_number;
- ALOGE("Camera %d: %s: cannot find in-flight request on frame %" PRId64
- " error", mId, __FUNCTION__, resultExtras.frameNumber);
- }
- }
-
- if (listener != NULL) {
- if (msg->message.error.error_code == CAMERA3_MSG_ERROR_DEVICE) {
- listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
- resultExtras);
- }
- } else {
- ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
- }
+ notifyError(msg->message.error, listener);
break;
}
case CAMERA3_MSG_SHUTTER: {
- ssize_t idx;
- uint32_t frameNumber = msg->message.shutter.frame_number;
- nsecs_t timestamp = msg->message.shutter.timestamp;
- // Verify ordering of shutter notifications
- {
- Mutex::Autolock l(mOutputLock);
- // TODO: need to track errors for tighter bounds on expected frame number.
- if (frameNumber < mNextShutterFrameNumber) {
- SET_ERR("Shutter notification out-of-order. Expected "
- "notification for frame %d, got frame %d",
- mNextShutterFrameNumber, frameNumber);
- break;
- }
- mNextShutterFrameNumber = frameNumber + 1;
- }
-
- CaptureResultExtras resultExtras;
-
- // Set timestamp for the request in the in-flight tracking
- // and get the request ID to send upstream
- {
- Mutex::Autolock l(mInFlightLock);
- idx = mInFlightMap.indexOfKey(frameNumber);
- if (idx >= 0) {
- InFlightRequest &r = mInFlightMap.editValueAt(idx);
- r.captureTimestamp = timestamp;
- resultExtras = r.resultExtras;
- }
- }
- if (idx < 0) {
- SET_ERR("Shutter notification for non-existent frame number %d",
- frameNumber);
- break;
- }
- ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
- mId, __FUNCTION__, frameNumber, resultExtras.requestId, timestamp);
- // Call listener, if any
- if (listener != NULL) {
- listener->notifyShutter(resultExtras, timestamp);
- }
+ notifyShutter(msg->message.shutter, listener);
break;
}
default:
@@ -2108,6 +2157,121 @@
}
}
+void Camera3Device::notifyError(const camera3_error_msg_t &msg,
+ NotificationListener *listener) {
+
+ // Map camera HAL error codes to ICameraDeviceCallbacks error codes
+ // Index into this with the HAL error code
+ static const ICameraDeviceCallbacks::CameraErrorCode
+ halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
+ // 0 = Unused error code
+ ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
+ // 1 = CAMERA3_MSG_ERROR_DEVICE
+ ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+ // 2 = CAMERA3_MSG_ERROR_REQUEST
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ // 3 = CAMERA3_MSG_ERROR_RESULT
+ ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
+ // 4 = CAMERA3_MSG_ERROR_BUFFER
+ ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
+ };
+
+ ICameraDeviceCallbacks::CameraErrorCode errorCode =
+ ((msg.error_code >= 0) &&
+ (msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
+ halErrorMap[msg.error_code] :
+ ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
+
+ int streamId = 0;
+ if (msg.error_stream != NULL) {
+ Camera3Stream *stream =
+ Camera3Stream::cast(msg.error_stream);
+ streamId = stream->getId();
+ }
+ ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+ mId, __FUNCTION__, msg.frame_number,
+ streamId, msg.error_code);
+
+ CaptureResultExtras resultExtras;
+ switch (errorCode) {
+ case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+ // SET_ERR calls notifyError
+ SET_ERR("Camera HAL reported serious device error");
+ break;
+ case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ {
+ Mutex::Autolock l(mInFlightLock);
+ ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
+ if (idx >= 0) {
+ InFlightRequest &r = mInFlightMap.editValueAt(idx);
+ r.requestStatus = msg.error_code;
+ resultExtras = r.resultExtras;
+ } else {
+ resultExtras.frameNumber = msg.frame_number;
+ ALOGE("Camera %d: %s: cannot find in-flight request on "
+ "frame %" PRId64 " error", mId, __FUNCTION__,
+ resultExtras.frameNumber);
+ }
+ }
+ if (listener != NULL) {
+ listener->notifyError(errorCode, resultExtras);
+ } else {
+ ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
+ }
+ break;
+ default:
+ // SET_ERR calls notifyError
+ SET_ERR("Unknown error message from HAL: %d", msg.error_code);
+ break;
+ }
+}
+
+void Camera3Device::notifyShutter(const camera3_shutter_msg_t &msg,
+ NotificationListener *listener) {
+ ssize_t idx;
+ // Verify ordering of shutter notifications
+ {
+ Mutex::Autolock l(mOutputLock);
+ // TODO: need to track errors for tighter bounds on expected frame number.
+ if (msg.frame_number < mNextShutterFrameNumber) {
+ SET_ERR("Shutter notification out-of-order. Expected "
+ "notification for frame %d, got frame %d",
+ mNextShutterFrameNumber, msg.frame_number);
+ return;
+ }
+ mNextShutterFrameNumber = msg.frame_number + 1;
+ }
+
+ CaptureResultExtras resultExtras;
+
+ // Set timestamp for the request in the in-flight tracking
+ // and get the request ID to send upstream
+ {
+ Mutex::Autolock l(mInFlightLock);
+ idx = mInFlightMap.indexOfKey(msg.frame_number);
+ if (idx >= 0) {
+ InFlightRequest &r = mInFlightMap.editValueAt(idx);
+ r.captureTimestamp = msg.timestamp;
+ resultExtras = r.resultExtras;
+ }
+ }
+ if (idx < 0) {
+ SET_ERR("Shutter notification for non-existent frame number %d",
+ msg.frame_number);
+ return;
+ }
+ ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
+ mId, __FUNCTION__,
+ msg.frame_number, resultExtras.requestId, msg.timestamp);
+ // Call listener, if any
+ if (listener != NULL) {
+ listener->notifyShutter(resultExtras, msg.timestamp);
+ }
+}
+
+
CameraMetadata Camera3Device::getLatestRequestLocked() {
ALOGV("%s", __FUNCTION__);
@@ -2144,6 +2308,12 @@
mStatusId = statusTracker->addComponent();
}
+void Camera3Device::RequestThread::setNotifyCallback(
+ NotificationListener *listener) {
+ Mutex::Autolock l(mRequestLock);
+ mListener = listener;
+}
+
void Camera3Device::RequestThread::configurationComplete() {
Mutex::Autolock l(mRequestLock);
mReconfigured = true;
@@ -2266,20 +2436,26 @@
return OK;
}
-status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) {
+status_t Camera3Device::RequestThread::clear(
+ NotificationListener *listener,
+ /*out*/int64_t *lastFrameNumber) {
Mutex::Autolock l(mRequestLock);
ALOGV("RequestThread::%s:", __FUNCTION__);
+
mRepeatingRequests.clear();
- // Decrement repeating frame count for those requests never sent to device
- // TODO: Remove this after we have proper error handling so these requests
- // will generate an error callback. This might be the only place calling
- // isRepeatingRequestLocked. If so, isRepeatingRequestLocked should also be removed.
- const RequestList &requests = mRequestQueue;
- for (RequestList::const_iterator it = requests.begin();
- it != requests.end(); ++it) {
- if (isRepeatingRequestLocked(*it)) {
- mRepeatingLastFrameNumber--;
+ // Send errors for all requests pending in the request queue, including
+ // pending repeating requests
+ if (listener != NULL) {
+ for (RequestList::iterator it = mRequestQueue.begin();
+ it != mRequestQueue.end(); ++it) {
+ // Set the frame number this request would have had, if it
+ // had been submitted; this frame number will not be reused.
+ // The requestId and burstId fields were set when the request was
+ // submitted originally (in convertMetadataListToRequestListLocked)
+ (*it)->mResultExtras.frameNumber = mFrameNumber++;
+ listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ (*it)->mResultExtras);
}
}
mRequestQueue.clear();
@@ -2421,8 +2597,17 @@
request.input_buffer = &inputBuffer;
res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
if (res != OK) {
+ // Can't get input buffer from gralloc queue - this could be due to
+ // disconnected queue or other producer misbehavior, so not a fatal
+ // error
ALOGE("RequestThread: Can't get input buffer, skipping request:"
" %s (%d)", strerror(-res), res);
+ Mutex::Autolock l(mRequestLock);
+ if (mListener != NULL) {
+ mListener->notifyError(
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ nextRequest->mResultExtras);
+ }
cleanUpFailedRequest(request, nextRequest, outputBuffers);
return true;
}
@@ -2438,8 +2623,17 @@
res = nextRequest->mOutputStreams.editItemAt(i)->
getBuffer(&outputBuffers.editItemAt(i));
if (res != OK) {
+ // Can't get output buffer from gralloc queue - this could be due to
+ // abandoned queue or other consumer misbehavior, so not a fatal
+ // error
ALOGE("RequestThread: Can't get output buffer, skipping request:"
" %s (%d)", strerror(-res), res);
+ Mutex::Autolock l(mRequestLock);
+ if (mListener != NULL) {
+ mListener->notifyError(
+ ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ nextRequest->mResultExtras);
+ }
cleanUpFailedRequest(request, nextRequest, outputBuffers);
return true;
}
@@ -2450,6 +2644,7 @@
// Log request in the in-flight queue
sp<Camera3Device> parent = mParent.promote();
if (parent == NULL) {
+ // Should not happen, and nowhere to send errors to, so just log it
CLOGE("RequestThread: Parent is gone");
cleanUpFailedRequest(request, nextRequest, outputBuffers);
return false;
@@ -2485,6 +2680,9 @@
ATRACE_END();
if (res != OK) {
+ // Should only get a failure here for malformed requests or device-level
+ // errors, so consider all errors fatal. Bad metadata failures should
+ // come through notify.
SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
" device: %s (%d)", request.frame_number, strerror(-res), res);
cleanUpFailedRequest(request, nextRequest, outputBuffers);
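Camera3Device::notifyError() above maps raw HAL error codes onto ICameraDeviceCallbacks codes through a bounds-checked lookup table, falling back to ERROR_CAMERA_INVALID_ERROR for anything out of range. The small stand-alone sketch below shows that idiom; the enum mirrors the ICameraDeviceCallbacks.h change at the top of this diff, while everything else is simplified for illustration.

    // Bounds-checked table lookup from HAL error codes to client-visible error codes.
    #include <cstdio>

    enum CameraErrorCode {
        ERROR_CAMERA_INVALID_ERROR = -1,  // fallback for unrecognized codes
        ERROR_CAMERA_DISCONNECTED  = 0,
        ERROR_CAMERA_DEVICE        = 1,
        ERROR_CAMERA_SERVICE       = 2,
        ERROR_CAMERA_REQUEST       = 3,
        ERROR_CAMERA_RESULT        = 4,
        ERROR_CAMERA_BUFFER        = 5,
    };

    // Indexed by the HAL error code (CAMERA3_MSG_ERROR_*), as in the diff above.
    static const CameraErrorCode kHalErrorMap[] = {
        ERROR_CAMERA_INVALID_ERROR,  // 0 = unused error code
        ERROR_CAMERA_DEVICE,         // 1 = CAMERA3_MSG_ERROR_DEVICE
        ERROR_CAMERA_REQUEST,        // 2 = CAMERA3_MSG_ERROR_REQUEST
        ERROR_CAMERA_RESULT,         // 3 = CAMERA3_MSG_ERROR_RESULT
        ERROR_CAMERA_BUFFER,         // 4 = CAMERA3_MSG_ERROR_BUFFER
    };

    CameraErrorCode mapHalError(int halCode) {
        const int count = sizeof(kHalErrorMap) / sizeof(kHalErrorMap[0]);
        return (halCode >= 0 && halCode < count) ? kHalErrorMap[halCode]
                                                 : ERROR_CAMERA_INVALID_ERROR;
    }

    int main() {
        std::printf("HAL 1 -> %d\n", mapHalError(1));  // ERROR_CAMERA_DEVICE
        std::printf("HAL 4 -> %d\n", mapHalError(4));  // ERROR_CAMERA_BUFFER
        std::printf("HAL 9 -> %d\n", mapHalError(9));  // falls back to INVALID (-1)
        return 0;
    }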
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 915c024..b99ed7e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -151,6 +151,8 @@
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
static const ssize_t kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
+ // Constant to use for stream ID when one doesn't exist
+ static const int NO_STREAM = -1;
// A lock to enforce serialization on the input/configure side
// of the public interface.
@@ -196,6 +198,8 @@
int mNextStreamId;
bool mNeedConfig;
+ int mDummyStreamId;
+
// Whether to send state updates upstream
// Pause when doing transparent reconfiguration
bool mPauseStateNotify;
@@ -291,6 +295,17 @@
status_t configureStreamsLocked();
/**
+ * Add a dummy stream to the current stream set, as a workaround for the
+ * camera HAL spec not allowing configurations with 0 output streams.
+ */
+ status_t addDummyStreamLocked();
+
+ /**
+ * Remove a dummy stream if the current config includes real streams.
+ */
+ status_t tryRemoveDummyStreamLocked();
+
+ /**
* Set device into an error state due to some fatal failure, and set an
* error message to indicate why. Only the first call's message will be
* used. The message is also sent to the log.
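// A minimal sketch of the call pattern the two helpers above imply in
// configureStreamsLocked(); the actual Camera3Device logic is not part of this
// hunk, so the body below is an assumption: add the placeholder only when the
// client has configured zero output streams, and drop it again once real
// streams exist.
#include <cstddef>
#include <functional>

void ensureNonEmptyStreamConfig(size_t numOutputStreams,
                                const std::function<void()> &addDummyStreamLocked,
                                const std::function<void()> &tryRemoveDummyStreamLocked) {
    if (numOutputStreams == 0) {
        addDummyStreamLocked();         // HAL <= v3.2 rejects zero-stream configs
    } else {
        tryRemoveDummyStreamLocked();   // no-op when no placeholder is present
    }
}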
@@ -346,6 +361,8 @@
sp<camera3::StatusTracker> statusTracker,
camera3_device_t *hal3Device);
+ void setNotifyCallback(NotificationListener *listener);
+
/**
* Call after stream (re)-configuration is completed.
*/
@@ -369,7 +386,8 @@
/**
* Remove all queued and repeating requests, and pending triggers
*/
- status_t clear(/*out*/
+ status_t clear(NotificationListener *listener,
+ /*out*/
int64_t *lastFrameNumber = NULL);
/**
@@ -452,6 +470,8 @@
wp<camera3::StatusTracker> mStatusTracker;
camera3_device_t *mHal3Device;
+ NotificationListener *mListener;
+
const int mId; // The camera ID
int mStatusId; // The RequestThread's component ID for
// status tracking
@@ -611,6 +631,12 @@
void notify(const camera3_notify_msg *msg);
+ // Specific notify handlers
+ void notifyError(const camera3_error_msg_t &msg,
+ NotificationListener *listener);
+ void notifyShutter(const camera3_shutter_msg_t &msg,
+ NotificationListener *listener);
+
/**
* Static callback forwarding methods from HAL to instance
*/
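// A plausible composition of notify() with the two handlers declared above;
// the actual Camera3Device::notify() body is not shown in this diff, so the
// shape below is an assumption. It relies on the camera3 HAL notify message
// layout (a type discriminator plus an error/shutter union) and assumes
// mOutputLock guards the device-level listener.
void Camera3Device::notify(const camera3_notify_msg *msg) {
    ATRACE_CALL();
    NotificationListener *listener;
    {
        Mutex::Autolock l(mOutputLock);
        listener = mListener;
    }
    switch (msg->type) {
        case CAMERA3_MSG_ERROR:
            notifyError(msg->message.error, listener);
            break;
        case CAMERA3_MSG_SHUTTER:
            notifyShutter(msg->message.shutter, listener);
            break;
        default:
            ALOGE("%s: Unknown notify message type %d", __FUNCTION__, msg->type);
            break;
    }
}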
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
new file mode 100644
index 0000000..6656b09
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DummyStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3DummyStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3DummyStream::Camera3DummyStream(int id) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
+ /*maxSize*/0, DUMMY_FORMAT) {
+
+}
+
+Camera3DummyStream::~Camera3DummyStream() {
+
+}
+
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Dummy\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3DummyStream::setTransform(int transform) {
+ ATRACE_CALL();
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::configureQueueLocked() {
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::disconnectLocked() {
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+ *usage = DUMMY_USAGE;
+ return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
new file mode 100644
index 0000000..3e42623
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+namespace camera3 {
+
+/**
+ * A dummy output stream class, to be used as a placeholder when no valid
+ * streams are configured by the client.
+ * This is necessary because camera HAL versions 3.2 and older disallow
+ * configuring 0 output streams, while the public camera2 API allows it.
+ */
+class Camera3DummyStream :
+ public Camera3IOStreamBase,
+ public Camera3OutputStreamInterface {
+
+ public:
+ /**
+ * Set up a dummy stream; doesn't actually connect to anything, and uses
+ * a default dummy format and size.
+ */
+ Camera3DummyStream(int id);
+
+ virtual ~Camera3DummyStream();
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ status_t setTransform(int transform);
+
+ protected:
+
+ /**
+ * A dummy stream never hands out buffers, so returning one is always an error.
+ */
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+
+ virtual status_t disconnectLocked();
+
+ private:
+
+ // Default dummy parameters: 320x240 is a size all devices are required to
+ // support; the format and usage otherwise mimic what a SurfaceView would use.
+ static const int DUMMY_WIDTH = 320;
+ static const int DUMMY_HEIGHT = 240;
+ static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp);
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3DummyStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
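// A short sketch of what registering the placeholder could look like inside
// Camera3Device::addDummyStreamLocked(). mDummyStreamId, NO_STREAM, mNextStreamId
// and mNeedConfig come from the Camera3Device.h hunk above; mOutputStreams is
// assumed to be the device's output-stream collection, and the body as a whole
// is an assumption rather than code from this change.
status_t Camera3Device::addDummyStreamLocked() {
    if (mDummyStreamId != NO_STREAM) {
        // A placeholder is already registered; nothing to do.
        return OK;
    }
    sp<camera3::Camera3DummyStream> dummyStream =
            new camera3::Camera3DummyStream(mNextStreamId);
    mOutputStreams.add(mNextStreamId, dummyStream);
    mDummyStreamId = mNextStreamId;
    mNextStreamId++;
    mNeedConfig = true;
    return OK;
}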
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 3f6254f..29ce38c 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -209,6 +209,35 @@
return res;
}
+status_t Camera3Stream::cancelConfiguration() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ switch (mState) {
+ case STATE_ERROR:
+ ALOGE("%s: In error state", __FUNCTION__);
+ return INVALID_OPERATION;
+ case STATE_IN_CONFIG:
+ case STATE_IN_RECONFIG:
+ // OK
+ break;
+ case STATE_CONSTRUCTED:
+ case STATE_CONFIGURED:
+ ALOGE("%s: Cannot cancel configuration that hasn't been started",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ default:
+ ALOGE("%s: Unknown state", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ camera3_stream::usage = oldUsage;
+ camera3_stream::max_buffers = oldMaxBuffers;
+
+ mState = STATE_CONSTRUCTED;
+
+ return OK;
+}
+
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
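// Caller-side sketch of how cancelConfiguration() is meant to be used during
// stream configuration; the corresponding Camera3Device::configureStreamsLocked()
// changes are not in this hunk, so mHal3Device, config and streams below are
// illustrative. If the HAL rejects the combined stream list, every stream is
// rolled back so it can be configured again with a different set.
camera3_stream_configuration config = {};
// ... populate config.streams / config.num_streams from the current stream set ...
status_t res = mHal3Device->ops->configure_streams(mHal3Device, &config);
if (res != OK) {
    // Not a fatal device error: undo the half-finished configuration and let
    // the client retry with a configuration the HAL accepts.
    for (size_t i = 0; i < streams.size(); i++) {
        streams[i]->cancelConfiguration();  // back to the pre-configuration state
    }
    return res;
}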
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index a77f27c..d0e1337 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -159,6 +159,13 @@
status_t finishConfiguration(camera3_device *hal3Device);
/**
+ * Cancels the stream configuration process. This returns the stream to the
+ * initial state, allowing it to be configured again later.
+ * This is done if the HAL rejects the proposed combined stream configuration.
+ */
+ status_t cancelConfiguration();
+
+ /**
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index c93ae15..da989cd 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -82,6 +82,13 @@
virtual status_t finishConfiguration(camera3_device *hal3Device) = 0;
/**
+ * Cancels the stream configuration process. This returns the stream to the
+ * initial state, allowing it to be configured again later.
+ * This is done if the HAL rejects the proposed combined stream configuration.
+ */
+ virtual status_t cancelConfiguration() = 0;
+
+ /**
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 2502e0d..b5aaee3 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -249,7 +249,7 @@
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
default:
- return eventMemory;
+ return eventMemory;
}
size_t size = event->data_offset + event->data_size;
@@ -653,7 +653,6 @@
{
ALOGV("onCallbackEvent type %d", event->mType);
- AutoMutex lock(mLock);
sp<IMemory> eventMemory = event->mMemory;
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
@@ -668,34 +667,53 @@
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
(struct sound_trigger_recognition_event *)eventMemory->pointer();
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(recognitionEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ if (model->mState != Model::STATE_ACTIVE) {
+ ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+ return;
+ }
- sp<Model> model = getModel(recognitionEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ recognitionEvent->capture_session = model->mCaptureSession;
+ model->mState = Model::STATE_IDLE;
+ client = mClient;
}
- if (model->mState != Model::STATE_ACTIVE) {
- ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
- return;
+ if (client != 0) {
+ client->onRecognitionEvent(eventMemory);
}
-
- recognitionEvent->capture_session = model->mCaptureSession;
- mClient->onRecognitionEvent(eventMemory);
- model->mState = Model::STATE_IDLE;
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
(struct sound_trigger_model_event *)eventMemory->pointer();
-
- sp<Model> model = getModel(soundmodelEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(soundmodelEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ client = mClient;
}
- mClient->onSoundModelEvent(eventMemory);
+ if (client != 0) {
+ client->onSoundModelEvent(eventMemory);
+ }
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- mClient->onServiceStateChange(eventMemory);
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+ if (client != 0) {
+ client->onServiceStateChange(eventMemory);
+ }
} break;
default:
LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
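// All three cases above apply the same refactoring: snapshot the client proxy
// (and update any model state) while holding mLock, then make the binder
// callback with the lock released, so a re-entrant call back into the service
// cannot deadlock on mLock. A generic sketch of that pattern in plain C++
// (std::mutex and std::shared_ptr stand in for AutoMutex and sp<>):
#include <memory>
#include <mutex>
#include <utility>

template <typename Client, typename Callback>
void notifyWithoutLockHeld(std::mutex &lock,
                           const std::shared_ptr<Client> &clientField,
                           Callback &&callback) {
    std::shared_ptr<Client> client;
    {
        std::lock_guard<std::mutex> l(lock);  // only shared state is touched here
        client = clientField;                 // snapshot under the lock
    }
    if (client != nullptr) {
        std::forward<Callback>(callback)(*client);  // invoked with the lock released
    }
}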