Merge commit '2381f06f374ee0cb8bca0edf5388394432b00e6d' into HEAD
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index f447c5b..7765914 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -133,11 +133,19 @@
}
status_t CameraMetadata::append(const CameraMetadata &other) {
+ return append(other.mBuffer);
+}
+
+status_t CameraMetadata::append(const camera_metadata_t* other) {
if (mLocked) {
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
- return append_camera_metadata(mBuffer, other.mBuffer);
+ size_t extraEntries = get_camera_metadata_entry_count(other);
+ size_t extraData = get_camera_metadata_data_count(other);
+ resizeIfNeeded(extraEntries, extraData);
+
+ return append_camera_metadata(mBuffer, other);
}
size_t CameraMetadata::entryCount() const {
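Note: the new overload appends directly from a raw HAL buffer without first
wrapping it in a CameraMetadata. A minimal usage sketch (the `raw` pointer is
hypothetical):

    camera_metadata_t *raw = ...;     // buffer received from the camera HAL
    CameraMetadata meta;
    status_t err = meta.append(raw);  // pre-resizes, then appends
    if (err != NO_ERROR) {
        ALOGE("%s: append failed: %d", __FUNCTION__, err);
    }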
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index fe2bd19..1254d3c 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -99,6 +99,11 @@
status_t append(const CameraMetadata &other);
/**
+ * Append metadata from a raw camera_metadata buffer
+ */
+ status_t append(const camera_metadata* other);
+
+ /**
* Number of metadata entries.
*/
size_t entryCount() const;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index f2f9c22..f379ee5 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -187,7 +187,8 @@
int notificationFrames = 0,
int sessionId = 0,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL);
+ const audio_offload_info_t *offloadInfo = NULL,
+ int uid = -1);
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
@@ -211,7 +212,8 @@
int notificationFrames = 0,
int sessionId = 0,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL);
+ const audio_offload_info_t *offloadInfo = NULL,
+ int uid = -1);
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
@@ -248,7 +250,8 @@
bool threadCanCallJava = false,
int sessionId = 0,
transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL);
+ const audio_offload_info_t *offloadInfo = NULL,
+ int uid = -1);
/* Result of constructing the AudioTrack. This must be checked for successful initialization
* before using any AudioTrack API (except for set()), because using
@@ -751,6 +754,7 @@
sp<DeathNotifier> mDeathNotifier;
uint32_t mSequence; // incremented for each new IAudioTrack attempt
audio_io_handle_t mOutput; // cached output io handle
+ int mClientUid;
};
class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index eaf7780..899d79f 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -74,6 +74,7 @@
// output: server's description of IAudioTrack for display in logs.
// Don't attempt to parse, as the format could change.
String8& name,
+ int clientUid,
status_t *status) = 0;
virtual sp<IAudioRecord> openRecord(
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 3b151ef..cc244f0 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -99,6 +99,7 @@
virtual status_t getPosition(uint32_t *position) const = 0;
virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
virtual int getSessionId() const = 0;
+ virtual audio_stream_type_t getAudioStreamType() const = 0;
// If no callback is specified, use the "write" API below to submit
// audio data.
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index a8ffd4a..e796ab3 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -264,6 +264,7 @@
status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
status_t setupH263EncoderParameters(const sp<AMessage> &msg);
status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
+ status_t setupVPXEncoderParameters(const sp<AMessage> &msg);
status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 6b7a63c..34213be 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -41,7 +41,8 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs);
+ int64_t timeBetweenTimeLapseFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers = true);
virtual ~CameraSourceTimeLapse();
@@ -116,7 +117,8 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs);
+ int64_t timeBetweenTimeLapseFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers = true);
// Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
// It only handles the case when mLastReadBufferCopy is signalled. Otherwise
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 742bc0e..157b1aa 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -80,7 +80,6 @@
const sp<DataSource> &source, String8 *mimeType,
float *confidence, sp<AMessage> *meta);
- static void RegisterSniffer(SnifferFunc func);
static void RegisterDefaultSniffers();
// for DRM
@@ -101,6 +100,9 @@
private:
static Mutex gSnifferMutex;
static List<SnifferFunc> gSniffers;
+ static bool gSniffersRegistered;
+
+ static void RegisterSniffer_l(SnifferFunc func);
DataSource(const DataSource &);
DataSource &operator=(const DataSource &);
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index c24f612..bbad271 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -57,7 +57,8 @@
status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta);
// Check whether the stream defined by meta can be offloaded to hardware
-bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming);
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
+ bool isStreaming, audio_stream_type_t streamType);
} // namespace android
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 395f164..7fd9379 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -360,6 +360,7 @@
// which must be > 0.
// buffer->mNonContig is unused.
// buffer->mRaw is unused.
+ // ackFlush is true iff being called from Track::start to acknowledge a pending flush.
// On exit:
// buffer->mFrameCount has the actual number of contiguous available frames,
// which is always 0 when the return status != NO_ERROR.
@@ -370,7 +371,7 @@
// NO_ERROR Success, buffer->mFrameCount > 0.
// WOULD_BLOCK No frames are available.
// NO_INIT Shared memory is corrupt.
- virtual status_t obtainBuffer(Buffer* buffer);
+ virtual status_t obtainBuffer(Buffer* buffer, bool ackFlush = false);
// Release (some of) the frames last obtained.
// On entry, buffer->mFrameCount should have the number of frames to release,
@@ -437,7 +438,7 @@
public:
virtual size_t framesReady();
virtual void framesReadyIsCalledByMultipleThreads();
- virtual status_t obtainBuffer(Buffer* buffer);
+ virtual status_t obtainBuffer(Buffer* buffer, bool ackFlush);
virtual void releaseBuffer(Buffer* buffer);
virtual void tallyUnderrunFrames(uint32_t frameCount);
virtual uint32_t getUnderrunFrames() const { return 0; }
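Note: a sketch of the acknowledge-flush handshake this parameter enables,
modeled on the "Acknowledge any pending flush()" call site in
audioflinger/Tracks.cpp (illustrative, not the verbatim server code):

    // On Track::start(), observe the flushed read position without
    // claiming any frames, so mUnreleased stays untouched.
    ServerProxy::Buffer buffer;
    buffer.mFrameCount = 1;
    (void) serverProxy->obtainBuffer(&buffer, true /*ackFlush*/);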
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index ab6d731..5862c08 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -62,6 +62,7 @@
virtual void pause();
virtual void close();
void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
+ virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }
void setVolume(float left, float right);
virtual status_t dump(int fd,const Vector<String16>& args) const;
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index fe5cd9e..b8a89a0 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -101,7 +101,8 @@
int notificationFrames,
int sessionId,
transfer_type transferType,
- const audio_offload_info_t *offloadInfo)
+ const audio_offload_info_t *offloadInfo,
+ int uid)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -109,7 +110,8 @@
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
- 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
+ 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
+ offloadInfo, uid);
}
AudioTrack::AudioTrack(
@@ -124,7 +126,8 @@
int notificationFrames,
int sessionId,
transfer_type transferType,
- const audio_offload_info_t *offloadInfo)
+ const audio_offload_info_t *offloadInfo,
+ int uid)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -132,7 +135,7 @@
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
- sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
+ sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
}
AudioTrack::~AudioTrack()
@@ -169,7 +172,8 @@
bool threadCanCallJava,
int sessionId,
transfer_type transferType,
- const audio_offload_info_t *offloadInfo)
+ const audio_offload_info_t *offloadInfo,
+ int uid)
{
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -313,6 +317,11 @@
mNotificationFramesReq = notificationFrames;
mNotificationFramesAct = 0;
mSessionId = sessionId;
+ if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
+ mClientUid = IPCThreadState::self()->getCallingUid();
+ } else {
+ mClientUid = uid;
+ }
mAuxEffectId = 0;
mFlags = flags;
mCbf = cbf;
@@ -962,6 +971,7 @@
tid,
&mSessionId,
mName,
+ mClientUid,
&status);
if (track == 0) {
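Note: the uid attribution rule added to set() above, spelled out as a
stand-alone sketch (same IPCThreadState semantics assumed): a caller may only
attribute the track to another uid when the call is made in-process, e.g.
mediaserver creating tracks on behalf of a client; binder callers are always
charged their own uid.

    int resolvedUid;
    if (uid == -1 || IPCThreadState::self()->getCallingPid() != getpid()) {
        resolvedUid = IPCThreadState::self()->getCallingUid();  // untrusted caller
    } else {
        resolvedUid = uid;  // trusted in-process caller
    }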
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index da73d65..caa7900 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -506,7 +506,7 @@
{
}
-status_t ServerProxy::obtainBuffer(Buffer* buffer)
+status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
if (mIsShutdown) {
@@ -579,7 +579,11 @@
buffer->mRaw = part1 > 0 ?
&((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
buffer->mNonContig = availToServer - part1;
- mUnreleased = part1;
+ // After flush(), allow releaseBuffer() on a previously obtained buffer;
+ // see "Acknowledge any pending flush()" in audioflinger/Tracks.cpp.
+ if (!ackFlush) {
+ mUnreleased = part1;
+ }
return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
}
no_init:
@@ -761,7 +765,7 @@
return (ssize_t) position;
}
-status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer)
+status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
if (mIsShutdown) {
buffer->mFrameCount = 0;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 448a82e..acfaea0 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -96,6 +96,7 @@
pid_t tid,
int *sessionId,
String8& name,
+ int clientUid,
status_t *status)
{
Parcel data, reply;
@@ -121,6 +122,7 @@
lSessionId = *sessionId;
}
data.writeInt32(lSessionId);
+ data.writeInt32(clientUid);
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
ALOGE("createTrack error: %s", strerror(-lStatus));
@@ -762,6 +764,7 @@
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
pid_t tid = (pid_t) data.readInt32();
int sessionId = data.readInt32();
+ int clientUid = data.readInt32();
String8 name;
status_t status;
sp<IAudioTrack> track;
@@ -773,7 +776,7 @@
track = createTrack(
(audio_stream_type_t) streamType, sampleRate, format,
channelMask, frameCount, &flags, buffer, output, tid,
- &sessionId, name, &status);
+ &sessionId, name, clientUid, &status);
}
reply->writeInt32(flags);
reply->writeInt32(sessionId);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9553458..cd052e6 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -590,7 +590,7 @@
}
if (!p->hardwareOutput()) {
- mAudioOutput = new AudioOutput(mAudioSessionId);
+ mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid());
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
@@ -1296,12 +1296,13 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(int sessionId)
+MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid)
: mCallback(NULL),
mCallbackCookie(NULL),
mCallbackData(NULL),
mBytesWritten(0),
mSessionId(sessionId),
+ mUid(uid),
mFlags(AUDIO_OUTPUT_FLAG_NONE) {
ALOGV("AudioOutput(%d)", sessionId);
mStreamType = AUDIO_STREAM_MUSIC;
@@ -1549,7 +1550,8 @@
0, // notification frames
mSessionId,
AudioTrack::TRANSFER_CALLBACK,
- offloadInfo);
+ offloadInfo,
+ mUid);
} else {
t = new AudioTrack(
mStreamType,
@@ -1558,10 +1560,13 @@
channelMask,
frameCount,
flags,
- NULL,
- NULL,
- 0,
- mSessionId);
+ NULL, // callback
+ NULL, // user data
+ 0, // notification frames
+ mSessionId,
+ AudioTrack::TRANSFER_DEFAULT,
+ NULL, // offload info
+ mUid);
}
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 21f4117..a486cb5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -72,7 +72,7 @@
class CallbackData;
public:
- AudioOutput(int sessionId);
+ AudioOutput(int sessionId, int uid);
virtual ~AudioOutput();
virtual bool ready() const { return mTrack != 0; }
@@ -100,7 +100,10 @@
virtual void flush();
virtual void pause();
virtual void close();
- void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
+ void setAudioStreamType(audio_stream_type_t streamType) {
+ mStreamType = streamType; }
+ virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }
+
void setVolume(float left, float right);
virtual status_t setPlaybackRatePermille(int32_t ratePermille);
status_t setAuxEffectSendLevel(float level);
@@ -135,6 +138,7 @@
uint32_t mSampleRateHz; // sample rate of the content, as set in open()
float mMsecsPerFrame;
int mSessionId;
+ int mUid;
float mSendLevel;
int mAuxEffectId;
static bool mIsOnEmulator;
@@ -206,6 +210,9 @@
virtual void pause() {}
virtual void close() {}
void setAudioStreamType(audio_stream_type_t streamType) {}
+ // stream type is not used for AudioCache
+ virtual audio_stream_type_t getAudioStreamType() const { return AUDIO_STREAM_DEFAULT; }
+
void setVolume(float left, float right) {}
virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
uint32_t sampleRate() const { return mSampleRate; }
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 095d5ca..f9d9020 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -70,8 +70,9 @@
mOutputFd(-1),
mAudioSource(AUDIO_SOURCE_CNT),
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false), mSurfaceMediaSource(NULL),
- mCaptureTimeLapse(false) {
+ mCaptureTimeLapse(false),
+ mStarted(false),
+ mSurfaceMediaSource(NULL) {
ALOGV("Constructor");
reset();
@@ -1089,7 +1090,22 @@
}
}
-status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
+status_t StagefrightRecorder::checkVideoEncoderCapabilities(
+ bool *supportsCameraSourceMetaDataMode) {
+ /* hardware codecs must support camera source meta data mode */
+ Vector<CodecCapabilities> codecs;
+ OMXClient client;
+ CHECK_EQ(client.connect(), (status_t)OK);
+ QueryCodecs(
+ client.interface(),
+ (mVideoEncoder == VIDEO_ENCODER_H263 ? MEDIA_MIMETYPE_VIDEO_H263 :
+ mVideoEncoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 :
+ mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
+ false /* decoder */, true /* hwCodec */, &codecs);
+ *supportsCameraSourceMetaDataMode = codecs.size() > 0;
+ ALOGV("encoder %s camera source meta-data mode",
+ *supportsCameraSourceMetaDataMode ? "supports" : "DOES NOT SUPPORT");
+
if (!mCaptureTimeLapse) {
// Don't clip for time lapse capture as the encoder will have enough
// time to encode because of the slow capture rate of time lapse.
@@ -1307,7 +1323,9 @@
status_t StagefrightRecorder::setupCameraSource(
sp<CameraSource> *cameraSource) {
status_t err = OK;
- if ((err = checkVideoEncoderCapabilities()) != OK) {
+ bool encoderSupportsCameraSourceMetaDataMode;
+ if ((err = checkVideoEncoderCapabilities(
+ &encoderSupportsCameraSourceMetaDataMode)) != OK) {
return err;
}
Size videoSize;
@@ -1323,13 +1341,14 @@
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenTimeLapseFrameCaptureUs);
+ mTimeBetweenTimeLapseFrameCaptureUs,
+ encoderSupportsCameraSourceMetaDataMode);
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate,
- mPreviewSurface, true /*storeMetaDataInVideoBuffers*/);
+ mPreviewSurface, encoderSupportsCameraSourceMetaDataMode);
}
mCamera.clear();
mCameraProxy.clear();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index c864207..31f09e0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -139,7 +139,8 @@
status_t startRTPRecording();
status_t startMPEG2TSRecording();
sp<MediaSource> createAudioSource();
- status_t checkVideoEncoderCapabilities();
+ status_t checkVideoEncoderCapabilities(
+ bool *supportsCameraSourceMetaDataMode);
status_t checkAudioEncoderCapabilities();
// Generic MediaSource set-up. Returns the appropriate
// source (CameraSource or SurfaceMediaSource)
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index d8b35d7..f1782cc 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -201,7 +201,16 @@
switch (what) {
case LiveSession::kWhatPrepared:
{
- notifyVideoSizeChanged(0, 0);
+ // notify the current size here if we have it, otherwise report an initial size of (0,0)
+ sp<AMessage> format = getFormat(false /* audio */);
+ int32_t width;
+ int32_t height;
+ if (format != NULL &&
+ format->findInt32("width", &width) && format->findInt32("height", &height)) {
+ notifyVideoSizeChanged(width, height);
+ } else {
+ notifyVideoSizeChanged(0, 0);
+ }
uint32_t flags = FLAG_CAN_PAUSE;
if (mLiveSession->isSeekable()) {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 1adab38..528fdb9 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1909,6 +1909,11 @@
err = setupAVCEncoderParameters(msg);
break;
+ case OMX_VIDEO_CodingVP8:
+ case OMX_VIDEO_CodingVP9:
+ err = setupVPXEncoderParameters(msg);
+ break;
+
default:
break;
}
@@ -2240,6 +2245,17 @@
return configureBitrate(bitrate, bitrateMode);
}
+status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
+ int32_t bitrate;
+ if (!msg->findInt32("bitrate", &bitrate)) {
+ return INVALID_OPERATION;
+ }
+
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
+ return configureBitrate(bitrate, bitrateMode);
+}
+
status_t ACodec::verifySupportForProfileAndLevel(
int32_t profile, int32_t level) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
@@ -3072,11 +3088,16 @@
/* these are unfilled buffers returned by client */
CHECK(msg->findInt32("err", &err));
- ALOGV("[%s] saw error %d instead of an input buffer",
- mCodec->mComponentName.c_str(), err);
+ if (err == OK) {
+ /* buffers with no errors are returned on MediaCodec.flush */
+ mode = KEEP_BUFFERS;
+ } else {
+ ALOGV("[%s] saw error %d instead of an input buffer",
+ mCodec->mComponentName.c_str(), err);
+ eos = true;
+ }
buffer.clear();
- mode = KEEP_BUFFERS;
}
int32_t tmp;
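Note: a hedged sketch of a configure message that would reach the new
setupVPXEncoderParameters() (key names follow those used elsewhere in ACodec;
values are illustrative):

    sp<AMessage> format = new AMessage;
    format->setString("mime", MEDIA_MIMETYPE_VIDEO_VP8);
    format->setInt32("width", 640);
    format->setInt32("height", 480);
    format->setInt32("bitrate", 500000);  // required; absent => INVALID_OPERATION
    // the bitrate mode falls back to variable rate when not set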
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index bdd842f..d7223d9 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -236,10 +236,10 @@
memset((uint8_t *) buffer->data(), 0, buffer->range_length());
} else if (elapsedTimeUs < kAutoRampStartUs + kAutoRampDurationUs) {
int32_t autoRampDurationFrames =
- (kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;
+ ((int64_t)kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL; // cast to int64_t to avoid 32-bit overflow
int32_t autoRampStartFrames =
- (kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;
+ ((int64_t)kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL; // cast to int64_t to avoid 32-bit overflow
int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
rampVolume(nFrames, autoRampDurationFrames,
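Note on why the casts matter: with 32-bit arithmetic, e.g. a 300 ms ramp at
48 kHz gives 300000 * 48000 = 14,400,000,000, far beyond INT32_MAX
(2,147,483,647), so the product would wrap before the division. Casting the
left operand to int64_t promotes the whole expression to 64-bit arithmetic.
(The constants here are illustrative; only the overflow mechanism is the
point.)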
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index c912f75..130207d 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -606,6 +606,9 @@
mWatchForAudioSeekComplete = false;
mWatchForAudioEOS = false;
+
+ mMediaRenderingStartGeneration = 0;
+ mStartGeneration = 0;
}
void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
@@ -798,7 +801,7 @@
}
}
- if (mFlags & (PLAYING | PREPARING)) {
+ if (mFlags & (PLAYING | PREPARING | CACHE_UNDERRUN)) {
postBufferingEvent_l();
}
}
@@ -895,6 +898,8 @@
return OK;
}
+ mMediaRenderingStartGeneration = ++mStartGeneration;
+
if (!(mFlags & PREPARED)) {
status_t err = prepare_l();
@@ -1197,8 +1202,7 @@
setVideoScalingMode_l(mVideoScalingMode);
if (USE_SURFACE_ALLOC
&& !strncmp(component, "OMX.", 4)
- && strncmp(component, "OMX.google.", 11)
- && strcmp(component, "OMX.Nvidia.mpeg2v.decode")) {
+ && strncmp(component, "OMX.google.", 11)) {
// Hardware decoders avoid the CPU color conversion by decoding
// directly to ANativeBuffers, so we must use a renderer that
// just pushes those buffers to the ANativeWindow.
@@ -1495,7 +1499,13 @@
// This doesn't guarantee that the hardware has a free stream
// but it avoids us attempting to open (and re-open) an offload
// stream to hardware that doesn't have the necessary codec
- mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP());
+ audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+ if (mAudioSink != NULL) {
+ streamType = mAudioSink->getAudioStreamType();
+ }
+
+ mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL),
+ isStreamingHTTP(), streamType);
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
ALOGV("createAudioPlayer: bypass OMX (raw)");
@@ -1927,7 +1937,7 @@
++mStats.mNumVideoFramesDropped;
}
- postVideoEvent_l();
+ postVideoEvent_l(0);
return;
}
}
@@ -1967,6 +1977,41 @@
return;
}
+ /* get next frame time */
+ if (wasSeeking == NO_SEEK) {
+ MediaSource::ReadOptions options;
+ for (;;) {
+ status_t err = mVideoSource->read(&mVideoBuffer, &options);
+ if (err != OK) {
+ // deal with any errors next time
+ CHECK(mVideoBuffer == NULL);
+ postVideoEvent_l(0);
+ return;
+ }
+
+ if (mVideoBuffer->range_length() != 0) {
+ break;
+ }
+
+ // Some decoders, notably the PV AVC software decoder
+ // return spurious empty buffers that we just want to ignore.
+
+ mVideoBuffer->release();
+ mVideoBuffer = NULL;
+ }
+
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ ++mStats.mNumVideoFramesDecoded;
+ }
+
+ int64_t nextTimeUs;
+ CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &nextTimeUs));
+ int64_t delayUs = nextTimeUs - ts->getRealTimeUs() + mTimeSourceDeltaUs;
+ postVideoEvent_l(delayUs > 10000 ? 10000 : delayUs < 0 ? 0 : delayUs);
+ return;
+ }
+
postVideoEvent_l();
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 20214e8..5772316 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -41,13 +41,15 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs) {
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers) {
CameraSourceTimeLapse *source = new
CameraSourceTimeLapse(camera, proxy, cameraId,
clientName, clientUid,
videoSize, videoFrameRate, surface,
- timeBetweenFrameCaptureUs);
+ timeBetweenFrameCaptureUs,
+ storeMetaDataInVideoBuffers);
if (source != NULL) {
if (source->initCheck() != OK) {
@@ -67,9 +69,11 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs)
+ int64_t timeBetweenFrameCaptureUs,
+ bool storeMetaDataInVideoBuffers)
: CameraSource(camera, proxy, cameraId, clientName, clientUid,
- videoSize, videoFrameRate, surface, true),
+ videoSize, videoFrameRate, surface,
+ storeMetaDataInVideoBuffers),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
mSkipCurrentFrame(false) {
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index fc6fd9c..97987e2 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -107,6 +107,7 @@
Mutex DataSource::gSnifferMutex;
List<DataSource::SnifferFunc> DataSource::gSniffers;
+bool DataSource::gSniffersRegistered = false;
bool DataSource::sniff(
String8 *mimeType, float *confidence, sp<AMessage> *meta) {
@@ -114,7 +115,13 @@
*confidence = 0.0f;
meta->clear();
- Mutex::Autolock autoLock(gSnifferMutex);
+ {
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (!gSniffersRegistered) {
+ return false;
+ }
+ }
+
for (List<SnifferFunc>::iterator it = gSniffers.begin();
it != gSniffers.end(); ++it) {
String8 newMimeType;
@@ -133,9 +140,7 @@
}
// static
-void DataSource::RegisterSniffer(SnifferFunc func) {
- Mutex::Autolock autoLock(gSnifferMutex);
-
+void DataSource::RegisterSniffer_l(SnifferFunc func) {
for (List<SnifferFunc>::iterator it = gSniffers.begin();
it != gSniffers.end(); ++it) {
if (*it == func) {
@@ -148,23 +153,29 @@
// static
void DataSource::RegisterDefaultSniffers() {
- RegisterSniffer(SniffMPEG4);
- RegisterSniffer(SniffMatroska);
- RegisterSniffer(SniffOgg);
- RegisterSniffer(SniffWAV);
- RegisterSniffer(SniffFLAC);
- RegisterSniffer(SniffAMR);
- RegisterSniffer(SniffMPEG2TS);
- RegisterSniffer(SniffMP3);
- RegisterSniffer(SniffAAC);
- RegisterSniffer(SniffMPEG2PS);
- RegisterSniffer(SniffWVM);
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (gSniffersRegistered) {
+ return;
+ }
+
+ RegisterSniffer_l(SniffMPEG4);
+ RegisterSniffer_l(SniffMatroska);
+ RegisterSniffer_l(SniffOgg);
+ RegisterSniffer_l(SniffWAV);
+ RegisterSniffer_l(SniffFLAC);
+ RegisterSniffer_l(SniffAMR);
+ RegisterSniffer_l(SniffMPEG2TS);
+ RegisterSniffer_l(SniffMP3);
+ RegisterSniffer_l(SniffAAC);
+ RegisterSniffer_l(SniffMPEG2PS);
+ RegisterSniffer_l(SniffWVM);
char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
- RegisterSniffer(SniffDRM);
+ RegisterSniffer_l(SniffDRM);
}
+ gSniffersRegistered = true;
}
// static
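Note: the intended call pattern after this change, as a short sketch: clients
call the now-idempotent RegisterDefaultSniffers() from any thread, and
sniff() bails out until registration has completed, so a partially built
sniffer list is never consulted.

    DataSource::RegisterDefaultSniffers();  // safe to call repeatedly
    String8 mime;
    float confidence;
    sp<AMessage> meta;
    if (source->sniff(&mime, &confidence, &meta)) {
        ALOGV("sniffed %s (confidence %.2f)", mime.string(), confidence);
    }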
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 13e22d6..9b36b6a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1386,19 +1386,33 @@
} else {
// No size was specified. Pick a conservatively large size.
int32_t width, height;
- if (mLastTrack->meta->findInt32(kKeyWidth, &width) &&
- mLastTrack->meta->findInt32(kKeyHeight, &height)) {
- mLastTrack->meta->setInt32(kKeyMaxInputSize, width * height * 3 / 2);
- } else {
+ if (!mLastTrack->meta->findInt32(kKeyWidth, &width) ||
+ !mLastTrack->meta->findInt32(kKeyHeight, &height)) {
ALOGE("No width or height, assuming worst case 1080p");
- mLastTrack->meta->setInt32(kKeyMaxInputSize, 3110400);
+ width = 1920;
+ height = 1080;
}
+
+ const char *mime;
+ CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ // AVC requires compression ratio of at least 2, and uses
+ // macroblocks
+ max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
+ } else {
+ // For all other formats there is no minimum compression
+ // ratio. Use compression ratio of 1.
+ max_size = width * height * 3 / 2;
+ }
+ mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size);
}
*offset += chunk_size;
- // Calculate average frame rate.
+ // NOTE: setting another piece of metadata invalidates any pointers (such as the
+ // mimetype) previously obtained, so don't cache them.
const char *mime;
CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
+ // Calculate average frame rate.
if (!strncasecmp("video/", mime, 6)) {
size_t nSamples = mLastTrack->sampleTable->countSamples();
int64_t durationUs;
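Note: worked example of the new AVC worst-case sizing for a 1920x1080 track:

    ((1920 + 15) / 16) * ((1080 + 15) / 16) = 120 * 68 = 8160 macroblocks
    8160 * 192 = 1,566,720 bytes

versus the raw-YUV bound still used for other codecs, 1920 * 1080 * 3 / 2 =
3,110,400 bytes. The AVC figure reflects the standard's minimum 2:1
compression per 16x16 macroblock (384 raw YUV420 bytes / 2 = 192), roughly
halving the worst-case input buffer.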
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index e299caf..8af1aaf 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1506,7 +1506,8 @@
info->mOwnedByClient = false;
if (portIndex == kPortIndexInput) {
- msg->setInt32("err", ERROR_END_OF_STREAM);
+ /* no error, just returning buffers */
+ msg->setInt32("err", OK);
}
msg->post();
}
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index ae6ae2d..7b60afc 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -89,6 +89,9 @@
return setData(key, TYPE_RECT, &r, sizeof(r));
}
+/**
+ * Note that the returned pointer becomes invalid when additional metadata is set.
+ */
bool MetaData::findCString(uint32_t key, const char **value) {
uint32_t type;
const void *data;
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 7f56af8..43736ad 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -359,12 +359,7 @@
observer->setCodec(codec);
err = codec->configureCodec(meta);
-
if (err == OK) {
- if (!strcmp("OMX.Nvidia.mpeg2v.decode", componentName)) {
- codec->mFlags |= kOnlySubmitOneInputBufferAtOneTime;
- }
-
return codec;
}
@@ -1346,8 +1341,7 @@
mLeftOverBuffer(NULL),
mPaused(false),
mNativeWindow(
- (!strncmp(componentName, "OMX.google.", 11)
- || !strcmp(componentName, "OMX.Nvidia.mpeg2v.decode"))
+ (!strncmp(componentName, "OMX.google.", 11))
? NULL : nativeWindow) {
mPortStatus[kPortIndexInput] = ENABLED;
mPortStatus[kPortIndexOutput] = ENABLED;
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 6a16bb4..1a9a26b 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -38,11 +38,14 @@
namespace android {
+static int64_t kWakelockMinDelay = 100000ll; // 100ms
+
TimedEventQueue::TimedEventQueue()
: mNextEventID(1),
mRunning(false),
mStopped(false),
- mDeathRecipient(new PMDeathRecipient(this)) {
+ mDeathRecipient(new PMDeathRecipient(this)),
+ mWakeLockCount(0) {
}
TimedEventQueue::~TimedEventQueue() {
@@ -87,9 +90,7 @@
// some events may be left in the queue if we did not flush and the wake lock
// must be released.
- if (!mQueue.empty()) {
- releaseWakeLock_l();
- }
+ releaseWakeLock_l(true /*force*/);
mQueue.clear();
mRunning = false;
@@ -126,13 +127,15 @@
QueueItem item;
item.event = event;
item.realtime_us = realtime_us;
+ item.has_wakelock = false;
if (it == mQueue.begin()) {
mQueueHeadChangedCondition.signal();
}
- if (mQueue.empty()) {
+ if (realtime_us > ALooper::GetNowUs() + kWakelockMinDelay) {
acquireWakeLock_l();
+ item.has_wakelock = true;
}
mQueue.insert(it, item);
@@ -188,10 +191,10 @@
ALOGV("cancelling event %d", (*it).event->eventID());
(*it).event->setEventID(0);
- it = mQueue.erase(it);
- if (mQueue.empty()) {
+ if ((*it).has_wakelock) {
releaseWakeLock_l();
}
+ it = mQueue.erase(it);
if (stopAfterFirstMatch) {
return;
}
@@ -297,11 +300,10 @@
if ((*it).event->eventID() == id) {
sp<Event> event = (*it).event;
event->setEventID(0);
-
- mQueue.erase(it);
- if (mQueue.empty()) {
+ if ((*it).has_wakelock) {
releaseWakeLock_l();
}
+ mQueue.erase(it);
return event;
}
}
@@ -313,51 +315,59 @@
void TimedEventQueue::acquireWakeLock_l()
{
- if (mWakeLockToken != 0) {
- return;
- }
- if (mPowerManager == 0) {
- // use checkService() to avoid blocking if power service is not up yet
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("power"));
- if (binder == 0) {
- ALOGW("cannot connect to the power manager service");
- } else {
- mPowerManager = interface_cast<IPowerManager>(binder);
- binder->linkToDeath(mDeathRecipient);
+ if (mWakeLockCount++ == 0) {
+ CHECK(mWakeLockToken == 0);
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("cannot connect to the power manager service");
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
}
- }
- if (mPowerManager != 0) {
- sp<IBinder> binder = new BBinder();
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder,
- String16("TimedEventQueue"),
- String16("media"));
- IPCThreadState::self()->restoreCallingIdentity(token);
- if (status == NO_ERROR) {
- mWakeLockToken = binder;
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = new BBinder();
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16("TimedEventQueue"),
+ String16("media"));
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
}
}
}
-void TimedEventQueue::releaseWakeLock_l()
+void TimedEventQueue::releaseWakeLock_l(bool force)
{
- if (mWakeLockToken == 0) {
- return;
+ if (force) {
+ if (mWakeLockCount == 0) {
+ return;
+ }
+ // Force wakelock release below by setting reference count to 1.
+ mWakeLockCount = 1;
}
- if (mPowerManager != 0) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
- IPCThreadState::self()->restoreCallingIdentity(token);
+ CHECK(mWakeLockCount != 0);
+ if (--mWakeLockCount == 0) {
+ CHECK(mWakeLockToken != 0);
+ if (mPowerManager != 0) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ }
+ mWakeLockToken.clear();
}
- mWakeLockToken.clear();
}
void TimedEventQueue::clearPowerManager()
{
Mutex::Autolock _l(mLock);
- releaseWakeLock_l();
+ releaseWakeLock_l(true /*force*/);
mPowerManager.clear();
}
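Note: a minimal sketch of the accounting invariant behind the new refcount
(names match the patch; the two fragments are illustrative): each queued item
far enough in the future takes one reference, each cancel or removal drops
one, and stop() or power-manager death force-drops whatever remains.

    // enqueue path
    item.has_wakelock = false;
    if (realtime_us > ALooper::GetNowUs() + kWakelockMinDelay) {
        acquireWakeLock_l();   // ++mWakeLockCount; acquires the lock on 0 -> 1
        item.has_wakelock = true;
    }

    // cancel/removal path
    if (item.has_wakelock) {
        releaseWakeLock_l();   // --mWakeLockCount; releases the lock on 1 -> 0
    }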
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4db8e80..9041c21 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -540,7 +540,8 @@
return BAD_VALUE;
}
-bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
+ bool isStreaming, audio_stream_type_t streamType)
{
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -594,7 +595,7 @@
info.bit_rate = brate;
- info.stream_type = AUDIO_STREAM_MUSIC;
+ info.stream_type = streamType;
info.has_video = hasVideo;
info.is_streaming = isStreaming;
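Note: call sites now forward the sink's actual stream type instead of assuming
music; the AwesomePlayer hunk above is the canonical pattern:

    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;  // fallback
    if (mAudioSink != NULL) {
        streamType = mAudioSink->getAudioStreamType();
    }
    mOffloadAudio = canOffloadStream(meta, hasVideo, isStreaming, streamType);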
diff --git a/media/libstagefright/include/TimedEventQueue.h b/media/libstagefright/include/TimedEventQueue.h
index 4e49c83..38a08b1 100644
--- a/media/libstagefright/include/TimedEventQueue.h
+++ b/media/libstagefright/include/TimedEventQueue.h
@@ -118,6 +118,7 @@
struct QueueItem {
sp<Event> event;
int64_t realtime_us;
+ bool has_wakelock;
};
struct StopEvent : public TimedEventQueue::Event {
@@ -139,6 +140,7 @@
sp<IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
const sp<PMDeathRecipient> mDeathRecipient;
+ uint32_t mWakeLockCount;
static void *ThreadWrapper(void *me);
void threadEntry();
@@ -146,7 +148,7 @@
sp<Event> removeEventFromQueue_l(event_id id);
void acquireWakeLock_l();
- void releaseWakeLock_l();
+ void releaseWakeLock_l(bool force = false);
TimedEventQueue(const TimedEventQueue &);
TimedEventQueue &operator=(const TimedEventQueue &);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 9850a46..175a263 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -1193,7 +1193,10 @@
unsigned sync_byte = br->getBits(8);
CHECK_EQ(sync_byte, 0x47u);
- MY_LOGV("transport_error_indicator = %u", br->getBits(1));
+ if (br->getBits(1)) { // transport_error_indicator
+ // silently ignore.
+ return OK;
+ }
unsigned payload_unit_start_indicator = br->getBits(1);
ALOGV("payload_unit_start_indicator = %u", payload_unit_start_indicator);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index cf43e94..b8970ad 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -148,6 +148,18 @@
}
}
+void GraphicBufferSource::omxIdle() {
+ ALOGV("omxIdle");
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting) {
+ // We are only interested in the transition from executing->idle,
+ // not loaded->idle.
+ mExecuting = false;
+ }
+}
+
void GraphicBufferSource::omxLoaded(){
Mutex::Autolock autoLock(mMutex);
if (!mExecuting) {
@@ -194,7 +206,9 @@
void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
Mutex::Autolock autoLock(mMutex);
- CHECK(mExecuting); // could this happen if app stop()s early?
+ if (!mExecuting) {
+ return;
+ }
int cbi = findMatchingCodecBuffer_l(header);
if (cbi < 0) {
@@ -213,7 +227,12 @@
// see if the GraphicBuffer reference was null, which should only ever
// happen for EOS.
if (codecBuffer.mGraphicBuffer == NULL) {
- CHECK(mEndOfStream && mEndOfStreamSent);
+ if (!(mEndOfStream && mEndOfStreamSent)) {
+ // This can happen when broken code sends us the same buffer
+ // twice in a row.
+ ALOGE("ERROR: codecBufferEmptied on non-EOS null buffer "
+ "(buffer emptied twice?)");
+ }
// No GraphicBuffer to deal with, no additional input or output is
// expected, so just return.
return;
@@ -384,6 +403,23 @@
if (mLatestSubmittedBufferId < 0 || mSuspended) {
return false;
}
+ if (mBufferSlot[mLatestSubmittedBufferId] == NULL) {
+ // This can happen if the remote side disconnects, causing
+ // onBuffersReleased() to NULL out our copy of the slots. The
+ // buffer is gone, so we have nothing to show.
+ //
+ // To be on the safe side we try to release the buffer.
+ ALOGD("repeatLatestSubmittedBuffer_l: slot was NULL");
+ mBufferQueue->releaseBuffer(
+ mLatestSubmittedBufferId,
+ mLatestSubmittedBufferFrameNum,
+ EGL_NO_DISPLAY,
+ EGL_NO_SYNC_KHR,
+ Fence::NO_FENCE);
+ mLatestSubmittedBufferId = -1;
+ mLatestSubmittedBufferFrameNum = 0;
+ return false;
+ }
int cbi = findAvailableCodecBuffer_l();
if (cbi < 0) {
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 244a843..9e5eee6 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -69,6 +69,11 @@
// sitting in the BufferQueue, this will send them to the codec.
void omxExecuting();
+ // This is called when OMX transitions to OMX_StateIdle, indicating that
+ // the codec is meant to return all buffers back to the client for them
+ // to be freed. Do NOT submit any more buffers to the component.
+ void omxIdle();
+
// This is called when OMX transitions to OMX_StateLoaded, indicating that
// we are shutting down.
void omxLoaded();
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 46e5d71..5f104fc 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -243,13 +243,18 @@
status_t OMXNodeInstance::sendCommand(
OMX_COMMANDTYPE cmd, OMX_S32 param) {
const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
- if (bufferSource != NULL
- && cmd == OMX_CommandStateSet
- && param == OMX_StateLoaded) {
- // Initiating transition from Executing -> Loaded
- // Buffers are about to be freed.
- bufferSource->omxLoaded();
- setGraphicBufferSource(NULL);
+ if (bufferSource != NULL && cmd == OMX_CommandStateSet) {
+ if (param == OMX_StateIdle) {
+ // Initiating transition from Executing -> Idle
+ // ACodec is waiting for all buffers to be returned, do NOT
+ // submit any more buffers to the codec.
+ bufferSource->omxIdle();
+ } else if (param == OMX_StateLoaded) {
+ // Initiating transition from Idle/Executing -> Loaded
+ // Buffers are about to be freed.
+ bufferSource->omxLoaded();
+ setGraphicBufferSource(NULL);
+ }
// fall through
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index a9c9b56..3132e54 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -448,6 +448,7 @@
pid_t tid,
int *sessionId,
String8& name,
+ int clientUid,
status_t *status)
{
sp<PlaybackThread::Track> track;
@@ -483,6 +484,7 @@
}
pid_t pid = IPCThreadState::self()->getCallingPid();
+
client = registerPid_l(pid);
ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);
@@ -510,7 +512,7 @@
ALOGV("createTrack() lSessionId: %d", lSessionId);
track = thread->createTrack_l(client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, &lStatus);
+ channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus);
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
@@ -1284,8 +1286,11 @@
}
// create new record track.
// The record track uses one track in mHardwareMixerThread by convention.
+ // TODO: the uid should be passed in as a parameter to openRecord
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
- frameCount, lSessionId, flags, tid, &lStatus);
+ frameCount, lSessionId,
+ IPCThreadState::self()->getCallingUid(),
+ flags, tid, &lStatus);
LOG_ALWAYS_FATAL_IF((recordTrack != 0) != (lStatus == NO_ERROR));
}
if (lStatus != NO_ERROR) {
@@ -2335,6 +2340,7 @@
strategy,
sessionId,
effect->id());
+ AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
}
effect = chain->getEffectFromId_l(0);
}
@@ -2349,6 +2355,7 @@
strategy,
sessionId,
removed[i]->id());
+ AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
}
}
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 2aeb263..53e238e 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -109,6 +109,7 @@
pid_t tid,
int *sessionId,
String8& name,
+ int clientUid,
status_t *status);
virtual sp<IAudioRecord> openRecord(
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a2e2511..43b77f3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -31,6 +31,7 @@
size_t frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ int uid,
IAudioFlinger::track_flags_t flags);
virtual ~Track();
@@ -165,7 +166,8 @@
audio_channel_mask_t channelMask,
size_t frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId);
+ int sessionId,
+ int uid);
virtual ~TimedTrack();
class TimedBuffer {
@@ -208,7 +210,8 @@
audio_channel_mask_t channelMask,
size_t frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId);
+ int sessionId,
+ int uid);
void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
void timedYieldSilence_l(uint32_t numFrames,
@@ -255,7 +258,8 @@
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- size_t frameCount);
+ size_t frameCount,
+ int uid);
virtual ~OutputTrack();
virtual status_t start(AudioSystem::sync_event_t event =
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index cd8f70c..57de568 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -28,7 +28,8 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int sessionId);
+ int sessionId,
+ int uid);
virtual ~RecordTrack();
virtual status_t start(AudioSystem::sync_event_t event, int triggerSession);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 47dcca6..bf85b51 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -272,6 +272,7 @@
// mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
// set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
mParamStatus(NO_ERROR),
+ //FIXME: mStandby should be true here. Is this some kind of hack?
mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
// mName will be set by concrete (non-virtual) subclass
@@ -503,17 +504,7 @@
void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid)
{
- if (mPowerManager == 0) {
- // use checkService() to avoid blocking if power service is not up yet
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("power"));
- if (binder == 0) {
- ALOGW("Thread %s cannot connect to the power manager service", mName);
- } else {
- mPowerManager = interface_cast<IPowerManager>(binder);
- binder->linkToDeath(mDeathRecipient);
- }
- }
+ getPowerManager_l();
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
status_t status;
@@ -553,6 +544,41 @@
}
}
+void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
+ Mutex::Autolock _l(mLock);
+ updateWakeLockUids_l(uids);
+}
+
+void AudioFlinger::ThreadBase::getPowerManager_l() {
+
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("Thread %s cannot connect to the power manager service", mName);
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+}
+
+void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
+
+ getPowerManager_l();
+ if (mWakeLockToken == NULL) {
+ ALOGE("no wake lock to update!");
+ return;
+ }
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = new BBinder();
+ status_t status;
+ status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
+ ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+ }
+}
+
void AudioFlinger::ThreadBase::clearPowerManager()
{
Mutex::Autolock _l(mLock);
@@ -977,6 +1003,7 @@
: ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
mNormalFrameCount(0), mMixBuffer(NULL),
mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+ mActiveTracksGeneration(0),
// mStreamTypes[] initialized in constructor body
mOutput(output),
mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
@@ -1160,6 +1187,7 @@
int sessionId,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
+ int uid,
status_t *status)
{
sp<Track> track;
@@ -1293,10 +1321,10 @@
if (!isTimed) {
track = new Track(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId, *flags);
+ channelMask, frameCount, sharedBuffer, sessionId, uid, *flags);
} else {
track = TimedTrack::create(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId);
+ channelMask, frameCount, sharedBuffer, sessionId, uid);
}
if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
lStatus = NO_MEMORY;
@@ -1432,6 +1460,9 @@
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
+ mWakeLockUids.add(track->uid());
+ mActiveTracksGeneration++;
+ mLatestActiveTrack = track;
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
@@ -1905,7 +1936,7 @@
mNumWrites++;
mInWrite = false;
-
+ mStandby = false;
return bytesWritten;
}
@@ -2127,6 +2158,8 @@
// FIXME could this be made local to while loop?
writeFrames = 0;
+ int lastGeneration = 0;
+
cacheParameters_l();
sleepTime = idleSleepTime;
@@ -2183,6 +2216,8 @@
break;
}
releaseWakeLock_l();
+ mWakeLockUids.clear();
+ mActiveTracksGeneration++;
ALOGV("wait async completion");
mWaitWorkCV.wait(mLock);
ALOGV("async completion/wake");
@@ -2213,6 +2248,8 @@
}
releaseWakeLock_l();
+ mWakeLockUids.clear();
+ mActiveTracksGeneration++;
// wait until we have something to do...
ALOGV("%s going to sleep", myName.string());
mWaitWorkCV.wait(mLock);
@@ -2237,11 +2274,18 @@
// mMixerStatusIgnoringFastTracks is also updated internally
mMixerStatus = prepareTracks_l(&tracksToRemove);
+ // compare with previously applied list
+ if (lastGeneration != mActiveTracksGeneration) {
+ // update wakelock
+ updateWakeLockUids_l(mWakeLockUids);
+ lastGeneration = mActiveTracksGeneration;
+ }
+
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
// or modified if an effect is created or deleted
lockEffectChains_l(effectChains);
- }
+ } // mLock scope ends
if (mBytesRemaining == 0) {
mCurrentWriteLength = 0;
@@ -2315,7 +2359,6 @@
}
}
- mStandby = false;
} else {
usleep(sleepTime);
}
@@ -2351,6 +2394,8 @@
}
releaseWakeLock();
+ mWakeLockUids.clear();
+ mActiveTracksGeneration++;
ALOGV("Thread %p type %d exiting", this, mType);
return false;
@@ -2364,6 +2409,8 @@
for (size_t i=0 ; i<count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
mActiveTracks.remove(track);
+ mWakeLockUids.remove(track->uid());
+ mActiveTracksGeneration++;
ALOGV("removeTracks_l removing track on session %d", track->sessionId());
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
@@ -3559,6 +3606,12 @@
Track* const track = t.get();
audio_track_cblk_t* cblk = track->cblk();
+ // Only consider last track started for volume and mixer state control.
+ // In theory an older track could underrun and restart after the new one starts
+ // but as we only care about the transition phase between two tracks on a
+ // direct output, it is not a problem to ignore the underrun case.
+ sp<Track> l = mLatestActiveTrack.promote();
+ bool last = l.get() == track;
// The first time a track is added we wait
// for all its buffers to be filled before processing it
@@ -3568,11 +3621,6 @@
} else {
minFrames = 1;
}
- // Only consider last track started for volume and mixer state control.
- // This is the last entry in mActiveTracks unless a track underruns.
- // As we only care about the transition phase between two tracks on a
- // direct output, it is not a problem to ignore the underrun case.
- bool last = (i == (count - 1));
if ((track->framesReady() >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
@@ -3599,7 +3647,7 @@
} else {
// clear effect chain input buffer if the last active track started underruns
// to avoid sending previous audio buffer again to effects
- if (!mEffectChains.isEmpty() && (i == (count -1))) {
+ if (!mEffectChains.isEmpty() && last) {
mEffectChains[0]->clearInputBuffer();
}
@@ -3611,7 +3659,8 @@
// TODO: implement behavior for compressed audio
size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
size_t framesWritten = mBytesWritten / mFrameSize;
- if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
+ if (mStandby || !last ||
+ track->presentationComplete(framesWritten, audioHALFrames)) {
if (track->isStopped()) {
track->reset();
}
@@ -3624,6 +3673,9 @@
if (--(track->mRetryCount) <= 0) {
ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
tracksToRemove->add(track);
+ // indicate to client process that the track was disabled because of underrun;
+ // it will then automatically call start() when data is available
+ android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
} else if (last) {
mixerStatus = MIXER_TRACKS_ENABLED;
}
@@ -3886,11 +3938,8 @@
mFlushPending(false),
mPausedBytesRemaining(0)
{
-}
-
-AudioFlinger::OffloadThread::~OffloadThread()
-{
- mPreviousTrack.clear();
+ //FIXME: mStandby should be set to true by ThreadBase constructor
+ mStandby = true;
}
void AudioFlinger::OffloadThread::threadLoop_exit()
@@ -3927,24 +3976,13 @@
}
Track* const track = t.get();
audio_track_cblk_t* cblk = track->cblk();
- if (mPreviousTrack != NULL) {
- if (t != mPreviousTrack) {
- // Flush any data still being written from last track
- mBytesRemaining = 0;
- if (mPausedBytesRemaining) {
- // Last track was paused so we also need to flush saved
- // mixbuffer state and invalidate track so that it will
- // re-submit that unwritten data when it is next resumed
- mPausedBytesRemaining = 0;
- // Invalidate is a bit drastic - would be more efficient
- // to have a flag to tell client that some of the
- // previously written data was lost
- mPreviousTrack->invalidate();
- }
- }
- }
- mPreviousTrack = t;
- bool last = (i == (count - 1));
+ // Only consider last track started for volume and mixer state control.
+ // In theory an older track could underrun and restart after the new one starts
+ // but as we only care about the transition phase between two tracks on a
+ // direct output, it is not a problem to ignore the underrun case.
+ sp<Track> l = mLatestActiveTrack.promote();
+ bool last = l.get() == track;
+
if (track->isPausing()) {
track->setPaused();
if (last) {
@@ -3992,6 +4030,31 @@
}
if (last) {
+ sp<Track> previousTrack = mPreviousTrack.promote();
+ if (previousTrack != 0) {
+ if (track != previousTrack.get()) {
+ // Flush any data still being written from last track
+ mBytesRemaining = 0;
+ if (mPausedBytesRemaining) {
+ // Last track was paused so we also need to flush saved
+ // mixbuffer state and invalidate track so that it will
+ // re-submit that unwritten data when it is next resumed
+ mPausedBytesRemaining = 0;
+ // Invalidate is a bit drastic - would be more efficient
+ // to have a flag to tell client that some of the
+ // previously written data was lost
+ previousTrack->invalidate();
+ }
+ // flush data already sent to the DSP if changing audio session as audio
+ // comes from a different source. Also invalidate previous track to force a
+ // seek when resuming.
+ if (previousTrack->sessionId() != track->sessionId()) {
+ previousTrack->invalidate();
+ mFlushPending = true;
+ }
+ }
+ }
+ mPreviousTrack = track;
// reset retry count
track->mRetryCount = kMaxTrackRetriesOffload;
mActiveTrack = t;
@@ -4008,22 +4071,27 @@
// has been written
ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
- if (last) {
- sleepTime = 0;
- standbyTime = systemTime() + standbyDelay;
- mixerStatus = MIXER_DRAIN_TRACK;
- mDrainSequence += 2;
+ // do not drain if no data was ever sent to HAL (mStandby == true)
+ if (last && !mStandby) {
+ // do not modify drain sequence if we are already draining. This happens
+ // when resuming from pause after drain.
+ if ((mDrainSequence & 1) == 0) {
+ sleepTime = 0;
+ standbyTime = systemTime() + standbyDelay;
+ mixerStatus = MIXER_DRAIN_TRACK;
+ mDrainSequence += 2;
+ }
if (mHwPaused) {
// It is possible to move from PAUSED to STOPPING_1 without
// a resume so we must ensure hardware is running
- mOutput->stream->resume(mOutput->stream);
+ doHwResume = true;
mHwPaused = false;
}
}
}
} else if (track->isStopping_2()) {
- // Drain has completed, signal presentation complete
- if (!(mDrainSequence & 1) || !last) {
+ // Drain has completed or we are in standby, signal presentation complete
+ if (!(mDrainSequence & 1) || !last || mStandby) {
track->mState = TrackBase::STOPPED;
size_t audioHALFrames =
(mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
@@ -4040,6 +4108,9 @@
ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
track->name());
tracksToRemove->add(track);
+ // indicate to client process that the track was disabled because of underrun;
+ // it will then automatically call start() when data is available
+ android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
} else if (last){
mixerStatus = MIXER_TRACKS_ENABLED;
}
@@ -4053,7 +4124,7 @@
// If a flush is pending and a track is active but the HW is not paused, force a HW pause
// before flush and then resume HW. This can happen in case of pause/flush/resume
// if resume is received before pause is executed.
- if (doHwPause || (mFlushPending && !mHwPaused && (count != 0))) {
+ if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
mOutput->stream->pause(mOutput->stream);
if (!doHwPause) {
doHwResume = true;
@@ -4063,7 +4134,7 @@
flushHw_l();
mFlushPending = false;
}
- if (doHwResume) {
+ if (!mStandby && doHwResume) {
mOutput->stream->resume(mOutput->stream);
}
@@ -4185,6 +4256,7 @@
for (size_t i = 0; i < outputTracks.size(); i++) {
outputTracks[i]->write(mMixBuffer, writeFrames);
}
+ mStandby = false;
return (ssize_t)mixBufferSize;
}
@@ -4216,7 +4288,8 @@
mSampleRate,
mFormat,
mChannelMask,
- frameCount);
+ frameCount,
+ IPCThreadState::self()->getCallingUid());
if (outputTrack->cblk() != NULL) {
thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f);
mOutputTracks.add(outputTrack);
@@ -4318,7 +4391,6 @@
snprintf(mName, kNameLength, "AudioIn_%X", id);
readInputParameters();
- mClientUid = IPCThreadState::self()->getCallingUid();
}
@@ -4350,7 +4422,11 @@
nsecs_t lastWarning = 0;
inputStandBy();
- acquireWakeLock(mClientUid);
+ {
+ Mutex::Autolock _l(mLock);
+ activeTrack = mActiveTrack;
+ acquireWakeLock_l(activeTrack != 0 ? activeTrack->uid() : -1);
+ }
// used to verify we've read at least once before evaluating how many bytes were read
bool readOnce = false;
@@ -4363,6 +4439,12 @@
{ // scope for mLock
Mutex::Autolock _l(mLock);
checkForNewParameters_l();
+ if (mActiveTrack != 0 && activeTrack != mActiveTrack) {
+ SortedVector<int> tmp;
+ tmp.add(mActiveTrack->uid());
+ updateWakeLockUids_l(tmp);
+ }
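+ // Re-tag the wake lock with the new client's uid so battery accounting
+ // follows the app that is actually recording, matching the uid plumbing
+ // added to TrackBase below.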
+ activeTrack = mActiveTrack;
if (mActiveTrack == 0 && mConfigEvents.isEmpty()) {
standby();
@@ -4375,7 +4457,7 @@
// go to sleep
mWaitWorkCV.wait(mLock);
ALOGV("RecordThread: loop starting");
- acquireWakeLock_l(mClientUid);
+ acquireWakeLock_l(mActiveTrack != 0 ? mActiveTrack->uid() : -1);
continue;
}
if (mActiveTrack != 0) {
@@ -4585,6 +4667,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
int sessionId,
+ int uid,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
status_t *status)
@@ -4654,7 +4737,7 @@
Mutex::Autolock _l(mLock);
track = new RecordTrack(this, client, sampleRate,
- format, channelMask, frameCount, sessionId);
+ format, channelMask, frameCount, sessionId, uid);
if (track->getCblk() == 0) {
ALOGE("createRecordTrack_l() no control block");
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 802b784..207f1eb 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -241,6 +241,9 @@
void acquireWakeLock_l(int uid = -1);
void releaseWakeLock();
void releaseWakeLock_l();
+ void updateWakeLockUids(const SortedVector<int> &uids);
+ void updateWakeLockUids_l(const SortedVector<int> &uids);
+ void getPowerManager_l();
void setEffectSuspended_l(const effect_uuid_t *type,
bool suspend,
int sessionId);
@@ -421,6 +424,7 @@
int sessionId,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
+ int uid,
status_t *status);
AudioStreamOut* getOutput() const;
@@ -495,6 +499,9 @@
void setMasterMute_l(bool muted) { mMasterMute = muted; }
protected:
SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<>
+ SortedVector<int> mWakeLockUids;
+ int mActiveTracksGeneration;
+ wp<Track> mLatestActiveTrack; // latest track added to mActiveTracks
// Allocate a track name for a given channel mask.
// Returns name >= 0 if successful, -1 on failure.
@@ -735,7 +742,7 @@
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, uint32_t device);
- virtual ~OffloadThread();
+ virtual ~OffloadThread() {}
protected:
// threadLoop snippets
@@ -755,7 +762,7 @@
bool mFlushPending;
size_t mPausedWriteLength; // length in bytes of write interrupted by pause
size_t mPausedBytesRemaining; // bytes still waiting in mixbuffer after resume
- sp<Track> mPreviousTrack; // used to detect track switch
+ wp<Track> mPreviousTrack; // used to detect track switch
};
class AsyncCallbackThread : public Thread {
@@ -873,6 +880,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
int sessionId,
+ int uid,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
status_t *status);
@@ -953,5 +961,4 @@
// For dumpsys
const sp<NBAIO_Sink> mTeeSink;
- int mClientUid;
};
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 523e4b2..cd201d9 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -45,6 +45,7 @@
size_t frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ int uid,
bool isOut);
virtual ~TrackBase();
@@ -54,6 +55,7 @@
sp<IMemory> getCblk() const { return mCblkMemory; }
audio_track_cblk_t* cblk() const { return mCblk; }
int sessionId() const { return mSessionId; }
+ int uid() const { return mUid; }
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
protected:
@@ -132,6 +134,7 @@
// openRecord(), and then adjusted as needed
const int mSessionId;
+ int mUid;
Vector < sp<SyncEvent> >mSyncEvents;
const bool mIsOut;
ServerProxy* mServerProxy;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9c6e724..af04ce7 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -68,6 +68,7 @@
size_t frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ int clientUid,
bool isOut)
: RefBase(),
mThread(thread),
@@ -88,6 +89,18 @@
mId(android_atomic_inc(&nextTrackId)),
mTerminated(false)
{
+ // Only trust the specified uid when the caller is mediaserver itself and a
+ // uid was actually given; otherwise fall back to the binder calling uid
+ if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
+ int newclientUid = IPCThreadState::self()->getCallingUid();
+ if (clientUid != -1 && clientUid != newclientUid) {
+ ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
+ }
+ clientUid = newclientUid;
+ }
+ // clientUid contains the uid of the app that is responsible for this track, so we can blame
+ // battery usage on it.
+ mUid = clientUid;
+
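+ // Two call paths are expected through the check above: an app binding to
+ // AudioFlinger directly has any uid it supplies overridden by the binder
+ // calling uid, while mediaserver-internal callers (e.g. the duplicating
+ // OutputTrack above, which passes IPCThreadState::self()->getCallingUid())
+ // may hand a client uid through unmodified.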
// client == 0 implies sharedBuffer == 0
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -313,9 +326,10 @@
size_t frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ int uid,
IAudioFlinger::track_flags_t flags)
: TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
- sessionId, true /*isOut*/),
+ sessionId, uid, true /*isOut*/),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
@@ -600,6 +614,15 @@
// track was already in the active list, not a problem
if (status == ALREADY_EXISTS) {
status = NO_ERROR;
+ } else {
+ // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
+ // It is usually unsafe to access the server proxy from a binder thread.
+ // But in this case we know the mixer thread (whether normal mixer or fast mixer)
+ // isn't looking at this track yet: we still hold the normal mixer thread lock,
+ // and for fast tracks the track is not yet in the fast mixer thread's active set.
+ ServerProxy::Buffer buffer;
+ buffer.mFrameCount = 1;
+ (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
}
} else {
status = BAD_VALUE;
@@ -829,6 +852,7 @@
dstChain->strategy(),
AUDIO_SESSION_OUTPUT_MIX,
effect->id());
+ AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
}
status = playbackThread->attachAuxEffect(this, EffectId);
}
@@ -954,13 +978,14 @@
audio_channel_mask_t channelMask,
size_t frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId) {
+ int sessionId,
+ int uid) {
if (!client->reserveTimedTrack())
return 0;
return new TimedTrack(
thread, client, streamType, sampleRate, format, channelMask, frameCount,
- sharedBuffer, sessionId);
+ sharedBuffer, sessionId, uid);
}
AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
@@ -972,9 +997,10 @@
audio_channel_mask_t channelMask,
size_t frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId)
+ int sessionId,
+ int uid)
: Track(thread, client, streamType, sampleRate, format, channelMask,
- frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
+ frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
mQueueHeadInFlight(false),
mTrimQueueHeadOnRelease(false),
mFramesPendingInQueue(0),
@@ -1467,9 +1493,10 @@
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- size_t frameCount)
+ size_t frameCount,
+ int uid)
: Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
- NULL, 0, IAudioFlinger::TRACK_DEFAULT),
+ NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT),
mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
{
@@ -1729,9 +1756,10 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int sessionId)
+ int sessionId,
+ int uid)
: TrackBase(thread, client, sampleRate, format,
- channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/),
+ channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
mOverflow(false)
{
ALOGV("RecordTrack constructor");
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d23f8b9..51ba698 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -35,6 +35,7 @@
device3/Camera3ZslStream.cpp \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
+ utils/CameraTraces.cpp \
LOCAL_SHARED_LIBRARIES:= \
libui \
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 34a5b15..eeedfc9 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -42,6 +42,7 @@
#include "api1/Camera2Client.h"
#include "api_pro/ProCamera2Client.h"
#include "api2/CameraDeviceClient.h"
+#include "utils/CameraTraces.h"
#include "CameraDeviceFactory.h"
namespace android {
@@ -1219,6 +1220,10 @@
if (locked) mServiceLock.unlock();
+ // Dump camera traces if there were any
+ write(fd, "\n", 1);
+ camera3::CameraTraces::dump(fd, args);
+
// change logging level
int n = args.size();
for (int i = 0; i + 1 < n; i++) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index df3b162..0b6ca5c 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -76,13 +76,15 @@
return res;
}
- SharedParameters::Lock l(mParameters);
+ {
+ SharedParameters::Lock l(mParameters);
- res = l.mParameters.initialize(&(mDevice->info()));
- if (res != OK) {
- ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return NO_INIT;
+ res = l.mParameters.initialize(&(mDevice->info()));
+ if (res != OK) {
+ ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return NO_INIT;
+ }
}
String8 threadName;
@@ -135,6 +137,7 @@
mCallbackProcessor->run(threadName.string());
if (gLogLevel >= 1) {
+ SharedParameters::Lock l(mParameters);
ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
mCameraId);
ALOGD("%s", l.mParameters.paramsFlattened.string());
@@ -353,6 +356,10 @@
result.appendFormat(" meteringCropRegion\n");
haveQuirk = true;
}
+ if (p.quirks.partialResults) {
+ result.appendFormat(" usePartialResult\n");
+ haveQuirk = true;
+ }
if (!haveQuirk) {
result.appendFormat(" none\n");
}
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index c34cb12..19acae4 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -29,13 +29,27 @@
namespace camera2 {
FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
- wp<Camera2Client> client) :
+ sp<Camera2Client> client) :
FrameProcessorBase(device),
mClient(client),
- mLastFrameNumberOfFaces(0) {
+ mLastFrameNumberOfFaces(0),
+ mLast3AFrameNumber(-1) {
sp<CameraDeviceBase> d = device.promote();
mSynthesize3ANotify = !(d->willNotify3A());
+
+ {
+ SharedParameters::Lock l(client->getParameters());
+ mUsePartialQuirk = l.mParameters.quirks.partialResults;
+
+ // Initialize starting 3A state
+ m3aState.afTriggerId = l.mParameters.afTriggerCounter;
+ m3aState.aeTriggerId = l.mParameters.precaptureTriggerCounter;
+ // Check if lens is fixed-focus
+ if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
+ m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ }
+ }
}
FrameProcessor::~FrameProcessor() {
@@ -49,20 +63,25 @@
return false;
}
- if (processFaceDetect(frame, client) != OK) {
+ bool partialResult = false;
+ if (mUsePartialQuirk) {
+ camera_metadata_entry_t entry;
+ entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count > 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ partialResult = true;
+ }
+ }
+
+ if (!partialResult && processFaceDetect(frame, client) != OK) {
return false;
}
if (mSynthesize3ANotify) {
- // Ignoring missing fields for now
process3aState(frame, client);
}
- if (!FrameProcessorBase::processSingleFrame(frame, device)) {
- return false;
- }
-
- return true;
+ return FrameProcessorBase::processSingleFrame(frame, device);
}
status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
@@ -198,86 +217,75 @@
ATRACE_CALL();
camera_metadata_ro_entry_t entry;
- int mId = client->getCameraId();
+ int cameraId = client->getCameraId();
entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
int32_t frameNumber = entry.data.i32[0];
+ // Don't send 3A notifications for the same frame number twice
+ if (frameNumber <= mLast3AFrameNumber) {
+ ALOGV("%s: Already sent 3A for frame number %d, skipping",
+ __FUNCTION__, frameNumber);
+ return OK;
+ }
+
+ mLast3AFrameNumber = frameNumber;
+
// Get 3A states from result metadata
bool gotAllStates = true;
AlgState new3aState;
- entry = frame.find(ANDROID_CONTROL_AE_STATE);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!",
- __FUNCTION__, mId, frameNumber);
- gotAllStates = false;
- } else {
- new3aState.aeState =
- static_cast<camera_metadata_enum_android_control_ae_state>(
- entry.data.u8[0]);
- }
+ // TODO: Also use AE mode, AE trigger ID
- entry = frame.find(ANDROID_CONTROL_AF_STATE);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!",
- __FUNCTION__, mId, frameNumber);
- gotAllStates = false;
- } else {
- new3aState.afState =
- static_cast<camera_metadata_enum_android_control_af_state>(
- entry.data.u8[0]);
- }
+ gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE,
+ &new3aState.afMode, frameNumber, cameraId);
- entry = frame.find(ANDROID_CONTROL_AWB_STATE);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!",
- __FUNCTION__, mId, frameNumber);
- gotAllStates = false;
- } else {
- new3aState.awbState =
- static_cast<camera_metadata_enum_android_control_awb_state>(
- entry.data.u8[0]);
- }
+ gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE,
+ &new3aState.awbMode, frameNumber, cameraId);
- int32_t afTriggerId = 0;
- entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!",
- __FUNCTION__, mId, frameNumber);
- gotAllStates = false;
- } else {
- afTriggerId = entry.data.i32[0];
- }
+ gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+ &new3aState.aeState, frameNumber, cameraId);
- int32_t aeTriggerId = 0;
- entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
- if (entry.count == 0) {
- ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL"
- " for frame %d!",
- __FUNCTION__, mId, frameNumber);
- gotAllStates = false;
- } else {
- aeTriggerId = entry.data.i32[0];
- }
+ gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE,
+ &new3aState.afState, frameNumber, cameraId);
+
+ gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE,
+ &new3aState.awbState, frameNumber, cameraId);
+
+ gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
+ &new3aState.afTriggerId, frameNumber, cameraId);
+
+ gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ &new3aState.aeTriggerId, frameNumber, cameraId);
if (!gotAllStates) return BAD_VALUE;
if (new3aState.aeState != m3aState.aeState) {
- ALOGV("%s: AE state changed from 0x%x to 0x%x",
- __FUNCTION__, m3aState.aeState, new3aState.aeState);
- client->notifyAutoExposure(new3aState.aeState, aeTriggerId);
+ ALOGV("%s: Camera %d: AE state %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.aeState, new3aState.aeState);
+ client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
}
- if (new3aState.afState != m3aState.afState) {
- ALOGV("%s: AF state changed from 0x%x to 0x%x",
- __FUNCTION__, m3aState.afState, new3aState.afState);
- client->notifyAutoFocus(new3aState.afState, afTriggerId);
+
+ if (new3aState.afState != m3aState.afState ||
+ new3aState.afMode != m3aState.afMode ||
+ new3aState.afTriggerId != m3aState.afTriggerId) {
+ ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.afState, new3aState.afState,
+ m3aState.afMode, new3aState.afMode,
+ m3aState.afTriggerId, new3aState.afTriggerId);
+ client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
}
- if (new3aState.awbState != m3aState.awbState) {
- ALOGV("%s: AWB state changed from 0x%x to 0x%x",
- __FUNCTION__, m3aState.awbState, new3aState.awbState);
- client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
+ if (new3aState.awbState != m3aState.awbState ||
+ new3aState.awbMode != m3aState.awbMode) {
+ ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
+ __FUNCTION__, cameraId,
+ m3aState.awbState, new3aState.awbState,
+ m3aState.awbMode, new3aState.awbMode);
+ client->notifyAutoWhitebalance(new3aState.awbState,
+ new3aState.aeTriggerId);
}
m3aState = new3aState;
@@ -285,6 +293,39 @@
return OK;
}
+template<typename Src, typename T>
+bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+ T* value, int32_t frameNumber, int cameraId) {
+ camera_metadata_ro_entry_t entry;
+ if (value == NULL) {
+ ALOGE("%s: Camera %d: Value to write to is NULL",
+ __FUNCTION__, cameraId);
+ return false;
+ }
+
+ entry = result.find(tag);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+ __FUNCTION__, cameraId,
+ get_camera_metadata_tag_name(tag), frameNumber);
+ return false;
+ } else {
+ switch (sizeof(Src)) {
+ case sizeof(uint8_t):
+ *value = static_cast<T>(entry.data.u8[0]);
+ break;
+ case sizeof(int32_t):
+ *value = static_cast<T>(entry.data.i32[0]);
+ break;
+ default:
+ ALOGE("%s: Camera %d: Unsupported source",
+ __FUNCTION__, cameraId);
+ return false;
+ }
+ }
+ return true;
+}
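+
+// Usage sketch, mirroring the calls in process3aState() above: Src names the
+// type stored in the metadata entry and selects the matching union member via
+// the sizeof() switch, e.g.
+//   get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+//                        &new3aState.aeState, frameNumber, cameraId);
+//   get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
+//                        &new3aState.afTriggerId, frameNumber, cameraId);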
+
void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
const camera_frame_metadata &metadata) {
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 2a17d45..856ad32 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -39,7 +39,7 @@
*/
class FrameProcessor : public FrameProcessorBase {
public:
- FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client);
+ FrameProcessor(wp<CameraDeviceBase> device, sp<Camera2Client> client);
~FrameProcessor();
private:
@@ -61,18 +61,44 @@
status_t process3aState(const CameraMetadata &frame,
const sp<Camera2Client> &client);
+ // Helper for process3aState
+ template<typename Src, typename T>
+ bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+ int32_t frameNumber, int cameraId);
+
+
struct AlgState {
+ // TODO: also track AE mode
+ camera_metadata_enum_android_control_af_mode afMode;
+ camera_metadata_enum_android_control_awb_mode awbMode;
+
camera_metadata_enum_android_control_ae_state aeState;
camera_metadata_enum_android_control_af_state afState;
camera_metadata_enum_android_control_awb_state awbState;
+ int32_t afTriggerId;
+ int32_t aeTriggerId;
+
+ // These defaults need to match those in Parameters.cpp
AlgState() :
+ afMode(ANDROID_CONTROL_AF_MODE_AUTO),
+ awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
- awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
+ awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
+ afTriggerId(0),
+ aeTriggerId(0) {
}
} m3aState;
+ // Whether the partial result quirk is enabled for this device
+ bool mUsePartialQuirk;
+
+ // Track the most recent frame number for which 3A notifications were sent.
+ // Used to avoid sending 3A notifications for the same frame several times.
+ int32_t mLast3AFrameNumber;
+
// Emit FaceDetection event to java if faces changed
void callbackFaceDetection(sp<Camera2Client> client,
const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 8a4e75c..08af566 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -183,8 +183,7 @@
// still have to do something sane for them
// NOTE: Not scaled like FPS range values are.
- previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
- lastSetPreviewFps = previewFps;
+ int previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
previewFps);
@@ -1047,6 +1046,11 @@
ALOGV_IF(quirks.meteringCropRegion, "Camera %d: Quirk meteringCropRegion"
" enabled", cameraId);
+ entry = info->find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+ quirks.partialResults = (entry.count != 0 && entry.data.u8[0] == 1);
+ ALOGV_IF(quirks.partialResults, "Camera %d: Quirk usePartialResult"
+ " enabled", cameraId);
+
return OK;
}
@@ -1129,13 +1133,22 @@
// PREVIEW_FPS_RANGE
bool fpsRangeChanged = false;
+ int32_t lastSetFpsRange[2];
+
+ params.getPreviewFpsRange(&lastSetFpsRange[0], &lastSetFpsRange[1]);
+ lastSetFpsRange[0] /= kFpsToApiScale;
+ lastSetFpsRange[1] /= kFpsToApiScale;
+
newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0],
&validatedParams.previewFpsRange[1]);
validatedParams.previewFpsRange[0] /= kFpsToApiScale;
validatedParams.previewFpsRange[1] /= kFpsToApiScale;
- if (validatedParams.previewFpsRange[0] != previewFpsRange[0] ||
- validatedParams.previewFpsRange[1] != previewFpsRange[1]) {
+ // Compare the FPS range value from the last set() to the current set()
+ // to determine if the client has changed it
+ if (validatedParams.previewFpsRange[0] != lastSetFpsRange[0] ||
+ validatedParams.previewFpsRange[1] != lastSetFpsRange[1]) {
+
fpsRangeChanged = true;
camera_metadata_ro_entry_t availablePreviewFpsRanges =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
@@ -1153,16 +1166,6 @@
validatedParams.previewFpsRange[1]);
return BAD_VALUE;
}
- validatedParams.previewFps =
- fpsFromRange(validatedParams.previewFpsRange[0],
- validatedParams.previewFpsRange[1]);
-
- // Update our last-seen single preview FPS, needed for disambiguating
- // when the application is intending to use the deprecated single-FPS
- // setting vs. the range FPS setting
- validatedParams.lastSetPreviewFps = newParams.getPreviewFrameRate();
-
- newParams.setPreviewFrameRate(validatedParams.previewFps);
}
// PREVIEW_FORMAT
@@ -1200,12 +1203,11 @@
// PREVIEW_FRAME_RATE Deprecated, only use if the preview fps range is
// unchanged this time. The single-value FPS is the same as the minimum of
// the range. To detect whether the application has changed the value of
- // previewFps, compare against their last-set preview FPS instead of the
- // single FPS we may have synthesized from a range FPS set.
+ // previewFps, compare against their last-set preview FPS.
if (!fpsRangeChanged) {
- validatedParams.previewFps = newParams.getPreviewFrameRate();
- if (validatedParams.previewFps != lastSetPreviewFps ||
- recordingHintChanged) {
+ int previewFps = newParams.getPreviewFrameRate();
+ int lastSetPreviewFps = params.getPreviewFrameRate();
+ if (previewFps != lastSetPreviewFps || recordingHintChanged) {
camera_metadata_ro_entry_t availableFrameRates =
staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
/**
@@ -1218,8 +1220,8 @@
* Either way, in case of multiple ranges, break the tie by
* selecting the smaller range.
*/
- int targetFps = validatedParams.previewFps;
- // all ranges which have targetFps
+
+ // all ranges which have previewFps
Vector<Range> candidateRanges;
for (i = 0; i < availableFrameRates.count; i+=2) {
Range r = {
@@ -1227,13 +1229,13 @@
availableFrameRates.data.i32[i+1]
};
- if (r.min <= targetFps && targetFps <= r.max) {
+ if (r.min <= previewFps && previewFps <= r.max) {
candidateRanges.push(r);
}
}
if (candidateRanges.isEmpty()) {
ALOGE("%s: Requested preview frame rate %d is not supported",
- __FUNCTION__, validatedParams.previewFps);
+ __FUNCTION__, previewFps);
return BAD_VALUE;
}
// most applicable range with targetFps
@@ -1272,14 +1274,6 @@
validatedParams.previewFpsRange[1],
validatedParams.recordingHint);
}
- newParams.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
- String8::format("%d,%d",
- validatedParams.previewFpsRange[0] * kFpsToApiScale,
- validatedParams.previewFpsRange[1] * kFpsToApiScale));
- // Update our last-seen single preview FPS, needed for disambiguating
- // when the application is intending to use the deprecated single-FPS
- // setting vs. the range FPS setting
- validatedParams.lastSetPreviewFps = validatedParams.previewFps;
}
// PICTURE_SIZE
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index bcbdb99..32dbd42 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -46,8 +46,6 @@
int previewWidth, previewHeight;
int32_t previewFpsRange[2];
- int lastSetPreviewFps; // the last single FPS value seen in a set call
- int previewFps; // deprecated, here only for tracking changes
int previewFormat;
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
@@ -209,6 +207,7 @@
bool triggerAfWithAuto;
bool useZslFormat;
bool meteringCropRegion;
+ bool partialResults;
} quirks;
/**
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 72126c1..1cdf8dc 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -81,7 +81,8 @@
mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
FRAME_PROCESSOR_LISTENER_MAX_ID,
- /*listener*/this);
+ /*listener*/this,
+ /*quirkSendPartials*/true);
return OK;
}
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 52906ee..f2064fb 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -37,11 +37,11 @@
}
status_t FrameProcessorBase::registerListener(int32_t minId,
- int32_t maxId, wp<FilteredListener> listener) {
+ int32_t maxId, wp<FilteredListener> listener, bool quirkSendPartials) {
Mutex::Autolock l(mInputMutex);
ALOGV("%s: Registering listener for frame id range %d - %d",
__FUNCTION__, minId, maxId);
- RangeListener rListener = { minId, maxId, listener };
+ RangeListener rListener = { minId, maxId, listener, quirkSendPartials };
mRangeListeners.push_back(rListener);
return OK;
}
@@ -145,6 +145,16 @@
ATRACE_CALL();
camera_metadata_ro_entry_t entry;
+ // Quirks: Don't deliver partial results to listeners that don't want them
+ bool quirkIsPartial = false;
+ entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
+ if (entry.count != 0 &&
+ entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
+ __FUNCTION__, device->getId());
+ quirkIsPartial = true;
+ }
+
entry = frame.find(ANDROID_REQUEST_ID);
if (entry.count == 0) {
ALOGE("%s: Camera %d: Error reading frame id",
@@ -160,7 +170,8 @@
List<RangeListener>::iterator item = mRangeListeners.begin();
while (item != mRangeListeners.end()) {
if (requestId >= item->minId &&
- requestId < item->maxId) {
+ requestId < item->maxId &&
+ (!quirkIsPartial || item->quirkSendPartials) ) {
sp<FilteredListener> listener = item->listener.promote();
if (listener == 0) {
item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 4d80ebf..89b608a 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -44,9 +44,11 @@
};
// Register a listener for a range of IDs [minId, maxId). Multiple listeners
- // can be listening to the same range
+ // can be listening to the same range.
+ // QUIRK: quirkSendPartials controls whether partial results will be sent.
status_t registerListener(int32_t minId, int32_t maxId,
- wp<FilteredListener> listener);
+ wp<FilteredListener> listener,
+ bool quirkSendPartials = true);
status_t removeListener(int32_t minId, int32_t maxId,
wp<FilteredListener> listener);
@@ -64,6 +66,7 @@
int32_t minId;
int32_t maxId;
wp<FilteredListener> listener;
+ bool quirkSendPartials;
};
List<RangeListener> mRangeListeners;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 6f2dc85..3dbc1b0 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -41,6 +41,7 @@
#include <utils/Trace.h>
#include <utils/Timers.h>
+#include "utils/CameraTraces.h"
#include "device3/Camera3Device.h"
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
@@ -54,6 +55,7 @@
mId(id),
mHal3Device(NULL),
mStatus(STATUS_UNINITIALIZED),
+ mUsePartialResultQuirk(false),
mNextResultFrameNumber(0),
mNextShutterFrameNumber(0),
mListener(NULL)
@@ -192,6 +194,15 @@
mNeedConfig = true;
mPauseStateNotify = false;
+ /** Check for quirks */
+
+ // Will the HAL be sending in early partial result metadata?
+ camera_metadata_entry partialResultsQuirk =
+ mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
+ if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
+ mUsePartialResultQuirk = true;
+ }
+
return OK;
}
@@ -1363,6 +1374,10 @@
// But only do error state transition steps for the first error
if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
+ // Save stack trace. View by dumping it later.
+ CameraTraces::saveTrace();
+ // TODO: consider adding errorCause and client pid/procname
+
mErrorCause = errorCause;
mRequestThread->setPaused(true);
@@ -1386,6 +1401,175 @@
}
/**
+ * QUIRK(partial results)
+ * Check if all 3A fields are ready, and send off a partial 3A-only result
+ * to the output frame queue
+ */
+bool Camera3Device::processPartial3AQuirk(
+ int32_t frameNumber, int32_t requestId,
+ const CameraMetadata& partial) {
+
+ // Check if all 3A states are present
+ // The full list of fields is
+ // android.control.afMode
+ // android.control.awbMode
+ // android.control.aeState
+ // android.control.awbState
+ // android.control.afState
+ // android.control.afTriggerID
+ // android.control.aePrecaptureID
+ // TODO: Add android.control.aeMode
+
+ bool gotAllStates = true;
+
+ uint8_t afMode;
+ uint8_t awbMode;
+ uint8_t aeState;
+ uint8_t afState;
+ uint8_t awbState;
+ int32_t afTriggerId;
+ int32_t aeTriggerId;
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
+ &afMode, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
+ &awbMode, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
+ &aeState, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
+ &afState, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
+ &awbState, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_TRIGGER_ID,
+ &afTriggerId, frameNumber);
+
+ gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ &aeTriggerId, frameNumber);
+
+ if (!gotAllStates) return false;
+
+ ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
+ "AF state %d, AE state %d, AWB state %d, "
+ "AF trigger %d, AE precapture trigger %d",
+ __FUNCTION__, mId, frameNumber, requestId,
+ afMode, awbMode,
+ afState, aeState, awbState,
+ afTriggerId, aeTriggerId);
+
+ // Got all states, so construct a minimal result to send
+ // In addition to the above fields, this means adding in
+ // android.request.frameCount
+ // android.request.requestId
+ // android.quirks.partialResult
+
+ const size_t kMinimal3AResultEntries = 10;
+
+ Mutex::Autolock l(mOutputLock);
+
+ CameraMetadata& min3AResult =
+ *mResultQueue.insert(
+ mResultQueue.end(),
+ CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0));
+
+ if (!insert3AResult(min3AResult, ANDROID_REQUEST_FRAME_COUNT,
+ &frameNumber, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_REQUEST_ID,
+ &requestId, frameNumber)) {
+ return false;
+ }
+
+ static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
+ if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT,
+ &partialResult, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_MODE,
+ &afMode, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_MODE,
+ &awbMode, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_STATE,
+ &aeState, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_STATE,
+ &afState, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_STATE,
+ &awbState, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_TRIGGER_ID,
+ &afTriggerId, frameNumber)) {
+ return false;
+ }
+
+ if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ &aeTriggerId, frameNumber)) {
+ return false;
+ }
+
+ mResultSignal.signal();
+
+ return true;
+}
+
+template<typename T>
+bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
+ T* value, int32_t frameNumber) {
+ (void) frameNumber;
+
+ camera_metadata_ro_entry_t entry;
+
+ entry = result.find(tag);
+ if (entry.count == 0) {
+ ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
+ mId, frameNumber, get_camera_metadata_tag_name(tag));
+ return false;
+ }
+
+ if (sizeof(T) == sizeof(uint8_t)) {
+ *value = entry.data.u8[0];
+ } else if (sizeof(T) == sizeof(int32_t)) {
+ *value = entry.data.i32[0];
+ } else {
+ ALOGE("%s: Unexpected type", __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
+template<typename T>
+bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
+ const T* value, int32_t frameNumber) {
+ if (result.update(tag, value, 1) != NO_ERROR) {
+ mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
+ SET_ERR("Frame %d: Failed to set %s in partial metadata",
+ frameNumber, get_camera_metadata_tag_name(tag));
+ return false;
+ }
+ return true;
+}
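+
+// Note on failure handling: when update() fails, the helper above erases the
+// last element of mResultQueue, which is exactly the minimal 3A result that
+// processPartial3AQuirk() appended before filling it in, so waiters never
+// observe a half-built partial result.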
+
+/**
* Camera HAL device callback methods
*/
@@ -1400,6 +1584,8 @@
frameNumber);
return;
}
+ bool partialResultQuirk = false;
+ CameraMetadata collectedQuirkResult;
// Get capture timestamp from list of in-flight requests, where it was added
// by the shutter notification for this frame. Then update the in-flight
@@ -1415,24 +1601,58 @@
return;
}
InFlightRequest &request = mInFlightMap.editValueAt(idx);
+
+ // Check if this result carries only partial metadata
+ if (mUsePartialResultQuirk && result->result != NULL) {
+ camera_metadata_ro_entry_t partialResultEntry;
+ res = find_camera_metadata_ro_entry(result->result,
+ ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
+ if (res != NAME_NOT_FOUND &&
+ partialResultEntry.count > 0 &&
+ partialResultEntry.data.u8[0] ==
+ ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
+ // A partial result. Flag this as such, and collect this
+ // set of metadata into the in-flight entry.
+ partialResultQuirk = true;
+ request.partialResultQuirk.collectedResult.append(
+ result->result);
+ request.partialResultQuirk.collectedResult.erase(
+ ANDROID_QUIRKS_PARTIAL_RESULT);
+ // Fire off a 3A-only result if possible
+ if (!request.partialResultQuirk.haveSent3A) {
+ request.partialResultQuirk.haveSent3A =
+ processPartial3AQuirk(frameNumber,
+ request.requestId,
+ request.partialResultQuirk.collectedResult);
+ }
+ }
+ }
+
timestamp = request.captureTimestamp;
/**
- * One of the following must happen before it's legal to call process_capture_result:
+ * One of the following must happen before it's legal to call process_capture_result,
+ * unless partial metadata is being provided:
* - CAMERA3_MSG_SHUTTER (expected during normal operation)
* - CAMERA3_MSG_ERROR (expected during flush)
*/
- if (request.requestStatus == OK && timestamp == 0) {
+ if (request.requestStatus == OK && timestamp == 0 && !partialResultQuirk) {
SET_ERR("Called before shutter notify for frame %d",
frameNumber);
return;
}
- if (result->result != NULL) {
+ // Did we get the (final) result metadata for this capture?
+ if (result->result != NULL && !partialResultQuirk) {
if (request.haveResultMetadata) {
SET_ERR("Called multiple times with metadata for frame %d",
frameNumber);
return;
}
+ if (mUsePartialResultQuirk &&
+ !request.partialResultQuirk.collectedResult.isEmpty()) {
+ collectedQuirkResult.acquire(
+ request.partialResultQuirk.collectedResult);
+ }
request.haveResultMetadata = true;
}
@@ -1444,6 +1664,7 @@
return;
}
+ // Check if everything has arrived for this result (buffers and metadata)
if (request.haveResultMetadata && request.numBuffersLeft == 0) {
ATRACE_ASYNC_END("frame capture", frameNumber);
mInFlightMap.removeItemsAt(idx, 1);
@@ -1458,9 +1679,12 @@
}
// Process the result metadata, if provided
- if (result->result != NULL) {
+ bool gotResult = false;
+ if (result->result != NULL && !partialResultQuirk) {
Mutex::Autolock l(mOutputLock);
+ gotResult = true;
+
if (frameNumber != mNextResultFrameNumber) {
SET_ERR("Out-of-order capture result metadata submitted! "
"(got frame number %d, expecting %d)",
@@ -1469,19 +1693,26 @@
}
mNextResultFrameNumber++;
- CameraMetadata &captureResult =
- *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
-
+ CameraMetadata captureResult;
captureResult = result->result;
+
if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT,
(int32_t*)&frameNumber, 1) != OK) {
SET_ERR("Failed to set frame# in metadata (%d)",
frameNumber);
+ gotResult = false;
} else {
ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
__FUNCTION__, mId, frameNumber);
}
+ // Append any previous partials to form a complete result
+ if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) {
+ captureResult.append(collectedQuirkResult);
+ }
+
+ captureResult.sort();
+
// Check that there's a timestamp in the result metadata
camera_metadata_entry entry =
@@ -1489,10 +1720,19 @@
if (entry.count == 0) {
SET_ERR("No timestamp provided by HAL for frame %d!",
frameNumber);
+ gotResult = false;
} else if (timestamp != entry.data.i64[0]) {
SET_ERR("Timestamp mismatch between shutter notify and result"
" metadata for frame %d (%lld vs %lld respectively)",
frameNumber, timestamp, entry.data.i64[0]);
+ gotResult = false;
+ }
+
+ if (gotResult) {
+ // Valid result, insert into queue
+ CameraMetadata& queuedResult =
+ *mResultQueue.insert(mResultQueue.end(), CameraMetadata());
+ queuedResult.swap(captureResult);
}
} // scope for mOutputLock
@@ -1512,7 +1752,7 @@
// Finally, signal any waiters for new frames
- if (result->result != NULL) {
+ if (gotResult) {
mResultSignal.signal();
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 12252c8..468f641 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -188,6 +188,9 @@
// Need to hold on to stream references until configure completes.
Vector<sp<camera3::Camera3StreamInterface> > mDeletedStreams;
+ // Whether quirk ANDROID_QUIRKS_USE_PARTIAL_RESULT is enabled
+ bool mUsePartialResultQuirk;
+
/**** End scope for mLock ****/
class CaptureRequest : public LightRefBase<CaptureRequest> {
@@ -445,6 +448,18 @@
// buffers
int numBuffersLeft;
+ // Fields used by the partial result quirk only
+ struct PartialResultQuirkInFlight {
+ // Set by process_capture_result once 3A has been sent to clients
+ bool haveSent3A;
+ // Result metadata collected so far, when partial results are in use
+ CameraMetadata collectedResult;
+
+ PartialResultQuirkInFlight():
+ haveSent3A(false) {
+ }
+ } partialResultQuirk;
+
// Default constructor needed by KeyedVector
InFlightRequest() :
requestId(0),
@@ -472,6 +487,22 @@
int32_t numBuffers);
/**
+ * For the partial result quirk, check if all 3A state fields are available
+ * and if so, queue up a 3A-only result to the client. Returns true if the
+ * 3A result was sent.
+ */
+ bool processPartial3AQuirk(int32_t frameNumber, int32_t requestId,
+ const CameraMetadata& partial);
+
+ // Helpers for reading and writing 3A metadata to/from partial results
+ template<typename T>
+ bool get3AResult(const CameraMetadata& result, int32_t tag,
+ T* value, int32_t frameNumber);
+
+ template<typename T>
+ bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
+ int32_t frameNumber);
+ /**
* Tracking for idle detection
*/
sp<camera3::StatusTracker> mStatusTracker;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
new file mode 100644
index 0000000..346e15f
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraTraces"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "utils/CameraTraces.h"
+#include <utils/ProcessCallStack.h>
+
+#include <utils/Mutex.h>
+#include <utils/List.h>
+
+#include <utils/Log.h>
+#include <cutils/trace.h>
+
+namespace android {
+namespace camera3 {
+
+struct CameraTracesImpl {
+ Mutex tracesLock;
+ List<ProcessCallStack> pcsList;
+}; // struct CameraTracesImpl
+
+static CameraTracesImpl gImpl;
+CameraTracesImpl& CameraTraces::sImpl = gImpl;
+
+void CameraTraces::saveTrace() {
+ ALOGV("%s: begin", __FUNCTION__);
+ ATRACE_BEGIN("CameraTraces::saveTrace");
+ Mutex::Autolock al(sImpl.tracesLock);
+
+ List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+ // Insert new ProcessCallStack, and immediately crawl all the threads
+ pcsList.push_front(ProcessCallStack());
+ ProcessCallStack& pcs = *pcsList.begin();
+ pcs.update();
+
+ if (pcsList.size() > MAX_TRACES) {
+ // List exceeded MAX_TRACES: discard the oldest entry
+ pcsList.erase(--pcsList.end());
+ }
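+ // pcsList therefore behaves as a bounded LIFO: the newest trace sits at the
+ // front and the oldest is dropped once the list exceeds MAX_TRACES entries.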
+
+ IF_ALOGV() {
+ pcs.log(LOG_TAG, ANDROID_LOG_VERBOSE);
+ }
+
+ ALOGD("Process trace saved. Use dumpsys media.camera to view.");
+
+ ATRACE_END();
+}
+
+status_t CameraTraces::dump(int fd, const Vector<String16> &args __attribute__((unused))) {
+ ALOGV("%s: fd = %d", __FUNCTION__, fd);
+ Mutex::Autolock al(sImpl.tracesLock);
+ List<ProcessCallStack>& pcsList = sImpl.pcsList;
+
+ if (fd < 0) {
+ ALOGW("%s: Negative FD (%d)", __FUNCTION__, fd);
+ return BAD_VALUE;
+ }
+
+ fdprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+
+ if (pcsList.empty()) {
+ fdprintf(fd, " No camera traces collected.\n");
+ }
+
+ // Print newest items first
+ List<ProcessCallStack>::iterator it, end;
+ for (it = pcsList.begin(), end = pcsList.end(); it != end; ++it) {
+ const ProcessCallStack& pcs = *it;
+ pcs.dump(fd, DUMP_INDENT);
+ }
+
+ return OK;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
new file mode 100644
index 0000000..d10dbc9
--- /dev/null
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_TRACES_H_
+#define ANDROID_SERVERS_CAMERA_TRACES_H_
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace camera3 {
+
+class CameraTracesImpl;
+
+// Collect a list of the process's stack traces
+class CameraTraces {
+public:
+ /**
+ * Save the current stack trace for each thread in the process. At most
+ * MAX_TRACES will be saved, after which the oldest traces will be discarded.
+ *
+ * <p>Use CameraTraces::dump to print out the traces.</p>
+ */
+ static void saveTrace();
+
+ /**
+ * Prints all saved traces to the specified file descriptor.
+ *
+ * <p>Each line is indented by DUMP_INDENT spaces.</p>
+ */
+ static status_t dump(int fd, const Vector<String16>& args);
+
+private:
+ enum {
+ // Don't collect more than 100 traces. Discard oldest.
+ MAX_TRACES = 100,
+
+ // Insert 2 spaces when dumping the traces
+ DUMP_INDENT = 2,
+ };
+
+ CameraTraces();
+ ~CameraTraces();
+ CameraTraces(CameraTraces& rhs);
+
+ static CameraTracesImpl& sImpl;
+}; // class CameraTraces
+
+}; // namespace camera3
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_TRACES_H_