Merge "Implementing MediaDrm APIs" into jb-mr2-dev
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index e8908d2..1b136de 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -96,32 +96,14 @@
return c->unlock();
}
-// pass the buffered Surface to the camera service
-status_t Camera::setPreviewDisplay(const sp<Surface>& surface)
-{
- ALOGV("setPreviewDisplay(%p)", surface.get());
- sp <ICamera> c = mCamera;
- if (c == 0) return NO_INIT;
- if (surface != 0) {
- return c->setPreviewDisplay(surface);
- } else {
- ALOGD("app passed NULL surface");
- return c->setPreviewDisplay(0);
- }
-}
-
// pass the buffered IGraphicBufferProducer to the camera service
status_t Camera::setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setPreviewTexture(%p)", bufferProducer.get());
sp <ICamera> c = mCamera;
if (c == 0) return NO_INIT;
- if (bufferProducer != 0) {
- return c->setPreviewTexture(bufferProducer);
- } else {
- ALOGD("app passed NULL surface");
- return c->setPreviewTexture(0);
- }
+ ALOGD_IF(bufferProducer == 0, "app passed NULL surface");
+ return c->setPreviewTexture(bufferProducer);
}
// start preview mode
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 5d210e7..8900867 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -29,7 +29,6 @@
enum {
DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
- SET_PREVIEW_DISPLAY,
SET_PREVIEW_TEXTURE,
SET_PREVIEW_CALLBACK_FLAG,
START_PREVIEW,
@@ -68,17 +67,6 @@
remote()->transact(DISCONNECT, data, &reply);
}
- // pass the buffered Surface to the camera service
- status_t setPreviewDisplay(const sp<Surface>& surface)
- {
- ALOGV("setPreviewDisplay");
- Parcel data, reply;
- data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
- Surface::writeToParcel(surface, &data);
- remote()->transact(SET_PREVIEW_DISPLAY, data, &reply);
- return reply.readInt32();
- }
-
// pass the buffered IGraphicBufferProducer to the camera service
status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)
{
@@ -282,13 +270,6 @@
disconnect();
return NO_ERROR;
} break;
- case SET_PREVIEW_DISPLAY: {
- ALOGV("SET_PREVIEW_DISPLAY");
- CHECK_INTERFACE(ICamera, data, reply);
- sp<Surface> surface = Surface::readFromParcel(data);
- reply->writeInt32(setPreviewDisplay(surface));
- return NO_ERROR;
- } break;
case SET_PREVIEW_TEXTURE: {
ALOGV("SET_PREVIEW_TEXTURE");
CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/tests/ProCameraTests.cpp b/camera/tests/ProCameraTests.cpp
index 71813ae..ecc0854 100644
--- a/camera/tests/ProCameraTests.cpp
+++ b/camera/tests/ProCameraTests.cpp
@@ -1015,6 +1015,9 @@
ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/2,
/*requests*/REQUEST_COUNT));
+ int depthFrames = 0;
+ int greyFrames = 0;
+
// Consume two frames simultaneously. Unsynchronized by timestamps.
for (int i = 0; i < REQUEST_COUNT; ++i) {
@@ -1041,6 +1044,8 @@
EXPECT_OK(depthConsumer->unlockBuffer(depthBuffer));
+ depthFrames++;
+
/** Consume Greyscale frames if there are any.
* There may not be since it runs at half FPS */
@@ -1053,9 +1058,14 @@
", timestamp = " << greyBuffer.timestamp << std::endl;
EXPECT_OK(consumer->unlockBuffer(greyBuffer));
+
+ greyFrames++;
}
}
+ dout << "Done, summary: depth frames " << std::dec << depthFrames
+ << ", grey frames " << std::dec << greyFrames << std::endl;
+
// Done: clean up
EXPECT_OK(mCamera->deleteStream(streamId));
EXPECT_OK(mCamera->exclusiveUnlock());
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 71c66ce..37626a4 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -74,9 +74,6 @@
status_t lock();
status_t unlock();
- // pass the buffered Surface to the camera service
- status_t setPreviewDisplay(const sp<Surface>& surface);
-
// pass the buffered IGraphicBufferProducer to the camera service
status_t setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index eccaa41..2236c1f 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -46,9 +46,6 @@
// allow other processes to use this ICamera interface
virtual status_t unlock() = 0;
- // pass the buffered Surface to the camera service
- virtual status_t setPreviewDisplay(const sp<Surface>& surface) = 0;
-
// pass the buffered IGraphicBufferProducer to the camera service
virtual status_t setPreviewTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 8d7f11d..3e67550 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -35,7 +35,7 @@
virtual status_t setCamera(const sp<ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy) = 0;
- virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0;
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
virtual status_t setVideoSource(int vs) = 0;
virtual status_t setAudioSource(int as) = 0;
virtual status_t setOutputFormat(int of) = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index 8dd40d2..d7ac302 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -42,7 +42,7 @@
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
virtual status_t setCamera(const sp<ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy) = 0;
- virtual status_t setPreviewSurface(const sp<Surface>& surface) = 0;
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
virtual status_t setOutputFile(const char *path) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;}
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 3b33479..88a42a0 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -207,7 +207,7 @@
void died();
status_t initCheck();
status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
- status_t setPreviewSurface(const sp<Surface>& surface);
+ status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
status_t setVideoSource(int vs);
status_t setAudioSource(int as);
status_t setOutputFormat(int of);
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index cf38b14..a829916 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -82,7 +82,7 @@
uid_t clientUid,
Size videoSize,
int32_t frameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
bool storeMetaDataInVideoBuffers = false);
virtual ~CameraSource();
@@ -154,7 +154,7 @@
sp<Camera> mCamera;
sp<ICameraRecordingProxy> mCameraRecordingProxy;
sp<DeathNotifier> mDeathNotifier;
- sp<Surface> mSurface;
+ sp<IGraphicBufferProducer> mSurface;
sp<MetaData> mMeta;
int64_t mStartTimeUs;
@@ -169,7 +169,7 @@
CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid,
Size videoSize, int32_t frameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
bool storeMetaDataInVideoBuffers);
virtual void startCameraRecording();
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 774772b..6b7a63c 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -40,7 +40,7 @@
uid_t clientUid,
Size videoSize,
int32_t videoFrameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
int64_t timeBetweenTimeLapseFrameCaptureUs);
virtual ~CameraSourceTimeLapse();
@@ -115,7 +115,7 @@
uid_t clientUid,
Size videoSize,
int32_t videoFrameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
int64_t timeBetweenTimeLapseFrameCaptureUs);
// Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 3596b38..88df6b0 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -35,7 +35,13 @@
MPEG4Writer(const char *filename);
MPEG4Writer(int fd);
+ // Limitations:
+ // 1. No more than two tracks can be added.
+ // 2. Only video or audio sources can be added.
+ // 3. At most one video track and one audio track can be added.
virtual status_t addSource(const sp<MediaSource> &source);
+
+ // Returns INVALID_OPERATION if there is no source or track.
virtual status_t start(MetaData *param = NULL);
virtual status_t stop() { return reset(); }
virtual status_t pause();
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
index 9ad94e0..cdfc441 100755
--- a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
@@ -2486,6 +2486,12 @@
#endif
+ if ((M4MP4W_Time32)auPtr->CTS < mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS) {
+ // Do not report this as an error; it would abort the entire file write. Just skip this frame.
+ M4OSA_TRACE1_0("Skip frame. Video frame has too old timestamp.");
+ return M4NO_ERROR;
+ }
+
mMp4FileDataPtr->videoTrackPtr->currentPos += auPtr->size;
/* Warning: time conversion cast 64to32! */
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 94b9acf..54f8d9e 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -224,6 +224,7 @@
pContext->pBundledContext->NumberEffectsEnabled = 0;
pContext->pBundledContext->NumberEffectsCalled = 0;
pContext->pBundledContext->firstVolume = LVM_TRUE;
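+ // Initialize the stored volume so it is not read before being set.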
+ pContext->pBundledContext->volume = 0;
#ifdef LVM_PCM
char fileName[256];
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index c935d97..8e58162 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -87,12 +87,12 @@
return interface_cast<IGraphicBufferProducer>(reply.readStrongBinder());
}
- status_t setPreviewSurface(const sp<Surface>& surface)
+ status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface)
{
ALOGV("setPreviewSurface(%p)", surface.get());
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
- Surface::writeToParcel(surface, &data);
+ data.writeStrongBinder(surface->asBinder());
remote()->transact(SET_PREVIEW_SURFACE, data, &reply);
return reply.readInt32();
}
@@ -443,7 +443,7 @@
case SET_PREVIEW_SURFACE: {
ALOGV("SET_PREVIEW_SURFACE");
CHECK_INTERFACE(IMediaRecorder, data, reply);
- sp<Surface> surface = Surface::readFromParcel(data);
+ sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
reply->writeInt32(setPreviewSurface(surface));
return NO_ERROR;
} break;
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 3ac98cc..3710e46 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -49,7 +49,7 @@
return ret;
}
-status_t MediaRecorder::setPreviewSurface(const sp<Surface>& surface)
+status_t MediaRecorder::setPreviewSurface(const sp<IGraphicBufferProducer>& surface)
{
ALOGV("setPreviewSurface(%p)", surface.get());
if (mMediaRecorder == NULL) {
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index a52b238..a9820e0 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -81,7 +81,7 @@
return mRecorder->setCamera(camera, proxy);
}
-status_t MediaRecorderClient::setPreviewSurface(const sp<Surface>& surface)
+status_t MediaRecorderClient::setPreviewSurface(const sp<IGraphicBufferProducer>& surface)
{
ALOGV("setPreviewSurface");
Mutex::Autolock lock(mLock);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index bd0eaf1..a65ec9f 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -32,7 +32,7 @@
public:
virtual status_t setCamera(const sp<ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy);
- virtual status_t setPreviewSurface(const sp<Surface>& surface);
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
virtual status_t setVideoSource(int vs);
virtual status_t setAudioSource(int as);
virtual status_t setOutputFormat(int of);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index f570856..c2c9985 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -224,7 +224,7 @@
return OK;
}
-status_t StagefrightRecorder::setPreviewSurface(const sp<Surface> &surface) {
+status_t StagefrightRecorder::setPreviewSurface(const sp<IGraphicBufferProducer> &surface) {
ALOGV("setPreviewSurface: %p", surface.get());
mPreviewSurface = surface;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index fbe6fa6..c864207 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -51,7 +51,7 @@
virtual status_t setVideoSize(int width, int height);
virtual status_t setVideoFrameRate(int frames_per_second);
virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
- virtual status_t setPreviewSurface(const sp<Surface>& surface);
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
virtual status_t setOutputFile(const char *path);
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
virtual status_t setParameters(const String8& params);
@@ -71,7 +71,7 @@
private:
sp<ICamera> mCamera;
sp<ICameraRecordingProxy> mCameraProxy;
- sp<Surface> mPreviewSurface;
+ sp<IGraphicBufferProducer> mPreviewSurface;
sp<IMediaRecorderClient> mListener;
String16 mClientName;
uid_t mClientUid;
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index f8557d0..5a26b06 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -140,7 +140,7 @@
uid_t clientUid,
Size videoSize,
int32_t frameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
bool storeMetaDataInVideoBuffers) {
CameraSource *source = new CameraSource(camera, proxy, cameraId,
@@ -157,7 +157,7 @@
uid_t clientUid,
Size videoSize,
int32_t frameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
bool storeMetaDataInVideoBuffers)
: mCameraFlags(0),
mNumInputBuffers(0),
@@ -536,7 +536,7 @@
if (mSurface != NULL) {
// This CHECK is good, since we just passed the lock/unlock
// check earlier by calling mCamera->setParameters().
- CHECK_EQ((status_t)OK, mCamera->setPreviewDisplay(mSurface));
+ CHECK_EQ((status_t)OK, mCamera->setPreviewTexture(mSurface));
}
// By default, do not store metadata in video buffers
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 2ed2223..20214e8 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -40,7 +40,7 @@
uid_t clientUid,
Size videoSize,
int32_t videoFrameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
int64_t timeBetweenFrameCaptureUs) {
CameraSourceTimeLapse *source = new
@@ -66,7 +66,7 @@
uid_t clientUid,
Size videoSize,
int32_t videoFrameRate,
- const sp<Surface>& surface,
+ const sp<IGraphicBufferProducer>& surface,
int64_t timeBetweenFrameCaptureUs)
: CameraSource(camera, proxy, cameraId, clientName, clientUid,
videoSize, videoFrameRate, surface, true),
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index b2e60be..56fad60 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2067,17 +2067,30 @@
sampleRate = br.getBits(24);
numChannels = br.getBits(4);
} else {
- static uint32_t kSamplingRate[] = {
- 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
- 16000, 12000, 11025, 8000, 7350
- };
-
- if (freqIndex == 13 || freqIndex == 14) {
- return ERROR_MALFORMED;
+ numChannels = br.getBits(4);
+ if (objectType == 5) {
+ // SBR specific config per 14496-3 table 1.13
+ freqIndex = br.getBits(4);
+ if (freqIndex == 15) {
+ if (csd_size < 8) {
+ return ERROR_MALFORMED;
+ }
+ sampleRate = br.getBits(24);
+ }
}
- sampleRate = kSamplingRate[freqIndex];
- numChannels = br.getBits(4);
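+ // Fall back to the sampling-rate table when the rate was not signalled explicitly (i.e. freqIndex != 15).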
+ if (sampleRate == 0) {
+ static uint32_t kSamplingRate[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
+ 16000, 12000, 11025, 8000, 7350
+ };
+
+ if (freqIndex == 13 || freqIndex == 14) {
+ return ERROR_MALFORMED;
+ }
+
+ sampleRate = kSamplingRate[freqIndex];
+ }
}
if (numChannels == 0) {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 056b47a..316f669 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -428,6 +428,42 @@
ALOGE("Attempt to add source AFTER recording is started");
return UNKNOWN_ERROR;
}
+
+ // At most 2 tracks can be supported.
+ if (mTracks.size() >= 2) {
+ ALOGE("Too many tracks (%d) to add", mTracks.size());
+ return ERROR_UNSUPPORTED;
+ }
+
+ CHECK(source.get() != NULL);
+
+ // A track of type other than video or audio is not supported.
+ const char *mime;
+ source->getFormat()->findCString(kKeyMIMEType, &mime);
+ bool isAudio = !strncasecmp(mime, "audio/", 6);
+ bool isVideo = !strncasecmp(mime, "video/", 6);
+ if (!isAudio && !isVideo) {
+ ALOGE("Track (%s) other than video or audio is not supported",
+ mime);
+ return ERROR_UNSUPPORTED;
+ }
+
+ // At this point, the track to be added is known to be either
+ // video or audio, so we only need to check whether it is an
+ // audio track (if it is not, it must be a video track).
+
+ // At most one video track and one audio track are supported.
+ for (List<Track*>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it) {
+ if ((*it)->isAudio() == isAudio) {
+ ALOGE("%s track already exists", isAudio? "Audio": "Video");
+ return ERROR_UNSUPPORTED;
+ }
+ }
+
+ // This is the first track of its kind (audio or video).
+ // Go ahead and add it.
Track *track = new Track(this, source, 1 + mTracks.size());
mTracks.push_back(track);
@@ -435,6 +471,11 @@
}
status_t MPEG4Writer::startTracks(MetaData *params) {
+ if (mTracks.empty()) {
+ ALOGE("No source added");
+ return INVALID_OPERATION;
+ }
+
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
status_t err = (*it)->start(params);
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index aefc270..21841b3 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -76,17 +76,17 @@
convertMessageToMetaData(format, meta);
sp<MediaAdapter> newTrack = new MediaAdapter(meta);
- return mTrackList.add(newTrack);
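+ // Register the track with the writer immediately so that tracks the
+ // writer cannot handle are rejected here rather than at start().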
+ status_t result = mWriter->addSource(newTrack);
+ if (result == OK) {
+ return mTrackList.add(newTrack);
+ }
+ return -1;
}
status_t MediaMuxer::start() {
Mutex::Autolock autoLock(mMuxerLock);
-
if (mState == INITED) {
mState = STARTED;
- for (size_t i = 0 ; i < mTrackList.size(); i++) {
- mWriter->addSource(mTrackList[i]);
- }
return mWriter->start();
} else {
ALOGE("start() is called in invalid state %d", mState);
@@ -100,7 +100,9 @@
if (mState == STARTED) {
mState = STOPPED;
for (size_t i = 0; i < mTrackList.size(); i++) {
- mTrackList[i]->stop();
+ if (mTrackList[i]->stop() != OK) {
+ return INVALID_OPERATION;
+ }
}
return mWriter->stop();
} else {
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 5d760d3..95ed43a 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -511,6 +511,9 @@
if (response->mStatusCode != 200) {
result = UNKNOWN_ERROR;
+ } else if (response->mContent == NULL) {
+ result = ERROR_MALFORMED;
+ ALOGE("The response has no content.");
} else {
mSessionDesc = new ASessionDescription;
diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/wifi-display/ANetworkSession.cpp
index 465f4c4..df20ae2 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.cpp
+++ b/media/libstagefright/wifi-display/ANetworkSession.cpp
@@ -81,7 +81,8 @@
status_t readMore();
status_t writeMore();
- status_t sendRequest(const void *data, ssize_t size);
+ status_t sendRequest(
+ const void *data, ssize_t size, bool timeValid, int64_t timeUs);
void setIsRTSPConnection(bool yesno);
@@ -89,6 +90,15 @@
virtual ~Session();
private:
+ enum {
+ FRAGMENT_FLAG_TIME_VALID = 1,
+ };
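+ // A queued outgoing buffer (datagram or stream data); when
+ // FRAGMENT_FLAG_TIME_VALID is set, mTimeUs carries the originating media
+ // timestamp so the queuing delay can be measured at send time.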
+ struct Fragment {
+ uint32_t mFlags;
+ int64_t mTimeUs;
+ sp<ABuffer> mBuffer;
+ };
+
int32_t mSessionID;
State mState;
bool mIsRTSPConnection;
@@ -96,17 +106,17 @@
sp<AMessage> mNotify;
bool mSawReceiveFailure, mSawSendFailure;
- // for TCP / stream data
- AString mOutBuffer;
-
- // for UDP / datagrams
- List<sp<ABuffer> > mOutDatagrams;
+ List<Fragment> mOutFragments;
AString mInBuffer;
+ int64_t mLastStallReportUs;
+
void notifyError(bool send, status_t err, const char *detail);
void notify(NotificationReason reason);
+ void dumpFragmentStats(const Fragment &frag);
+
DISALLOW_EVIL_CONSTRUCTORS(Session);
};
////////////////////////////////////////////////////////////////////////////////
@@ -137,7 +147,8 @@
mSocket(s),
mNotify(notify),
mSawReceiveFailure(false),
- mSawSendFailure(false) {
+ mSawSendFailure(false),
+ mLastStallReportUs(-1ll) {
if (mState == CONNECTED) {
struct sockaddr_in localAddr;
socklen_t localAddrLen = sizeof(localAddr);
@@ -218,8 +229,8 @@
bool ANetworkSession::Session::wantsToWrite() {
return !mSawSendFailure
&& (mState == CONNECTING
- || (mState == CONNECTED && !mOutBuffer.empty())
- || (mState == DATAGRAM && !mOutDatagrams.empty()));
+ || (mState == CONNECTED && !mOutFragments.empty())
+ || (mState == DATAGRAM && !mOutFragments.empty()));
}
status_t ANetworkSession::Session::readMore() {
@@ -404,13 +415,41 @@
return err;
}
+void ANetworkSession::Session::dumpFragmentStats(const Fragment &frag) {
+#if 0
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t delayMs = (nowUs - frag.mTimeUs) / 1000ll;
+
+ static const int64_t kMinDelayMs = 0;
+ static const int64_t kMaxDelayMs = 300;
+
+ const char *kPattern = "########################################";
+ size_t kPatternSize = strlen(kPattern);
+
+ int n = (kPatternSize * (delayMs - kMinDelayMs))
+ / (kMaxDelayMs - kMinDelayMs);
+
+ if (n < 0) {
+ n = 0;
+ } else if ((size_t)n > kPatternSize) {
+ n = kPatternSize;
+ }
+
+ ALOGI("[%lld]: (%4lld ms) %s\n",
+ frag.mTimeUs / 1000,
+ delayMs,
+ kPattern + kPatternSize - n);
+#endif
+}
+
status_t ANetworkSession::Session::writeMore() {
if (mState == DATAGRAM) {
- CHECK(!mOutDatagrams.empty());
+ CHECK(!mOutFragments.empty());
status_t err;
do {
- const sp<ABuffer> &datagram = *mOutDatagrams.begin();
+ const Fragment &frag = *mOutFragments.begin();
+ const sp<ABuffer> &datagram = frag.mBuffer;
uint8_t *data = datagram->data();
if (data[0] == 0x80 && (data[1] & 0x7f) == 33) {
@@ -438,17 +477,21 @@
err = OK;
if (n > 0) {
- mOutDatagrams.erase(mOutDatagrams.begin());
+ if (frag.mFlags & FRAGMENT_FLAG_TIME_VALID) {
+ dumpFragmentStats(frag);
+ }
+
+ mOutFragments.erase(mOutFragments.begin());
} else if (n < 0) {
err = -errno;
} else if (n == 0) {
err = -ECONNRESET;
}
- } while (err == OK && !mOutDatagrams.empty());
+ } while (err == OK && !mOutFragments.empty());
if (err == -EAGAIN) {
- if (!mOutDatagrams.empty()) {
- ALOGI("%d datagrams remain queued.", mOutDatagrams.size());
+ if (!mOutFragments.empty()) {
+ ALOGI("%d datagrams remain queued.", mOutFragments.size());
}
err = OK;
}
@@ -481,23 +524,37 @@
}
CHECK_EQ(mState, CONNECTED);
- CHECK(!mOutBuffer.empty());
+ CHECK(!mOutFragments.empty());
ssize_t n;
- do {
- n = send(mSocket, mOutBuffer.c_str(), mOutBuffer.size(), 0);
- } while (n < 0 && errno == EINTR);
+ while (!mOutFragments.empty()) {
+ const Fragment &frag = *mOutFragments.begin();
+
+ do {
+ n = send(mSocket, frag.mBuffer->data(), frag.mBuffer->size(), 0);
+ } while (n < 0 && errno == EINTR);
+
+ if (n <= 0) {
+ break;
+ }
+
+ frag.mBuffer->setRange(
+ frag.mBuffer->offset() + n, frag.mBuffer->size() - n);
+
+ if (frag.mBuffer->size() > 0) {
+ break;
+ }
+
+ if (frag.mFlags & FRAGMENT_FLAG_TIME_VALID) {
+ dumpFragmentStats(frag);
+ }
+
+ mOutFragments.erase(mOutFragments.begin());
+ }
status_t err = OK;
- if (n > 0) {
-#if 0
- ALOGI("out:");
- hexdump(mOutBuffer.c_str(), n);
-#endif
-
- mOutBuffer.erase(0, n);
- } else if (n < 0) {
+ if (n < 0) {
err = -errno;
} else if (n == 0) {
err = -ECONNRESET;
@@ -508,43 +565,69 @@
mSawSendFailure = true;
}
-#if 0
+#if 1
int numBytesQueued;
int res = ioctl(mSocket, SIOCOUTQ, &numBytesQueued);
- if (res == 0 && numBytesQueued > 102400) {
- ALOGI("numBytesQueued = %d", numBytesQueued);
+ if (res == 0 && numBytesQueued > 50 * 1024) {
+ if (numBytesQueued > 409600) {
+ ALOGW("!!! numBytesQueued = %d", numBytesQueued);
+ }
+
+ int64_t nowUs = ALooper::GetNowUs();
+
+ if (mLastStallReportUs < 0ll
+ || nowUs > mLastStallReportUs + 500000ll) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("sessionID", mSessionID);
+ msg->setInt32("reason", kWhatNetworkStall);
+ msg->setSize("numBytesQueued", numBytesQueued);
+ msg->post();
+
+ mLastStallReportUs = nowUs;
+ }
}
#endif
return err;
}
-status_t ANetworkSession::Session::sendRequest(const void *data, ssize_t size) {
+status_t ANetworkSession::Session::sendRequest(
+ const void *data, ssize_t size, bool timeValid, int64_t timeUs) {
CHECK(mState == CONNECTED || mState == DATAGRAM);
- if (mState == DATAGRAM) {
- CHECK_GE(size, 0);
+ if (size < 0) {
+ size = strlen((const char *)data);
+ }
- sp<ABuffer> datagram = new ABuffer(size);
- memcpy(datagram->data(), data, size);
-
- mOutDatagrams.push_back(datagram);
+ if (size == 0) {
return OK;
}
+ sp<ABuffer> buffer;
+
if (mState == CONNECTED && !mIsRTSPConnection) {
CHECK_LE(size, 65535);
- uint8_t prefix[2];
- prefix[0] = size >> 8;
- prefix[1] = size & 0xff;
-
- mOutBuffer.append((const char *)prefix, sizeof(prefix));
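+ // Binary (non-RTSP) TCP payloads are framed with a 2-byte big-endian length prefix.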
+ buffer = new ABuffer(size + 2);
+ buffer->data()[0] = size >> 8;
+ buffer->data()[1] = size & 0xff;
+ memcpy(buffer->data() + 2, data, size);
+ } else {
+ buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
}
- mOutBuffer.append(
- (const char *)data,
- (size >= 0) ? size : strlen((const char *)data));
+ Fragment frag;
+
+ frag.mFlags = 0;
+ if (timeValid) {
+ frag.mFlags = FRAGMENT_FLAG_TIME_VALID;
+ frag.mTimeUs = timeUs;
+ }
+
+ frag.mBuffer = buffer;
+
+ mOutFragments.push_back(frag);
return OK;
}
@@ -967,7 +1050,8 @@
}
status_t ANetworkSession::sendRequest(
- int32_t sessionID, const void *data, ssize_t size) {
+ int32_t sessionID, const void *data, ssize_t size,
+ bool timeValid, int64_t timeUs) {
Mutex::Autolock autoLock(mLock);
ssize_t index = mSessions.indexOfKey(sessionID);
@@ -978,7 +1062,7 @@
const sp<Session> session = mSessions.valueAt(index);
- status_t err = session->sendRequest(data, size);
+ status_t err = session->sendRequest(data, size, timeValid, timeUs);
interrupt();
diff --git a/media/libstagefright/wifi-display/ANetworkSession.h b/media/libstagefright/wifi-display/ANetworkSession.h
index c1acdcc..7c62b29 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.h
+++ b/media/libstagefright/wifi-display/ANetworkSession.h
@@ -74,7 +74,8 @@
status_t destroySession(int32_t sessionID);
status_t sendRequest(
- int32_t sessionID, const void *data, ssize_t size = -1);
+ int32_t sessionID, const void *data, ssize_t size = -1,
+ bool timeValid = false, int64_t timeUs = -1ll);
enum NotificationReason {
kWhatError,
@@ -83,6 +84,7 @@
kWhatData,
kWhatDatagram,
kWhatBinaryData,
+ kWhatNetworkStall,
};
protected:
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 105c642..d13a92e 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -252,10 +252,45 @@
fwrite(tsPackets->data(), 1, tsPackets->size(), mLogFile);
}
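+ // Propagate the access unit's presentation timestamp onto the TS packet
+ // buffer so the RTP sender can tag the outgoing packets with it.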
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+ tsPackets->meta()->setInt64("timeUs", timeUs);
+
err = mTSSender->queueBuffer(
tsPackets,
33 /* packetType */,
RTPSender::PACKETIZATION_TRANSPORT_STREAM);
+
+#if 0
+ {
+ int64_t nowUs = ALooper::GetNowUs();
+
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ int64_t delayMs = (nowUs - timeUs) / 1000ll;
+
+ static const int64_t kMinDelayMs = 0;
+ static const int64_t kMaxDelayMs = 300;
+
+ const char *kPattern = "########################################";
+ size_t kPatternSize = strlen(kPattern);
+
+ int n = (kPatternSize * (delayMs - kMinDelayMs))
+ / (kMaxDelayMs - kMinDelayMs);
+
+ if (n < 0) {
+ n = 0;
+ } else if ((size_t)n > kPatternSize) {
+ n = kPatternSize;
+ }
+
+ ALOGI("[%lld]: (%4lld ms) %s\n",
+ timeUs / 1000,
+ delayMs,
+ kPattern + kPatternSize - n);
+ }
+#endif
}
if (err != OK) {
@@ -325,6 +360,15 @@
break;
}
+ case kWhatNetworkStall:
+ {
+ size_t numBytesQueued;
+ CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
+
+ notifyNetworkStall(numBytesQueued);
+ break;
+ }
+
default:
TRESPASS();
}
@@ -344,6 +388,13 @@
notify->post();
}
+void MediaSender::notifyNetworkStall(size_t numBytesQueued) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatNetworkStall);
+ notify->setSize("numBytesQueued", numBytesQueued);
+ notify->post();
+}
+
status_t MediaSender::packetizeAccessUnit(
size_t trackIndex,
sp<ABuffer> accessUnit,
diff --git a/media/libstagefright/wifi-display/MediaSender.h b/media/libstagefright/wifi-display/MediaSender.h
index 9a50f9a..447abf7 100644
--- a/media/libstagefright/wifi-display/MediaSender.h
+++ b/media/libstagefright/wifi-display/MediaSender.h
@@ -42,6 +42,7 @@
enum {
kWhatInitDone,
kWhatError,
+ kWhatNetworkStall,
};
MediaSender(
@@ -113,6 +114,7 @@
void notifyInitDone(status_t err);
void notifyError(status_t err);
+ void notifyNetworkStall(size_t numBytesQueued);
status_t packetizeAccessUnit(
size_t trackIndex,
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index b60853d..c8e265c 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -194,6 +194,9 @@
const sp<ABuffer> &tsPackets, uint8_t packetType) {
CHECK_EQ(0, tsPackets->size() % 188);
+ int64_t timeUs;
+ CHECK(tsPackets->meta()->findInt64("timeUs", &timeUs));
+
const size_t numTSPackets = tsPackets->size() / 188;
size_t srcOffset = 0;
@@ -232,13 +235,19 @@
memcpy(&rtp[12], tsPackets->data() + srcOffset, numTSPackets * 188);
udpPacket->setRange(0, 12 + numTSPackets * 188);
- status_t err = sendRTPPacket(udpPacket, true /* storeInHistory */);
+
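+ // Only the last RTP packet of this access unit carries a valid timestamp,
+ // so send-delay statistics are recorded once per unit.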
+ srcOffset += numTSPackets * 188;
+ bool isLastPacket = (srcOffset == tsPackets->size());
+
+ status_t err = sendRTPPacket(
+ udpPacket,
+ true /* storeInHistory */,
+ isLastPacket /* timeValid */,
+ timeUs);
if (err != OK) {
return err;
}
-
- srcOffset += numTSPackets * 188;
}
return OK;
@@ -395,11 +404,13 @@
}
status_t RTPSender::sendRTPPacket(
- const sp<ABuffer> &buffer, bool storeInHistory) {
+ const sp<ABuffer> &buffer, bool storeInHistory,
+ bool timeValid, int64_t timeUs) {
CHECK(mRTPConnected);
status_t err = mNetSession->sendRequest(
- mRTPSessionID, buffer->data(), buffer->size());
+ mRTPSessionID, buffer->data(), buffer->size(),
+ timeValid, timeUs);
if (err != OK) {
return err;
@@ -530,6 +541,18 @@
}
break;
}
+
+ case ANetworkSession::kWhatNetworkStall:
+ {
+ size_t numBytesQueued;
+ CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
+
+ notifyNetworkStall(numBytesQueued);
+ break;
+ }
+
+ default:
+ TRESPASS();
}
}
@@ -699,5 +722,12 @@
notify->post();
}
+void RTPSender::notifyNetworkStall(size_t numBytesQueued) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatNetworkStall);
+ notify->setSize("numBytesQueued", numBytesQueued);
+ notify->post();
+}
+
} // namespace android
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.h b/media/libstagefright/wifi-display/rtp/RTPSender.h
index 2b683a4..90b1796 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.h
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.h
@@ -36,6 +36,7 @@
enum {
kWhatInitDone,
kWhatError,
+ kWhatNetworkStall,
};
RTPSender(
const sp<ANetworkSession> &netSession,
@@ -93,7 +94,9 @@
status_t queueTSPackets(const sp<ABuffer> &tsPackets, uint8_t packetType);
status_t queueAVCBuffer(const sp<ABuffer> &accessUnit, uint8_t packetType);
- status_t sendRTPPacket(const sp<ABuffer> &packet, bool storeInHistory);
+ status_t sendRTPPacket(
+ const sp<ABuffer> &packet, bool storeInHistory,
+ bool timeValid = false, int64_t timeUs = -1ll);
void onNetNotify(bool isRTP, const sp<AMessage> &msg);
@@ -103,6 +106,7 @@
void notifyInitDone(status_t err);
void notifyError(status_t err);
+ void notifyNetworkStall(size_t numBytesQueued);
DISALLOW_EVIL_CONSTRUCTORS(RTPSender);
};
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
index 5efcd17..12338e9 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -22,6 +22,7 @@
#include <gui/SurfaceComposerClient.h>
#include <gui/Surface.h>
+#include <media/AudioTrack.h>
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -34,12 +35,438 @@
namespace android {
+/*
+ Drives the decoding process using a MediaCodec instance. Input buffers
+ queued by calls to "queueInputBuffer" are fed to the decoder as soon as
+ it is ready for them, and the client is notified about output buffers
+ as the decoder produces them.
+*/
+struct DirectRenderer::DecoderContext : public AHandler {
+ enum {
+ kWhatOutputBufferReady,
+ };
+ DecoderContext(const sp<AMessage> ¬ify);
+
+ status_t init(
+ const sp<AMessage> &format,
+ const sp<IGraphicBufferProducer> &surfaceTex);
+
+ void queueInputBuffer(const sp<ABuffer> &accessUnit);
+
+ status_t renderOutputBufferAndRelease(size_t index);
+ status_t releaseOutputBuffer(size_t index);
+
+protected:
+ virtual ~DecoderContext();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum {
+ kWhatDecoderNotify,
+ };
+
+ sp<AMessage> mNotify;
+ sp<ALooper> mDecoderLooper;
+ sp<MediaCodec> mDecoder;
+ Vector<sp<ABuffer> > mDecoderInputBuffers;
+ Vector<sp<ABuffer> > mDecoderOutputBuffers;
+ List<size_t> mDecoderInputBuffersAvailable;
+ bool mDecoderNotificationPending;
+
+ List<sp<ABuffer> > mAccessUnits;
+
+ void onDecoderNotify();
+ void scheduleDecoderNotification();
+ void queueDecoderInputBuffers();
+
+ void queueOutputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecoderContext);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/*
+ A "push" audio renderer. Its primary purpose is to drive an AudioTrack
+ in push mode without blocking the event loop, by ensuring that calls to
+ AudioTrack::write never block. This is done by estimating an upper bound
+ on the amount of data that can be written to the AudioTrack buffer
+ without delay.
+*/
+struct DirectRenderer::AudioRenderer : public AHandler {
+ AudioRenderer(const sp<DecoderContext> &decoderContext);
+
+ void queueInputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+protected:
+ virtual ~AudioRenderer();
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum {
+ kWhatPushAudio,
+ };
+
+ struct BufferInfo {
+ size_t mIndex;
+ int64_t mTimeUs;
+ sp<ABuffer> mBuffer;
+ };
+
+ sp<DecoderContext> mDecoderContext;
+ sp<AudioTrack> mAudioTrack;
+
+ List<BufferInfo> mInputBuffers;
+ bool mPushPending;
+
+ size_t mNumFramesWritten;
+
+ void schedulePushIfNecessary();
+ void onPushAudio();
+
+ ssize_t writeNonBlocking(const uint8_t *data, size_t size);
+
+ DISALLOW_EVIL_CONSTRUCTORS(AudioRenderer);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DecoderContext::DecoderContext(const sp<AMessage> ¬ify)
+ : mNotify(notify),
+ mDecoderNotificationPending(false) {
+}
+
+DirectRenderer::DecoderContext::~DecoderContext() {
+ if (mDecoder != NULL) {
+ mDecoder->release();
+ mDecoder.clear();
+
+ mDecoderLooper->stop();
+ mDecoderLooper.clear();
+ }
+}
+
+status_t DirectRenderer::DecoderContext::init(
+ const sp<AMessage> &format,
+ const sp<IGraphicBufferProducer> &surfaceTex) {
+ CHECK(mDecoder == NULL);
+
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
+ mDecoderLooper = new ALooper;
+ mDecoderLooper->setName("video codec looper");
+
+ mDecoderLooper->start(
+ false /* runOnCallingThread */,
+ false /* canCallJava */,
+ PRIORITY_DEFAULT);
+
+ mDecoder = MediaCodec::CreateByType(
+ mDecoderLooper, mime.c_str(), false /* encoder */);
+
+ CHECK(mDecoder != NULL);
+
+ status_t err = mDecoder->configure(
+ format,
+ surfaceTex == NULL
+ ? NULL : new Surface(surfaceTex),
+ NULL /* crypto */,
+ 0 /* flags */);
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->start();
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->getInputBuffers(
+ &mDecoderInputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mDecoder->getOutputBuffers(
+ &mDecoderOutputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+
+ scheduleDecoderNotification();
+
+ return OK;
+}
+
+void DirectRenderer::DecoderContext::queueInputBuffer(
+ const sp<ABuffer> &accessUnit) {
+ CHECK(mDecoder != NULL);
+
+ mAccessUnits.push_back(accessUnit);
+ queueDecoderInputBuffers();
+}
+
+status_t DirectRenderer::DecoderContext::renderOutputBufferAndRelease(
+ size_t index) {
+ return mDecoder->renderOutputBufferAndRelease(index);
+}
+
+status_t DirectRenderer::DecoderContext::releaseOutputBuffer(size_t index) {
+ return mDecoder->releaseOutputBuffer(index);
+}
+
+void DirectRenderer::DecoderContext::queueDecoderInputBuffers() {
+ if (mDecoder == NULL) {
+ return;
+ }
+
+ bool submittedMore = false;
+
+ while (!mAccessUnits.empty()
+ && !mDecoderInputBuffersAvailable.empty()) {
+ size_t index = *mDecoderInputBuffersAvailable.begin();
+
+ mDecoderInputBuffersAvailable.erase(
+ mDecoderInputBuffersAvailable.begin());
+
+ sp<ABuffer> srcBuffer = *mAccessUnits.begin();
+ mAccessUnits.erase(mAccessUnits.begin());
+
+ const sp<ABuffer> &dstBuffer =
+ mDecoderInputBuffers.itemAt(index);
+
+ memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
+
+ int64_t timeUs;
+ CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+
+ status_t err = mDecoder->queueInputBuffer(
+ index,
+ 0 /* offset */,
+ srcBuffer->size(),
+ timeUs,
+ 0 /* flags */);
+ CHECK_EQ(err, (status_t)OK);
+
+ submittedMore = true;
+ }
+
+ if (submittedMore) {
+ scheduleDecoderNotification();
+ }
+}
+
+void DirectRenderer::DecoderContext::onMessageReceived(
+ const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatDecoderNotify:
+ {
+ onDecoderNotify();
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void DirectRenderer::DecoderContext::onDecoderNotify() {
+ mDecoderNotificationPending = false;
+
+ for (;;) {
+ size_t index;
+ status_t err = mDecoder->dequeueInputBuffer(&index);
+
+ if (err == OK) {
+ mDecoderInputBuffersAvailable.push_back(index);
+ } else if (err == -EAGAIN) {
+ break;
+ } else {
+ TRESPASS();
+ }
+ }
+
+ queueDecoderInputBuffers();
+
+ for (;;) {
+ size_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ uint32_t flags;
+ status_t err = mDecoder->dequeueOutputBuffer(
+ &index,
+ &offset,
+ &size,
+ &timeUs,
+ &flags);
+
+ if (err == OK) {
+ queueOutputBuffer(
+ index, timeUs, mDecoderOutputBuffers.itemAt(index));
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ err = mDecoder->getOutputBuffers(
+ &mDecoderOutputBuffers);
+ CHECK_EQ(err, (status_t)OK);
+ } else if (err == INFO_FORMAT_CHANGED) {
+ // We don't care.
+ } else if (err == -EAGAIN) {
+ break;
+ } else {
+ TRESPASS();
+ }
+ }
+
+ scheduleDecoderNotification();
+}
+
+void DirectRenderer::DecoderContext::scheduleDecoderNotification() {
+ if (mDecoderNotificationPending) {
+ return;
+ }
+
+ sp<AMessage> notify =
+ new AMessage(kWhatDecoderNotify, id());
+
+ mDecoder->requestActivityNotification(notify);
+ mDecoderNotificationPending = true;
+}
+
+void DirectRenderer::DecoderContext::queueOutputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatOutputBufferReady);
+ msg->setSize("index", index);
+ msg->setInt64("timeUs", timeUs);
+ msg->setBuffer("buffer", buffer);
+ msg->post();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::AudioRenderer::AudioRenderer(
+ const sp<DecoderContext> &decoderContext)
+ : mDecoderContext(decoderContext),
+ mPushPending(false),
+ mNumFramesWritten(0) {
+ mAudioTrack = new AudioTrack(
+ AUDIO_STREAM_DEFAULT,
+ 48000.0f,
+ AUDIO_FORMAT_PCM,
+ AUDIO_CHANNEL_OUT_STEREO,
+ (int)0 /* frameCount */);
+
+ CHECK_EQ((status_t)OK, mAudioTrack->initCheck());
+
+ mAudioTrack->start();
+}
+
+DirectRenderer::AudioRenderer::~AudioRenderer() {
+}
+
+void DirectRenderer::AudioRenderer::queueInputBuffer(
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ BufferInfo info;
+ info.mIndex = index;
+ info.mTimeUs = timeUs;
+ info.mBuffer = buffer;
+
+ mInputBuffers.push_back(info);
+ schedulePushIfNecessary();
+}
+
+void DirectRenderer::AudioRenderer::onMessageReceived(
+ const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatPushAudio:
+ {
+ onPushAudio();
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+void DirectRenderer::AudioRenderer::schedulePushIfNecessary() {
+ if (mPushPending || mInputBuffers.empty()) {
+ return;
+ }
+
+ mPushPending = true;
+
+ uint32_t numFramesPlayed;
+ CHECK_EQ(mAudioTrack->getPosition(&numFramesPlayed),
+ (status_t)OK);
+
+ uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+
+ // This is how long the audio sink will have data to
+ // play back.
+ const float msecsPerFrame = 1000.0f / mAudioTrack->getSampleRate();
+
+ int64_t delayUs =
+ msecsPerFrame * numFramesPendingPlayout * 1000ll;
+
+ // Let's give it more data after about half that time
+ // has elapsed.
+ (new AMessage(kWhatPushAudio, id()))->post(delayUs / 2);
+}
+
+void DirectRenderer::AudioRenderer::onPushAudio() {
+ mPushPending = false;
+
+ while (!mInputBuffers.empty()) {
+ const BufferInfo &info = *mInputBuffers.begin();
+
+ ssize_t n = writeNonBlocking(
+ info.mBuffer->data(), info.mBuffer->size());
+
+ if (n < (ssize_t)info.mBuffer->size()) {
+ CHECK_GE(n, 0);
+
+ info.mBuffer->setRange(
+ info.mBuffer->offset() + n, info.mBuffer->size() - n);
+ break;
+ }
+
+ mDecoderContext->releaseOutputBuffer(info.mIndex);
+
+ mInputBuffers.erase(mInputBuffers.begin());
+ }
+
+ schedulePushIfNecessary();
+}
+
+ssize_t DirectRenderer::AudioRenderer::writeNonBlocking(
+ const uint8_t *data, size_t size) {
+ uint32_t numFramesPlayed;
+ status_t err = mAudioTrack->getPosition(&numFramesPlayed);
+ if (err != OK) {
+ return err;
+ }
+
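+ // Frames that can be written without blocking: the track's total frame
+ // count minus the frames already written but not yet played out.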
+ ssize_t numFramesAvailableToWrite =
+ mAudioTrack->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+ size_t numBytesAvailableToWrite =
+ numFramesAvailableToWrite * mAudioTrack->frameSize();
+
+ if (size > numBytesAvailableToWrite) {
+ size = numBytesAvailableToWrite;
+ }
+
+ CHECK_EQ(mAudioTrack->write(data, size), (ssize_t)size);
+
+ size_t numFramesWritten = size / mAudioTrack->frameSize();
+ mNumFramesWritten += numFramesWritten;
+
+ return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
DirectRenderer::DirectRenderer(
const sp<IGraphicBufferProducer> &bufferProducer)
: mSurfaceTex(bufferProducer),
- mVideoDecoderNotificationPending(false),
- mRenderPending(false),
- mTimeOffsetUs(0ll),
+ mVideoRenderPending(false),
mLatencySum(0ll),
mLatencyCount(0),
mNumFramesLate(0),
@@ -47,17 +474,6 @@
}
DirectRenderer::~DirectRenderer() {
- if (mVideoDecoder != NULL) {
- mVideoDecoder->release();
- mVideoDecoder.clear();
-
- mVideoDecoderLooper->stop();
- mVideoDecoderLooper.clear();
- }
-}
-
-void DirectRenderer::setTimeOffset(int64_t offset) {
- mTimeOffsetUs = offset;
}
int64_t DirectRenderer::getAvgLatenessUs() {
@@ -81,15 +497,15 @@
void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
- case kWhatVideoDecoderNotify:
+ case kWhatDecoderNotify:
{
- onVideoDecoderNotify();
+ onDecoderNotify(msg);
break;
}
- case kWhatRender:
+ case kWhatRenderVideo:
{
- onRender();
+ onRenderVideo();
break;
}
@@ -98,196 +514,114 @@
}
}
-void DirectRenderer::setFormat(
- size_t trackIndex, const sp<AMessage> &format) {
+void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+ CHECK_LT(trackIndex, 2u);
+
+ CHECK(mDecoderContext[trackIndex] == NULL);
+
+ sp<AMessage> notify = new AMessage(kWhatDecoderNotify, id());
+ notify->setSize("trackIndex", trackIndex);
+
+ mDecoderContext[trackIndex] = new DecoderContext(notify);
+ looper()->registerHandler(mDecoderContext[trackIndex]);
+
+ CHECK_EQ((status_t)OK,
+ mDecoderContext[trackIndex]->init(
+ format, trackIndex == 0 ? mSurfaceTex : NULL));
+
if (trackIndex == 1) {
- // Ignore audio for now.
- return;
+ // Audio
+ mAudioRenderer = new AudioRenderer(mDecoderContext[1]);
+ looper()->registerHandler(mAudioRenderer);
}
-
- CHECK(mVideoDecoder == NULL);
-
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- mVideoDecoderLooper = new ALooper;
- mVideoDecoderLooper->setName("video codec looper");
-
- mVideoDecoderLooper->start(
- false /* runOnCallingThread */,
- false /* canCallJava */,
- PRIORITY_DEFAULT);
-
- mVideoDecoder = MediaCodec::CreateByType(
- mVideoDecoderLooper, mime.c_str(), false /* encoder */);
-
- CHECK(mVideoDecoder != NULL);
-
- status_t err = mVideoDecoder->configure(
- format,
- mSurfaceTex == NULL
- ? NULL : new Surface(mSurfaceTex),
- NULL /* crypto */,
- 0 /* flags */);
- CHECK_EQ(err, (status_t)OK);
-
- err = mVideoDecoder->start();
- CHECK_EQ(err, (status_t)OK);
-
- err = mVideoDecoder->getInputBuffers(
- &mVideoDecoderInputBuffers);
- CHECK_EQ(err, (status_t)OK);
-
- scheduleVideoDecoderNotification();
}
void DirectRenderer::queueAccessUnit(
size_t trackIndex, const sp<ABuffer> &accessUnit) {
- if (trackIndex == 1) {
- // Ignore audio for now.
- return;
- }
+ CHECK_LT(trackIndex, 2u);
- if (mVideoDecoder == NULL) {
+ if (mDecoderContext[trackIndex] == NULL) {
+ CHECK_EQ(trackIndex, 0u);
+
sp<AMessage> format = new AMessage;
format->setString("mime", "video/avc");
format->setInt32("width", 640);
format->setInt32("height", 360);
- setFormat(0, format);
+ setFormat(trackIndex, format);
}
- mVideoAccessUnits.push_back(accessUnit);
- queueVideoDecoderInputBuffers();
+ mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
}
-void DirectRenderer::queueVideoDecoderInputBuffers() {
- if (mVideoDecoder == NULL) {
+void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ switch (what) {
+ case DecoderContext::kWhatOutputBufferReady:
+ {
+ size_t index;
+ CHECK(msg->findSize("index", &index));
+
+ int64_t timeUs;
+ CHECK(msg->findInt64("timeUs", &timeUs));
+
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ queueOutputBuffer(trackIndex, index, timeUs, buffer);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void DirectRenderer::queueOutputBuffer(
+ size_t trackIndex,
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+ if (trackIndex == 1) {
+ // Audio
+ mAudioRenderer->queueInputBuffer(index, timeUs, buffer);
return;
}
- bool submittedMore = false;
-
- while (!mVideoAccessUnits.empty()
- && !mVideoDecoderInputBuffersAvailable.empty()) {
- size_t index = *mVideoDecoderInputBuffersAvailable.begin();
-
- mVideoDecoderInputBuffersAvailable.erase(
- mVideoDecoderInputBuffersAvailable.begin());
-
- sp<ABuffer> srcBuffer = *mVideoAccessUnits.begin();
- mVideoAccessUnits.erase(mVideoAccessUnits.begin());
-
- const sp<ABuffer> &dstBuffer =
- mVideoDecoderInputBuffers.itemAt(index);
-
- memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
-
- int64_t timeUs;
- CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
-
- status_t err = mVideoDecoder->queueInputBuffer(
- index,
- 0 /* offset */,
- srcBuffer->size(),
- timeUs,
- 0 /* flags */);
- CHECK_EQ(err, (status_t)OK);
-
- submittedMore = true;
- }
-
- if (submittedMore) {
- scheduleVideoDecoderNotification();
- }
-}
-
-void DirectRenderer::onVideoDecoderNotify() {
- mVideoDecoderNotificationPending = false;
-
- for (;;) {
- size_t index;
- status_t err = mVideoDecoder->dequeueInputBuffer(&index);
-
- if (err == OK) {
- mVideoDecoderInputBuffersAvailable.push_back(index);
- } else if (err == -EAGAIN) {
- break;
- } else {
- TRESPASS();
- }
- }
-
- queueVideoDecoderInputBuffers();
-
- for (;;) {
- size_t index;
- size_t offset;
- size_t size;
- int64_t timeUs;
- uint32_t flags;
- status_t err = mVideoDecoder->dequeueOutputBuffer(
- &index,
- &offset,
- &size,
- &timeUs,
- &flags);
-
- if (err == OK) {
- queueOutputBuffer(index, timeUs);
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- // We don't care.
- } else if (err == INFO_FORMAT_CHANGED) {
- // We don't care.
- } else if (err == -EAGAIN) {
- break;
- } else {
- TRESPASS();
- }
- }
-
- scheduleVideoDecoderNotification();
-}
-
-void DirectRenderer::queueOutputBuffer(size_t index, int64_t timeUs) {
-#if 1
OutputInfo info;
info.mIndex = index;
- info.mTimeUs = timeUs + mTimeOffsetUs;
- mOutputBuffers.push_back(info);
+ info.mTimeUs = timeUs;
+ info.mBuffer = buffer;
+ mVideoOutputBuffers.push_back(info);
- scheduleRenderIfNecessary();
-#else
- mLatencySum += ALooper::GetNowUs() - (timeUs + mTimeOffsetUs);
- ++mLatencyCount;
-
- status_t err = mVideoDecoder->renderOutputBufferAndRelease(index);
- CHECK_EQ(err, (status_t)OK);
-#endif
+ scheduleVideoRenderIfNecessary();
}
-void DirectRenderer::scheduleRenderIfNecessary() {
- if (mRenderPending || mOutputBuffers.empty()) {
+void DirectRenderer::scheduleVideoRenderIfNecessary() {
+ if (mVideoRenderPending || mVideoOutputBuffers.empty()) {
return;
}
- mRenderPending = true;
+ mVideoRenderPending = true;
- int64_t timeUs = (*mOutputBuffers.begin()).mTimeUs;
+ int64_t timeUs = (*mVideoOutputBuffers.begin()).mTimeUs;
int64_t nowUs = ALooper::GetNowUs();
int64_t delayUs = timeUs - nowUs;
- (new AMessage(kWhatRender, id()))->post(delayUs);
+ (new AMessage(kWhatRenderVideo, id()))->post(delayUs);
}
-void DirectRenderer::onRender() {
- mRenderPending = false;
+void DirectRenderer::onRenderVideo() {
+ mVideoRenderPending = false;
int64_t nowUs = ALooper::GetNowUs();
- while (!mOutputBuffers.empty()) {
- const OutputInfo &info = *mOutputBuffers.begin();
+ while (!mVideoOutputBuffers.empty()) {
+ const OutputInfo &info = *mVideoOutputBuffers.begin();
if (info.mTimeUs > nowUs) {
break;
@@ -301,25 +635,14 @@
mLatencySum += nowUs - info.mTimeUs;
++mLatencyCount;
- status_t err = mVideoDecoder->renderOutputBufferAndRelease(info.mIndex);
+ status_t err =
+ mDecoderContext[0]->renderOutputBufferAndRelease(info.mIndex);
CHECK_EQ(err, (status_t)OK);
- mOutputBuffers.erase(mOutputBuffers.begin());
+ mVideoOutputBuffers.erase(mVideoOutputBuffers.begin());
}
- scheduleRenderIfNecessary();
-}
-
-void DirectRenderer::scheduleVideoDecoderNotification() {
- if (mVideoDecoderNotificationPending) {
- return;
- }
-
- sp<AMessage> notify =
- new AMessage(kWhatVideoDecoderNotify, id());
-
- mVideoDecoder->requestActivityNotification(notify);
- mVideoDecoderNotificationPending = true;
+ scheduleVideoRenderIfNecessary();
}
} // namespace android
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
index 44be8f8..92c176a 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.h
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -23,21 +23,17 @@
namespace android {
struct ABuffer;
+struct AudioTrack;
struct IGraphicBufferProducer;
struct MediaCodec;
-// An experimental renderer that only supports video and decodes video data
-// as soon as it arrives using a MediaCodec instance, rendering it without
-// delay. Primarily meant to finetune packet loss discovery and minimize
-// latency.
+// Renders audio and video data queued by calls to "queueAccessUnit".
struct DirectRenderer : public AHandler {
DirectRenderer(const sp<IGraphicBufferProducer> &bufferProducer);
void setFormat(size_t trackIndex, const sp<AMessage> &format);
void queueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);
- void setTimeOffset(int64_t offset);
-
int64_t getAvgLatenessUs();
protected:
@@ -45,30 +41,28 @@
virtual ~DirectRenderer();
private:
+ struct DecoderContext;
+ struct AudioRenderer;
+
enum {
- kWhatVideoDecoderNotify,
- kWhatRender,
+ kWhatDecoderNotify,
+ kWhatRenderVideo,
};
struct OutputInfo {
size_t mIndex;
int64_t mTimeUs;
+ sp<ABuffer> mBuffer;
};
sp<IGraphicBufferProducer> mSurfaceTex;
- sp<ALooper> mVideoDecoderLooper;
- sp<MediaCodec> mVideoDecoder;
- Vector<sp<ABuffer> > mVideoDecoderInputBuffers;
- List<size_t> mVideoDecoderInputBuffersAvailable;
- bool mVideoDecoderNotificationPending;
+ sp<DecoderContext> mDecoderContext[2];
+ List<OutputInfo> mVideoOutputBuffers;
- List<sp<ABuffer> > mVideoAccessUnits;
+ bool mVideoRenderPending;
- List<OutputInfo> mOutputBuffers;
- bool mRenderPending;
-
- int64_t mTimeOffsetUs;
+ sp<AudioRenderer> mAudioRenderer;
int64_t mLatencySum;
size_t mLatencyCount;
@@ -76,14 +70,14 @@
int32_t mNumFramesLate;
int32_t mNumFrames;
- void onVideoDecoderNotify();
- void onRender();
+ void onDecoderNotify(const sp<AMessage> &msg);
- void queueVideoDecoderInputBuffers();
- void scheduleVideoDecoderNotification();
- void scheduleRenderIfNecessary();
+ void queueOutputBuffer(
+ size_t trackIndex,
+ size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
- void queueOutputBuffer(size_t index, int64_t timeUs);
+ void scheduleVideoRenderIfNecessary();
+ void onRenderVideo();
DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
};
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
index 0d2e347..62021c0 100644
--- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
@@ -319,7 +319,7 @@
CHECK(mTimeOffsetValid);
- int64_t latencyUs = 300000ll; // 300ms by default
+ int64_t latencyUs = 200000ll; // 200ms by default
char val[PROPERTY_VALUE_MAX];
if (property_get("media.wfd-sink.latency", val, NULL)) {
@@ -337,13 +337,18 @@
ALOGI("Assuming %lld ms of latency.", latencyUs / 1000ll);
}
- // We are the timesync _client_,
- // client time = server time - time offset.
- mRenderer->setTimeOffset(-mTimeOffsetUs + mTargetLatencyUs);
-
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ // We are the timesync _client_,
+ // client time = server time - time offset.
+ timeUs += mTargetLatencyUs - mTimeOffsetUs;
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+
size_t trackIndex;
CHECK(msg->findSize("trackIndex", &trackIndex));
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 2861aa9..bb8c387 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -55,6 +55,7 @@
,mInSilentMode(false)
#endif
,mPrevVideoBitrate(-1)
+ ,mNumFramesToDrop(0)
{
AString mime;
CHECK(mInputFormat->findString("mime", &mime));
@@ -327,6 +328,13 @@
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
+ if (mIsVideo && mNumFramesToDrop) {
+ --mNumFramesToDrop;
+ ALOGI("dropping frame.");
+ ReleaseMediaBufferReference(accessUnit);
+ break;
+ }
+
#if 0
void *mbuf;
if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
@@ -422,6 +430,12 @@
break;
}
+ case kWhatDropAFrame:
+ {
+ ++mNumFramesToDrop;
+ break;
+ }
+
default:
TRESPASS();
}
@@ -690,4 +704,8 @@
(new AMessage(kWhatRequestIDRFrame, id()))->post();
}
+void Converter::dropAFrame() {
+ (new AMessage(kWhatDropAFrame, id()))->post();
+}
+
} // namespace android
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 57802bd..a418f69 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -51,6 +51,8 @@
void requestIDRFrame();
+ void dropAFrame();
+
enum {
kWhatAccessUnit,
kWhatEOS,
@@ -63,6 +65,7 @@
kWhatShutdown,
kWhatMediaPullerNotify,
kWhatEncoderActivity,
+ kWhatDropAFrame,
};
void shutdownAsync();
@@ -102,6 +105,8 @@
int32_t mPrevVideoBitrate;
+ int32_t mNumFramesToDrop;
+
status_t initEncoder();
void releaseEncoder();
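The two Converter hunks above wire frame dropping up as a pair of messages on the Converter's own looper: dropAFrame() posts kWhatDropAFrame, which bumps mNumFramesToDrop, and the next incoming video access unit is released instead of encoded. Because both paths run on the same looper thread, a plain counter is enough. A minimal stand-alone sketch of that counter idiom, not AOSP code:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for Converter's mNumFramesToDrop; the real class serializes the
    // producer (kWhatDropAFrame) and consumer (kWhatAccessUnit) paths on a
    // single ALooper thread, so no locking is needed.
    struct DropCounter {
        int32_t numFramesToDrop = 0;

        void requestDrop() { ++numFramesToDrop; }   // on kWhatDropAFrame
        bool shouldDrop() {                         // on each video access unit
            if (numFramesToDrop == 0) return false;
            --numFramesToDrop;
            return true;
        }
    };

    int main() {
        DropCounter c;
        c.requestDrop();                            // network stall reported
        printf("%d\n", c.shouldDrop() ? 1 : 0);     // 1: this frame is dropped
        printf("%d\n", c.shouldDrop() ? 1 : 0);     // 0: back to normal
        return 0;
    }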
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index ea195b3..a3b6542 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -39,6 +39,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/SurfaceMediaSource.h>
#include <media/stagefright/Utils.h>
@@ -57,6 +58,8 @@
const sp<MediaPuller> &mediaPuller,
const sp<Converter> &converter);
+ Track(const sp<AMessage> &notify, const sp<AMessage> &format);
+
void setRepeaterSource(const sp<RepeaterSource> &source);
sp<AMessage> getFormat();
@@ -104,6 +107,7 @@
sp<ALooper> mCodecLooper;
sp<MediaPuller> mMediaPuller;
sp<Converter> mConverter;
+ sp<AMessage> mFormat;
bool mStarted;
ssize_t mMediaSenderTrackIndex;
bool mIsAudio;
@@ -133,6 +137,15 @@
mLastOutputBufferQueuedTimeUs(-1ll) {
}
+WifiDisplaySource::PlaybackSession::Track::Track(
+ const sp<AMessage> &notify, const sp<AMessage> &format)
+ : mNotify(notify),
+ mFormat(format),
+ mStarted(false),
+ mIsAudio(IsAudioFormat(format)),
+ mLastOutputBufferQueuedTimeUs(-1ll) {
+}
+
WifiDisplaySource::PlaybackSession::Track::~Track() {
CHECK(!mStarted);
}
@@ -147,7 +160,7 @@
}
sp<AMessage> WifiDisplaySource::PlaybackSession::Track::getFormat() {
- return mConverter->getOutputFormat();
+ return mFormat != NULL ? mFormat : mConverter->getOutputFormat();
}
bool WifiDisplaySource::PlaybackSession::Track::isAudio() const {
@@ -189,7 +202,9 @@
void WifiDisplaySource::PlaybackSession::Track::stopAsync() {
ALOGV("Track::stopAsync isAudio=%d", mIsAudio);
- mConverter->shutdownAsync();
+ if (mConverter != NULL) {
+ mConverter->shutdownAsync();
+ }
sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, id());
@@ -201,6 +216,7 @@
mMediaPuller->stopAsync(msg);
} else {
+ mStarted = false;
msg->post();
}
}
@@ -324,7 +340,8 @@
const sp<ANetworkSession> &netSession,
const sp<AMessage> &notify,
const in_addr &interfaceAddr,
- const sp<IHDCP> &hdcp)
+ const sp<IHDCP> &hdcp,
+ const char *path)
: mNetSession(netSession),
mNotify(notify),
mInterfaceAddr(interfaceAddr),
@@ -334,7 +351,14 @@
mPaused(false),
mLastLifesignUs(),
mVideoTrackIndex(-1),
- mPrevTimeUs(-1ll) {
+ mPrevTimeUs(-1ll),
+ mPullExtractorPending(false),
+ mPullExtractorGeneration(0),
+ mFirstSampleTimeRealUs(-1ll),
+ mFirstSampleTimeUs(-1ll) {
+ if (path != NULL) {
+ mMediaPath.setTo(path);
+ }
}
status_t WifiDisplaySource::PlaybackSession::init(
@@ -405,10 +429,6 @@
return OK;
}
-status_t WifiDisplaySource::PlaybackSession::finishPlay() {
- return OK;
-}
-
status_t WifiDisplaySource::PlaybackSession::onMediaSenderInitialized() {
for (size_t i = 0; i < mTracks.size(); ++i) {
CHECK_EQ((status_t)OK, mTracks.editValueAt(i)->start());
@@ -515,6 +535,19 @@
}
} else if (what == MediaSender::kWhatError) {
notifySessionDead();
+ } else if (what == MediaSender::kWhatNetworkStall) {
+ size_t numBytesQueued;
+ CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
+
+ if (mVideoTrackIndex >= 0) {
+ const sp<Track> &videoTrack =
+ mTracks.valueFor(mVideoTrackIndex);
+
+ sp<Converter> converter = videoTrack->converter();
+ if (converter != NULL) {
+ converter->dropAFrame();
+ }
+ }
} else {
TRESPASS();
}
@@ -554,6 +587,12 @@
case kWhatPause:
{
+ if (mExtractor != NULL) {
+ ++mPullExtractorGeneration;
+ mFirstSampleTimeRealUs = -1ll;
+ mFirstSampleTimeUs = -1ll;
+ }
+
if (mPaused) {
break;
}
@@ -568,6 +607,10 @@
case kWhatResume:
{
+ if (mExtractor != NULL) {
+ schedulePullExtractor();
+ }
+
if (!mPaused) {
break;
}
@@ -580,11 +623,152 @@
break;
}
+ case kWhatPullExtractorSample:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mPullExtractorGeneration) {
+ break;
+ }
+
+ mPullExtractorPending = false;
+
+ onPullExtractor();
+ break;
+ }
+
default:
TRESPASS();
}
}
+status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
+ bool enableAudio, bool enableVideo) {
+ DataSource::RegisterDefaultSniffers();
+
+ mExtractor = new NuMediaExtractor;
+
+ status_t err = mExtractor->setDataSource(mMediaPath.c_str());
+
+ if (err != OK) {
+ return err;
+ }
+
+ size_t n = mExtractor->countTracks();
+ bool haveAudio = false;
+ bool haveVideo = false;
+ for (size_t i = 0; i < n; ++i) {
+ sp<AMessage> format;
+ err = mExtractor->getTrackFormat(i, &format);
+
+ if (err != OK) {
+ continue;
+ }
+
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
+ bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
+ bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+
+ if (isAudio && enableAudio && !haveAudio) {
+ haveAudio = true;
+ } else if (isVideo && enableVideo && !haveVideo) {
+ haveVideo = true;
+ } else {
+ continue;
+ }
+
+ err = mExtractor->selectTrack(i);
+
+ size_t trackIndex = mTracks.size();
+
+ sp<AMessage> notify = new AMessage(kWhatTrackNotify, id());
+ notify->setSize("trackIndex", trackIndex);
+
+ sp<Track> track = new Track(notify, format);
+ looper()->registerHandler(track);
+
+ mTracks.add(trackIndex, track);
+
+ mExtractorTrackToInternalTrack.add(i, trackIndex);
+
+ if (isVideo) {
+ mVideoTrackIndex = trackIndex;
+ }
+
+ uint32_t flags = MediaSender::FLAG_MANUALLY_PREPEND_SPS_PPS;
+
+ ssize_t mediaSenderTrackIndex =
+ mMediaSender->addTrack(format, flags);
+ CHECK_GE(mediaSenderTrackIndex, 0);
+
+ track->setMediaSenderTrackIndex(mediaSenderTrackIndex);
+
+ if ((haveAudio || !enableAudio) && (haveVideo || !enableVideo)) {
+ break;
+ }
+ }
+
+ return OK;
+}
+
+void WifiDisplaySource::PlaybackSession::schedulePullExtractor() {
+ if (mPullExtractorPending) {
+ return;
+ }
+
+ int64_t sampleTimeUs;
+ status_t err = mExtractor->getSampleTime(&sampleTimeUs);
+
+ int64_t nowUs = ALooper::GetNowUs();
+
+ if (mFirstSampleTimeRealUs < 0ll) {
+ mFirstSampleTimeRealUs = nowUs;
+ mFirstSampleTimeUs = sampleTimeUs;
+ }
+
+ int64_t whenUs = sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs;
+
+ sp<AMessage> msg = new AMessage(kWhatPullExtractorSample, id());
+ msg->setInt32("generation", mPullExtractorGeneration);
+ msg->post(whenUs - nowUs);
+
+ mPullExtractorPending = true;
+}
+
+void WifiDisplaySource::PlaybackSession::onPullExtractor() {
+ sp<ABuffer> accessUnit = new ABuffer(1024 * 1024);
+ status_t err = mExtractor->readSampleData(accessUnit);
+ if (err != OK) {
+ // EOS.
+ return;
+ }
+
+ int64_t timeUs;
+ CHECK_EQ((status_t)OK, mExtractor->getSampleTime(&timeUs));
+
+ accessUnit->meta()->setInt64(
+ "timeUs", mFirstSampleTimeRealUs + timeUs - mFirstSampleTimeUs);
+
+ size_t trackIndex;
+ CHECK_EQ((status_t)OK, mExtractor->getSampleTrackIndex(&trackIndex));
+
+ sp<AMessage> msg = new AMessage(kWhatConverterNotify, id());
+
+ msg->setSize(
+ "trackIndex", mExtractorTrackToInternalTrack.valueFor(trackIndex));
+
+ msg->setInt32("what", Converter::kWhatAccessUnit);
+ msg->setBuffer("accessUnit", accessUnit);
+ msg->post();
+
+ mExtractor->advance();
+
+ schedulePullExtractor();
+}
+
status_t WifiDisplaySource::PlaybackSession::setupPacketizer(
bool enableAudio,
bool usePCMAudio,
@@ -593,6 +777,10 @@
size_t videoResolutionIndex) {
CHECK(enableAudio || enableVideo);
+ if (!mMediaPath.empty()) {
+ return setupMediaPacketizer(enableAudio, enableVideo);
+ }
+
if (enableVideo) {
status_t err = addVideoSource(
videoResolutionType, videoResolutionIndex);
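The schedulePullExtractor()/onPullExtractor() pair added above paces file playback in real time: the first pulled sample pins media time to the wall clock, and every later sample is scheduled at sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs, i.e. the same distance into wall-clock time as it is into the media. A self-contained arithmetic example with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Microseconds throughout; values invented for illustration.
        int64_t firstSampleTimeUs     = 0;             // media time of the first sample
        int64_t firstSampleTimeRealUs = 5000000000ll;  // wall clock when it was pulled
        int64_t sampleTimeUs          = 33366;         // media time of the next sample (~1 frame at 30 fps)
        int64_t nowUs                 = 5000010000ll;  // current wall clock

        // Same mapping as the patch: media time relative to the first sample,
        // re-anchored on the wall clock of the first pull.
        int64_t whenUs  = sampleTimeUs - firstSampleTimeUs + firstSampleTimeRealUs;
        int64_t delayUs = whenUs - nowUs;              // msg->post(whenUs - nowUs)

        printf("pull the next sample in %lld us\n", (long long)delayUs);
        return 0;
    }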
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index cd6da85..da207e2 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -31,6 +31,7 @@
struct MediaPuller;
struct MediaSource;
struct MediaSender;
+struct NuMediaExtractor;
// Encapsulates the state of an RTP/RTCP session in the context of wifi
// display.
@@ -39,7 +40,8 @@
const sp<ANetworkSession> &netSession,
const sp<AMessage> &notify,
const struct in_addr &interfaceAddr,
- const sp<IHDCP> &hdcp);
+ const sp<IHDCP> &hdcp,
+ const char *path = NULL);
status_t init(
const char *clientIP, int32_t clientRtp, int32_t clientRtcp,
@@ -87,12 +89,14 @@
kWhatPause,
kWhatResume,
kWhatMediaSenderNotify,
+ kWhatPullExtractorSample,
};
sp<ANetworkSession> mNetSession;
sp<AMessage> mNotify;
in_addr mInterfaceAddr;
sp<IHDCP> mHDCP;
+ AString mMediaPath;
sp<MediaSender> mMediaSender;
int32_t mLocalRTPPort;
@@ -109,6 +113,15 @@
int64_t mPrevTimeUs;
+ sp<NuMediaExtractor> mExtractor;
+ KeyedVector<size_t, size_t> mExtractorTrackToInternalTrack;
+ bool mPullExtractorPending;
+ int32_t mPullExtractorGeneration;
+ int64_t mFirstSampleTimeRealUs;
+ int64_t mFirstSampleTimeUs;
+
+ status_t setupMediaPacketizer(bool enableAudio, bool enableVideo);
+
status_t setupPacketizer(
bool enableAudio,
bool usePCMAudio,
@@ -133,6 +146,9 @@
void notifySessionDead();
+ void schedulePullExtractor();
+ void onPullExtractor();
+
DISALLOW_EVIL_CONSTRUCTORS(PlaybackSession);
};
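mPullExtractorGeneration above follows the usual stagefright generation-counter idiom: kWhatPause bumps the generation, so a kWhatPullExtractorSample message that was posted before the pause carries a stale generation and is ignored when it finally arrives. A small stand-alone sketch of the idiom, not AOSP code:

    #include <cstdint>
    #include <cstdio>

    struct PullScheduler {
        int32_t generation = 0;
        bool pending = false;

        // A posted message carries the generation that was current at post time.
        int32_t schedule() { pending = true; return generation; }

        // Pausing invalidates every pull that is still in flight.
        void pause() { ++generation; pending = false; }

        // Returns true only for messages posted after the latest pause.
        bool onMessage(int32_t msgGeneration) {
            if (msgGeneration != generation) return false;  // stale, drop it
            pending = false;
            return true;                                    // current, pull a sample
        }
    };

    int main() {
        PullScheduler s;
        int32_t g = s.schedule();
        s.pause();                                  // kWhatPause arrives first
        printf("%d\n", s.onMessage(g) ? 1 : 0);     // 0: the old pull is dropped
        return 0;
    }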
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
index 53b7187..d993764 100644
--- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp
+++ b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
@@ -261,7 +261,7 @@
data[0] = 40; // descriptor_tag
data[1] = 4; // descriptor_length
- CHECK_EQ(mCSD.size(), 1u);
+ CHECK_GE(mCSD.size(), 1u);
const sp<ABuffer> &sps = mCSD.itemAt(0);
CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
CHECK_GE(sps->size(), 7u);
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index de66bde..5167cb3 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -44,7 +44,8 @@
WifiDisplaySource::WifiDisplaySource(
const sp<ANetworkSession> &netSession,
- const sp<IRemoteDisplayClient> &client)
+ const sp<IRemoteDisplayClient> &client,
+ const char *path)
: mState(INITIALIZED),
mNetSession(netSession),
mClient(client),
@@ -59,7 +60,12 @@
mIsHDCP2_0(false),
mHDCPPort(0),
mHDCPInitializationComplete(false),
- mSetupTriggerDeferred(false) {
+ mSetupTriggerDeferred(false),
+ mPlaybackSessionEstablished(false) {
+ if (path != NULL) {
+ mMediaPath.setTo(path);
+ }
+
mSupportedSourceVideoFormats.disableAll();
mSupportedSourceVideoFormats.setNativeResolution(
@@ -272,6 +278,11 @@
break;
}
+ case ANetworkSession::kWhatNetworkStall:
+ {
+ break;
+ }
+
default:
TRESPASS();
}
@@ -384,6 +395,8 @@
mClient->onDisplayError(
IRemoteDisplayClient::kDisplayErrorUnknown);
} else if (what == PlaybackSession::kWhatSessionEstablished) {
+ mPlaybackSessionEstablished = true;
+
if (mClient != NULL) {
if (!mSinkSupportsVideo) {
mClient->onDisplayConnected(
@@ -414,6 +427,8 @@
}
}
+ finishPlay();
+
if (mState == ABOUT_TO_PLAY) {
mState = PLAYING;
}
@@ -1217,7 +1232,7 @@
sp<PlaybackSession> playbackSession =
new PlaybackSession(
- mNetSession, notify, mInterfaceAddr, mHDCP);
+ mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
looper()->registerHandler(playbackSession);
@@ -1327,16 +1342,18 @@
}
ALOGI("Received PLAY request.");
-
- status_t err = playbackSession->play();
- CHECK_EQ(err, (status_t)OK);
+ if (mPlaybackSessionEstablished) {
+ finishPlay();
+ } else {
+ ALOGI("deferring PLAY request until session established.");
+ }
AString response = "RTSP/1.0 200 OK\r\n";
AppendCommonResponse(&response, cseq, playbackSessionID);
response.append("Range: npt=now-\r\n");
response.append("\r\n");
- err = mNetSession->sendRequest(sessionID, response.c_str());
+ status_t err = mNetSession->sendRequest(sessionID, response.c_str());
if (err != OK) {
return err;
@@ -1347,14 +1364,20 @@
return OK;
}
- playbackSession->finishPlay();
-
CHECK_EQ(mState, AWAITING_CLIENT_PLAY);
mState = ABOUT_TO_PLAY;
return OK;
}
+void WifiDisplaySource::finishPlay() {
+ const sp<PlaybackSession> &playbackSession =
+ mClientInfo.mPlaybackSession;
+
+ status_t err = playbackSession->play();
+ CHECK_EQ(err, (status_t)OK);
+}
+
status_t WifiDisplaySource::onPauseRequest(
int32_t sessionID,
int32_t cseq,
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 9e72682..3a1b0f9 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -39,7 +39,8 @@
WifiDisplaySource(
const sp<ANetworkSession> &netSession,
- const sp<IRemoteDisplayClient> &client);
+ const sp<IRemoteDisplayClient> &client,
+ const char *path = NULL);
status_t start(const char *iface);
status_t stop();
@@ -116,6 +117,7 @@
VideoFormats mSupportedSourceVideoFormats;
sp<ANetworkSession> mNetSession;
sp<IRemoteDisplayClient> mClient;
+ AString mMediaPath;
sp<TimeSyncer> mTimeSyncer;
struct in_addr mInterfaceAddr;
int32_t mSessionID;
@@ -161,6 +163,8 @@
bool mHDCPInitializationComplete;
bool mSetupTriggerDeferred;
+ bool mPlaybackSessionEstablished;
+
status_t makeHDCP();
// <<<< HDCP specific section
@@ -257,6 +261,8 @@
void finishStopAfterDisconnectingClient();
void finishStop2();
+ void finishPlay();
+
DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySource);
};
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index 0b18484..4f7dcc8 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -42,6 +42,7 @@
" %s -c host[:port]\tconnect to wifi source\n"
" -u uri \tconnect to an rtsp uri\n"
" -l ip[:port] \tlisten on the specified port "
+ " -f(ilename) \tstream media "
"(create a sink)\n",
me);
}
@@ -93,22 +94,24 @@
ALOGI("onDisplayConnected width=%u, height=%u, flags = 0x%08x",
width, height, flags);
- mSurfaceTexture = bufferProducer;
- mDisplayBinder = mComposerClient->createDisplay(
- String8("foo"), false /* secure */);
+ if (bufferProducer != NULL) {
+ mSurfaceTexture = bufferProducer;
+ mDisplayBinder = mComposerClient->createDisplay(
+ String8("foo"), false /* secure */);
- SurfaceComposerClient::openGlobalTransaction();
- mComposerClient->setDisplaySurface(mDisplayBinder, mSurfaceTexture);
+ SurfaceComposerClient::openGlobalTransaction();
+ mComposerClient->setDisplaySurface(mDisplayBinder, mSurfaceTexture);
- Rect layerStackRect(1280, 720); // XXX fix this.
- Rect displayRect(1280, 720);
+ Rect layerStackRect(1280, 720); // XXX fix this.
+ Rect displayRect(1280, 720);
- mComposerClient->setDisplayProjection(
- mDisplayBinder, 0 /* 0 degree rotation */,
- layerStackRect,
- displayRect);
+ mComposerClient->setDisplayProjection(
+ mDisplayBinder, 0 /* 0 degree rotation */,
+ layerStackRect,
+ displayRect);
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::closeGlobalTransaction();
+ }
}
void RemoteDisplayClient::onDisplayDisconnected() {
@@ -181,6 +184,26 @@
enableAudioSubmix(false /* enable */);
}
+static void createFileSource(
+ const AString &addr, int32_t port, const char *path) {
+ sp<ANetworkSession> session = new ANetworkSession;
+ session->start();
+
+ sp<ALooper> looper = new ALooper;
+ looper->start();
+
+ sp<RemoteDisplayClient> client = new RemoteDisplayClient;
+ sp<WifiDisplaySource> source = new WifiDisplaySource(session, client, path);
+ looper->registerHandler(source);
+
+ AString iface = StringPrintf("%s:%d", addr.c_str(), port);
+ CHECK_EQ((status_t)OK, source->start(iface.c_str()));
+
+ client->waitUntilDone();
+
+ source->stop();
+}
+
} // namespace android
int main(int argc, char **argv) {
@@ -197,8 +220,10 @@
AString listenOnAddr;
int32_t listenOnPort = -1;
+ AString path;
+
int res;
- while ((res = getopt(argc, argv, "hc:l:u:")) >= 0) {
+ while ((res = getopt(argc, argv, "hc:l:u:f:")) >= 0) {
switch (res) {
case 'c':
{
@@ -228,6 +253,12 @@
break;
}
+ case 'f':
+ {
+ path = optarg;
+ break;
+ }
+
case 'l':
{
const char *colonPos = strrchr(optarg, ':');
@@ -266,7 +297,12 @@
}
if (listenOnPort >= 0) {
- createSource(listenOnAddr, listenOnPort);
+ if (path.empty()) {
+ createSource(listenOnAddr, listenOnPort);
+ } else {
+ createFileSource(listenOnAddr, listenOnPort, path.c_str());
+ }
+
exit(0);
}
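With the wfd.cpp changes above, the binary that normally serves the mirrored display can stream a local media file instead when -f is given alongside -l. For illustration only (the address, port and path are placeholders, not taken from the patch):

    wfd -l 192.168.1.10:7236 -f /sdcard/Movies/clip.mp4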
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index 37ba5ae..946cdba 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -426,7 +426,7 @@
totalTime += kSleepTime;
if (totalTime > kMaxSleepTime) {
ALOGE("%s: Waited %d us, %d requests still in flight", __FUNCTION__,
- mHal2Device->ops->get_in_progress_count(mHal2Device), totalTime);
+ totalTime, mHal2Device->ops->get_in_progress_count(mHal2Device));
return TIMED_OUT;
}
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 7636143..5a6a3c8 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -277,55 +277,61 @@
sp<Client> client;
- Mutex::Autolock lock(mServiceLock);
- if (!canConnectUnsafe(cameraId, clientPackageName,
- cameraClient->asBinder(),
- /*out*/client)) {
- return NULL;
- } else if (client.get() != NULL) {
- return client;
+ {
+ Mutex::Autolock lock(mServiceLock);
+ if (!canConnectUnsafe(cameraId, clientPackageName,
+ cameraClient->asBinder(),
+ /*out*/client)) {
+ return NULL;
+ } else if (client.get() != NULL) {
+ return client;
+ }
+
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ // If there are other non-exclusive users of the camera,
+ // this will tear them down before we can reuse the camera
+ if (isValidCameraId(cameraId)) {
+ updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
+ cameraId);
+ }
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ client = new CameraClient(this, cameraClient,
+ clientPackageName, cameraId,
+ facing, callingPid, clientUid, getpid());
+ break;
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ client = new Camera2Client(this, cameraClient,
+ clientPackageName, cameraId,
+ facing, callingPid, clientUid, getpid(),
+ deviceVersion);
+ break;
+ case -1:
+ ALOGE("Invalid camera id %d", cameraId);
+ return NULL;
+ default:
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+ return NULL;
+ }
+
+ if (!connectFinishUnsafe(client, client->asBinder())) {
+ // this is probably not recoverable.. maybe the client can try again
+ updateStatus(ICameraServiceListener::STATUS_AVAILABLE, cameraId);
+
+ return NULL;
+ }
+
+ mClient[cameraId] = client;
+ LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId,
+ getpid());
}
-
- int facing = -1;
- int deviceVersion = getDeviceVersion(cameraId, &facing);
-
- // If there are other non-exclusive users of the camera,
- // this will tear them down before we can reuse the camera
- if (isValidCameraId(cameraId)) {
- updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE, cameraId);
- }
-
- switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- client = new CameraClient(this, cameraClient,
- clientPackageName, cameraId,
- facing, callingPid, clientUid, getpid());
- break;
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
- case CAMERA_DEVICE_API_VERSION_3_0:
- client = new Camera2Client(this, cameraClient,
- clientPackageName, cameraId,
- facing, callingPid, clientUid, getpid(),
- deviceVersion);
- break;
- case -1:
- ALOGE("Invalid camera id %d", cameraId);
- return NULL;
- default:
- ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return NULL;
- }
-
- if (!connectFinishUnsafe(client, client->asBinder())) {
- // this is probably not recoverable.. but maybe the client can try again
- updateStatus(ICameraServiceListener::STATUS_AVAILABLE, cameraId);
-
- return NULL;
- }
-
- mClient[cameraId] = client;
- LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
+ // important: release the mutex here so the client can call back
+ // into the service from its destructor (can be at the end of the call)
return client;
}
@@ -357,47 +363,51 @@
return NULL;
}
- Mutex::Autolock lock(mServiceLock);
+ sp<ProClient> client;
{
- sp<Client> client;
- if (!canConnectUnsafe(cameraId, clientPackageName,
- cameraCb->asBinder(),
- /*out*/client)) {
+ Mutex::Autolock lock(mServiceLock);
+ {
+ sp<Client> client;
+ if (!canConnectUnsafe(cameraId, clientPackageName,
+ cameraCb->asBinder(),
+ /*out*/client)) {
+ return NULL;
+ }
+ }
+
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ ALOGE("Camera id %d uses HALv1, doesn't support ProCamera",
+ cameraId);
+ return NULL;
+ break;
+ case CAMERA_DEVICE_API_VERSION_2_0:
+ case CAMERA_DEVICE_API_VERSION_2_1:
+ client = new ProCamera2Client(this, cameraCb, String16(),
+ cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+ break;
+ case -1:
+ ALOGE("Invalid camera id %d", cameraId);
+ return NULL;
+ default:
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
return NULL;
}
+
+ if (!connectFinishUnsafe(client, client->asBinder())) {
+ return NULL;
+ }
+
+ mProClientList[cameraId].push(client);
+
+ LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId,
+ getpid());
}
-
- sp<ProClient> client;
-
- int facing = -1;
- int deviceVersion = getDeviceVersion(cameraId, &facing);
-
- switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- ALOGE("Camera id %d uses HALv1, doesn't support ProCamera", cameraId);
- return NULL;
- break;
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
- client = new ProCamera2Client(this, cameraCb, String16(),
- cameraId, facing, callingPid, USE_CALLING_UID, getpid());
- break;
- case -1:
- ALOGE("Invalid camera id %d", cameraId);
- return NULL;
- default:
- ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return NULL;
- }
-
- if (!connectFinishUnsafe(client, client->asBinder())) {
- return NULL;
- }
-
- mProClientList[cameraId].push(client);
-
- LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId,
- getpid());
+ // important: release the mutex here so the client can call back
+ // into the service from its destructor (can be at the end of the call)
return client;
}
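The CameraService change above is purely a lock-scoping one: the body of connect()/connectPro() now runs inside a nested block holding mServiceLock, and the lock is released before the returned strong pointer leaves the function, so a client destructor that calls back into the service cannot self-deadlock. A minimal stand-alone sketch of the pattern, with std::mutex and shared_ptr standing in for Mutex::Autolock and sp<>, not AOSP code:

    #include <memory>
    #include <mutex>

    struct Client { /* stand-in for CameraService::Client */ };

    struct Service {
        std::mutex mLock;

        std::shared_ptr<Client> connect() {
            std::shared_ptr<Client> client;
            {
                std::lock_guard<std::mutex> lock(mLock);  // like Mutex::Autolock
                client = std::make_shared<Client>();      // set-up happens under the lock
            }                                             // lock released here
            // If the caller drops its reference, ~Client may call back into
            // Service from this point on without touching mLock again.
            return client;
        }
    };

    int main() {
        Service s;
        s.connect();
        return 0;
    }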
diff --git a/services/camera/tests/CameraServiceTest/Android.mk b/services/camera/tests/CameraServiceTest/Android.mk
deleted file mode 100644
index 41b6f63..0000000
--- a/services/camera/tests/CameraServiceTest/Android.mk
+++ /dev/null
@@ -1,26 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= CameraServiceTest.cpp
-
-LOCAL_MODULE:= CameraServiceTest
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_C_INCLUDES += \
- frameworks/av/libs
-
-LOCAL_CFLAGS :=
-
-LOCAL_SHARED_LIBRARIES += \
- libbinder \
- libcutils \
- libutils \
- libui \
- libcamera_client \
- libgui
-
-# Disable it because the ISurface interface may change, and before we have a
-# chance to fix this test, we don't want to break normal builds.
-#include $(BUILD_EXECUTABLE)
diff --git a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
deleted file mode 100644
index e417b79..0000000
--- a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
+++ /dev/null
@@ -1,924 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "CameraServiceTest"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <camera/Camera.h>
-#include <camera/CameraParameters.h>
-#include <ui/GraphicBuffer.h>
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/ICameraService.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/ProcessState.h>
-#include <utils/KeyedVector.h>
-#include <utils/Log.h>
-#include <utils/Vector.h>
-#include <utils/threads.h>
-
-using namespace android;
-
-//
-// Assertion and Logging utilities
-//
-#define INFO(...) \
- do { \
- printf(__VA_ARGS__); \
- printf("\n"); \
- ALOGD(__VA_ARGS__); \
- } while(0)
-
-void assert_fail(const char *file, int line, const char *func, const char *expr) {
- INFO("assertion failed at file %s, line %d, function %s:",
- file, line, func);
- INFO("%s", expr);
- abort();
-}
-
-void assert_eq_fail(const char *file, int line, const char *func,
- const char *expr, int actual) {
- INFO("assertion failed at file %s, line %d, function %s:",
- file, line, func);
- INFO("(expected) %s != (actual) %d", expr, actual);
- abort();
-}
-
-#define ASSERT(e) \
- do { \
- if (!(e)) \
- assert_fail(__FILE__, __LINE__, __func__, #e); \
- } while(0)
-
-#define ASSERT_EQ(expected, actual) \
- do { \
- int _x = (actual); \
- if (_x != (expected)) \
- assert_eq_fail(__FILE__, __LINE__, __func__, #expected, _x); \
- } while(0)
-
-//
-// Holder service for pass objects between processes.
-//
-class IHolder : public IInterface {
-protected:
- enum {
- HOLDER_PUT = IBinder::FIRST_CALL_TRANSACTION,
- HOLDER_GET,
- HOLDER_CLEAR
- };
-public:
- DECLARE_META_INTERFACE(Holder);
-
- virtual void put(sp<IBinder> obj) = 0;
- virtual sp<IBinder> get() = 0;
- virtual void clear() = 0;
-};
-
-class BnHolder : public BnInterface<IHolder> {
- virtual status_t onTransact(uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-class BpHolder : public BpInterface<IHolder> {
-public:
- BpHolder(const sp<IBinder>& impl)
- : BpInterface<IHolder>(impl) {
- }
-
- virtual void put(sp<IBinder> obj) {
- Parcel data, reply;
- data.writeStrongBinder(obj);
- remote()->transact(HOLDER_PUT, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
- virtual sp<IBinder> get() {
- Parcel data, reply;
- remote()->transact(HOLDER_GET, data, &reply);
- return reply.readStrongBinder();
- }
-
- virtual void clear() {
- Parcel data, reply;
- remote()->transact(HOLDER_CLEAR, data, &reply);
- }
-};
-
-IMPLEMENT_META_INTERFACE(Holder, "CameraServiceTest.Holder");
-
-status_t BnHolder::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
- switch(code) {
- case HOLDER_PUT: {
- put(data.readStrongBinder());
- return NO_ERROR;
- } break;
- case HOLDER_GET: {
- reply->writeStrongBinder(get());
- return NO_ERROR;
- } break;
- case HOLDER_CLEAR: {
- clear();
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-class HolderService : public BnHolder {
- virtual void put(sp<IBinder> obj) {
- mObj = obj;
- }
- virtual sp<IBinder> get() {
- return mObj;
- }
- virtual void clear() {
- mObj.clear();
- }
-private:
- sp<IBinder> mObj;
-};
-
-//
-// A mock CameraClient
-//
-class MCameraClient : public BnCameraClient {
-public:
- virtual void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2);
- virtual void dataCallback(int32_t msgType, const sp<IMemory>& data);
- virtual void dataCallbackTimestamp(nsecs_t timestamp,
- int32_t msgType, const sp<IMemory>& data);
-
- // new functions
- void clearStat();
- enum OP { EQ, GE, LE, GT, LT };
- void assertNotify(int32_t msgType, OP op, int count);
- void assertData(int32_t msgType, OP op, int count);
- void waitNotify(int32_t msgType, OP op, int count);
- void waitData(int32_t msgType, OP op, int count);
- void assertDataSize(int32_t msgType, OP op, int dataSize);
-
- void setReleaser(ICamera *releaser) {
- mReleaser = releaser;
- }
-private:
- Mutex mLock;
- Condition mCond;
- DefaultKeyedVector<int32_t, int> mNotifyCount;
- DefaultKeyedVector<int32_t, int> mDataCount;
- DefaultKeyedVector<int32_t, int> mDataSize;
- bool test(OP op, int v1, int v2);
- void assertTest(OP op, int v1, int v2);
-
- ICamera *mReleaser;
-};
-
-void MCameraClient::clearStat() {
- Mutex::Autolock _l(mLock);
- mNotifyCount.clear();
- mDataCount.clear();
- mDataSize.clear();
-}
-
-bool MCameraClient::test(OP op, int v1, int v2) {
- switch (op) {
- case EQ: return v1 == v2;
- case GT: return v1 > v2;
- case LT: return v1 < v2;
- case GE: return v1 >= v2;
- case LE: return v1 <= v2;
- default: ASSERT(0); break;
- }
- return false;
-}
-
-void MCameraClient::assertTest(OP op, int v1, int v2) {
- if (!test(op, v1, v2)) {
- ALOGE("assertTest failed: op=%d, v1=%d, v2=%d", op, v1, v2);
- ASSERT(0);
- }
-}
-
-void MCameraClient::assertNotify(int32_t msgType, OP op, int count) {
- Mutex::Autolock _l(mLock);
- int v = mNotifyCount.valueFor(msgType);
- assertTest(op, v, count);
-}
-
-void MCameraClient::assertData(int32_t msgType, OP op, int count) {
- Mutex::Autolock _l(mLock);
- int v = mDataCount.valueFor(msgType);
- assertTest(op, v, count);
-}
-
-void MCameraClient::assertDataSize(int32_t msgType, OP op, int dataSize) {
- Mutex::Autolock _l(mLock);
- int v = mDataSize.valueFor(msgType);
- assertTest(op, v, dataSize);
-}
-
-void MCameraClient::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2) {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ssize_t i = mNotifyCount.indexOfKey(msgType);
- if (i < 0) {
- mNotifyCount.add(msgType, 1);
- } else {
- ++mNotifyCount.editValueAt(i);
- }
- mCond.signal();
-}
-
-void MCameraClient::dataCallback(int32_t msgType, const sp<IMemory>& data) {
- INFO("%s", __func__);
- int dataSize = data->size();
- INFO("data type = %d, size = %d", msgType, dataSize);
- Mutex::Autolock _l(mLock);
- ssize_t i = mDataCount.indexOfKey(msgType);
- if (i < 0) {
- mDataCount.add(msgType, 1);
- mDataSize.add(msgType, dataSize);
- } else {
- ++mDataCount.editValueAt(i);
- mDataSize.editValueAt(i) = dataSize;
- }
- mCond.signal();
-
- if (msgType == CAMERA_MSG_VIDEO_FRAME) {
- ASSERT(mReleaser != NULL);
- mReleaser->releaseRecordingFrame(data);
- }
-}
-
-void MCameraClient::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
- const sp<IMemory>& data) {
- dataCallback(msgType, data);
-}
-
-void MCameraClient::waitNotify(int32_t msgType, OP op, int count) {
- INFO("waitNotify: %d, %d, %d", msgType, op, count);
- Mutex::Autolock _l(mLock);
- while (true) {
- int v = mNotifyCount.valueFor(msgType);
- if (test(op, v, count)) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-void MCameraClient::waitData(int32_t msgType, OP op, int count) {
- INFO("waitData: %d, %d, %d", msgType, op, count);
- Mutex::Autolock _l(mLock);
- while (true) {
- int v = mDataCount.valueFor(msgType);
- if (test(op, v, count)) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-//
-// A mock Surface
-//
-class MSurface : public BnSurface {
-public:
- virtual status_t registerBuffers(const BufferHeap& buffers);
- virtual void postBuffer(ssize_t offset);
- virtual void unregisterBuffers();
- virtual sp<GraphicBuffer> requestBuffer(int bufferIdx, int usage);
- virtual status_t setBufferCount(int bufferCount);
-
- // new functions
- void clearStat();
- void waitUntil(int c0, int c1, int c2);
-
-private:
- // check callback count
- Condition mCond;
- Mutex mLock;
- int registerBuffersCount;
- int postBufferCount;
- int unregisterBuffersCount;
-};
-
-status_t MSurface::registerBuffers(const BufferHeap& buffers) {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++registerBuffersCount;
- mCond.signal();
- return NO_ERROR;
-}
-
-void MSurface::postBuffer(ssize_t offset) {
- // INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++postBufferCount;
- mCond.signal();
-}
-
-void MSurface::unregisterBuffers() {
- INFO("%s", __func__);
- Mutex::Autolock _l(mLock);
- ++unregisterBuffersCount;
- mCond.signal();
-}
-
-sp<GraphicBuffer> MSurface::requestBuffer(int bufferIdx, int usage) {
- INFO("%s", __func__);
- return NULL;
-}
-
-status_t MSurface::setBufferCount(int bufferCount) {
- INFO("%s", __func__);
- return NULL;
-}
-
-void MSurface::clearStat() {
- Mutex::Autolock _l(mLock);
- registerBuffersCount = 0;
- postBufferCount = 0;
- unregisterBuffersCount = 0;
-}
-
-void MSurface::waitUntil(int c0, int c1, int c2) {
- INFO("waitUntil: %d %d %d", c0, c1, c2);
- Mutex::Autolock _l(mLock);
- while (true) {
- if (registerBuffersCount >= c0 &&
- postBufferCount >= c1 &&
- unregisterBuffersCount >= c2) {
- break;
- }
- mCond.wait(mLock);
- }
-}
-
-//
-// Utilities to use the Holder service
-//
-sp<IHolder> getHolder() {
- sp<IServiceManager> sm = defaultServiceManager();
- ASSERT(sm != 0);
- sp<IBinder> binder = sm->getService(String16("CameraServiceTest.Holder"));
- ASSERT(binder != 0);
- sp<IHolder> holder = interface_cast<IHolder>(binder);
- ASSERT(holder != 0);
- return holder;
-}
-
-void putTempObject(sp<IBinder> obj) {
- INFO("%s", __func__);
- getHolder()->put(obj);
-}
-
-sp<IBinder> getTempObject() {
- INFO("%s", __func__);
- return getHolder()->get();
-}
-
-void clearTempObject() {
- INFO("%s", __func__);
- getHolder()->clear();
-}
-
-//
-// Get a Camera Service
-//
-sp<ICameraService> getCameraService() {
- sp<IServiceManager> sm = defaultServiceManager();
- ASSERT(sm != 0);
- sp<IBinder> binder = sm->getService(String16("media.camera"));
- ASSERT(binder != 0);
- sp<ICameraService> cs = interface_cast<ICameraService>(binder);
- ASSERT(cs != 0);
- return cs;
-}
-
-int getNumberOfCameras() {
- sp<ICameraService> cs = getCameraService();
- return cs->getNumberOfCameras();
-}
-
-//
-// Various Connect Tests
-//
-void testConnect(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- c->disconnect();
-}
-
-void testAllowConnectOnceOnly(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- // Connect the first client.
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Same client -- ok.
- ASSERT(cs->connect(cc, cameraId) != 0);
- // Different client -- not ok.
- sp<MCameraClient> cc2 = new MCameraClient();
- ASSERT(cs->connect(cc2, cameraId) == 0);
- c->disconnect();
-}
-
-void testReconnectFailed() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- sp<MCameraClient> cc = new MCameraClient();
- ASSERT(c->connect(cc) != NO_ERROR);
-}
-
-void testReconnectSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- sp<MCameraClient> cc = new MCameraClient();
- ASSERT(c->connect(cc) == NO_ERROR);
- c->disconnect();
-}
-
-void testLockFailed() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() != NO_ERROR);
-}
-
-void testLockUnlockSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() == NO_ERROR);
- ASSERT(c->unlock() == NO_ERROR);
-}
-
-void testLockSuccess() {
- INFO("%s", __func__);
- sp<ICamera> c = interface_cast<ICamera>(getTempObject());
- ASSERT(c->lock() == NO_ERROR);
- c->disconnect();
-}
-
-//
-// Run the connect tests in another process.
-//
-const char *gExecutable;
-
-struct FunctionTableEntry {
- const char *name;
- void (*func)();
-};
-
-FunctionTableEntry function_table[] = {
-#define ENTRY(x) {#x, &x}
- ENTRY(testReconnectFailed),
- ENTRY(testReconnectSuccess),
- ENTRY(testLockUnlockSuccess),
- ENTRY(testLockFailed),
- ENTRY(testLockSuccess),
-#undef ENTRY
-};
-
-void runFunction(const char *tag) {
- INFO("runFunction: %s", tag);
- int entries = sizeof(function_table) / sizeof(function_table[0]);
- for (int i = 0; i < entries; i++) {
- if (strcmp(function_table[i].name, tag) == 0) {
- (*function_table[i].func)();
- return;
- }
- }
- ASSERT(0);
-}
-
-void runInAnotherProcess(const char *tag) {
- pid_t pid = fork();
- if (pid == 0) {
- execlp(gExecutable, gExecutable, tag, NULL);
- ASSERT(0);
- } else {
- int status;
- ASSERT_EQ(pid, wait(&status));
- ASSERT_EQ(0, status);
- }
-}
-
-void testReconnect(int cameraId) {
- INFO("%s", __func__);
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Reconnect to the same client -- ok.
- ASSERT(c->connect(cc) == NO_ERROR);
- // Reconnect to a different client (but the same pid) -- ok.
- sp<MCameraClient> cc2 = new MCameraClient();
- ASSERT(c->connect(cc2) == NO_ERROR);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
-}
-
-void testLockUnlock(int cameraId) {
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // We can lock as many times as we want.
- ASSERT(c->lock() == NO_ERROR);
- ASSERT(c->lock() == NO_ERROR);
- // Lock from a different process -- not ok.
- putTempObject(c->asBinder());
- runInAnotherProcess("testLockFailed");
- // Unlock then lock from a different process -- ok.
- ASSERT(c->unlock() == NO_ERROR);
- runInAnotherProcess("testLockUnlockSuccess");
- // Unlock then lock from a different process -- ok.
- runInAnotherProcess("testLockSuccess");
- clearTempObject();
-}
-
-void testReconnectFromAnotherProcess(int cameraId) {
- INFO("%s", __func__);
-
- sp<ICameraService> cs = getCameraService();
- sp<MCameraClient> cc = new MCameraClient();
- sp<ICamera> c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- // Reconnect from a different process -- not ok.
- putTempObject(c->asBinder());
- runInAnotherProcess("testReconnectFailed");
- // Unlock then reconnect from a different process -- ok.
- ASSERT(c->unlock() == NO_ERROR);
- runInAnotherProcess("testReconnectSuccess");
- clearTempObject();
-}
-
-// We need to flush the command buffer after the reference
-// to ICamera is gone. The sleep is for the server to run
-// the destructor for it.
-static void flushCommands() {
- IPCThreadState::self()->flushCommands();
- usleep(200000); // 200ms
-}
-
-// Run a test case
-#define RUN(class_name, cameraId) do { \
- { \
- INFO(#class_name); \
- class_name instance; \
- instance.init(cameraId); \
- instance.run(); \
- } \
- flushCommands(); \
-} while(0)
-
-// Base test case after the the camera is connected.
-class AfterConnect {
-public:
- void init(int cameraId) {
- cs = getCameraService();
- cc = new MCameraClient();
- c = cs->connect(cc, cameraId);
- ASSERT(c != 0);
- }
-
-protected:
- sp<ICameraService> cs;
- sp<MCameraClient> cc;
- sp<ICamera> c;
-
- ~AfterConnect() {
- c->disconnect();
- c.clear();
- cc.clear();
- cs.clear();
- }
-};
-
-class TestSetPreviewDisplay : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStartPreview : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
-
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
-
- surface->waitUntil(1, 10, 0); // needs 1 registerBuffers and 10 postBuffer
- surface->clearStat();
-
- sp<MSurface> another_surface = new MSurface();
- c->setPreviewDisplay(another_surface); // just to make sure unregisterBuffers
- // is called.
- surface->waitUntil(0, 0, 1); // needs unregisterBuffers
-
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStartPreviewWithoutDisplay : public AfterConnect {
-public:
- void run() {
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-// Base test case after the the camera is connected and the preview is started.
-class AfterStartPreview : public AfterConnect {
-public:
- void init(int cameraId) {
- AfterConnect::init(cameraId);
- surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- ASSERT(c->startPreview() == NO_ERROR);
- }
-
-protected:
- sp<MSurface> surface;
-
- ~AfterStartPreview() {
- surface.clear();
- }
-};
-
-class TestAutoFocus : public AfterStartPreview {
-public:
- void run() {
- cc->assertNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 0);
- c->autoFocus();
- cc->waitNotify(CAMERA_MSG_FOCUS, MCameraClient::EQ, 1);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestStopPreview : public AfterStartPreview {
-public:
- void run() {
- ASSERT(c->previewEnabled() == true);
- c->stopPreview();
- ASSERT(c->previewEnabled() == false);
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestTakePicture: public AfterStartPreview {
-public:
- void run() {
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- c->stopPreview();
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestTakeMultiplePictures: public AfterStartPreview {
-public:
- void run() {
- for (int i = 0; i < 10; i++) {
- cc->clearStat();
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitNotify(CAMERA_MSG_SHUTTER, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- }
- c->disconnect();
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-};
-
-class TestGetParameters: public AfterStartPreview {
-public:
- void run() {
- String8 param_str = c->getParameters();
- INFO("%s", static_cast<const char*>(param_str));
- }
-};
-
-static bool getNextSize(const char **ptrS, int *w, int *h) {
- const char *s = *ptrS;
-
- // skip over ','
- if (*s == ',') s++;
-
- // remember start position in p
- const char *p = s;
- while (*s != '\0' && *s != 'x') {
- s++;
- }
- if (*s == '\0') return false;
-
- // get the width
- *w = atoi(p);
-
- // skip over 'x'
- ASSERT(*s == 'x');
- p = s + 1;
- while (*s != '\0' && *s != ',') {
- s++;
- }
-
- // get the height
- *h = atoi(p);
- *ptrS = s;
- return true;
-}
-
-class TestPictureSize : public AfterStartPreview {
-public:
- void checkOnePicture(int w, int h) {
- const float rate = 0.9; // byte per pixel limit
- int pixels = w * h;
-
- CameraParameters param(c->getParameters());
- param.setPictureSize(w, h);
- // disable thumbnail to get more accurate size.
- param.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, 0);
- param.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, 0);
- c->setParameters(param.flatten());
-
- cc->clearStat();
- ASSERT(c->takePicture() == NO_ERROR);
- cc->waitData(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, 1);
- //cc->assertDataSize(CAMERA_MSG_RAW_IMAGE, MCameraClient::EQ, pixels*3/2);
- cc->waitData(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::EQ, 1);
- cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::LT,
- int(pixels * rate));
- cc->assertDataSize(CAMERA_MSG_COMPRESSED_IMAGE, MCameraClient::GT, 0);
- cc->assertNotify(CAMERA_MSG_ERROR, MCameraClient::EQ, 0);
- }
-
- void run() {
- CameraParameters param(c->getParameters());
- int w, h;
- const char *s = param.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES);
- while (getNextSize(&s, &w, &h)) {
- ALOGD("checking picture size %dx%d", w, h);
- checkOnePicture(w, h);
- }
- }
-};
-
-class TestPreviewCallbackFlag : public AfterConnect {
-public:
- void run() {
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
-
- // Try all flag combinations.
- for (int v = 0; v < 8; v++) {
- ALOGD("TestPreviewCallbackFlag: flag=%d", v);
- usleep(100000); // sleep a while to clear the in-flight callbacks.
- cc->clearStat();
- c->setPreviewCallbackFlag(v);
- ASSERT(c->previewEnabled() == false);
- ASSERT(c->startPreview() == NO_ERROR);
- ASSERT(c->previewEnabled() == true);
- sleep(2);
- c->stopPreview();
- if ((v & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) == 0) {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 0);
- } else {
- if ((v & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) == 0) {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 10);
- } else {
- cc->assertData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, 1);
- }
- }
- }
- }
-};
-
-class TestRecording : public AfterConnect {
-public:
- void run() {
- ASSERT(c->recordingEnabled() == false);
- sp<MSurface> surface = new MSurface();
- ASSERT(c->setPreviewDisplay(surface) == NO_ERROR);
- c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
- cc->setReleaser(c.get());
- c->startRecording();
- ASSERT(c->recordingEnabled() == true);
- sleep(2);
- c->stopRecording();
- usleep(100000); // sleep a while to clear the in-flight callbacks.
- cc->setReleaser(NULL);
- cc->assertData(CAMERA_MSG_VIDEO_FRAME, MCameraClient::GE, 10);
- }
-};
-
-class TestPreviewSize : public AfterStartPreview {
-public:
- void checkOnePicture(int w, int h) {
- int size = w*h*3/2; // should read from parameters
-
- c->stopPreview();
-
- CameraParameters param(c->getParameters());
- param.setPreviewSize(w, h);
- c->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
- c->setParameters(param.flatten());
-
- c->startPreview();
-
- cc->clearStat();
- cc->waitData(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::GE, 1);
- cc->assertDataSize(CAMERA_MSG_PREVIEW_FRAME, MCameraClient::EQ, size);
- }
-
- void run() {
- CameraParameters param(c->getParameters());
- int w, h;
- const char *s = param.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
- while (getNextSize(&s, &w, &h)) {
- ALOGD("checking preview size %dx%d", w, h);
- checkOnePicture(w, h);
- }
- }
-};
-
-void runHolderService() {
- defaultServiceManager()->addService(
- String16("CameraServiceTest.Holder"), new HolderService());
- ProcessState::self()->startThreadPool();
-}
-
-int main(int argc, char **argv)
-{
- if (argc != 1) {
- runFunction(argv[1]);
- return 0;
- }
- INFO("CameraServiceTest start");
- gExecutable = argv[0];
- runHolderService();
- int n = getNumberOfCameras();
- INFO("%d Cameras available", n);
-
- for (int id = 0; id < n; id++) {
- INFO("Testing camera %d", id);
- testConnect(id); flushCommands();
- testAllowConnectOnceOnly(id); flushCommands();
- testReconnect(id); flushCommands();
- testLockUnlock(id); flushCommands();
- testReconnectFromAnotherProcess(id); flushCommands();
-
- RUN(TestSetPreviewDisplay, id);
- RUN(TestStartPreview, id);
- RUN(TestStartPreviewWithoutDisplay, id);
- RUN(TestAutoFocus, id);
- RUN(TestStopPreview, id);
- RUN(TestTakePicture, id);
- RUN(TestTakeMultiplePictures, id);
- RUN(TestGetParameters, id);
- RUN(TestPictureSize, id);
- RUN(TestPreviewCallbackFlag, id);
- RUN(TestRecording, id);
- RUN(TestPreviewSize, id);
- }
-
- INFO("CameraServiceTest finished");
-}
diff --git a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2 b/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/services/camera/tests/CameraServiceTest/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/services/camera/tests/CameraServiceTest/NOTICE b/services/camera/tests/CameraServiceTest/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/services/camera/tests/CameraServiceTest/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
- Copyright (c) 2005-2008, The Android Open Source Project
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-