Merge "Clean up references to AUDIO_FORMAT_PCM_8_24_BIT"
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
index fec5461..190402e 100644
--- a/camera/ProCamera.cpp
+++ b/camera/ProCamera.cpp
@@ -247,7 +247,8 @@
sp <IProCameraUser> c = mCamera;
if (c == 0) return NO_INIT;
- sp<CpuConsumer> cc = new CpuConsumer(heapCount, synchronousMode);
+ sp<BufferQueue> bq = new BufferQueue();
+ sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount, synchronousMode);
cc->setName(String8("ProCamera::mCpuConsumer"));
sp<Surface> stc = new Surface(
diff --git a/camera/photography/ICameraDeviceUser.cpp b/camera/photography/ICameraDeviceUser.cpp
index 0515bd7..325f94d 100644
--- a/camera/photography/ICameraDeviceUser.cpp
+++ b/camera/photography/ICameraDeviceUser.cpp
@@ -151,21 +151,22 @@
}
- virtual status_t getCameraInfo(int cameraId, camera_metadata** info)
+ virtual status_t getCameraInfo(CameraMetadata* info)
{
Parcel data, reply;
data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(cameraId);
remote()->transact(GET_CAMERA_INFO, data, &reply);
-
reply.readExceptionCode();
status_t result = reply.readInt32();
+ CameraMetadata out;
if (reply.readInt32() != 0) {
- CameraMetadata::readFromParcel(reply, /*out*/info);
- } else if (info) {
- *info = NULL;
+ out.readFromParcel(&reply);
+ }
+
+ if (info != NULL) {
+ info->swap(out);
}
return result;
@@ -273,6 +274,7 @@
reply->writeNoException();
reply->writeInt32(ret);
+ // out-variables are after exception and return value
reply->writeInt32(1); // to mark presence of metadata object
request.writeToParcel(const_cast<Parcel*>(reply));
@@ -281,19 +283,16 @@
case GET_CAMERA_INFO: {
CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int cameraId = data.readInt32();
-
- camera_metadata_t* info = NULL;
+ CameraMetadata info;
status_t ret;
- ret = getCameraInfo(cameraId, &info);
-
- reply->writeInt32(1); // to mark presence of metadata object
- CameraMetadata::writeToParcel(*reply, info);
+ ret = getCameraInfo(&info);
reply->writeNoException();
reply->writeInt32(ret);
- free_camera_metadata(info);
+ // out-variables are after exception and return value
+ reply->writeInt32(1); // to mark presence of metadata object
+ info.writeToParcel(reply);
return NO_ERROR;
} break;
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index f8fc8ed..529b96c 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -937,7 +937,8 @@
} else {
CHECK(useSurfaceTexAlloc);
- sp<GLConsumer> texture = new GLConsumer(0 /* tex */);
+ sp<BufferQueue> bq = new BufferQueue();
+ sp<GLConsumer> texture = new GLConsumer(bq, 0 /* tex */);
gSurface = new Surface(texture->getBufferQueue());
}
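Every consumer constructed in this patch now follows the same pattern: the caller allocates the BufferQueue explicitly and passes it as the first constructor argument, rather than letting the consumer create one internally. A minimal sketch of the idiom (bq, texId, and kBufferCount are illustrative names, not from this patch):

    sp<BufferQueue> bq = new BufferQueue();

    // GL path: GLConsumer now takes the queue plus the texture name.
    sp<GLConsumer> glc = new GLConsumer(bq, texId);

    // CPU path: CpuConsumer now takes the queue plus buffer count and sync mode.
    sp<CpuConsumer> cc = new CpuConsumer(bq, kBufferCount, /*synchronousMode*/ true);

    // The producer-side Surface is still obtained from the consumer's queue.
    sp<Surface> surface = new Surface(glc->getBufferQueue());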
diff --git a/include/camera/photography/ICameraDeviceUser.h b/include/camera/photography/ICameraDeviceUser.h
index 1b8d666..3ea49f4 100644
--- a/include/camera/photography/ICameraDeviceUser.h
+++ b/include/camera/photography/ICameraDeviceUser.h
@@ -58,9 +58,8 @@
/*out*/
CameraMetadata* request) = 0;
// Get static camera metadata
- virtual status_t getCameraInfo(int cameraId,
- /*out*/
- camera_metadata** info) = 0;
+ virtual status_t getCameraInfo(/*out*/
+ CameraMetadata* info) = 0;
};
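With the out-parameter now a CameraMetadata object, callers no longer receive a raw camera_metadata pointer they must free. A hedged sketch of the new call pattern ('device' is assumed to be an sp<ICameraDeviceUser> obtained elsewhere):

    CameraMetadata info;
    status_t res = device->getCameraInfo(&info);   // metadata is swapped into 'info'
    if (res == OK && !info.isEmpty()) {
        // use info; CameraMetadata owns the buffer, so no free_camera_metadata() is needed
    }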
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 6727601..58e0deb 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -75,8 +75,10 @@
size_t frameCount; // number of sample frames corresponding to size;
// on input it is the number of frames desired,
// on output is the number of frames actually filled
+ // (currently ignored, but will become the primary field in the future)
size_t size; // input/output in bytes == frameCount * frameSize
+ // on output is the number of bytes actually filled
// FIXME this is redundant with respect to frameCount,
// and TRANSFER_OBTAIN mode is broken for 8-bit data
// since we don't define the frame format
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index f8a9f2b..0aa5870 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -125,7 +125,9 @@
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
const = 0;
- // register a current process for audio output change notifications
+ // Register an object to receive audio input/output change and track notifications.
+ // For a given calling pid, AudioFlinger disregards any registrations after the first.
+ // Thus the IAudioFlingerClient must be a singleton per process.
virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
// retrieve the audio recording buffer size
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 0b1d1e4..38f9d11 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -130,6 +130,16 @@
node_id node,
const char *parameter_name,
OMX_INDEXTYPE *index) = 0;
+
+ enum InternalOptionType {
+ INTERNAL_OPTION_SUSPEND, // data is a bool
+ };
+ virtual status_t setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size) = 0;
};
struct omx_message {
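ACodec, later in this patch, drives the new option from its setParameters() path. A condensed, hedged sketch of that call (assuming an sp<IOMX> named omx and an input port index kPortIndexInput):

    bool suspend = true;   // INTERNAL_OPTION_SUSPEND carries a bool payload
    status_t err = omx->setInternalOption(
            node, kPortIndexInput, IOMX::INTERNAL_OPTION_SUSPEND,
            &suspend, sizeof(suspend));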
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 107ba66..6d59ea7 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -90,6 +90,8 @@
virtual ~Timeline();
#endif
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
static size_t sharedSize(size_t size);
#if 0
@@ -110,8 +112,12 @@
class Writer : public RefBase {
public:
Writer(); // dummy nop implementation without shared memory
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
Writer(size_t size, void *shared);
Writer(size_t size, const sp<IMemory>& iMemory);
+
virtual ~Writer() { }
virtual void log(const char *string);
@@ -165,8 +171,12 @@
class Reader : public RefBase {
public:
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
Reader(size_t size, const void *shared);
Reader(size_t size, const sp<IMemory>& iMemory);
+
virtual ~Reader() { }
void dump(int fd, size_t indent = 0);
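Taken together, the two size comments imply the following setup; a hedged sketch over a plain heap block (the kSize value and the use of malloc rather than ashmem are assumptions, not part of this patch):

    const size_t kSize = 8192;                                  // desired timeline size in bytes
    void *shared = malloc(NBLog::Timeline::sharedSize(kSize));  // rounded up, plus index overhead
    sp<NBLog::Writer> writer = new NBLog::Writer(kSize, shared);
    sp<NBLog::Reader> reader = new NBLog::Reader(kSize, shared);
    writer->log("hello");
    reader->dump(STDERR_FILENO);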
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index b41684a..0592683 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -168,6 +168,7 @@
const bool mIsOut; // true for AudioTrack, false for AudioRecord
const bool mClientInServer; // true for OutputTrack, false for AudioTrack & AudioRecord
bool mIsShutdown; // latch set to true when shared memory corruption detected
+ size_t mUnreleased; // unreleased frames remaining from most recent obtainBuffer
};
// ----------------------------------------------------------------------------
@@ -213,7 +214,7 @@
// DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
// -EINTR Call has been interrupted. Look around to see why, and then perhaps try again.
// NO_INIT Shared memory is corrupt.
- // BAD_VALUE On entry buffer == NULL or buffer->mFrameCount == 0.
+ // Assertion failure on entry if buffer == NULL or buffer->mFrameCount == 0.
status_t obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
struct timespec *elapsed = NULL);
@@ -372,7 +373,6 @@
virtual void releaseBuffer(Buffer* buffer);
protected:
- size_t mUnreleased; // unreleased frames remaining from most recent obtainBuffer()
size_t mAvailToClient; // estimated frames available to client prior to releaseBuffer()
private:
int32_t mFlush; // our copy of cblk->u.mStreaming.mFlush, for streaming output only
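A hedged sketch of the client-side contract that the obtainBuffer()/releaseBuffer() changes above enforce (proxy, framesWanted, src, and frameSize are placeholders; the real callers are AudioTrack and AudioRecord):

    Proxy::Buffer buf;
    buf.mFrameCount = framesWanted;               // must be non-zero, and &buf must be non-NULL,
    status_t status = proxy->obtainBuffer(&buf);  // or LOG_ALWAYS_FATAL fires instead of BAD_VALUE
    if (status == NO_ERROR) {
        memcpy(buf.mRaw, src, buf.mFrameCount * frameSize);
        proxy->releaseBuffer(&buf);               // released count must not exceed what
    }                                             // obtainBuffer() granted (tracked in mUnreleased)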
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
index 702900b..84a8e15 100755
--- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
@@ -568,7 +568,8 @@
RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId)
: mRenderer(renderer)
, mTextureId(textureId) {
- mST = new GLConsumer(mTextureId);
+ sp<BufferQueue> bq = new BufferQueue();
+ mST = new GLConsumer(bq, mTextureId);
mSTC = new Surface(mST->getBufferQueue());
native_window_connect(mSTC.get(), NATIVE_WINDOW_API_MEDIA);
}
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 22d6763..6b9b3be 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -454,7 +454,7 @@
} break;
case OUTPUT_CLOSED: {
if (gOutputs.indexOfKey(ioHandle) < 0) {
- ALOGW("ioConfigChanged() closing unknow output! %d", ioHandle);
+ ALOGW("ioConfigChanged() closing unknown output! %d", ioHandle);
break;
}
ALOGV("ioConfigChanged() output %d closed", ioHandle);
@@ -465,7 +465,7 @@
case OUTPUT_CONFIG_CHANGED: {
int index = gOutputs.indexOfKey(ioHandle);
if (index < 0) {
- ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
+ ALOGW("ioConfigChanged() modifying unknown output! %d", ioHandle);
break;
}
if (param2 == NULL) break;
@@ -537,6 +537,8 @@
return gAudioPolicyService;
}
+// ---------------------------------------------------------------------------
+
status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address)
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 33c4462..7b6b38d 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -585,6 +585,7 @@
status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
+ // The only purpose of setting marker position is to get a callback
if (mCbf == NULL) {
return INVALID_OPERATION;
}
@@ -610,6 +611,7 @@
status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
+ // The only purpose of setting position update period is to get a callback
if (mCbf == NULL) {
return INVALID_OPERATION;
}
@@ -1220,6 +1222,11 @@
nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
+ // Currently the AudioTrack thread is not created if there are no callbacks.
+ // Would it ever make sense to run the thread, even without callbacks?
+ // If so, then replace this by checks at each use for mCbf != NULL.
+ LOG_ALWAYS_FATAL_IF(mCblk == NULL);
+
mLock.lock();
if (mAwaitBoost) {
mAwaitBoost = false;
@@ -1238,7 +1245,8 @@
if (tryCounter < 0) {
ALOGE("did not receive expected priority boost on time");
}
- return true;
+ // Run again immediately
+ return 0;
}
// Can only reference mCblk while locked
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 5f8f292..55bf175 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -38,7 +38,7 @@
bool isOut, bool clientInServer)
: mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize),
mFrameCountP2(roundup(frameCount)), mIsOut(isOut), mClientInServer(clientInServer),
- mIsShutdown(false)
+ mIsShutdown(false), mUnreleased(0)
{
}
@@ -64,10 +64,7 @@
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
struct timespec *elapsed)
{
- if (buffer == NULL || buffer->mFrameCount == 0) {
- ALOGE("%s BAD_VALUE", __func__);
- return BAD_VALUE;
- }
+ LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
struct timespec total; // total elapsed time spent waiting
total.tv_sec = 0;
total.tv_nsec = 0;
@@ -164,7 +161,7 @@
buffer->mRaw = part1 > 0 ?
&((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL;
buffer->mNonContig = avail - part1;
- // mUnreleased = part1;
+ mUnreleased = part1;
status = NO_ERROR;
break;
}
@@ -238,6 +235,7 @@
case -EWOULDBLOCK: // benign race condition with server
case -EINTR: // wait was interrupted by signal or other spurious wakeup
case -ETIMEDOUT: // time-out expired
+ // FIXME these error/non-zero statuses are being dropped
break;
default:
ALOGE("%s unexpected error %d", __func__, ret);
@@ -252,6 +250,7 @@
buffer->mFrameCount = 0;
buffer->mRaw = NULL;
buffer->mNonContig = 0;
+ mUnreleased = 0;
}
if (elapsed != NULL) {
*elapsed = total;
@@ -268,14 +267,17 @@
void ClientProxy::releaseBuffer(Buffer* buffer)
{
+ LOG_ALWAYS_FATAL_IF(buffer == NULL);
size_t stepCount = buffer->mFrameCount;
- // FIXME
- // check mUnreleased
- // verify that stepCount <= frameCount returned by the last obtainBuffer()
- // verify stepCount not > total frame count of pipe
- if (stepCount == 0) {
+ if (stepCount == 0 || mIsShutdown) {
+ // prevent accidental re-use of buffer
+ buffer->mFrameCount = 0;
+ buffer->mRaw = NULL;
+ buffer->mNonContig = 0;
return;
}
+ LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
+ mUnreleased -= stepCount;
audio_track_cblk_t* cblk = mCblk;
// Both of these barriers are required
if (mIsOut) {
@@ -362,20 +364,18 @@
ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool isOut, bool clientInServer)
- : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mUnreleased(0),
+ : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
mAvailToClient(0), mFlush(0), mDeferWake(false)
{
}
status_t ServerProxy::obtainBuffer(Buffer* buffer)
{
+ LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
if (mIsShutdown) {
- buffer->mFrameCount = 0;
- buffer->mRaw = NULL;
- buffer->mNonContig = 0;
- mUnreleased = 0;
- return NO_INIT;
+ goto no_init;
}
+ {
audio_track_cblk_t* cblk = mCblk;
// compute number of frames available to write (AudioTrack) or read (AudioRecord),
// or use previous cached value from framesReady(), with added barrier if it omits.
@@ -388,6 +388,8 @@
if (flush != mFlush) {
front = rear;
mFlush = flush;
+ // effectively obtain then release whatever is in the buffer
+ android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
} else {
front = cblk->u.mStreaming.mFront;
}
@@ -402,11 +404,7 @@
mIsShutdown = true;
}
if (mIsShutdown) {
- buffer->mFrameCount = 0;
- buffer->mRaw = NULL;
- buffer->mNonContig = 0;
- mUnreleased = 0;
- return NO_INIT;
+ goto no_init;
}
// don't allow filling pipe beyond the nominal size
size_t availToServer;
@@ -443,23 +441,27 @@
// FIXME need to test for recording
mDeferWake = part1 < ask && availToServer >= ask;
return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
+ }
+no_init:
+ buffer->mFrameCount = 0;
+ buffer->mRaw = NULL;
+ buffer->mNonContig = 0;
+ mUnreleased = 0;
+ return NO_INIT;
}
void ServerProxy::releaseBuffer(Buffer* buffer)
{
- if (mIsShutdown) {
+ LOG_ALWAYS_FATAL_IF(buffer == NULL);
+ size_t stepCount = buffer->mFrameCount;
+ if (stepCount == 0 || mIsShutdown) {
+ // prevent accidental re-use of buffer
buffer->mFrameCount = 0;
buffer->mRaw = NULL;
buffer->mNonContig = 0;
return;
}
- size_t stepCount = buffer->mFrameCount;
- LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
- if (stepCount == 0) {
- buffer->mRaw = NULL;
- buffer->mNonContig = 0;
- return;
- }
+ LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
mUnreleased -= stepCount;
audio_track_cblk_t* cblk = mCblk;
if (mIsOut) {
@@ -637,8 +639,9 @@
void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
{
size_t stepCount = buffer->mFrameCount;
- LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
+ LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
if (stepCount == 0) {
+ // prevent accidental re-use of buffer
buffer->mRaw = NULL;
buffer->mNonContig = 0;
return;
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index d6cd43a..5bbb2f0 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -51,6 +51,7 @@
GET_EXTENSION_INDEX,
OBSERVER_ON_MSG,
GET_GRAPHIC_BUFFER_USAGE,
+ SET_INTERNAL_OPTION,
};
class BpOMX : public BpInterface<IOMX> {
@@ -439,6 +440,24 @@
return err;
}
+
+ virtual status_t setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *optionData,
+ size_t size) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+ data.writeIntPtr((intptr_t)node);
+ data.writeInt32(port_index);
+ data.writeInt32(size);
+ data.write(optionData, size);
+ data.writeInt32(type);
+ remote()->transact(SET_INTERNAL_OPTION, data, &reply);
+
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
@@ -537,6 +556,7 @@
case SET_PARAMETER:
case GET_CONFIG:
case SET_CONFIG:
+ case SET_INTERNAL_OPTION:
{
CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -562,6 +582,15 @@
case SET_CONFIG:
err = setConfig(node, index, params, size);
break;
+ case SET_INTERNAL_OPTION:
+ {
+ InternalOptionType type =
+ (InternalOptionType)data.readInt32();
+
+ err = setInternalOption(node, index, type, params, size);
+ break;
+ }
+
default:
TRESPASS();
}
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index ae4d845..62593b2 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -134,7 +134,6 @@
return;
}
- ALOGE("Failed to find crypto plugin");
mInitCheck = ERROR_UNSUPPORTED;
}
@@ -151,6 +150,7 @@
if (!mLibrary.get()) {
mLibrary = new SharedLibrary(path);
if (!*mLibrary) {
+ ALOGE("loadLibraryForScheme failed:%s", mLibrary->lastError());
return false;
}
@@ -165,6 +165,7 @@
if (createCryptoFactory == NULL ||
(mFactory = createCryptoFactory()) == NULL ||
!mFactory->isCryptoSchemeSupported(uuid)) {
+ ALOGE("createCryptoFactory failed:%s", mLibrary->lastError());
closeFactory();
return false;
}
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index 8db5b9b..270b872 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -422,7 +422,7 @@
status_t MidiFile::createOutputTrack() {
if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels,
- CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
+ CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2 /*bufferCount*/) != NO_ERROR) {
ALOGE("mAudioSink open failed");
return ERROR_OPEN_FAILED;
}
diff --git a/media/libmediaplayerservice/SharedLibrary.cpp b/media/libmediaplayerservice/SharedLibrary.cpp
index 178e15d..34db761 100644
--- a/media/libmediaplayerservice/SharedLibrary.cpp
+++ b/media/libmediaplayerservice/SharedLibrary.cpp
@@ -46,4 +46,10 @@
}
return dlsym(mLibHandle, symbol);
}
+
+ const char *SharedLibrary::lastError() const {
+ const char *error = dlerror();
+ return error ? error : "No errors or unknown error";
+ }
+
};
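Crypto.cpp above shows the intended use of lastError(); a condensed sketch (the String8 'path' is assumed):

    sp<SharedLibrary> lib = new SharedLibrary(path);
    if (!*lib) {
        ALOGE("failed to load %s: %s", path.string(), lib->lastError());
    }
    void *sym = lib->lookup("createCryptoFactory");   // symbol name is illustrative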
diff --git a/media/libmediaplayerservice/SharedLibrary.h b/media/libmediaplayerservice/SharedLibrary.h
index 5353642..88451a0 100644
--- a/media/libmediaplayerservice/SharedLibrary.h
+++ b/media/libmediaplayerservice/SharedLibrary.h
@@ -29,6 +29,7 @@
bool operator!() const;
void *lookup(const char *symbol) const;
+ const char *lastError() const;
private:
void *mLibHandle;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6bc7718..8d1020e 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -4106,6 +4106,19 @@
}
}
+ int32_t dropInputFrames;
+ if (params->findInt32("drop-input-frames", &dropInputFrames)) {
+ bool suspend = dropInputFrames != 0;
+
+ CHECK_EQ((status_t)OK,
+ mOMX->setInternalOption(
+ mNode,
+ kPortIndexInput,
+ IOMX::INTERNAL_OPTION_SUSPEND,
+ &suspend,
+ sizeof(suspend)));
+ }
+
return OK;
}
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 1822f07..810d88f 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -113,6 +113,13 @@
const char *parameter_name,
OMX_INDEXTYPE *index);
+ virtual status_t setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size);
+
private:
mutable Mutex mLock;
@@ -331,6 +338,15 @@
return getOMX(node)->getExtensionIndex(node, parameter_name, index);
}
+status_t MuxOMX::setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size) {
+ return getOMX(node)->setInternalOption(node, port_index, type, data, size);
+}
+
OMXClient::OMXClient() {
}
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 71b6569..305e7e0 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -293,7 +293,7 @@
// wait here till the frames come in from the client side
while (mStarted) {
- status_t err = mBufferQueue->acquireBuffer(&item);
+ status_t err = mBufferQueue->acquireBuffer(&item, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// wait for a buffer to be queued
mFrameAvailableCondition.wait(mMutex);
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index a92d376..4060a0a 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -12,11 +12,16 @@
frameworks/av/media/libstagefright/include \
frameworks/native/include/media/openmax \
+ifeq ($(TARGET_DEVICE), manta)
+ LOCAL_CFLAGS += -DSURFACE_IS_BGR32
+endif
+
LOCAL_STATIC_LIBRARIES := \
libvpx
LOCAL_SHARED_LIBRARIES := \
libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+ libhardware \
LOCAL_MODULE := libstagefright_soft_vpxenc
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 74d6df5..d8456fe 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -20,6 +20,8 @@
#include <utils/Log.h>
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
@@ -81,6 +83,52 @@
}
}
+static void ConvertRGB32ToPlanar(
+ const uint8_t *src, uint8_t *dstY, int32_t width, int32_t height) {
+ CHECK((width & 1) == 0);
+ CHECK((height & 1) == 0);
+
+ uint8_t *dstU = dstY + width * height;
+ uint8_t *dstV = dstU + (width / 2) * (height / 2);
+
+ for (int32_t y = 0; y < height; ++y) {
+ for (int32_t x = 0; x < width; ++x) {
+#ifdef SURFACE_IS_BGR32
+ unsigned blue = src[4 * x];
+ unsigned green = src[4 * x + 1];
+ unsigned red = src[4 * x + 2];
+#else
+ unsigned red = src[4 * x];
+ unsigned green = src[4 * x + 1];
+ unsigned blue = src[4 * x + 2];
+#endif
+
+ unsigned luma =
+ ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+
+ dstY[x] = luma;
+
+ if ((x & 1) == 0 && (y & 1) == 0) {
+ unsigned U =
+ ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+
+ unsigned V =
+ ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+
+ dstU[x / 2] = U;
+ dstV[x / 2] = V;
+ }
+ }
+
+ if ((y & 1) == 0) {
+ dstU += width / 2;
+ dstV += width / 2;
+ }
+
+ src += 4 * width;
+ dstY += width;
+ }
+}
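A usage note on the helper above (hedged sketch, not part of this change): the destination must be a contiguous I420 buffer of width * height * 3 / 2 bytes, which is exactly how mConversionBuffer is sized elsewhere in this file, and width/height must both be even:

    uint8_t *i420 = (uint8_t *)malloc(width * height * 3 / 2);  // Y plane plus quarter-size U and V
    ConvertRGB32ToPlanar(rgb32, i420, width, height);           // rgb32 holds 4 bytes per pixel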
SoftVPXEncoder::SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
@@ -99,8 +147,9 @@
mErrorResilience(OMX_FALSE),
mColorFormat(OMX_COLOR_FormatYUV420Planar),
mLevel(OMX_VIDEO_VP8Level_Version0),
- mConversionBuffer(NULL) {
-
+ mConversionBuffer(NULL),
+ mInputDataIsMeta(false),
+ mGrallocModule(NULL) {
initPorts();
}
@@ -247,7 +296,7 @@
return UNKNOWN_ERROR;
}
- if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
if (mConversionBuffer == NULL) {
mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
if (mConversionBuffer == NULL) {
@@ -427,9 +476,17 @@
(const OMX_VIDEO_PARAM_BITRATETYPE *)param);
case OMX_IndexParamPortDefinition:
- return internalSetPortParams(
+ {
+ OMX_ERRORTYPE err = internalSetPortParams(
(const OMX_PARAM_PORTDEFINITIONTYPE *)param);
+ if (err != OMX_ErrorNone) {
+ return err;
+ }
+
+ return SimpleSoftOMXComponent::internalSetParameter(index, param);
+ }
+
case OMX_IndexParamVideoPortFormat:
return internalSetFormatParams(
(const OMX_VIDEO_PARAM_PORTFORMATTYPE *)param);
@@ -442,6 +499,21 @@
return internalSetProfileLevel(
(const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param);
+ case OMX_IndexVendorStartUnused:
+ {
+ // storeMetaDataInBuffers
+ const StoreMetaDataInBuffersParams *storeParam =
+ (const StoreMetaDataInBuffersParams *)param;
+
+ if (storeParam->nPortIndex != kInputPortIndex) {
+ return OMX_ErrorBadPortIndex;
+ }
+
+ mInputDataIsMeta = (storeParam->bStoreMetaData == OMX_TRUE);
+
+ return OMX_ErrorNone;
+ }
+
default:
return SimpleSoftOMXComponent::internalSetParameter(index, param);
}
@@ -507,6 +579,10 @@
format->eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
format->eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
mColorFormat = format->eColorFormat;
+
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+ def->format.video.eColorFormat = mColorFormat;
+
return OMX_ErrorNone;
} else {
ALOGE("Unsupported color format %i", format->eColorFormat);
@@ -552,11 +628,17 @@
if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
port->format.video.eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
- mColorFormat = port->format.video.eColorFormat;
+ mColorFormat = port->format.video.eColorFormat;
} else {
return OMX_ErrorUnsupportedSetting;
}
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+ def->format.video.nFrameWidth = mWidth;
+ def->format.video.nFrameHeight = mHeight;
+ def->format.video.xFramerate = port->format.video.xFramerate;
+ def->format.video.eColorFormat = mColorFormat;
+
return OMX_ErrorNone;
} else if (port->nPortIndex == kOutputPortIndex) {
mBitrate = port->format.video.nBitrate;
@@ -625,24 +707,56 @@
return;
}
- uint8_t* source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
+ uint8_t *source =
+ inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
- // NOTE: As much as nothing is known about color format
- // when it is denoted as AndroidOpaque, it is at least
- // assumed to be planar.
- if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
- ConvertSemiPlanarToPlanar(source, mConversionBuffer, mWidth, mHeight);
+ if (mInputDataIsMeta) {
+ CHECK_GE(inputBufferHeader->nFilledLen,
+ 4 + sizeof(buffer_handle_t));
+
+ uint32_t bufferType = *(uint32_t *)source;
+ CHECK_EQ(bufferType, kMetadataBufferTypeGrallocSource);
+
+ if (mGrallocModule == NULL) {
+ CHECK_EQ(0, hw_get_module(
+ GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
+ }
+
+ const gralloc_module_t *grmodule =
+ (const gralloc_module_t *)mGrallocModule;
+
+ buffer_handle_t handle = *(buffer_handle_t *)(source + 4);
+
+ void *bits;
+ CHECK_EQ(0,
+ grmodule->lock(
+ grmodule, handle,
+ GRALLOC_USAGE_SW_READ_OFTEN
+ | GRALLOC_USAGE_SW_WRITE_NEVER,
+ 0, 0, mWidth, mHeight, &bits));
+
+ ConvertRGB32ToPlanar(
+ (const uint8_t *)bits, mConversionBuffer, mWidth, mHeight);
+
+ source = mConversionBuffer;
+
+ CHECK_EQ(0, grmodule->unlock(grmodule, handle));
+ } else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ ConvertSemiPlanarToPlanar(
+ source, mConversionBuffer, mWidth, mHeight);
+
source = mConversionBuffer;
}
vpx_image_t raw_frame;
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
kInputBufferAlignment, source);
- codec_return = vpx_codec_encode(mCodecContext,
- &raw_frame,
- inputBufferHeader->nTimeStamp, // in timebase units
- mFrameDurationUs, // frame duration in timebase units
- 0, // frame flags
- VPX_DL_REALTIME); // encoding deadline
+ codec_return = vpx_codec_encode(
+ mCodecContext,
+ &raw_frame,
+ inputBufferHeader->nTimeStamp, // in timebase units
+ mFrameDurationUs, // frame duration in timebase units
+ 0, // frame flags
+ VPX_DL_REALTIME); // encoding deadline
if (codec_return != VPX_CODEC_OK) {
ALOGE("vpx encoder failed to encode frame");
notify(OMX_EventError,
@@ -676,6 +790,17 @@
notifyEmptyBufferDone(inputBufferHeader);
}
}
+
+OMX_ERRORTYPE SoftVPXEncoder::getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index) {
+ if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
+ *index = OMX_IndexVendorStartUnused;
+ return OMX_ErrorNone;
+ }
+
+ return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index a0a8ee6..d570154 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -23,6 +23,8 @@
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
+#include <hardware/gralloc.h>
+
#include "vpx/vpx_encoder.h"
#include "vpx/vpx_codec.h"
#include "vpx/vp8cx.h"
@@ -57,14 +59,13 @@
// - OMX timestamps are in microseconds, therefore
// encoder timebase is fixed to 1/1000000
-class SoftVPXEncoder : public SimpleSoftOMXComponent {
- public:
+struct SoftVPXEncoder : public SimpleSoftOMXComponent {
SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
OMX_COMPONENTTYPE **component);
- protected:
+protected:
virtual ~SoftVPXEncoder();
// Returns current values for requested OMX
@@ -83,7 +84,10 @@
// encoding of the frame
virtual void onQueueFilled(OMX_U32 portIndex);
- private:
+ virtual OMX_ERRORTYPE getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index);
+
+private:
// number of buffers allocated per port
static const uint32_t kNumBuffers = 4;
@@ -156,6 +160,9 @@
// indeed YUV420SemiPlanar.
uint8_t* mConversionBuffer;
+ bool mInputDataIsMeta;
+ const hw_module_t *mGrallocModule;
+
// Initializes input and output OMX ports with sensible
// default values.
void initPorts();
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 24b8d98..7fed7d4 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -109,6 +109,13 @@
const char *parameter_name,
OMX_INDEXTYPE *index);
+ virtual status_t setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size);
+
virtual void binderDied(const wp<IBinder> &the_late_who);
OMX_ERRORTYPE OnEvent(
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 67aba6b..f6ae376 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -96,6 +96,12 @@
status_t getExtensionIndex(
const char *parameterName, OMX_INDEXTYPE *index);
+ status_t setInternalOption(
+ OMX_U32 portIndex,
+ IOMX::InternalOptionType type,
+ const void *data,
+ size_t size);
+
void onMessage(const omx_message &msg);
void onObserverDied(OMXMaster *master);
void onGetHandleFailed();
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index b3a8463..6f3ed0d 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -36,6 +36,7 @@
mInitCheck(UNKNOWN_ERROR),
mNodeInstance(nodeInstance),
mExecuting(false),
+ mSuspended(false),
mNumFramesAvailable(0),
mEndOfStream(false),
mEndOfStreamSent(false) {
@@ -237,9 +238,43 @@
return;
}
+void GraphicBufferSource::suspend(bool suspend) {
+ Mutex::Autolock autoLock(mMutex);
+
+ if (suspend) {
+ mSuspended = true;
+
+ while (mNumFramesAvailable > 0) {
+ BufferQueue::BufferItem item;
+ status_t err = mBufferQueue->acquireBuffer(&item, 0);
+
+ if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+ // shouldn't happen.
+ ALOGW("suspend: frame was not available");
+ break;
+ } else if (err != OK) {
+ ALOGW("suspend: acquireBuffer returned err=%d", err);
+ break;
+ }
+
+ --mNumFramesAvailable;
+
+ mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+ }
+ return;
+ }
+
+ mSuspended = false;
+}
+
bool GraphicBufferSource::fillCodecBuffer_l() {
CHECK(mExecuting && mNumFramesAvailable > 0);
+ if (mSuspended) {
+ return false;
+ }
+
int cbi = findAvailableCodecBuffer_l();
if (cbi < 0) {
// No buffers available, bail.
@@ -251,7 +286,7 @@
ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
mNumFramesAvailable);
BufferQueue::BufferItem item;
- status_t err = mBufferQueue->acquireBuffer(&item);
+ status_t err = mBufferQueue->acquireBuffer(&item, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// shouldn't happen
ALOGW("fillCodecBuffer_l: frame was not available");
@@ -416,13 +451,18 @@
ALOGV("onFrameAvailable exec=%d avail=%d",
mExecuting, mNumFramesAvailable);
- if (mEndOfStream) {
- // This should only be possible if a new buffer was queued after
- // EOS was signaled, i.e. the app is misbehaving.
- ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+ if (mEndOfStream || mSuspended) {
+ if (mEndOfStream) {
+ // This should only be possible if a new buffer was queued after
+ // EOS was signaled, i.e. the app is misbehaving.
+
+ ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+ } else {
+ ALOGV("onFrameAvailable: suspended, ignoring frame");
+ }
BufferQueue::BufferItem item;
- status_t err = mBufferQueue->acquireBuffer(&item);
+ status_t err = mBufferQueue->acquireBuffer(&item, 0);
if (err == OK) {
mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 8c6b470..ac73770 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -85,6 +85,10 @@
// have a codec buffer ready, we just set the mEndOfStream flag.
status_t signalEndOfInputStream();
+ // If suspend is true, all incoming buffers (including those currently
+ // in the BufferQueue) will be discarded until the suspension is lifted.
+ void suspend(bool suspend);
+
protected:
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
@@ -155,6 +159,8 @@
// Set by omxExecuting() / omxIdling().
bool mExecuting;
+ bool mSuspended;
+
// We consume graphic buffers from this.
sp<BufferQueue> mBufferQueue;
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 3987ead..4b1dbe6 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -396,6 +396,15 @@
parameter_name, index);
}
+status_t OMX::setInternalOption(
+ node_id node,
+ OMX_U32 port_index,
+ InternalOptionType type,
+ const void *data,
+ size_t size) {
+ return findInstance(node)->setInternalOption(port_index, type, data, size);
+}
+
OMX_ERRORTYPE OMX::OnEvent(
node_id node,
OMX_IN OMX_EVENTTYPE eEvent,
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index a9eb94f..61a866f 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -238,6 +238,18 @@
status_t OMXNodeInstance::sendCommand(
OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
+ if (bufferSource != NULL
+ && cmd == OMX_CommandStateSet
+ && param == OMX_StateLoaded) {
+ // Initiating transition from Executing -> Loaded
+ // Buffers are about to be freed.
+ bufferSource->omxLoaded();
+ setGraphicBufferSource(NULL);
+
+ // fall through
+ }
+
Mutex::Autolock autoLock(mLock);
OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL);
@@ -769,6 +781,36 @@
return StatusFromOMXError(err);
}
+status_t OMXNodeInstance::setInternalOption(
+ OMX_U32 portIndex,
+ IOMX::InternalOptionType type,
+ const void *data,
+ size_t size) {
+ switch (type) {
+ case IOMX::INTERNAL_OPTION_SUSPEND:
+ {
+ const sp<GraphicBufferSource> &bufferSource =
+ getGraphicBufferSource();
+
+ if (bufferSource == NULL || portIndex != kPortIndexInput) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (size != sizeof(bool)) {
+ return INVALID_OPERATION;
+ }
+
+ bool suspend = *(bool *)data;
+ bufferSource->suspend(suspend);
+
+ return OK;
+ }
+
+ default:
+ return ERROR_UNSUPPORTED;
+ }
+}
+
void OMXNodeInstance::onMessage(const omx_message &msg) {
if (msg.type == omx_message::FILL_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
@@ -818,16 +860,11 @@
OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
- if (bufferSource != NULL && event == OMX_EventCmdComplete &&
- arg1 == OMX_CommandStateSet) {
- if (arg2 == OMX_StateExecuting) {
- bufferSource->omxExecuting();
- } else if (arg2 == OMX_StateLoaded) {
- // Must be shutting down -- won't have a GraphicBufferSource
- // on the way up.
- bufferSource->omxLoaded();
- setGraphicBufferSource(NULL);
- }
+ if (bufferSource != NULL
+ && event == OMX_EventCmdComplete
+ && arg1 == OMX_CommandStateSet
+ && arg2 == OMX_StateExecuting) {
+ bufferSource->omxExecuting();
}
}
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 21df1d7..12e4683 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -498,91 +498,91 @@
}
}
sleepNs = -1;
- if (isWarm) {
- if (sec > 0 || nsec > underrunNs) {
- ATRACE_NAME("underrun");
- // FIXME only log occasionally
- ALOGV("underrun: time since last cycle %d.%03ld sec",
- (int) sec, nsec / 1000000L);
- dumpState->mUnderruns++;
- ignoreNextOverrun = true;
- } else if (nsec < overrunNs) {
- if (ignoreNextOverrun) {
- ignoreNextOverrun = false;
- } else {
+ if (isWarm) {
+ if (sec > 0 || nsec > underrunNs) {
+ ATRACE_NAME("underrun");
// FIXME only log occasionally
- ALOGV("overrun: time since last cycle %d.%03ld sec",
+ ALOGV("underrun: time since last cycle %d.%03ld sec",
(int) sec, nsec / 1000000L);
- dumpState->mOverruns++;
- }
- // This forces a minimum cycle time. It:
- // - compensates for an audio HAL with jitter due to sample rate conversion
- // - works with a variable buffer depth audio HAL that never pulls at a rate
- // < than overrunNs per buffer.
- // - recovers from overrun immediately after underrun
- // It doesn't work with a non-blocking audio HAL.
- sleepNs = forceNs - nsec;
- } else {
- ignoreNextOverrun = false;
- }
- }
-#ifdef FAST_MIXER_STATISTICS
- if (isWarm) {
- // advance the FIFO queue bounds
- size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
- bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
- if (full) {
- bounds += 0x10000;
- } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
- full = true;
- }
- // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
- uint32_t monotonicNs = nsec;
- if (sec > 0 && sec < 4) {
- monotonicNs += sec * 1000000000;
- }
- // compute the raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
- uint32_t loadNs = 0;
- struct timespec newLoad;
- rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
- if (rc == 0) {
- if (oldLoadValid) {
- sec = newLoad.tv_sec - oldLoad.tv_sec;
- nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
- if (nsec < 0) {
- --sec;
- nsec += 1000000000;
+ dumpState->mUnderruns++;
+ ignoreNextOverrun = true;
+ } else if (nsec < overrunNs) {
+ if (ignoreNextOverrun) {
+ ignoreNextOverrun = false;
+ } else {
+ // FIXME only log occasionally
+ ALOGV("overrun: time since last cycle %d.%03ld sec",
+ (int) sec, nsec / 1000000L);
+ dumpState->mOverruns++;
}
- loadNs = nsec;
- if (sec > 0 && sec < 4) {
- loadNs += sec * 1000000000;
- }
+ // This forces a minimum cycle time. It:
+ // - compensates for an audio HAL with jitter due to sample rate conversion
+ // - works with a variable buffer depth audio HAL that never pulls at a
+ // rate less than overrunNs per buffer.
+ // - recovers from overrun immediately after underrun
+ // It doesn't work with a non-blocking audio HAL.
+ sleepNs = forceNs - nsec;
} else {
- // first time through the loop
- oldLoadValid = true;
+ ignoreNextOverrun = false;
}
- oldLoad = newLoad;
}
+#ifdef FAST_MIXER_STATISTICS
+ if (isWarm) {
+ // advance the FIFO queue bounds
+ size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
+ bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
+ if (full) {
+ bounds += 0x10000;
+ } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
+ full = true;
+ }
+ // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
+ uint32_t monotonicNs = nsec;
+ if (sec > 0 && sec < 4) {
+ monotonicNs += sec * 1000000000;
+ }
+ // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+ uint32_t loadNs = 0;
+ struct timespec newLoad;
+ rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
+ if (rc == 0) {
+ if (oldLoadValid) {
+ sec = newLoad.tv_sec - oldLoad.tv_sec;
+ nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+ if (nsec < 0) {
+ --sec;
+ nsec += 1000000000;
+ }
+ loadNs = nsec;
+ if (sec > 0 && sec < 4) {
+ loadNs += sec * 1000000000;
+ }
+ } else {
+ // first time through the loop
+ oldLoadValid = true;
+ }
+ oldLoad = newLoad;
+ }
#ifdef CPU_FREQUENCY_STATISTICS
- // get the absolute value of CPU clock frequency in kHz
- int cpuNum = sched_getcpu();
- uint32_t kHz = tcu.getCpukHz(cpuNum);
- kHz = (kHz << 4) | (cpuNum & 0xF);
+ // get the absolute value of CPU clock frequency in kHz
+ int cpuNum = sched_getcpu();
+ uint32_t kHz = tcu.getCpukHz(cpuNum);
+ kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
- // save values in FIFO queues for dumpsys
- // these stores #1, #2, #3 are not atomic with respect to each other,
- // or with respect to store #4 below
- dumpState->mMonotonicNs[i] = monotonicNs;
- dumpState->mLoadNs[i] = loadNs;
+ // save values in FIFO queues for dumpsys
+ // these stores #1, #2, #3 are not atomic with respect to each other,
+ // or with respect to store #4 below
+ dumpState->mMonotonicNs[i] = monotonicNs;
+ dumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
- dumpState->mCpukHz[i] = kHz;
+ dumpState->mCpukHz[i] = kHz;
#endif
- // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
- // the newest open and oldest closed halves are atomic with respect to each other
- dumpState->mBounds = bounds;
- ATRACE_INT("cycle_ms", monotonicNs / 1000000);
- ATRACE_INT("load_us", loadNs / 1000);
- }
+ // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
+ // the newest open & oldest closed halves are atomic with respect to each other
+ dumpState->mBounds = bounds;
+ ATRACE_INT("cycle_ms", monotonicNs / 1000000);
+ ATRACE_INT("load_us", loadNs / 1000);
+ }
#endif
} else {
// first time through the loop
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index e33b3c6..9cde642 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -31,8 +31,14 @@
// and this may result in an audible artifact
// needs read-only access to a recent stable state,
// but not necessarily the most current one
+// only allocate and free memory when configuration changes
+// avoid conventional logging, as this is a form of I/O and could block
+// defer computation to other threads when feasible; for example,
+// cycle times are collected by the fast mixer thread, but the floating-point
+// statistical calculations on these cycle times are computed by the normal mixer thread
+// these requirements also apply to callouts such as AudioBufferProvider and VolumeProvider
// Normal mixer thread:
-// periodic with typical period ~40 ms
+// periodic with typical period ~20 ms
// SCHED_OTHER scheduling policy and nice priority == urgent audio
// ok to block, but prefer to avoid as much as possible
// needs read/write access to state
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7de6872..e15d98a 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -429,6 +429,8 @@
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+ // called with AudioFlinger lock held
void invalidateTracks(audio_stream_type_t streamType);
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index 96bde90..203d7c0 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -600,6 +600,7 @@
case Parameters::STOPPED:
case Parameters::WAITING_FOR_PREVIEW_WINDOW:
case Parameters::PREVIEW:
+ case Parameters::STILL_CAPTURE:
// OK
break;
default:
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index aae2504..efbbe57 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -110,7 +110,8 @@
if (!mCallbackToApp && mCallbackConsumer == 0) {
// Create CPU buffer queue endpoint, since app hasn't given us one
// Make it async to avoid disconnect deadlocks
- mCallbackConsumer = new CpuConsumer(kCallbackHeapCount,
+ sp<BufferQueue> bq = new BufferQueue();
+ mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount,
/*synchronized*/ false);
mCallbackConsumer->setFrameAvailableListener(this);
mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index f0a13ca..1d739cd 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -82,7 +82,8 @@
if (mCaptureConsumer == 0) {
// Create CPU buffer queue endpoint
- mCaptureConsumer = new CpuConsumer(1);
+ sp<BufferQueue> bq = new BufferQueue();
+ mCaptureConsumer = new CpuConsumer(bq, 1);
mCaptureConsumer->setFrameAvailableListener(this);
mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
mCaptureWindow = new Surface(
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index f7a6be7..76fa46c 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -319,7 +319,8 @@
// Create CPU buffer queue endpoint. We need one more buffer here so that we can
// always acquire and free a buffer when the heap is full; otherwise the consumer
// will have buffers in flight we'll never clear out.
- mRecordingConsumer = new BufferItemConsumer(
+ sp<BufferQueue> bq = new BufferQueue();
+ mRecordingConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_VIDEO_ENCODER,
mRecordingHeapCount + 1,
true);
@@ -617,7 +618,7 @@
if (client == 0) {
// Discard frames during shutdown
BufferItemConsumer::BufferItem imgBuffer;
- res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+ res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
if (res != OK) {
if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
@@ -635,7 +636,7 @@
SharedParameters::Lock l(client->getParameters());
Mutex::Autolock m(mMutex);
BufferItemConsumer::BufferItem imgBuffer;
- res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+ res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
if (res != OK) {
if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
index 94059cd..3c575f6 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -128,7 +128,8 @@
if (mZslConsumer == 0) {
// Create CPU buffer queue endpoint
- mZslConsumer = new BufferItemConsumer(
+ sp<BufferQueue> bq = new BufferQueue();
+ mZslConsumer = new BufferItemConsumer(bq,
GRALLOC_USAGE_HW_CAMERA_ZSL,
kZslBufferDepth,
true);
@@ -426,7 +427,7 @@
}
ALOGVV("Trying to get next buffer");
BufferItemConsumer::BufferItem item;
- res = zslConsumer->acquireBuffer(&item);
+ res = zslConsumer->acquireBuffer(&item, 0);
if (res != OK) {
if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
index 13e9c83..6d9acc3 100644
--- a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
@@ -211,7 +211,8 @@
mFrameCount = 0;
if (mConsumer.get() == 0) {
- mConsumer = new BufferItemConsumer(camera3_stream::usage,
+ sp<BufferQueue> bq = new BufferQueue();
+ mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
mTotalBufferCount,
/*synchronousMode*/true);
mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index dfa1066..7625735 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -284,7 +284,7 @@
/**
* Acquire new frame
*/
- err = acquireBufferLocked(&item);
+ err = acquireBufferLocked(&item, 0);
if (err != OK) {
if (err != NO_BUFFER_AVAILABLE) {
BI_LOGE("Error acquiring buffer: %s (%d)", strerror(err), err);
diff --git a/services/camera/libcameraservice/photography/CameraDeviceClient.cpp b/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
index bd6b60a..a6a2dc1 100644
--- a/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
@@ -391,28 +391,23 @@
return res;
}
-status_t CameraDeviceClient::getCameraInfo(int cameraId,
- /*out*/
- camera_metadata** info)
+status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
{
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
status_t res = OK;
- // TODO: remove cameraId. this should be device-specific info, not static.
- if (cameraId != mCameraId) {
- return INVALID_OPERATION;
- }
-
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
Mutex::Autolock icl(mBinderSerializationLock);
if (!mDevice.get()) return DEAD_OBJECT;
- CameraMetadata deviceInfo = mDevice->info();
- *info = deviceInfo.release();
+ if (info != NULL) {
+ *info = mDevice->info(); // static camera metadata
+ // TODO: merge with device-specific camera metadata
+ }
return res;
}
diff --git a/services/camera/libcameraservice/photography/CameraDeviceClient.h b/services/camera/libcameraservice/photography/CameraDeviceClient.h
index 806aa15..c6c241a 100644
--- a/services/camera/libcameraservice/photography/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/photography/CameraDeviceClient.h
@@ -85,9 +85,7 @@
// Get the static metadata for the camera
// -- Caller owns the newly allocated metadata
- virtual status_t getCameraInfo(int cameraId,
- /*out*/
- camera_metadata** info);
+ virtual status_t getCameraInfo(/*out*/CameraMetadata* info);
/**
* Interface used by CameraService