Merge "Remove integer sanitization from ringbuffer pointer arithmetic" into nyc-dev
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 8d7a107..c52e581 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -98,9 +98,37 @@
c->mStatus = NO_ERROR;
camera = c;
} else {
- ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
- (cs != nullptr) ? "Service not available" : ret.toString8().string());
- status = -EINVAL;
+ switch(ret.serviceSpecificErrorCode()) {
+ case hardware::ICameraService::ERROR_DISCONNECTED:
+ status = -ENODEV;
+ break;
+ case hardware::ICameraService::ERROR_CAMERA_IN_USE:
+ status = -EBUSY;
+ break;
+ case hardware::ICameraService::ERROR_INVALID_OPERATION:
+ status = -EINVAL;
+ break;
+ case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
+ status = -EUSERS;
+ break;
+ case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+ status = BAD_VALUE;
+ break;
+ case hardware::ICameraService::ERROR_DEPRECATED_HAL:
+ status = -EOPNOTSUPP;
+ break;
+ case hardware::ICameraService::ERROR_DISABLED:
+ status = -EACCES;
+ break;
+ case hardware::ICameraService::ERROR_PERMISSION_DENIED:
+ status = PERMISSION_DENIED;
+ break;
+ default:
+ status = -EINVAL;
+ ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
+ (cs != nullptr) ? "Service not available" : ret.toString8().string());
+ break;
+ }
c.clear();
}
return status;
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 587077a..cad8caf 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -53,6 +53,7 @@
meta->setInt32(kKeyChannelCount, mNumChannels);
meta->setInt32(kKeySampleRate, mSampleRate);
meta->setInt32(kKeyMaxInputSize, kBufferSize);
+ meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
return meta;
}
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 9e0e98b..bdd6372 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -830,6 +830,11 @@
bool isDirect_l() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ // pure pcm data is mixable (which excludes HW_AV_SYNC, with embedded timing)
+ bool isPurePcmData_l() const
+ { return audio_is_linear_pcm(mFormat)
+ && (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) == 0; }
+
// increment mPosition by the delta of mServer, and return new value of mPosition
Modulo<uint32_t> updateAndGetPosition_l();
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 2365323..fab92bd 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -25,6 +25,7 @@
#include <media/stagefright/foundation/AHierarchicalStateMachine.h>
#include <media/stagefright/CodecBase.h>
#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/SkipCutBuffer.h>
#include <utils/NativeHandle.h>
#include <OMX_Audio.h>
@@ -36,6 +37,7 @@
struct ABuffer;
struct MemoryDealer;
struct DescribeColorFormat2Params;
+struct DataConverter;
struct ACodec : public AHierarchicalStateMachine, public CodecBase {
ACodec();
@@ -188,8 +190,11 @@
Status mStatus;
unsigned mDequeuedAt;
- sp<ABuffer> mData;
- sp<RefBase> mMemRef;
+ sp<ABuffer> mData; // the client's buffer; if not using data conversion, this is the
+ // codec buffer; otherwise, it is allocated separately
+ sp<RefBase> mMemRef; // and a reference to the IMemory, so it does not go away
+ sp<ABuffer> mCodecData; // the codec's buffer
+ sp<RefBase> mCodecRef; // and a reference to the IMemory
sp<GraphicBuffer> mGraphicBuffer;
sp<NativeHandle> mNativeHandle;
int mFenceFd;
@@ -248,6 +253,9 @@
sp<AMessage> mConfigFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
+
+ // Initial output format + configuration params that is reused as the base for all subsequent
+ // format updates. This will equal to mOutputFormat until the first actual frame is received.
sp<AMessage> mBaseOutputFormat;
FrameRenderTracker mRenderTracker; // render information for buffers rendered by ACodec
@@ -280,6 +288,7 @@
bool mLegacyAdaptiveExperiment;
int32_t mMetadataBuffersToSubmit;
size_t mNumUndequeuedBuffers;
+ sp<DataConverter> mConverter[2];
int64_t mRepeatFrameDelayUs;
int64_t mMaxPtsGapUs;
@@ -441,7 +450,8 @@
bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel);
status_t setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels,
+ AudioEncoding encoding = kAudioEncodingPcm16bit);
status_t setPriority(int32_t priority);
status_t setOperatingRate(float rateFloat, bool isVideo);
@@ -496,7 +506,10 @@
void notifyOfRenderedFrames(
bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
- void onOutputFormatChanged();
+ // Pass |expectedFormat| to print a warning if the format differs from it.
+ // Using sp<> instead of const sp<>& because expectedFormat is likely the current mOutputFormat
+ // which will get updated inside.
+ void onOutputFormatChanged(sp<const AMessage> expectedFormat = NULL);
void addKeyFormatChangesToRenderBufferNotification(sp<AMessage> ¬ify);
void sendFormatChange();
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index 2d28432..be2835d 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -55,6 +55,10 @@
kWhatOutputFramesRendered = 'outR',
};
+ enum {
+ kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
+ };
+
virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 5f10487..035e8ae 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -56,7 +56,7 @@
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
virtual status_t pause();
- virtual sp<MetaData> getFormat() { return mMeta; }
+ virtual sp<MetaData> getFormat();
virtual status_t read(
MediaBuffer **buffer,
const ReadOptions *options = NULL);
@@ -105,7 +105,7 @@
sp<ALooper> mCodecLooper;
sp<AHandlerReflector<MediaCodecSource> > mReflector;
sp<AMessage> mOutputFormat;
- sp<MetaData> mMeta;
+ Mutexed<sp<MetaData>> mMeta;
sp<Puller> mPuller;
sp<MediaCodec> mEncoder;
uint32_t mFlags;
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index e5bcec6..5f2a32d 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -68,6 +68,15 @@
extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+// These are values exported to JAVA API that need to be in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so defining them here.
+enum AudioEncoding {
+ kAudioEncodingPcm16bit = 2,
+ kAudioEncodingPcm8bit = 3,
+ kAudioEncodingPcmFloat = 4,
+};
+
} // namespace android
#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 7d2208c..a9ae49b 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -49,6 +49,7 @@
kKeyChannelCount = '#chn', // int32_t
kKeyChannelMask = 'chnm', // int32_t
kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
+ kKeyPcmEncoding = 'PCMe', // int32_t (audio encoding enum)
kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
kKeyBitRate = 'brte', // int32_t (bps)
kKeyESDS = 'esds', // raw data
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index b8bb824..6606c58 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -84,6 +84,10 @@
kIsVorbis = 1,
};
+ enum {
+ kMaxTrackCount = 16384,
+ };
+
struct TrackInfo {
sp<IMediaSource> mSource;
size_t mTrackIndex;
@@ -113,7 +117,7 @@
void releaseTrackSamples();
bool getTotalBitrate(int64_t *bitRate) const;
- void updateDurationAndBitrate();
+ status_t updateDurationAndBitrate();
status_t appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer);
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 83b9444..09d2ad8 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -127,6 +127,15 @@
// their refcount incremented.
sp<AMessage> dup() const;
+ // Performs a shallow or deep comparison of |this| and |other| and returns
+ // an AMessage with the differences.
+ // Warning: RefBase items, i.e. "objects" are _not_ copied but only have
+ // their refcount incremented.
+ // This is true for AMessages that have no corresponding AMessage equivalent in |other|.
+ // (E.g. there is no such key or the type is different.) On the other hand, changes in
+ // the AMessage (or AMessages if deep is |false|) are returned in new objects.
+ sp<AMessage> changesFrom(const sp<const AMessage> &other, bool deep = false) const;
+
AString debugString(int32_t indent = 0) const;
enum Type {
diff --git a/include/media/stagefright/foundation/Mutexed.h b/include/media/stagefright/foundation/Mutexed.h
index d4fd905..e905d86 100644
--- a/include/media/stagefright/foundation/Mutexed.h
+++ b/include/media/stagefright/foundation/Mutexed.h
@@ -110,6 +110,11 @@
inline T* operator->() const { return mLocked ? &mTreasure : nullptr; }
inline T& operator*() const { return mLocked ? mTreasure : *(T*)nullptr; }
+ // same as *
+ inline T& get() const { return mLocked ? mTreasure : *(T*)nullptr; }
+ // sets structure. this will abort if mLocked is false.
+ inline void set(T& o) const { get() = o; }
+
// Wait on the condition variable using lock. Must be locked.
inline status_t waitForCondition(Condition &cond) { return cond.wait(mLock); }
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index c6035bd..fcb3a99 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -178,7 +178,9 @@
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
- AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1
+ AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
+ AMEDIACODECRYPTOINFO_MODE_AES_WV = 2,
+ AMEDIACODECRYPTOINFO_MODE_AES_CBC = 3
} cryptoinfo_mode_t;
typedef struct {
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index aa79bc1..5ce1798 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -31,9 +31,29 @@
$(call include-path-for, audio-utils) \
external/sonic \
+# If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
+# the LOCAL_MULTILIB for all audioserver exclusive libraries.
+# This is relevant for 64 bit architectures where either or both
+# 32 and 64 bit libraries may be built.
+#
+# AUDIOSERVER_MULTILIB may be set as follows:
+# 32 to build 32 bit audioserver libraries and 32 bit audioserver.
+# 64 to build 64 bit audioserver libraries and 64 bit audioserver.
+# both to build both 32 bit and 64 bit libraries,
+# and use primary target architecture (32 or 64) for audioserver.
+# first to build libraries and audioserver for the primary target architecture only.
+# <empty> to build both 32 and 64 bit libraries and 32 bit audioserver.
+
+ifeq ($(strip $(AUDIOSERVER_MULTILIB)),)
+LOCAL_MULTILIB := 32
+else
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+endif
+
LOCAL_MODULE := audioserver
-LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := audioserver.rc
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 698da1f..4a7a988 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -56,6 +56,7 @@
sp<ProcessState> proc(ProcessState::self());
MediaLogService::instantiate();
ProcessState::self()->startThreadPool();
+ IPCThreadState::self()->joinThreadPool();
for (;;) {
siginfo_t info;
int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
diff --git a/media/libcpustats/Android.mk b/media/libcpustats/Android.mk
index ee283a6..57fe527 100644
--- a/media/libcpustats/Android.mk
+++ b/media/libcpustats/Android.mk
@@ -8,6 +8,6 @@
LOCAL_MODULE := libcpustats
-LOCAL_CFLAGS := -std=gnu++11 -Werror
+LOCAL_CFLAGS := -std=gnu++11 -Werror -Wall
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 4a41037..9823c55 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -141,6 +141,37 @@
}
#endif
+static bool Downmix_validChannelMask(uint32_t mask)
+{
+ if (!mask) {
+ return false;
+ }
+ // check against unsupported channels
+ if (mask & kUnsupported) {
+ ALOGE("Unsupported channels (top or front left/right of center)");
+ return false;
+ }
+ // verify has FL/FR
+ if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
+ ALOGE("Front channels must be present");
+ return false;
+ }
+ // verify uses SIDE as a pair (ok if not using SIDE at all)
+ if ((mask & kSides) != 0) {
+ if ((mask & kSides) != kSides) {
+ ALOGE("Side channels must be used as a pair");
+ return false;
+ }
+ }
+ // verify uses BACK as a pair (ok if not using BACK at all)
+ if ((mask & kBacks) != 0) {
+ if ((mask & kBacks) != kBacks) {
+ ALOGE("Back channels must be used as a pair");
+ return false;
+ }
+ }
+ return true;
+}
/*----------------------------------------------------------------------------
* Effect API implementation
@@ -624,9 +655,10 @@
pDownmixer->apply_volume_correction = false;
pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
} else {
- // when configuring the effect, do not allow a blank channel mask
- if (pConfig->inputCfg.channels == 0) {
- ALOGE("Downmix_Configure error: input channel mask can't be 0");
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+ ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+ pConfig->inputCfg.channels);
return -EINVAL;
}
pDownmixer->input_channel_count =
@@ -969,34 +1001,13 @@
*/
bool Downmix_foldGeneric(
uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- // check against unsupported channels
- if (mask & kUnsupported) {
- ALOGE("Unsupported channels (top or front left/right of center)");
+
+ if (!Downmix_validChannelMask(mask)) {
return false;
}
- // verify has FL/FR
- if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
- ALOGE("Front channels must be present");
- return false;
- }
- // verify uses SIDE as a pair (ok if not using SIDE at all)
- bool hasSides = false;
- if ((mask & kSides) != 0) {
- if ((mask & kSides) != kSides) {
- ALOGE("Side channels must be used as a pair");
- return false;
- }
- hasSides = true;
- }
- // verify uses BACK as a pair (ok if not using BACK at all)
- bool hasBacks = false;
- if ((mask & kBacks) != 0) {
- if ((mask & kBacks) != kBacks) {
- ALOGE("Back channels must be used as a pair");
- return false;
- }
- hasBacks = true;
- }
+
+ const bool hasSides = (mask & kSides) != 0;
+ const bool hasBacks = (mask & kBacks) != 0;
const int numChan = audio_channel_count_from_out_mask(mask);
const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 479ccbb..63f9ed7 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -7,7 +7,7 @@
LOCAL_MODULE:= libmedia_helper
LOCAL_MODULE_TAGS := optional
-LOCAL_C_FLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_CLANG := true
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 79ce75e..2976a5c 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -66,7 +66,7 @@
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord(const String16 &opPackageName)
- : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+ : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
@@ -88,7 +88,8 @@
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
- : mStatus(NO_INIT),
+ : mActive(false),
+ mStatus(NO_INIT),
mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -268,10 +269,9 @@
}
mStatus = NO_ERROR;
- mActive = false;
mUserData = user;
// TODO: add audio hardware input latency here
- mLatency = (1000*mFrameCount) / sampleRate;
+ mLatency = (1000 * mFrameCount) / mSampleRate;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index ef0ccc2..e70c611 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -163,6 +163,7 @@
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -192,6 +193,7 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -221,6 +223,7 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -477,7 +480,6 @@
}
mStatus = NO_ERROR;
- mState = STATE_STOPPED;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -552,19 +554,6 @@
mNewPosition = mPosition + mUpdatePeriod;
int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
- sp<AudioTrackThread> t = mAudioTrackThread;
- if (t != 0) {
- if (previousState == STATE_STOPPING) {
- mProxy->interrupt();
- } else {
- t->resume();
- }
- } else {
- mPreviousPriority = getpriority(PRIO_PROCESS, 0);
- get_sched_policy(0, &mPreviousSchedulingGroup);
- androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
- }
-
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
status = mAudioTrack->start();
@@ -576,7 +565,21 @@
status = restoreTrack_l("start");
}
- if (status != NO_ERROR) {
+ // resume or pause the callback thread as needed.
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (status == NO_ERROR) {
+ if (t != 0) {
+ if (previousState == STATE_STOPPING) {
+ mProxy->interrupt();
+ } else {
+ t->resume();
+ }
+ } else {
+ mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+ get_sched_policy(0, &mPreviousSchedulingGroup);
+ androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+ }
+ } else {
ALOGE("start() status %d", status);
mState = previousState;
if (t != 0) {
@@ -1013,7 +1016,11 @@
}
AutoMutex lock(mLock);
- if (isOffloadedOrDirect_l()) {
+ // FIXME: offloaded and direct tracks call into the HAL for render positions
+ // for compressed/synced data; however, we use proxy position for pure linear pcm data
+ // as we do not know the capability of the HAL for pcm position support and standby.
+ // There may be some latency differences between the HAL position and the proxy position.
+ if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
uint32_t dspFrames = 0;
if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 285b3f6..2396d87 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -88,13 +88,14 @@
uint32_t ClientProxy::setBufferSizeInFrames(uint32_t size)
{
- // TODO set minimum to 2X the fast mixer buffer size.
// The minimum should be greater than zero and less than the size
// at which underruns will occur.
- const uint32_t minimum = 128 * 2; // arbitrary
+ const uint32_t minimum = 16; // based on AudioMixer::BLOCKSIZE
const uint32_t maximum = frameCount();
uint32_t clippedSize = size;
- if (clippedSize < minimum) {
+ if (maximum < minimum) {
+ clippedSize = maximum;
+ } else if (clippedSize < minimum) {
clippedSize = minimum;
} else if (clippedSize > maximum) {
clippedSize = maximum;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 042eac5..7543b60 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -1355,7 +1355,7 @@
CHECK_INTERFACE(IAudioFlinger, data, reply);
struct audio_patch patch;
data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle = {};
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
ALOGE("b/23905951");
}
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 16e8f11..4ea67da 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -965,7 +965,7 @@
audio_channel_mask_t channelMask = data.readInt32();
audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
- audio_io_handle_t input = {};
+ audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
status_t status = getInputForAttr(&attr, &input, session, uid,
samplingRate, format, channelMask,
flags, selectedDeviceId);
@@ -1197,7 +1197,7 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_patch patch;
data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle = {};
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
ALOGE("b/23912202");
}
@@ -1275,9 +1275,9 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
data.readStrongBinder());
- audio_session_t session = {};
- audio_io_handle_t ioHandle = {};
- audio_devices_t device = {};
+ audio_session_t session = AUDIO_SESSION_NONE;
+ audio_io_handle_t ioHandle = AUDIO_IO_HANDLE_NONE;
+ audio_devices_t device = AUDIO_DEVICE_NONE;
status_t status = acquireSoundTriggerSession(&session, &ioHandle, &device);
reply->writeInt32(status);
if (status == NO_ERROR) {
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index ea4a966..61fba35 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -718,11 +718,12 @@
void *params = NULL;
size_t pageSize = 0;
size_t allocSize = 0;
- if ((index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits && size < 4) ||
- (code != SET_INTERNAL_OPTION && size < 8)) {
+ bool isUsageBits = (index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits);
+ if ((isUsageBits && size < 4) ||
+ (!isUsageBits && code != SET_INTERNAL_OPTION && size < 8)) {
// we expect the structure to contain at least the size and
// version, 8 bytes total
- ALOGE("b/27207275 (%zu)", size);
+ ALOGE("b/27207275 (%zu) (%d/%d)", size, int(index), int(code));
android_errorWriteLog(0x534e4554, "27207275");
} else {
err = NO_MEMORY;
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 53b6df6..2795101 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -207,7 +207,7 @@
ALOGV("invoke %zu", request.dataSize());
return mPlayer->invoke(request, reply);
}
- ALOGE("invoke failed: wrong state %X", mCurrentState);
+ ALOGE("invoke failed: wrong state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -252,7 +252,7 @@
mCurrentState = MEDIA_PLAYER_PREPARING;
return mPlayer->prepareAsync();
}
- ALOGE("prepareAsync called in state %d", mCurrentState);
+ ALOGE("prepareAsync called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -318,7 +318,7 @@
}
}
} else {
- ALOGE("start called in state %d", mCurrentState);
+ ALOGE("start called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
ret = INVALID_OPERATION;
}
@@ -342,7 +342,7 @@
}
return ret;
}
- ALOGE("stop called in state %d", mCurrentState);
+ ALOGE("stop called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -361,7 +361,7 @@
}
return ret;
}
- ALOGE("pause called in state %d", mCurrentState);
+ ALOGE("pause called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -484,7 +484,8 @@
}
return ret;
}
- ALOGE("Attempt to call getDuration without a valid mediaplayer");
+ ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
+ mPlayer.get(), mCurrentState);
return INVALID_OPERATION;
}
@@ -691,7 +692,7 @@
if (mPlayer == 0 ||
(mCurrentState & MEDIA_PLAYER_IDLE) ||
(mCurrentState == MEDIA_PLAYER_STATE_ERROR )) {
- ALOGE("attachAuxEffect called in state %d", mCurrentState);
+ ALOGE("attachAuxEffect called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -906,6 +907,7 @@
}
status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
+ Mutex::Autolock _l(mLock);
if (mPlayer == NULL) {
return NO_INIT;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 6c54e3f..42a82ac 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1150,8 +1150,8 @@
}
restartAudio(
- positionUs, false /* forceNonOffload */,
- reason == Renderer::kDueToError /* needsToCreateAudioDecoder */);
+ positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
+ reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
}
break;
}
@@ -1490,9 +1490,11 @@
void NuPlayer::restartAudio(
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
- mAudioDecoder->pause();
- mAudioDecoder.clear();
- ++mAudioDecoderGeneration;
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->pause();
+ mAudioDecoder.clear();
+ ++mAudioDecoderGeneration;
+ }
if (mFlushingAudio == FLUSHING_DECODER) {
mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
mFlushingAudio = FLUSHED;
@@ -1520,7 +1522,7 @@
mOffloadAudio = false;
}
if (needsToCreateAudioDecoder) {
- instantiateDecoder(true /* audio */, &mAudioDecoder);
+ instantiateDecoder(true /* audio */, &mAudioDecoder, !forceNonOffload);
}
}
@@ -1557,7 +1559,8 @@
}
}
-status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
+status_t NuPlayer::instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
// The audio decoder could be cleared by tear down. If still in shut down
// process, no need to create a new audio decoder.
if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
@@ -1605,7 +1608,9 @@
++mAudioDecoderGeneration;
notify->setInt32("generation", mAudioDecoderGeneration);
- determineAudioModeChange();
+ if (checkAudioModeChange) {
+ determineAudioModeChange();
+ }
if (mOffloadAudio) {
mSource->setOffloadAudio(true /* offload */);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index a55aa5f..369590b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -234,10 +234,11 @@
void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
void closeAudioSink();
void restartAudio(
- int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
+ int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
void determineAudioModeChange();
- status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
+ status_t instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange = true);
status_t onInstantiateSecureDecoders();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 332fef6..06bb53d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -45,8 +45,7 @@
mPlayerFlags(0),
mAtEOS(false),
mLooping(false),
- mAutoLoop(false),
- mStartupSeekTimeUs(-1) {
+ mAutoLoop(false) {
ALOGV("NuPlayerDriver(%p)", this);
mLooper->setName("NuPlayerDriver Looper");
@@ -261,25 +260,11 @@
case STATE_PAUSED:
case STATE_STOPPED_AND_PREPARED:
- {
- if (mAtEOS && mStartupSeekTimeUs < 0) {
- mStartupSeekTimeUs = 0;
- mPositionUs = -1;
- }
-
- // fall through
- }
-
case STATE_PREPARED:
{
- mAtEOS = false;
mPlayer->start();
- if (mStartupSeekTimeUs >= 0) {
- mPlayer->seekToAsync(mStartupSeekTimeUs);
- mStartupSeekTimeUs = -1;
- }
- break;
+ // fall through
}
case STATE_RUNNING:
@@ -330,6 +315,7 @@
}
status_t NuPlayerDriver::pause() {
+ ALOGD("pause(%p)", this);
// The NuPlayerRenderer may get flushed if pause for long enough, e.g. the pause timeout tear
// down for audio offload mode. If that happens, the NuPlayerRenderer will no longer know the
// current position. So similar to seekTo, update |mPositionUs| to the pause position by calling
@@ -400,8 +386,6 @@
case STATE_PREPARED:
case STATE_STOPPED_AND_PREPARED:
case STATE_PAUSED:
- mStartupSeekTimeUs = seekTimeUs;
- // fall through.
case STATE_RUNNING:
{
mAtEOS = false;
@@ -502,7 +486,6 @@
mDurationUs = -1;
mPositionUs = -1;
- mStartupSeekTimeUs = -1;
mLooping = false;
return OK;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index d009fd7..d5b4ba1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -122,8 +122,6 @@
bool mLooping;
bool mAutoLoop;
- int64_t mStartupSeekTimeUs;
-
status_t prepare_l();
void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 0e6a6e6..cbb9d95 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -647,7 +647,10 @@
case kWhatAudioTearDown:
{
- onAudioTearDown(kDueToError);
+ int32_t reason;
+ CHECK(msg->findInt32("reason", &reason));
+
+ onAudioTearDown((AudioTearDownReason)reason);
break;
}
@@ -741,7 +744,7 @@
case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
{
ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
- me->notifyAudioTearDown();
+ me->notifyAudioTearDown(kDueToError);
break;
}
}
@@ -946,7 +949,7 @@
ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
// This can only happen when AudioSink was opened with doNotReconnect flag set to
// true, in which case the NuPlayer will handle the reconnect.
- notifyAudioTearDown();
+ notifyAudioTearDown(kDueToError);
}
break;
}
@@ -1299,8 +1302,10 @@
notify->post(delayUs);
}
-void NuPlayer::Renderer::notifyAudioTearDown() {
- (new AMessage(kWhatAudioTearDown, this))->post();
+void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
+ sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
+ msg->setInt32("reason", reason);
+ msg->post();
}
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
@@ -1630,7 +1635,7 @@
status_t err = mAudioSink->start();
if (err != OK) {
ALOGE("cannot start AudioSink err %d", err);
- notifyAudioTearDown();
+ notifyAudioTearDown(kDueToError);
}
}
@@ -1823,6 +1828,9 @@
onDisableOffloadAudio();
mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
ALOGV("openAudioSink: offload failed");
+ if (offloadOnly) {
+ notifyAudioTearDown(kForceNonOffload);
+ }
} else {
mUseAudioCallback = true; // offload mode transfers data through callback
++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index c3ce511..004e21c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -92,8 +92,9 @@
};
enum AudioTearDownReason {
- kDueToError = 0,
+ kDueToError = 0, // Could restart with either offload or non-offload.
kDueToTimeout,
+ kForceNonOffload, // Restart only with non-offload.
};
protected:
@@ -262,7 +263,7 @@
void notifyPosition();
void notifyVideoLateBy(int64_t lateByUs);
void notifyVideoRenderingStart();
- void notifyAudioTearDown();
+ void notifyAudioTearDown(AudioTearDownReason reason);
void flushQueue(List<QueueEntry> *queue);
bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
index f53afbd..ee70306 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
@@ -144,8 +144,17 @@
copy = size;
}
+ if (entry->mIndex >= mBuffers.size()) {
+ return ERROR_MALFORMED;
+ }
+
+ sp<IMemory> mem = mBuffers.editItemAt(entry->mIndex);
+ if (mem == NULL || mem->size() < copy || mem->size() - copy < entry->mOffset) {
+ return ERROR_MALFORMED;
+ }
+
memcpy(data,
- (const uint8_t *)mBuffers.editItemAt(entry->mIndex)->pointer()
+ (const uint8_t *)mem->pointer()
+ entry->mOffset,
copy);
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 16c5040..e2f416b 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -20,9 +20,6 @@
#LOCAL_C_INCLUDES += path/to/libsndfile/src
#LOCAL_STATIC_LIBRARIES += libsndfile
-# uncomment for systrace
-# LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_AUDIO
-
LOCAL_MODULE := libnbaio
LOCAL_SHARED_LIBRARIES := \
@@ -34,4 +31,6 @@
LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index b096903..a879647 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -36,7 +36,12 @@
PipeReader::~PipeReader()
{
- int32_t readers = android_atomic_dec(&mPipe.mReaders);
+#if !LOG_NDEBUG
+ int32_t readers =
+#else
+ (void)
+#endif
+ android_atomic_dec(&mPipe.mReaders);
ALOG_ASSERT(readers > 0);
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 520f599..d6a9f53 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -52,6 +52,7 @@
#include <OMX_AsString.h>
#include "include/avc_utils.h"
+#include "include/DataConverter.h"
#include "omx/OMXUtils.h"
namespace android {
@@ -114,6 +115,13 @@
DISALLOW_EVIL_CONSTRUCTORS(MessageList);
};
+static sp<DataConverter> getCopyConverter() {
+ static pthread_once_t once = PTHREAD_ONCE_INIT; // const-inited
+ static sp<DataConverter> sCopyConverter; // zero-inited
+ pthread_once(&once, [](){ sCopyConverter = new DataConverter(); });
+ return sCopyConverter;
+}
+
struct CodecObserver : public BnOMXObserver {
CodecObserver() {}
@@ -505,6 +513,7 @@
mOutputMetadataType(kMetadataBufferTypeInvalid),
mLegacyAdaptiveExperiment(false),
mMetadataBuffersToSubmit(0),
+ mNumUndequeuedBuffers(0),
mRepeatFrameDelayUs(-1ll),
mMaxPtsGapUs(-1ll),
mMaxFps(-1),
@@ -781,7 +790,7 @@
if (err == OK) {
MetadataBufferType type =
portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
- int32_t bufSize = def.nBufferSize;
+ size_t bufSize = def.nBufferSize;
if (type == kMetadataBufferTypeGrallocSource) {
bufSize = sizeof(VideoGrallocMetadata);
} else if (type == kMetadataBufferTypeANWBuffer) {
@@ -792,23 +801,47 @@
// metadata size as we prefer to generate native source metadata, but component
// may require gralloc source. For camera source, allocate at least enough
// size for native metadata buffers.
- int32_t allottedSize = bufSize;
+ size_t allottedSize = bufSize;
if (portIndex == kPortIndexInput && type >= kMetadataBufferTypeGrallocSource) {
bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
} else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
- bufSize = max(bufSize, (int32_t)sizeof(VideoNativeMetadata));
+ bufSize = max(bufSize, sizeof(VideoNativeMetadata));
}
- ALOGV("[%s] Allocating %u buffers of size %d/%d (from %u using %s) on %s port",
+ size_t conversionBufferSize = 0;
+
+ sp<DataConverter> converter = mConverter[portIndex];
+ if (converter != NULL) {
+ // here we assume sane conversions of max 4:1, so result fits in int32
+ if (portIndex == kPortIndexInput) {
+ conversionBufferSize = converter->sourceSize(bufSize);
+ } else {
+ conversionBufferSize = converter->targetSize(bufSize);
+ }
+ }
+
+ size_t alignment = MemoryDealer::getAllocationAlignment();
+
+ ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
mComponentName.c_str(),
def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
portIndex == kPortIndexInput ? "input" : "output");
- if (bufSize == 0 || def.nBufferCountActual > SIZE_MAX / bufSize) {
+ // verify buffer sizes to avoid overflow in align()
+ if (bufSize == 0 || max(bufSize, conversionBufferSize) > kMaxCodecBufferSize) {
ALOGE("b/22885421");
return NO_MEMORY;
}
- size_t totalSize = def.nBufferCountActual * bufSize;
+
+ // don't modify bufSize as OMX may not expect it to increase after negotiation
+ size_t alignedSize = align(bufSize, alignment);
+ size_t alignedConvSize = align(conversionBufferSize, alignment);
+ if (def.nBufferCountActual > SIZE_MAX / (alignedSize + alignedConvSize)) {
+ ALOGE("b/22885421");
+ return NO_MEMORY;
+ }
+
+ size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
@@ -847,6 +880,7 @@
// because Widevine source only receives these base addresses.
info.mData = new ABuffer(ptr != NULL ? ptr : (void *)native_handle, bufSize);
info.mNativeHandle = NativeHandle::create(native_handle, true /* ownsHandle */);
+ info.mCodecData = info.mData;
} else if (mQuirks & requiresAllocateBufferBit) {
err = mOMX->allocateBufferWithBackup(
mNode, portIndex, mem, &info.mBufferID, allottedSize);
@@ -855,11 +889,27 @@
}
if (mem != NULL) {
- info.mData = new ABuffer(mem->pointer(), bufSize);
+ info.mCodecData = new ABuffer(mem->pointer(), bufSize);
+ info.mCodecRef = mem;
+
if (type == kMetadataBufferTypeANWBuffer) {
((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
}
- info.mMemRef = mem;
+
+ // if we require conversion, allocate conversion buffer for client use;
+ // otherwise, reuse codec buffer
+ if (mConverter[portIndex] != NULL) {
+ CHECK_GT(conversionBufferSize, (size_t)0);
+ mem = mDealer[portIndex]->allocate(conversionBufferSize);
+ if (mem == NULL || mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+ info.mData = new ABuffer(mem->pointer(), conversionBufferSize);
+ info.mMemRef = mem;
+ } else {
+ info.mData = info.mCodecData;
+ info.mMemRef = info.mCodecRef;
+ }
}
mBuffers[portIndex].push(info);
@@ -1052,6 +1102,7 @@
info.mIsReadFence = false;
info.mRenderInfo = NULL;
info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
+ info.mCodecData = info.mData;
info.mGraphicBuffer = graphicBuffer;
mBuffers[kPortIndexOutput].push(info);
@@ -1116,7 +1167,7 @@
size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
- size_t totalSize = bufferCount * bufSize;
+ size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
// Dequeue buffers and send them to OMX
@@ -1136,11 +1187,13 @@
((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
}
info.mData = new ABuffer(mem->pointer(), mem->size());
+ info.mMemRef = mem;
+ info.mCodecData = info.mData;
+ info.mCodecRef = mem;
// we use useBuffer for metadata regardless of quirks
err = mOMX->useBuffer(
mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
- info.mMemRef = mem;
mBuffers[kPortIndexOutput].push(info);
ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
@@ -1934,6 +1987,10 @@
}
}
+ AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+ (void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+ // invalid encodings will default to PCM-16bit in setupRawAudioFormat.
+
if (video) {
// determine need for software renderer
bool usingSwRenderer = false;
@@ -2138,7 +2195,7 @@
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, pcmEncoding);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
int32_t numChannels;
@@ -2200,6 +2257,7 @@
err = setOperatingRate(rateFloat, video);
}
+ // NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
mBaseOutputFormat = outputFormat;
// trigger a kWhatOutputFormatChanged msg on first buffer
mLastOutputFormat.clear();
@@ -2212,6 +2270,25 @@
mOutputFormat = outputFormat;
}
}
+
+ // create data converters if needed
+ if (!video && err == OK) {
+ AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+ if (encoder) {
+ (void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+ mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
+ if (mConverter[kPortIndexInput] != NULL) {
+ mInputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ } else {
+ (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+ mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+ if (mConverter[kPortIndexOutput] != NULL) {
+ mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ }
+ }
+
return err;
}
@@ -2762,7 +2839,7 @@
}
status_t ACodec::setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels, AudioEncoding encoding) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
@@ -2795,9 +2872,23 @@
}
pcmParams.nChannels = numChannels;
- pcmParams.eNumData = OMX_NumericalDataSigned;
+ switch (encoding) {
+ case kAudioEncodingPcm8bit:
+ pcmParams.eNumData = OMX_NumericalDataUnsigned;
+ pcmParams.nBitPerSample = 8;
+ break;
+ case kAudioEncodingPcmFloat:
+ pcmParams.eNumData = OMX_NumericalDataFloat;
+ pcmParams.nBitPerSample = 32;
+ break;
+ case kAudioEncodingPcm16bit:
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.nBitPerSample = 16;
+ break;
+ default:
+ return BAD_VALUE;
+ }
pcmParams.bInterleaved = OMX_TRUE;
- pcmParams.nBitPerSample = 16;
pcmParams.nSamplingRate = sampleRate;
pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
@@ -2805,8 +2896,17 @@
return OMX_ErrorNone;
}
- return mOMX->setParameter(
+ err = mOMX->setParameter(
mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ // if we could not set up raw format to non-16-bit, try with 16-bit
+ // NOTE: we will also verify this via readback, in case codec ignores these fields
+ if (err != OK && encoding != kAudioEncodingPcm16bit) {
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.nBitPerSample = 16;
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ }
+ return err;
}
status_t ACodec::configureTunneledVideoPlayback(
@@ -3847,6 +3947,13 @@
h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level);
}
+ // XXX
+ if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
+ ALOGW("Use baseline profile instead of %d for AVC recording",
+ h264type.eProfile);
+ h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
+ }
+
if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
@@ -3864,23 +3971,6 @@
h264type.bDirect8x8Inference = OMX_FALSE;
h264type.bDirectSpatialTemporal = OMX_FALSE;
h264type.nCabacInitIdc = 0;
- } else if (h264type.eProfile == OMX_VIDEO_AVCProfileMain ||
- h264type.eProfile == OMX_VIDEO_AVCProfileHigh) {
- h264type.nSliceHeaderSpacing = 0;
- h264type.bUseHadamard = OMX_TRUE;
- h264type.nRefFrames = 2;
- h264type.nBFrames = 1;
- h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
- h264type.nAllowedPictureTypes =
- OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
- h264type.nRefIdx10ActiveMinus1 = 0;
- h264type.nRefIdx11ActiveMinus1 = 0;
- h264type.bEntropyCodingCABAC = OMX_TRUE;
- h264type.bWeightedPPrediction = OMX_TRUE;
- h264type.bconstIpred = OMX_TRUE;
- h264type.bDirect8x8Inference = OMX_TRUE;
- h264type.bDirectSpatialTemporal = OMX_TRUE;
- h264type.nCabacInitIdc = 1;
}
if (h264type.nBFrames != 0) {
@@ -4643,15 +4733,11 @@
if (params.nChannels <= 0
|| (params.nChannels != 1 && !params.bInterleaved)
- || params.nBitPerSample != 16u
- || params.eNumData != OMX_NumericalDataSigned
|| params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
- ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
+ ALOGE("unsupported PCM port: %u channels%s, %u-bit",
params.nChannels,
params.bInterleaved ? " interleaved" : "",
- params.nBitPerSample,
- asString(params.eNumData), params.eNumData,
- asString(params.ePCMMode), params.ePCMMode);
+ params.nBitPerSample);
return FAILED_TRANSACTION;
}
@@ -4659,6 +4745,22 @@
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ AudioEncoding encoding = kAudioEncodingPcm16bit;
+ if (params.eNumData == OMX_NumericalDataUnsigned
+ && params.nBitPerSample == 8u) {
+ encoding = kAudioEncodingPcm8bit;
+ } else if (params.eNumData == OMX_NumericalDataFloat
+ && params.nBitPerSample == 32u) {
+ encoding = kAudioEncodingPcmFloat;
+ } else if (params.nBitPerSample != 16u
+ || params.eNumData != OMX_NumericalDataSigned) {
+ ALOGE("unsupported PCM port: %s(%d), %s(%d) mode ",
+ asString(params.eNumData), params.eNumData,
+ asString(params.ePCMMode), params.ePCMMode);
+ return FAILED_TRANSACTION;
+ }
+ notify->setInt32("pcm-encoding", encoding);
+
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
}
@@ -4840,6 +4942,7 @@
notify->setString("mime", mime);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ notify->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
break;
}
@@ -4916,8 +5019,8 @@
transfer, asString((ColorTransfer)transfer));
}
-void ACodec::onOutputFormatChanged() {
- // store new output format
+void ACodec::onOutputFormatChanged(sp<const AMessage> expectedFormat) {
+ // store new output format, at the same time mark that this is no longer the first frame
mOutputFormat = mBaseOutputFormat->dup();
if (getPortFormat(kPortIndexOutput, mOutputFormat) != OK) {
@@ -4925,6 +5028,28 @@
return;
}
+ if (expectedFormat != NULL) {
+ sp<const AMessage> changes = expectedFormat->changesFrom(mOutputFormat);
+ sp<const AMessage> to = mOutputFormat->changesFrom(expectedFormat);
+ if (changes->countEntries() != 0 || to->countEntries() != 0) {
+ ALOGW("[%s] BAD CODEC: Output format changed unexpectedly from (diff) %s to (diff) %s",
+ mComponentName.c_str(),
+ changes->debugString(4).c_str(), to->debugString(4).c_str());
+ }
+ }
+
+ if (!mIsVideo && !mIsEncoder) {
+ AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+ (void)mConfigFormat->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+ AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+ (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+
+ mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+ if (mConverter[kPortIndexOutput] != NULL) {
+ mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ }
+
if (mTunneled) {
sendFormatChange();
}
@@ -5452,20 +5577,21 @@
flags |= OMX_BUFFERFLAG_EOS;
}
- if (buffer != info->mData) {
+ if (buffer != info->mCodecData) {
ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
mCodec->mComponentName.c_str(),
bufferID,
- buffer.get(), info->mData.get());
+ buffer.get(), info->mCodecData.get());
- if (buffer->size() > info->mData->capacity()) {
- ALOGE("data size (%zu) is greated than buffer capacity (%zu)",
- buffer->size(), // this is the data received
- info->mData->capacity()); // this is out buffer size
- mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+ sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
+ if (converter == NULL) {
+ converter = getCopyConverter();
+ }
+ status_t err = converter->convert(buffer, info->mCodecData);
+ if (err != OK) {
+ mCodec->signalError(OMX_ErrorUndefined, err);
return;
}
- memcpy(info->mData->data(), buffer->data(), buffer->size());
}
if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -5508,7 +5634,7 @@
mCodec->mNode,
bufferID,
0,
- buffer->size(),
+ info->mCodecData->size(),
flags,
timeUs,
info->mFenceFd);
@@ -5687,6 +5813,10 @@
new AMessage(kWhatOutputBufferDrained, mCodec);
if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
+ // pretend that output format has changed on the first frame (we used to do this)
+ if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
+ mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
+ }
mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
mCodec->sendFormatChange();
}
@@ -5710,8 +5840,17 @@
info->mData->meta()->setPointer("handle", handle);
info->mData->meta()->setInt32("rangeOffset", rangeOffset);
info->mData->meta()->setInt32("rangeLength", rangeLength);
- } else {
+ } else if (info->mData == info->mCodecData) {
info->mData->setRange(rangeOffset, rangeLength);
+ } else {
+ info->mCodecData->setRange(rangeOffset, rangeLength);
+ // in this case we know that mConverter is not null
+ status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
+ info->mCodecData, info->mData);
+ if (err != OK) {
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+ return true;
+ }
}
#if 0
if (mCodec->mNativeWindow == NULL) {
@@ -5927,6 +6066,8 @@
mCodec->mFlags = 0;
mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mCodec->mConverter[0].clear();
+ mCodec->mConverter[1].clear();
mCodec->mComponentName.clear();
}
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 557971d..2445842 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -14,6 +14,7 @@
CameraSource.cpp \
CameraSourceTimeLapse.cpp \
CodecBase.cpp \
+ DataConverter.cpp \
DataSource.cpp \
DataURISource.cpp \
DRMExtractor.cpp \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index cb42847..b3fb8d4 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -25,6 +25,7 @@
#include <media/AudioTrack.h>
#include <media/openmax/OMX_Audio.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/MediaDefs.h>
@@ -71,6 +72,14 @@
mSource = source;
}
+ALookup<audio_format_t, int32_t> sAudioFormatToPcmEncoding {
+ {
+ { AUDIO_FORMAT_PCM_16_BIT, kAudioEncodingPcm16bit },
+ { AUDIO_FORMAT_PCM_8_BIT, kAudioEncodingPcm8bit },
+ { AUDIO_FORMAT_PCM_FLOAT, kAudioEncodingPcmFloat },
+ }
+};
+
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(!mStarted);
CHECK(mSource != NULL);
@@ -129,6 +138,10 @@
}
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+ int32_t pcmEncoding;
+ if (format->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+ sAudioFormatToPcmEncoding.map(pcmEncoding, &audioFormat);
+ }
if (useOffload()) {
if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 6e4a1dd..f28ac58 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -184,6 +184,7 @@
meta->setInt32(kKeySampleRate, mSampleRate);
meta->setInt32(kKeyChannelCount, mRecord->channelCount());
meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);
+ meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
return meta;
}
diff --git a/media/libstagefright/DataConverter.cpp b/media/libstagefright/DataConverter.cpp
new file mode 100644
index 0000000..aea47f3
--- /dev/null
+++ b/media/libstagefright/DataConverter.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataConverter"
+
+#include "include/DataConverter.h"
+
+#include <audio_utils/primitives.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+namespace android {
+
+status_t DataConverter::convert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+ CHECK(source->base() != target->base());
+ size_t size = targetSize(source->size());
+ status_t err = OK;
+ if (size > target->capacity()) {
+ ALOGE("data size (%zu) is greater than buffer capacity (%zu)",
+ size, // this is the data received/to be converted
+ target->capacity()); // this is out buffer size
+ err = FAILED_TRANSACTION;
+ } else {
+ err = safeConvert(source, target);
+ }
+ target->setRange(0, err == OK ? size : 0);
+ return err;
+}
+
+status_t DataConverter::safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+ memcpy(target->base(), source->data(), source->size());
+ return OK;
+}
+
+size_t DataConverter::sourceSize(size_t targetSize) {
+ return targetSize;
+}
+
+size_t DataConverter::targetSize(size_t sourceSize) {
+ return sourceSize;
+}
+
+DataConverter::~DataConverter() { }
+
+
+size_t SampleConverterBase::sourceSize(size_t targetSize) {
+ size_t numSamples = targetSize / mTargetSampleSize;
+ if (numSamples > SIZE_MAX / mSourceSampleSize) {
+ ALOGW("limiting source size due to overflow (%zu*%zu/%zu)",
+ targetSize, mSourceSampleSize, mTargetSampleSize);
+ return SIZE_MAX;
+ }
+ return numSamples * mSourceSampleSize;
+}
+
+size_t SampleConverterBase::targetSize(size_t sourceSize) {
+ // we round up on conversion
+ size_t numSamples = divUp(sourceSize, (size_t)mSourceSampleSize);
+ if (numSamples > SIZE_MAX / mTargetSampleSize) {
+ ALOGW("limiting target size due to overflow (%zu*%zu/%zu)",
+ sourceSize, mTargetSampleSize, mSourceSampleSize);
+ return SIZE_MAX;
+ }
+ return numSamples * mTargetSampleSize;
+}
+
+
+static size_t getAudioSampleSize(AudioEncoding e) {
+ switch (e) {
+ case kAudioEncodingPcm16bit: return 2;
+ case kAudioEncodingPcm8bit: return 1;
+ case kAudioEncodingPcmFloat: return 4;
+ default: return 0;
+ }
+}
+
+
+// static
+AudioConverter* AudioConverter::Create(AudioEncoding source, AudioEncoding target) {
+ uint32_t sourceSampleSize = getAudioSampleSize(source);
+ uint32_t targetSampleSize = getAudioSampleSize(target);
+ if (sourceSampleSize && targetSampleSize && sourceSampleSize != targetSampleSize) {
+ return new AudioConverter(source, sourceSampleSize, target, targetSampleSize);
+ }
+ return NULL;
+}
+
+status_t AudioConverter::safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt) {
+ if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcm16bit) {
+ memcpy_to_u8_from_i16((uint8_t*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+ } else if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcmFloat) {
+ memcpy_to_u8_from_float((uint8_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+ } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcm8bit) {
+ memcpy_to_i16_from_u8((int16_t*)tgt->base(), (const uint8_t*)src->data(), src->size());
+ } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcmFloat) {
+ memcpy_to_i16_from_float((int16_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+ } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm8bit) {
+ memcpy_to_float_from_u8((float*)tgt->base(), (const uint8_t*)src->data(), src->size());
+ } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm16bit) {
+ memcpy_to_float_from_i16((float*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+ } else {
+ return INVALID_OPERATION;
+ }
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 6e99d02..13b66f3 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -615,6 +615,7 @@
mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
+ mTrackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
// sample rate is non-zero, so division by zero not possible
mTrackMetadata->setInt64(kKeyDuration,
(getTotalSamples() * 1000000LL) / getSampleRate());
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 322eab9..f5549e4 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -756,13 +756,22 @@
}
// Given a time in seconds since Jan 1 1904, produce a human-readable string.
-static void convertTimeToDate(int64_t time_1904, String8 *s) {
- time_t time_1970 = time_1904 - (((66 * 365 + 17) * 24) * 3600);
+static bool convertTimeToDate(int64_t time_1904, String8 *s) {
+ // delta between mpeg4 time and unix epoch time
+ static const int64_t delta = (((66 * 365 + 17) * 24) * 3600);
+ if (time_1904 < INT64_MIN + delta) {
+ return false;
+ }
+ time_t time_1970 = time_1904 - delta;
char tmp[32];
- strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", gmtime(&time_1970));
-
- s->setTo(tmp);
+ struct tm* tm = gmtime(&time_1970);
+ if (tm != NULL &&
+ strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", tm) > 0) {
+ s->setTo(tmp);
+ return true;
+ }
+ return false;
}
status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
@@ -1880,14 +1889,15 @@
}
duration = d32;
}
- if (duration != 0 && mHeaderTimescale != 0) {
+ if (duration != 0 && mHeaderTimescale != 0 && duration < UINT64_MAX / 1000000) {
mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
}
String8 s;
- convertTimeToDate(creationTime, &s);
+ if (convertTimeToDate(creationTime, &s)) {
+ mFileMetaData->setCString(kKeyDate, s.string());
+ }
- mFileMetaData->setCString(kKeyDate, s.string());
break;
}
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0a052d2..1acfca0 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -358,6 +358,11 @@
return OK;
}
+sp<MetaData> MediaCodecSource::getFormat() {
+ Mutexed<sp<MetaData>>::Locked meta(mMeta);
+ return *meta;
+}
+
sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
return mGraphicBufferProducer;
@@ -493,7 +498,9 @@
}
mEncoder->getOutputFormat(&mOutputFormat);
- convertMessageToMetaData(mOutputFormat, mMeta);
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(mOutputFormat, meta);
+ mMeta.lock().set(meta);
if (mFlags & FLAG_USE_SURFACE_INPUT) {
CHECK(mIsVideo);
@@ -787,7 +794,9 @@
signalEOS(err);
break;
}
- convertMessageToMetaData(mOutputFormat, mMeta);
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(mOutputFormat, meta);
+ mMeta.lock().set(meta);
} else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
int32_t index;
size_t offset;
diff --git a/media/libstagefright/MidiExtractor.cpp b/media/libstagefright/MidiExtractor.cpp
index 7525f57..7930bbb 100644
--- a/media/libstagefright/MidiExtractor.cpp
+++ b/media/libstagefright/MidiExtractor.cpp
@@ -178,6 +178,7 @@
mEasConfig = EAS_Config();
trackMetadata->setInt32(kKeySampleRate, mEasConfig->sampleRate);
trackMetadata->setInt32(kKeyChannelCount, mEasConfig->numChannels);
+ trackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
}
mIsInitialized = true;
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index dd7f6b9..6d1a460 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -121,9 +121,10 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = dataSource;
-
- updateDurationAndBitrate();
+ status_t err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = dataSource;
+ }
return OK;
}
@@ -152,9 +153,10 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = fileSource;
-
- updateDurationAndBitrate();
+ err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = fileSource;
+ }
return OK;
}
@@ -177,14 +179,19 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = source;
+ err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = source;
+ }
- updateDurationAndBitrate();
-
- return OK;
+ return err;
}
-void NuMediaExtractor::updateDurationAndBitrate() {
+status_t NuMediaExtractor::updateDurationAndBitrate() {
+ if (mImpl->countTracks() > kMaxTrackCount) {
+ return ERROR_UNSUPPORTED;
+ }
+
mTotalBitrate = 0ll;
mDurationUs = -1ll;
@@ -212,6 +219,7 @@
mDurationUs = durationUs;
}
}
+ return OK;
}
size_t NuMediaExtractor::countTracks() const {
@@ -235,6 +243,12 @@
}
sp<MetaData> meta = mImpl->getTrackMetaData(index);
+ // Extractors either support trackID-s or not, so either all tracks have trackIDs or none.
+ // Generate trackID if missing.
+ int32_t trackID;
+ if (meta != NULL && !meta->findInt32(kKeyTrackID, &trackID)) {
+ meta->setInt32(kKeyTrackID, (int32_t)index + 1);
+ }
return convertMetaDataToMessage(meta, format);
}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index f5d9ec7..1bdd812 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -298,6 +298,7 @@
mDefaultSampleSize = U32_AT(&header[4]);
mNumSampleSizes = U32_AT(&header[8]);
if (mNumSampleSizes > (UINT32_MAX - 12) / 16) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
return ERROR_MALFORMED;
}
@@ -532,6 +533,9 @@
Mutex::Autolock autoLock(mLock);
if (mSampleTimeEntries != NULL || mNumSampleSizes == 0) {
+ if (mNumSampleSizes == 0) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
+ }
return;
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 34deaad..7daae20 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -167,6 +167,12 @@
msg->setInt32("is-sync-frame", 1);
}
+ // this only needs to be translated from meta to message as it is an extractor key
+ int32_t trackID;
+ if (meta->findInt32(kKeyTrackID, &trackID)) {
+ msg->setInt32("track-id", trackID);
+ }
+
if (!strncasecmp("video/", mime, 6)) {
int32_t width, height;
if (!meta->findInt32(kKeyWidth, &width)
@@ -232,13 +238,18 @@
int32_t isADTS;
if (meta->findInt32(kKeyIsADTS, &isADTS)) {
- msg->setInt32("is-adts", true);
+ msg->setInt32("is-adts", isADTS);
}
int32_t aacProfile = -1;
if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
msg->setInt32("aac-profile", aacProfile);
}
+
+ int32_t pcmEncoding;
+ if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+ msg->setInt32("pcm-encoding", pcmEncoding);
+ }
}
int32_t maxInputSize;
@@ -794,6 +805,11 @@
if (msg->findInt32("is-adts", &isADTS)) {
meta->setInt32(kKeyIsADTS, isADTS);
}
+
+ int32_t pcmEncoding;
+ if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
+ meta->setInt32(kKeyPcmEncoding, pcmEncoding);
+ }
}
int32_t maxInputSize;
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 5564926..03226c7 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -156,12 +156,12 @@
lastTime = time;
}
- int64_t div = numSamplesToUse * sumXX - sumX * sumX;
+ int64_t div = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
if (div == 0) {
return false;
}
- int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY;
+ int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
int64_t b_nom = sumXX * sumY - sumX * sumXY;
*a = divRound(a_nom, div);
*b = divRound(b_nom, div);
@@ -437,10 +437,10 @@
(renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
}
- mTimeCorrection += mVsyncPeriod / 2 - offset / N;
+ mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
renderTime += mTimeCorrection;
nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
- edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2);
+ edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
if (edgeRemainder <= mVsyncPeriod / 3) {
correctionLimit /= 2;
}
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 15e3845..38a2a06 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -308,6 +308,7 @@
mTrackMeta->setInt32(kKeyChannelCount, mNumChannels);
mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
+ mTrackMeta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
int64_t durationUs = 0;
if (mWaveFormat == WAVE_FORMAT_MSGSM) {
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index ac19ee3..edf648d 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -323,6 +323,13 @@
return;
}
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+ continue;
+ }
+
if (inHeader->nOffset == 0) {
mAnchorTimeUs = inHeader->nTimeStamp;
mNumSamplesOutput = 0;
@@ -332,6 +339,26 @@
int32_t numBytesRead;
if (mMode == MODE_NARROW) {
+ if (outHeader->nAllocLen < kNumSamplesPerFrameNB * sizeof(int16_t)) {
+ ALOGE("b/27662364: NB expected output buffer %zu bytes vs %u",
+ kNumSamplesPerFrameNB * sizeof(int16_t), outHeader->nAllocLen);
+ android_errorWriteLog(0x534e4554, "27662364");
+ notify(OMX_EventError, OMX_ErrorOverflow, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
+ int16 mode = ((inputPtr[0] >> 3) & 0x0f);
+ // for WMF since MIME_IETF is used when calling AMRDecode.
+ size_t frameSize = WmfDecBytesPerFrame[mode] + 1;
+
+ if (inHeader->nFilledLen < frameSize) {
+ ALOGE("b/27662364: expected %zu bytes vs %u", frameSize, inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
numBytesRead =
AMRDecode(mState,
(Frame_Type_3GPP)((inputPtr[0] >> 3) & 0x0f),
@@ -359,6 +386,15 @@
return;
}
} else {
+ if (outHeader->nAllocLen < kNumSamplesPerFrameWB * sizeof(int16_t)) {
+ ALOGE("b/27662364: WB expected output buffer %zu bytes vs %u",
+ kNumSamplesPerFrameWB * sizeof(int16_t), outHeader->nAllocLen);
+ android_errorWriteLog(0x534e4554, "27662364");
+ notify(OMX_EventError, OMX_ErrorOverflow, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
int16 mode = ((inputPtr[0] >> 3) & 0x0f);
if (mode >= 10 && mode <= 13) {
@@ -372,7 +408,12 @@
}
size_t frameSize = getFrameSize(mode);
- CHECK_GE(inHeader->nFilledLen, frameSize);
+ if (inHeader->nFilledLen < frameSize) {
+ ALOGE("b/27662364: expected %zu bytes vs %u", frameSize, inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
int16_t *outPtr = (int16_t *)outHeader->pBuffer;
diff --git a/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h b/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
index 0988e17f..f224fb6 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
+++ b/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
@@ -104,7 +104,6 @@
; INCLUDES
----------------------------------------------------------------------------*/
#include "typedef.h"
-#include "mode.h"
#include "frame_type_3gpp.h"
/*--------------------------------------------------------------------------*/
diff --git a/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h b/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
index 8f54ee8..dc64d67 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
+++ b/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
@@ -87,6 +87,7 @@
#include "gsm_amr_typedefs.h"
#include "frame_type_3gpp.h"
+#include "amrdecode.h"
/*--------------------------------------------------------------------------*/
#ifdef __cplusplus
@@ -136,19 +137,6 @@
Word8 *id);
/*
- * AMRDecode steps into the part of the library that decodes the raw data
- * speech bits for the decoding process. It returns the address offset of
- * the next frame to be decoded.
- */
- Word16 AMRDecode(
- void *state_data,
- enum Frame_Type_3GPP frame_type,
- UWord8 *speech_bits_ptr,
- Word16 *raw_pcm_buffer,
- Word16 input_format
- );
-
- /*
* This function resets the state memory used by the GSM AMR decoder. This
* function returns zero. It will return negative one if there is an error.
*/
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index ae0741d..92f5c97 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -381,7 +381,7 @@
resetPlugin();
}
-void SoftAVC::setDecodeArgs(
+bool SoftAVC::setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
@@ -389,7 +389,6 @@
size_t timeStampIx) {
size_t sizeY = outputBufferWidth() * outputBufferHeight();
size_t sizeUV;
- uint8_t *pBuf;
ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t);
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
@@ -409,22 +408,28 @@
ps_dec_ip->u4_num_Bytes = 0;
}
- if (outHeader) {
- pBuf = outHeader->pBuffer;
- } else {
- pBuf = mFlushOutBuffer;
- }
-
sizeUV = sizeY / 4;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV;
+ uint8_t *pBuf;
+ if (outHeader) {
+ if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ return false;
+ }
+ pBuf = outHeader->pBuffer;
+ } else {
+ // mFlushOutBuffer always has the right size.
+ pBuf = mFlushOutBuffer;
+ }
+
ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf;
ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY;
ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV;
ps_dec_ip->s_out_buffer.u4_num_bufs = 3;
- return;
+ return true;
}
void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
/* Once the output buffers are flushed, ignore any buffers that are held in decoder */
@@ -573,7 +578,12 @@
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
+ ALOGE("Decoder arg setup failed");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
// If input dump is enabled, then write to file
DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes);
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 9dcabb4..c710c76 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -109,7 +109,7 @@
status_t resetPlugin();
- void setDecodeArgs(
+ bool setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 4aa23c1..a891514 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -343,14 +343,13 @@
resetPlugin();
}
-void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
+bool SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
OMX_BUFFERHEADERTYPE *outHeader,
size_t timeStampIx) {
size_t sizeY = outputBufferWidth() * outputBufferHeight();
size_t sizeUV;
- uint8_t *pBuf;
ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t);
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
@@ -370,22 +369,28 @@
ps_dec_ip->u4_num_Bytes = 0;
}
- if (outHeader) {
- pBuf = outHeader->pBuffer;
- } else {
- pBuf = mFlushOutBuffer;
- }
-
sizeUV = sizeY / 4;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV;
+ uint8_t *pBuf;
+ if (outHeader) {
+ if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ return false;
+ }
+ pBuf = outHeader->pBuffer;
+ } else {
+ // mFlushOutBuffer always has the right size.
+ pBuf = mFlushOutBuffer;
+ }
+
ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf;
ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY;
ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV;
ps_dec_ip->s_out_buffer.u4_num_bufs = 3;
- return;
+ return true;
}
void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) {
/* Once the output buffers are flushed, ignore any buffers that are held in decoder */
@@ -520,7 +525,12 @@
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
+ ALOGE("Decoder arg setup failed");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
GETTIME(&mTimeStart, NULL);
/* Compute time elapsed between end of previous decode()
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
index 21bb99e..943edfd 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -106,7 +106,7 @@
status_t resetDecoder();
status_t resetPlugin();
- void setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
+ bool setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
OMX_BUFFERHEADERTYPE *outHeader,
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 4307c4e..e134d38 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -466,7 +466,7 @@
return ret;
}
-void SoftMPEG2::setDecodeArgs(
+bool SoftMPEG2::setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
@@ -474,7 +474,6 @@
size_t timeStampIx) {
size_t sizeY = outputBufferWidth() * outputBufferHeight();
size_t sizeUV;
- uint8_t *pBuf;
ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t);
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
@@ -494,22 +493,28 @@
ps_dec_ip->u4_num_Bytes = 0;
}
- if (outHeader) {
- pBuf = outHeader->pBuffer;
- } else {
- pBuf = mFlushOutBuffer;
- }
-
sizeUV = sizeY / 4;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV;
ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV;
+ uint8_t *pBuf;
+ if (outHeader) {
+ if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ return false;
+ }
+ pBuf = outHeader->pBuffer;
+ } else {
+ // mFlushOutBuffer always has the right size.
+ pBuf = mFlushOutBuffer;
+ }
+
ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf;
ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY;
ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV;
ps_dec_ip->s_out_buffer.u4_num_bufs = 3;
- return;
+ return true;
}
void SoftMPEG2::onPortFlushCompleted(OMX_U32 portIndex) {
/* Once the output buffers are flushed, ignore any buffers that are held in decoder */
@@ -622,7 +627,11 @@
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
+ ALOGE("Decoder arg setup failed");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
// If input dump is enabled, then write to file
DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes);
@@ -665,9 +674,9 @@
CHECK_EQ(reInitDecoder(), (status_t)OK);
- setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
-
- ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ if (setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
+ ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ }
return;
}
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index a625e08..f48b70b 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -117,7 +117,7 @@
status_t resetPlugin();
status_t reInitDecoder();
- void setDecodeArgs(
+ bool setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
OMX_BUFFERHEADERTYPE *inHeader,
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 6106a93..2a56ed5 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -156,15 +156,20 @@
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
-
- uint8_t *dst = outHeader->pBuffer;
- const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
- const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
- const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
- size_t srcYStride = mImg->stride[VPX_PLANE_Y];
- size_t srcUStride = mImg->stride[VPX_PLANE_U];
- size_t srcVStride = mImg->stride[VPX_PLANE_V];
- copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+ if (outHeader->nAllocLen >= outHeader->nFilledLen) {
+ uint8_t *dst = outHeader->pBuffer;
+ const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
+ size_t srcYStride = mImg->stride[VPX_PLANE_Y];
+ size_t srcUStride = mImg->stride[VPX_PLANE_U];
+ size_t srcVStride = mImg->stride[VPX_PLANE_V];
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+ } else {
+ ALOGE("b/27597103, buffer too small");
+ android_errorWriteLog(0x534e4554, "27597103");
+ outHeader->nFilledLen = 0;
+ }
mImg = NULL;
outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index a354690..5edfbb5 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -734,9 +734,10 @@
const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
+ size_t frameSize = mWidth * mHeight * 3 / 2;
if (mInputDataIsMeta) {
source = extractGraphicBuffer(
- mConversionBuffer, mWidth * mHeight * 3 / 2,
+ mConversionBuffer, frameSize,
source, inputBufferHeader->nFilledLen,
mWidth, mHeight);
if (source == NULL) {
@@ -744,11 +745,21 @@
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
- } else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
- ConvertYUV420SemiPlanarToYUV420Planar(
- source, mConversionBuffer, mWidth, mHeight);
+ } else {
+ if (inputBufferHeader->nFilledLen < frameSize) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+ return;
+ } else if (inputBufferHeader->nFilledLen > frameSize) {
+ ALOGW("Input buffer contains too many pixels");
+ }
- source = mConversionBuffer;
+ if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ ConvertYUV420SemiPlanarToYUV420Planar(
+ source, mConversionBuffer, mWidth, mHeight);
+
+ source = mConversionBuffer;
+ }
}
vpx_image_t raw_frame;
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
@@ -810,9 +821,14 @@
outputBufferHeader->nTimeStamp = encoded_packet->data.frame.pts;
outputBufferHeader->nFlags = 0;
if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY)
- outputBufferHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
+ outputBufferHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
outputBufferHeader->nOffset = 0;
outputBufferHeader->nFilledLen = encoded_packet->data.frame.sz;
+ if (outputBufferHeader->nFilledLen > outputBufferHeader->nAllocLen) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+ return;
+ }
memcpy(outputBufferHeader->pBuffer,
encoded_packet->data.frame.buf,
encoded_packet->data.frame.sz);
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index c4e0659..acb2b37 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -42,7 +42,9 @@
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mSignalledError(false),
mChannelCount(2),
- mSampleRate(44100) {
+ mSampleRate(44100),
+ mNumericalData(OMX_NumericalDataSigned),
+ mBitsPerSample(16) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -111,10 +113,10 @@
return OMX_ErrorUndefined;
}
- pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eNumData = (OMX_NUMERICALDATATYPE)mNumericalData;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
- pcmParams->nBitPerSample = 16;
+ pcmParams->nBitPerSample = mBitsPerSample;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -166,6 +168,8 @@
mChannelCount = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
+ mNumericalData = pcmParams->eNumData;
+ mBitsPerSample = pcmParams->nBitPerSample;
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 015c4a3..80906b4 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -50,6 +50,8 @@
int32_t mChannelCount;
int32_t mSampleRate;
+ int32_t mNumericalData;
+ int32_t mBitsPerSample;
void initPorts();
status_t initDecoder();
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 41d9d55..6a689c4 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -338,7 +338,13 @@
}
if (inHeader->nFilledLen || !mSawInputEos) {
- CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+ if (inHeader->nFilledLen < sizeof(numPageSamples)) {
+ notify(OMX_EventError, OMX_ErrorBadParameter, 0, NULL);
+ mSignalledError = true;
+ ALOGE("onQueueFilled, input header has nFilledLen %u, expected %zu",
+ inHeader->nFilledLen, sizeof(numPageSamples));
+ return;
+ }
memcpy(&numPageSamples,
inHeader->pBuffer
+ inHeader->nOffset + inHeader->nFilledLen - 4,
diff --git a/media/libstagefright/foundation/ABitReader.cpp b/media/libstagefright/foundation/ABitReader.cpp
index beb5cc0..1582b67 100644
--- a/media/libstagefright/foundation/ABitReader.cpp
+++ b/media/libstagefright/foundation/ABitReader.cpp
@@ -114,7 +114,7 @@
return false;
}
- ssize_t numBitsRemaining = n - mNumBitsLeft;
+ ssize_t numBitsRemaining = (ssize_t)n - (ssize_t)mNumBitsLeft;
size_t size = mSize;
const uint8_t *data = mData;
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 725a574..855ac95 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -749,6 +749,126 @@
}
}
+// Return a new AMessage containing the entries of this message that differ
+// from |other| (plus what/target when those changed). When |deep| is true,
+// a changed nested message is stored as its recursive diff; otherwise the
+// full changed sub-message is stored.
+sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
+    if (other == NULL) {
+        // Nothing to diff against: the whole message counts as changed.
+        return const_cast<AMessage*>(this);
+    }
+
+    sp<AMessage> diff = new AMessage;
+    if (mWhat != other->mWhat) {
+        diff->setWhat(mWhat);
+    }
+    if (mHandler != other->mHandler) {
+        diff->setTarget(mHandler.promote());
+    }
+
+    for (size_t i = 0; i < mNumItems; ++i) {
+        const Item &item = mItems[i];
+        const Item *oitem = other->findItem(item.mName, item.mType);
+        switch (item.mType) {
+            case kTypeInt32:
+                if (oitem == NULL || item.u.int32Value != oitem->u.int32Value) {
+                    diff->setInt32(item.mName, item.u.int32Value);
+                }
+                break;
+
+            case kTypeInt64:
+                if (oitem == NULL || item.u.int64Value != oitem->u.int64Value) {
+                    diff->setInt64(item.mName, item.u.int64Value);
+                }
+                break;
+
+            case kTypeSize:
+                if (oitem == NULL || item.u.sizeValue != oitem->u.sizeValue) {
+                    diff->setSize(item.mName, item.u.sizeValue);
+                }
+                break;
+
+            case kTypeFloat:
+                if (oitem == NULL || item.u.floatValue != oitem->u.floatValue) {
+                    // Fixed copy-paste bug: this previously stored u.sizeValue.
+                    diff->setFloat(item.mName, item.u.floatValue);
+                }
+                break;
+
+            case kTypeDouble:
+                if (oitem == NULL || item.u.doubleValue != oitem->u.doubleValue) {
+                    // Fixed copy-paste bug: this previously stored u.sizeValue.
+                    diff->setDouble(item.mName, item.u.doubleValue);
+                }
+                break;
+
+            case kTypeString:
+                if (oitem == NULL || *item.u.stringValue != *oitem->u.stringValue) {
+                    diff->setString(item.mName, *item.u.stringValue);
+                }
+                break;
+
+            case kTypeRect:
+                if (oitem == NULL || memcmp(&item.u.rectValue, &oitem->u.rectValue, sizeof(Rect))) {
+                    diff->setRect(
+                            item.mName, item.u.rectValue.mLeft, item.u.rectValue.mTop,
+                            item.u.rectValue.mRight, item.u.rectValue.mBottom);
+                }
+                break;
+
+            case kTypePointer:
+                if (oitem == NULL || item.u.ptrValue != oitem->u.ptrValue) {
+                    diff->setPointer(item.mName, item.u.ptrValue);
+                }
+                break;
+
+            case kTypeBuffer:
+            {
+                sp<ABuffer> myBuf = static_cast<ABuffer *>(item.u.refValue);
+                if (myBuf == NULL) {
+                    if (oitem == NULL || oitem->u.refValue != NULL) {
+                        diff->setBuffer(item.mName, NULL);
+                    }
+                    break;
+                }
+                sp<ABuffer> oBuf = oitem == NULL ? NULL : static_cast<ABuffer *>(oitem->u.refValue);
+                if (oBuf == NULL
+                        || myBuf->size() != oBuf->size()
+                        || (!myBuf->data() ^ !oBuf->data()) // data nullness differs
+                        || (myBuf->data() && memcmp(myBuf->data(), oBuf->data(), myBuf->size()))) {
+                    diff->setBuffer(item.mName, myBuf);
+                }
+                break;
+            }
+
+            case kTypeMessage:
+            {
+                sp<AMessage> myMsg = static_cast<AMessage *>(item.u.refValue);
+                if (myMsg == NULL) {
+                    if (oitem == NULL || oitem->u.refValue != NULL) {
+                        diff->setMessage(item.mName, NULL);
+                    }
+                    break;
+                }
+                sp<AMessage> oMsg =
+                        oitem == NULL ? NULL : static_cast<AMessage *>(oitem->u.refValue);
+                sp<AMessage> changes = myMsg->changesFrom(oMsg, deep);
+                if (changes->countEntries()) {
+                    diff->setMessage(item.mName, deep ? changes : myMsg);
+                }
+                break;
+            }
+
+            case kTypeObject:
+                if (oitem == NULL || item.u.refValue != oitem->u.refValue) {
+                    diff->setObject(item.mName, item.u.refValue);
+                }
+                break;
+
+            default:
+            {
+                ALOGE("Unknown type %d", item.mType);
+                TRESPASS();
+            }
+        }
+    }
+    return diff;
+}
+
size_t AMessage::countEntries() const {
return mNumItems;
}
diff --git a/media/libstagefright/include/DataConverter.h b/media/libstagefright/include/DataConverter.h
new file mode 100644
index 0000000..8d67921
--- /dev/null
+++ b/media/libstagefright/include/DataConverter.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_DATACONVERTER_H_
+#define STAGEFRIGHT_DATACONVERTER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+struct ABuffer;
+
+// DataConverter base class, defaults to memcpy
+struct DataConverter : public RefBase {
+ virtual size_t sourceSize(size_t targetSize); // will clamp to SIZE_MAX
+ virtual size_t targetSize(size_t sourceSize); // will clamp to SIZE_MAX
+
+ status_t convert(const sp<ABuffer> &source, sp<ABuffer> &target); // delegates to safeConvert()
+ virtual ~DataConverter();
+
+protected:
+ virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target);
+};
+
+// SampleConverterBase uses a ratio to calculate the source and target sizes
+// based on source and target sample sizes.
+struct SampleConverterBase : public DataConverter {
+ virtual size_t sourceSize(size_t targetSize);
+ virtual size_t targetSize(size_t sourceSize);
+
+protected:
+ virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) = 0;
+
+ // sourceSize = sourceSampleSize / targetSampleSize * targetSize
+ SampleConverterBase(uint32_t sourceSampleSize, uint32_t targetSampleSize)
+ : mSourceSampleSize(sourceSampleSize),
+ mTargetSampleSize(targetSampleSize) { }
+ size_t mSourceSampleSize;
+ size_t mTargetSampleSize;
+};
+
+// AudioConverter converts between audio PCM formats
+struct AudioConverter : public SampleConverterBase {
+ // return nullptr if conversion is not needed or not supported
+ static AudioConverter *Create(AudioEncoding source, AudioEncoding target);
+
+protected:
+ virtual status_t safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt);
+
+private:
+ AudioConverter(
+ AudioEncoding source, size_t sourceSample,
+ AudioEncoding target, size_t targetSample)
+ : SampleConverterBase(sourceSample, targetSample),
+ mFrom(source),
+ mTo(target) { }
+ AudioEncoding mFrom;
+ AudioEncoding mTo;
+};
+
+} // namespace android
+
+#endif // STAGEFRIGHT_DATACONVERTER_H_
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index e5c24ca..34b9606 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -25,6 +25,8 @@
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
+#include "mpeg2ts/ATSParser.h"
+
namespace android {
struct AMessage;
@@ -55,6 +57,10 @@
sp<ATSParser> mParser;
+ // Used to remember SyncEvent occurred in feedMore() when called from init(),
+ // because init() needs to update |mSourceImpls| before adding SyncPoint.
+ ATSParser::SyncEvent mLastSyncEvent;
+
Vector<sp<AnotherPacketSource> > mSourceImpls;
Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
@@ -65,7 +71,14 @@
off64_t mOffset;
void init();
- status_t feedMore();
+ // Try to feed more data from source to parser.
+ // |isInit| means this function is called inside init(). This is a signal to
+ // save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
+ // This function returns OK if expected amount of data is fed from DataSource to
+ // parser and is successfully parsed. Otherwise, various error codes could be
+ // returned, e.g., ERROR_END_OF_STREAM, or no data available from DataSource, or
+ // the data has syntax error during parsing, etc.
+ status_t feedMore(bool isInit = false);
status_t seek(int64_t seekTimeUs,
const MediaSource::ReadOptions::SeekMode& seekMode);
status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
@@ -73,6 +86,9 @@
status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+ // Add a SyncPoint derived from |event|.
+ void addSyncPoint_l(const ATSParser::SyncEvent &event);
+
DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
};
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 2790a0e..fb43a38 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -122,7 +122,7 @@
void setPID(unsigned pid) { mElementaryPID = pid; }
// Parse the payload and set event when PES with a sync frame is detected.
- // This method knows when a PES starts; so record mPesStartOffset in that
+ // This method knows when a PES starts; so record mPesStartOffsets in that
// case.
status_t parse(
unsigned continuity_counter,
@@ -157,7 +157,7 @@
bool mEOSReached;
uint64_t mPrevPTS;
- off64_t mPesStartOffset;
+ List<off64_t> mPesStartOffsets;
ElementaryStreamQueue *mQueue;
@@ -205,16 +205,19 @@
};
ATSParser::SyncEvent::SyncEvent(off64_t offset)
- : mInit(false), mOffset(offset), mTimeUs(0) {}
+ : mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
int64_t timeUs) {
- mInit = true;
+ mHasReturnedData = true;
mOffset = offset;
mMediaSource = source;
mTimeUs = timeUs;
}
+void ATSParser::SyncEvent::reset() {
+ mHasReturnedData = false;
+}
////////////////////////////////////////////////////////////////////////////////
ATSParser::Program::Program(
@@ -661,6 +664,7 @@
ALOGI("discontinuity on stream pid 0x%04x", mElementaryPID);
mPayloadStarted = false;
+ mPesStartOffsets.clear();
mBuffer->setRange(0, 0);
mExpectedContinuityCounter = -1;
@@ -697,7 +701,7 @@
}
mPayloadStarted = true;
- mPesStartOffset = offset;
+ mPesStartOffsets.push_back(offset);
}
if (!mPayloadStarted) {
@@ -772,6 +776,7 @@
}
mPayloadStarted = false;
+ mPesStartOffsets.clear();
mEOSReached = false;
mBuffer->setRange(0, 0);
@@ -1105,7 +1110,9 @@
int64_t timeUs;
if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
found = true;
- event->init(mPesStartOffset, mSource, timeUs);
+ off64_t pesStartOffset = *mPesStartOffsets.begin();
+ event->init(pesStartOffset, mSource, timeUs);
+ mPesStartOffsets.erase(mPesStartOffsets.begin());
}
}
}
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 430a8d5..fb03cd6 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -69,16 +69,18 @@
void init(off64_t offset, const sp<MediaSource> &source,
int64_t timeUs);
- bool isInit() { return mInit; }
- off64_t getOffset() { return mOffset; }
- const sp<MediaSource> &getMediaSource() { return mMediaSource; }
- int64_t getTimeUs() { return mTimeUs; }
+ bool hasReturnedData() const { return mHasReturnedData; }
+ void reset();
+ off64_t getOffset() const { return mOffset; }
+ const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
+ int64_t getTimeUs() const { return mTimeUs; }
private:
- bool mInit;
+ bool mHasReturnedData;
/*
- * mInit == false: the current offset
- * mInit == true: the start offset of sync payload
+ * mHasReturnedData == false: the current offset (or undefined if the returned data
+ has been invalidated via reset())
+ * mHasReturnedData == true: the start offset of sync payload
*/
off64_t mOffset;
/* The media source object for this event. */
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index daf6b3d..96ca405 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -592,6 +592,7 @@
mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
mFormat->setInt32(kKeyChannelCount, 2);
mFormat->setInt32(kKeySampleRate, 48000);
+ mFormat->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
}
static const size_t kFramesPerAU = 80;
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 0b456c3..fb5e079 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -112,6 +112,7 @@
MPEG2TSExtractor::MPEG2TSExtractor(const sp<DataSource> &source)
: mDataSource(source),
mParser(new ATSParser),
+ mLastSyncEvent(0),
mOffset(0) {
init();
}
@@ -149,8 +150,10 @@
bool haveVideo = false;
int64_t startTime = ALooper::GetNowUs();
- while (feedMore() == OK) {
+ while (feedMore(true /* isInit */) == OK) {
if (haveAudio && haveVideo) {
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
break;
}
if (!haveVideo) {
@@ -181,6 +184,9 @@
}
}
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
+
// Wait only for 2 seconds to detect audio/video streams.
if (ALooper::GetNowUs() - startTime > 2000000ll) {
break;
@@ -245,7 +251,7 @@
haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
}
-status_t MPEG2TSExtractor::feedMore() {
+status_t MPEG2TSExtractor::feedMore(bool isInit) {
Mutex::Autolock autoLock(mLock);
uint8_t packet[kTSPacketSize];
@@ -261,29 +267,41 @@
ATSParser::SyncEvent event(mOffset);
mOffset += n;
status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
- if (event.isInit()) {
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- if (mSourceImpls[i].get() == event.getMediaSource().get()) {
- KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
- syncPoints->add(event.getTimeUs(), event.getOffset());
- // We're keeping the size of the sync points at most 5mb per a track.
- size_t size = syncPoints->size();
- if (size >= 327680) {
- int64_t firstTimeUs = syncPoints->keyAt(0);
- int64_t lastTimeUs = syncPoints->keyAt(size - 1);
- if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
- syncPoints->removeItemsAt(0, 4096);
- } else {
- syncPoints->removeItemsAt(size - 4096, 4096);
- }
- }
- break;
- }
+ if (event.hasReturnedData()) {
+ if (isInit) {
+ mLastSyncEvent = event;
+ } else {
+ addSyncPoint_l(event);
}
}
return err;
}
+void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
+ if (!event.hasReturnedData()) {
+ return;
+ }
+
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+ KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
+ syncPoints->add(event.getTimeUs(), event.getOffset());
+ // We're keeping the size of the sync points at most 5mb per a track.
+ size_t size = syncPoints->size();
+ if (size >= 327680) {
+ int64_t firstTimeUs = syncPoints->keyAt(0);
+ int64_t lastTimeUs = syncPoints->keyAt(size - 1);
+ if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
+ syncPoints->removeItemsAt(0, 4096);
+ } else {
+ syncPoints->removeItemsAt(size - 4096, 4096);
+ }
+ }
+ break;
+ }
+ }
+}
+
uint32_t MPEG2TSExtractor::flags() const {
return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
}
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 759648b..4d89ba1 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -183,7 +183,12 @@
Mutex::Autolock autoLock(mLock);
ssize_t index = mLiveNodes.indexOfKey(the_late_who);
- CHECK(index >= 0);
+
+ if (index < 0) {
+ ALOGE("b/27597103, nonexistent observer on binderDied");
+ android_errorWriteLog(0x534e4554, "27597103");
+ return;
+ }
instance = mLiveNodes.editValueAt(index);
mLiveNodes.removeItemsAt(index);
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index ed5a404..3ecb52b 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -957,10 +957,12 @@
format->setInt32("level-idc", levelIdc);
format->setInt32("constraint-set", constraintSet);
} else {
- format->setString(
- "mime",
- usePCMAudio
- ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
+ if (usePCMAudio) {
+ format->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
+ format->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
+ } else {
+ format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+ }
}
notify = new AMessage(kWhatConverterNotify, this);
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index a398aca..bd89a51 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -611,7 +611,7 @@
return NULL;
if (!readData())
return NULL;
- MtpResponseCode ret = readResponse();
+ const MtpResponseCode ret = readResponse();
if (ret == MTP_RESPONSE_OK) {
MtpProperty* property = new MtpProperty;
if (property->read(mData))
@@ -622,6 +622,25 @@
return NULL;
}
+bool MtpDevice::getObjectPropValue(MtpObjectHandle handle, MtpProperty* property) {
+ if (property == nullptr)
+ return false;
+
+ Mutex::Autolock autoLock(mMutex);
+
+ mRequest.reset();
+ mRequest.setParameter(1, handle);
+ mRequest.setParameter(2, property->getPropertyCode());
+ if (!sendRequest(MTP_OPERATION_GET_OBJECT_PROP_VALUE))
+ return false;
+ if (!readData())
+ return false;
+ if (readResponse() != MTP_RESPONSE_OK)
+ return false;
+ property->setCurrentValue(mData);
+ return true;
+}
+
bool MtpDevice::readObject(MtpObjectHandle handle,
ReadObjectCallback callback,
uint32_t expectedLength,
@@ -679,11 +698,6 @@
return false;
}
- if (mData.getContainerType() == MTP_CONTAINER_TYPE_RESPONSE) {
- mResponse.copyFrom(mData);
- return mResponse.getResponseCode() == MTP_RESPONSE_OK ? 0 : -1;
- }
-
// If object size 0 byte, the remote device can reply response packet
// without sending any data packets.
if (mData.getContainerType() == MTP_CONTAINER_TYPE_RESPONSE) {
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index ce60811..4be44cf 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -107,6 +107,9 @@
MtpProperty* getDevicePropDesc(MtpDeviceProperty code);
MtpProperty* getObjectPropDesc(MtpObjectProperty code, MtpObjectFormat format);
+ // Reads value of |property| for |handle|. Returns true on success.
+ bool getObjectPropValue(MtpObjectHandle handle, MtpProperty* property);
+
bool readObject(MtpObjectHandle handle, ReadObjectCallback callback,
uint32_t objectSize, void* clientData);
bool readObject(MtpObjectHandle handle, const char* destPath, int group,
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index 2be2d79..039e4f5 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -236,6 +236,12 @@
mCurrentValue.str = NULL;
}
+void MtpProperty::setCurrentValue(MtpDataPacket& packet) {
+ free(mCurrentValue.str);
+ mCurrentValue.str = NULL;
+ readValue(packet, mCurrentValue);
+}
+
void MtpProperty::setFormRange(int min, int max, int step) {
mFormFlag = kFormRange;
switch (mType) {
diff --git a/media/mtp/MtpProperty.h b/media/mtp/MtpProperty.h
index 2e2ead1..03c08e1 100644
--- a/media/mtp/MtpProperty.h
+++ b/media/mtp/MtpProperty.h
@@ -81,13 +81,16 @@
int defaultValue = 0);
virtual ~MtpProperty();
- inline MtpPropertyCode getPropertyCode() const { return mCode; }
+ MtpPropertyCode getPropertyCode() const { return mCode; }
+ MtpDataType getDataType() const { return mType; }
bool read(MtpDataPacket& packet);
void write(MtpDataPacket& packet);
void setDefaultValue(const uint16_t* string);
void setCurrentValue(const uint16_t* string);
+ void setCurrentValue(MtpDataPacket& packet);
+ const MtpPropertyValue& getCurrentValue() { return mCurrentValue; }
void setFormRange(int min, int max, int step);
void setFormEnum(const int* values, int count);
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 302e4dc..6700f6e 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -60,8 +60,9 @@
libcpustats \
libmedia_helper
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudioflinger
-LOCAL_32_BIT_ONLY := true
LOCAL_SRC_FILES += \
AudioWatchdog.cpp \
@@ -79,6 +80,8 @@
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
#
@@ -107,6 +110,8 @@
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
@@ -127,6 +132,8 @@
LOCAL_MODULE := libaudioresampler
+LOCAL_CFLAGS := -Werror -Wall
+
# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
#LOCAL_CFLAGS += -DUSE_NEON=false
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d07ca85..e5c7177 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -219,8 +219,6 @@
void AudioFlinger::onFirstRef()
{
- int rc = 0;
-
Mutex::Autolock _l(mLock);
/* TODO: move all this work into an Init() function */
@@ -1246,8 +1244,6 @@
status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
audio_io_handle_t output) const
{
- status_t status;
-
Mutex::Autolock _l(mLock);
PlaybackThread *playbackThread = checkPlaybackThread_l(output);
@@ -1306,7 +1302,7 @@
bool removed = false;
for (size_t i = 0; i< num; ) {
AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
- ALOGV(" pid %d @ %d", ref->mPid, i);
+ ALOGV(" pid %d @ %zu", ref->mPid, i);
if (ref->mPid == pid) {
ALOGV(" removing entry for pid %d session %d", pid, ref->mSessionid);
mAudioSessionRefs.removeAt(i);
@@ -1410,10 +1406,6 @@
// ----------------------------------------------------------------------------
-static bool deviceRequiresCaptureAudioOutputPermission(audio_devices_t inDevice) {
- return audio_is_remote_submix_device(inDevice);
-}
-
sp<IAudioRecord> AudioFlinger::openRecord(
audio_io_handle_t input,
uint32_t sampleRate,
@@ -1548,10 +1540,10 @@
audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
{
if (name == NULL) {
- return 0;
+ return AUDIO_MODULE_HANDLE_NONE;
}
if (!settingsAllowed()) {
- return 0;
+ return AUDIO_MODULE_HANDLE_NONE;
}
Mutex::Autolock _l(mLock);
return loadHwModule_l(name);
@@ -1571,16 +1563,16 @@
int rc = load_audio_interface(name, &dev);
if (rc) {
- ALOGI("loadHwModule() error %d loading module %s ", rc, name);
- return 0;
+ ALOGE("loadHwModule() error %d loading module %s", rc, name);
+ return AUDIO_MODULE_HANDLE_NONE;
}
mHardwareStatus = AUDIO_HW_INIT;
rc = dev->init_check(dev);
mHardwareStatus = AUDIO_HW_IDLE;
if (rc) {
- ALOGI("loadHwModule() init check error %d for module %s ", rc, name);
- return 0;
+ ALOGE("loadHwModule() init check error %d for module %s", rc, name);
+ return AUDIO_MODULE_HANDLE_NONE;
}
// Check and cache this HAL's level of support for master mute and master
@@ -1627,7 +1619,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
- audio_module_handle_t handle = nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
+ audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
@@ -1771,8 +1763,6 @@
return 0;
}
- audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
-
if (*output == AUDIO_IO_HANDLE_NONE) {
*output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
} else {
@@ -2867,7 +2857,7 @@
{
audio_session_t session = chain->sessionId();
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("putOrphanEffectChain_l session %d index %d", session, index);
+ ALOGV("putOrphanEffectChain_l session %d index %zd", session, index);
if (index >= 0) {
ALOGW("putOrphanEffectChain_l chain for session %d already present", session);
return ALREADY_EXISTS;
@@ -2880,7 +2870,7 @@
{
sp<EffectChain> chain;
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("getOrphanEffectChain_l session %d index %d", session, index);
+ ALOGV("getOrphanEffectChain_l session %d index %zd", session, index);
if (index >= 0) {
chain = mOrphanEffectChains.valueAt(index);
mOrphanEffectChains.removeItemsAt(index);
@@ -2893,11 +2883,11 @@
Mutex::Autolock _l(mLock);
audio_session_t session = effect->sessionId();
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("updateOrphanEffectChains session %d index %d", session, index);
+ ALOGV("updateOrphanEffectChains session %d index %zd", session, index);
if (index >= 0) {
sp<EffectChain> chain = mOrphanEffectChains.valueAt(index);
if (chain->removeEffect_l(effect) == 0) {
- ALOGV("updateOrphanEffectChains removing effect chain at index %d", index);
+ ALOGV("updateOrphanEffectChains removing effect chain at index %zd", index);
mOrphanEffectChains.removeItemsAt(index);
}
return true;
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 6a324ad..9c3c7cb 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -75,7 +75,6 @@
int16_t *in = mBuffer.i16;
while (outputIndex < outputSampleCount) {
- int32_t sample;
int32_t x;
// calculate output sample
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 618b56c..e615700 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -282,7 +282,6 @@
return;
}
int32_t oldSampleRate = mInSampleRate;
- int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs;
uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
bool useS32 = false;
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index f600d6c..320b8cf 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -141,6 +141,8 @@
// ----------------------------------------------------------------------------
+#if !USE_NEON
+
static inline
int32_t mulRL(int left, int32_t in, uint32_t vRL)
{
@@ -202,6 +204,8 @@
#endif
}
+#endif // !USE_NEON
+
// ----------------------------------------------------------------------------
AudioResamplerSinc::AudioResamplerSinc(
diff --git a/services/audioflinger/AutoPark.h b/services/audioflinger/AutoPark.h
new file mode 100644
index 0000000..e539e47
--- /dev/null
+++ b/services/audioflinger/AutoPark.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+
+// T is FastMixer or FastCapture
+template<typename T> class AutoPark {
+public:
+
+ // Park the specific FastThread, which can be nullptr, in hot idle if not currently idling
+ AutoPark(const sp<T>& fastThread) : mFastThread(fastThread)
+ {
+ mPreviousCommand = FastThreadState::HOT_IDLE;
+ if (fastThread != nullptr) {
+ auto sq = mFastThread->sq();
+ FastThreadState *state = sq->begin();
+ if (!(state->mCommand & FastThreadState::IDLE)) {
+ mPreviousCommand = state->mCommand;
+ state->mCommand = FastThreadState::HOT_IDLE;
+ sq->end();
+ sq->push(sq->BLOCK_UNTIL_ACKED);
+ } else {
+ sq->end(false /*didModify*/);
+ }
+ }
+ }
+
+ // Remove the FastThread from hot idle if necessary
+ ~AutoPark()
+ {
+ if (!(mPreviousCommand & FastThreadState::IDLE)) {
+ ALOG_ASSERT(mFastThread != nullptr);
+ auto sq = mFastThread->sq();
+ FastThreadState *state = sq->begin();
+ ALOG_ASSERT(state->mCommand == FastThreadState::HOT_IDLE);
+ state->mCommand = mPreviousCommand;
+ sq->end();
+ sq->push(sq->BLOCK_UNTIL_PUSHED);
+ }
+ }
+
+private:
+ const sp<T> mFastThread;
+ // if !&IDLE, holds the FastThread state to restore after new parameters processed
+ FastThreadState::Command mPreviousCommand;
+}; // class AutoPark
+
+} // namespace
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 060ffe9..00304b2 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -138,7 +138,7 @@
} else {
status = ALREADY_EXISTS;
}
- ALOGV("addHandle() %p added handle %p in position %d", this, handle, i);
+ ALOGV("addHandle() %p added handle %p in position %zu", this, handle, i);
mHandles.insertAt(handle, i);
return status;
}
@@ -156,7 +156,7 @@
if (i == size) {
return size;
}
- ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i);
+ ALOGV("removeHandle() %p removed handle %p in position %zu", this, handle, i);
mHandles.removeAt(i);
// if removed from first place, move effect control from this handle to next in line
@@ -380,7 +380,7 @@
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
- ALOGV("configure() %p thread %p buffer %p framecount %d",
+ ALOGV("configure() %p thread %p buffer %p framecount %zu",
this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
@@ -677,7 +677,6 @@
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
(mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
- status_t cmdStatus;
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
@@ -934,7 +933,7 @@
int len = s.length();
if (s.length() > 2) {
- char *str = s.lockBuffer(len);
+ (void) s.lockBuffer(len);
s.unlockBuffer(len - 2);
}
return s;
@@ -1051,7 +1050,7 @@
mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
if (mCblkMemory == 0 ||
(mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
- ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
+ ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
sizeof(effect_param_cblk_t));
mCblkMemory.clear();
return;
@@ -1580,7 +1579,7 @@
}
mEffects.insertAt(effect, idx_insert);
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this,
+ ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
idx_insert);
}
effect->configure();
@@ -1612,7 +1611,7 @@
}
}
mEffects.removeAt(i);
- ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(),
+ ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
break;
}
@@ -1727,7 +1726,7 @@
String8 result;
size_t numEffects = mEffects.size();
- snprintf(buffer, SIZE, " %d effects for session %d\n", numEffects, mSessionId);
+ snprintf(buffer, SIZE, " %zu effects for session %d\n", numEffects, mSessionId);
result.append(buffer);
if (numEffects) {
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index bb83858..d202169 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -104,8 +104,10 @@
} else {
mFormat = mInputSource->format();
mSampleRate = Format_sampleRate(mFormat);
+#if !LOG_NDEBUG
unsigned channelCount = Format_channelCount(mFormat);
ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+#endif
}
dumpState->mSampleRate = mSampleRate;
eitherChanged = true;
@@ -186,7 +188,6 @@
ALOG_ASSERT(mPipeSink != NULL);
ALOG_ASSERT(mReadBuffer != NULL);
if (mReadBufferState < 0) {
- unsigned channelCount = Format_channelCount(mFormat);
memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
mReadBufferState = frameCount;
}
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index d31b8d3..26cd1f9 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -47,7 +47,6 @@
/*static*/ const FastMixerState FastMixer::sInitial;
FastMixer::FastMixer() : FastThread(),
- mSlopNs(0),
// mFastTrackNames
// mGenerations
mOutputSink(NULL),
@@ -338,6 +337,11 @@
if ((command & FastMixerState::MIX) && (mMixer != NULL) && mIsWarm) {
ALOG_ASSERT(mMixerBuffer != NULL);
+
+ // AudioMixer::mState.enabledTracks is undefined if mState.hook == process__validate,
+ // so we keep a side copy of enabledTracks
+ bool anyEnabledTracks = false;
+
// for each track, update volume and check for underrun
unsigned currentTrackMask = current->mTrackMask;
while (currentTrackMask != 0) {
@@ -398,19 +402,26 @@
underruns.mBitFields.mPartial++;
underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
} else {
underruns.mBitFields.mFull++;
underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
ftDump->mUnderruns = underruns;
ftDump->mFramesReady = framesReady;
}
- // process() is CPU-bound
- mMixer->process();
- mMixerBufferState = MIXED;
+ if (anyEnabledTracks) {
+ // process() is CPU-bound
+ mMixer->process();
+ mMixerBufferState = MIXED;
+ } else if (mMixerBufferState != ZEROED) {
+ mMixerBufferState = UNDEFINED;
+ }
+
} else if (mMixerBufferState == MIXED) {
mMixerBufferState = UNDEFINED;
}
@@ -422,7 +433,8 @@
}
if (mMasterMono.load()) { // memory_order_seq_cst
- mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount, true /*limit*/);
+ mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount,
+ true /*limit*/);
}
// prepare the buffer used to write to sink
void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer;
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 3cc7c9f..bdfd8a0 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -57,7 +57,6 @@
static const FastMixerState sInitial;
FastMixerState mPreIdle; // copy of state before we went into idle
- long mSlopNs; // accumulated time we've woken up too early (> 0) or too late (< 0)
int mFastTrackNames[FastMixerState::kMaxFastTracks];
// handles used by mixer to identify tracks
int mGenerations[FastMixerState::kMaxFastTracks];
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a99becf..d85ac87 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -349,7 +349,7 @@
exit:
ALOGV("createAudioPatch() status %d", status);
if (status == NO_ERROR) {
- *handle = audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
+ *handle = (audio_patch_handle_t) audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
newPatch->mHandle = *handle;
newPatch->mHalHandle = halHandle;
mPatches.add(newPatch);
@@ -401,7 +401,7 @@
shift = playbackShift;
}
size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
- ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ",
+ ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
playbackFrameCount, recordFramecount, frameCount);
// create a special record track to capture from record thread
@@ -614,7 +614,6 @@
status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
{
ALOGV("setAudioPortConfig");
- status_t status = NO_ERROR;
sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
if (audioflinger == 0) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e2932f1..b322a45 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -71,6 +71,8 @@
#include <cpustats/ThreadCpuUsage.h>
#endif
+#include "AutoPark.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -707,8 +709,6 @@
status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
{
- status_t status;
-
ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
Mutex::Autolock _l(mLock);
@@ -727,7 +727,7 @@
return status;
}
mConfigEvents.add(event);
- ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType);
+ ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
mWaitWorkCV.signal();
mLock.unlock();
{
@@ -819,7 +819,7 @@
bool configChanged = false;
while (!mConfigEvents.isEmpty()) {
- ALOGV("processConfigEvents_l() remaining events %d", mConfigEvents.size());
+ ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
sp<ConfigEvent> event = mConfigEvents[0];
mConfigEvents.removeAt(0);
switch (event->mType) {
@@ -918,7 +918,7 @@
}
const int len = s.length();
if (len > 2) {
- char *str = s.lockBuffer(len); // needed?
+ (void) s.lockBuffer(len); // needed?
s.unlockBuffer(len - 2); // remove trailing ", "
}
return s;
@@ -951,7 +951,7 @@
dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
- dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize);
+ dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
dprintf(fd, " Channel count: %u\n", mChannelCount);
dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
@@ -1687,10 +1687,10 @@
size_t numtracks = mTracks.size();
size_t numactive = mActiveTracks.size();
- dprintf(fd, " %d Tracks", numtracks);
+ dprintf(fd, " %zu Tracks", numtracks);
size_t numactiveseen = 0;
if (numtracks) {
- dprintf(fd, " of which %d are active\n", numactive);
+ dprintf(fd, " of which %zu are active\n", numactive);
Track::appendDumpHeader(result);
for (size_t i = 0; i < numtracks; ++i) {
sp<Track> track = mTracks[i];
@@ -1731,7 +1731,8 @@
dumpBase(fd, args);
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
- dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
+ dprintf(fd, " Last write occurred (msecs): %llu\n",
+ (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -1785,20 +1786,6 @@
// client expresses a preference for FAST, but we get the final say
if (*flags & IAudioFlinger::TRACK_FAST) {
if (
- // either of these use cases:
- (
- // use case 1: shared buffer with any frame count
- (
- (sharedBuffer != 0)
- ) ||
- // use case 2: frame count is default or at least as large as HAL
- (
- // we formerly checked for a callback handler (non-0 tid),
- // but that is no longer required for TRANSFER_OBTAIN mode
- ((frameCount == 0) ||
- (frameCount >= mFrameCount))
- )
- ) &&
// PCM data
audio_is_linear_pcm(format) &&
// TODO: extract as a data library function that checks that a computationally
@@ -1816,20 +1803,20 @@
// FIXME test that MixerThread for this fast track has a capable output HAL
// FIXME add a permission test also?
) {
- // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
- if (frameCount == 0) {
+ // static tracks can have any nonzero framecount, streaming tracks check against minimum.
+ if (sharedBuffer == 0) {
// read the fast track multiplier property the first time it is needed
int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
if (ok != 0) {
ALOGE("%s pthread_once failed: %d", __func__, ok);
}
- frameCount = mFrameCount * sFastTrackMultiplier;
+ frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
}
- ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%d "
- "mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+ "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
"sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
@@ -2261,7 +2248,7 @@
mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
mFrameCount = mBufferSize / mFrameSize;
if (mFrameCount & 15) {
- ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
+ ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -2347,7 +2334,7 @@
if (mType == MIXER || mType == DUPLICATING) {
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
}
- ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
+ ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
mNormalFrameCount);
// Check if we want to throttle the processing to no more than 2x normal rate
@@ -2684,7 +2671,7 @@
void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
{
- ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+ ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
this, streamType, mTracks.size());
Mutex::Autolock _l(mLock);
@@ -3128,7 +3115,7 @@
if ((now - lastWarning) > kWarningThrottleNs) {
ATRACE_NAME("underrun");
ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- ns2ms(delta), mNumDelayedWrites, this);
+ (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
lastWarning = now;
}
}
@@ -3284,31 +3271,9 @@
status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
- status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
+ AutoPark<FastMixer> park(mFastMixer);
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
+ status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
return status;
}
@@ -3391,33 +3356,10 @@
status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
{
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
+ AutoPark<FastMixer> park(mFastMixer);
status_t status = PlaybackThread::releaseAudioPatch_l(handle);
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
-
return status;
}
@@ -3473,8 +3415,8 @@
// mNormalSink below
{
ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
- ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
- "mFrameCount=%d, mNormalFrameCount=%d",
+ ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%zu, "
+ "mFrameCount=%zu, mNormalFrameCount=%zu",
mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
mNormalFrameCount);
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
@@ -3489,7 +3431,12 @@
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
- ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
// initialize fast mixer depending on configuration
@@ -3524,7 +3471,9 @@
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
+#ifdef TEE_SINK
NBAIO_Format origformat = format;
+#endif
// adjust format to match that of the Fast Mixer
ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
@@ -3536,7 +3485,12 @@
MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
const NBAIO_Format offers[1] = {format};
size_t numCounterOffers = 0;
- ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
monoPipe->setAvgFrames((mScreenState & 1) ?
(monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
@@ -4341,7 +4295,6 @@
}
} // local variable scope to avoid goto warning
-track_is_ready: ;
}
@@ -4454,20 +4407,7 @@
status = NO_ERROR;
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
+ AutoPark<FastMixer> park(mFastMixer);
AudioParameter param = AudioParameter(keyValuePair);
int value;
@@ -4562,26 +4502,12 @@
}
}
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
-
return reconfig || a2dpDeviceChanged;
}
void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
PlaybackThread::dumpInternals(fd, args);
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
@@ -4662,7 +4588,6 @@
void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
{
- audio_track_cblk_t* cblk = track->cblk();
float left, right;
if (mMasterMute || mStreamTypes[track->streamType()].mute) {
@@ -4751,7 +4676,9 @@
}
Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
audio_track_cblk_t* cblk = track->cblk();
+#endif
// Only consider last track started for volume and mixer state control.
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
@@ -4959,7 +4886,7 @@
// For compressed offload, use faster sleep time when underruning until more than an
// entire buffer was written to the audio HAL
if (!audio_has_proportional_frames(mFormat) &&
- (mType == OFFLOAD) && (mBytesWritten < mBufferSize)) {
+ (mType == OFFLOAD) && (mBytesWritten < (int64_t) mBufferSize)) {
mSleepTimeUs = kDirectMinSleepTimeUs;
} else {
mSleepTimeUs = mActiveSleepTimeUs;
@@ -4996,6 +4923,10 @@
bool trackPaused = false;
bool trackStopped = false;
+ if ((mType == DIRECT) && audio_is_linear_pcm(mFormat) && !usesHwAvSync()) {
+ return !mStandby;
+ }
+
// do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
// after a timeout and we will enter standby then.
if (mTracks.size() > 0) {
@@ -5266,7 +5197,7 @@
bool doHwPause = false;
bool doHwResume = false;
- ALOGV("OffloadThread::prepareTracks_l active tracks %d", count);
+ ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
// find out which tracks need to be processed
for (size_t i = 0; i < count; i++) {
@@ -5276,7 +5207,9 @@
continue;
}
Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
audio_track_cblk_t* cblk = track->cblk();
+#endif
// Only consider last track started for volume and mixer state control.
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
@@ -5728,7 +5661,12 @@
mInputSource = new AudioStreamInSource(input->stream);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
- ssize_t index = mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
// initialize fast capture depending on configuration
@@ -5806,7 +5744,7 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
#endif
@@ -6078,8 +6016,10 @@
}
// otherwise use the HAL / AudioStreamIn directly
} else {
+ ATRACE_BEGIN("read");
ssize_t bytesRead = mInput->stream->read(mInput->stream,
(uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
+ ATRACE_END();
if (bytesRead < 0) {
framesRead = bytesRead;
} else {
@@ -6110,7 +6050,7 @@
// ALOGD("%s", mTimestamp.toString().c_str());
if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
- ALOGE("read failed: framesRead=%d", framesRead);
+ ALOGE("read failed: framesRead=%zd", framesRead);
// Force input into standby so that it tries to recover at next read attempt
inputStandBy();
sleepUs = kRecordThreadSleepUs;
@@ -6346,10 +6286,10 @@
// there are sufficient fast track slots available
mFastTrackAvail
) {
- ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
+ ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+ ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
"format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
"hasFastCapture=%d tid=%d mFastTrackAvail=%d",
frameCount, mFrameCount, mPipeFramesP2,
@@ -6642,9 +6582,9 @@
size_t numtracks = mTracks.size();
size_t numactive = mActiveTracks.size();
size_t numactiveseen = 0;
- dprintf(fd, " %d Tracks", numtracks);
+ dprintf(fd, " %zu Tracks", numtracks);
if (numtracks) {
- dprintf(fd, " of which %d are active\n", numactive);
+ dprintf(fd, " of which %zu are active\n", numactive);
RecordTrack::appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<RecordTrack> track = mTracks[i];
@@ -7037,6 +6977,10 @@
AudioParameter param = AudioParameter(keyValuePair);
int value;
+
+ // scope for AutoPark extends to end of method
+ AutoPark<FastCapture> park(mFastCapture);
+
// TODO Investigate when this code runs. Check with audio policy when a sample rate and
// channel count change can be requested. Do we mandate the first client defines the
// HAL sampling rate and channel count or do we allow changes on the fly?
@@ -7308,7 +7252,7 @@
{
ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
ALOGW_IF(mEffectChains.size() != 1,
- "removeEffectChain_l() %p invalid chain size %d on thread %p",
+ "removeEffectChain_l() %p invalid chain size %zu on thread %p",
chain.get(), mEffectChains.size(), this);
if (mEffectChains.size() == 1) {
mEffectChains.removeAt(0);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f575918..7cbb6b8 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -122,7 +122,7 @@
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory == 0 ||
(mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
- ALOGE("not enough memory for AudioTrack size=%u", size);
+ ALOGE("not enough memory for AudioTrack size=%zu", size);
client->heap()->dump("AudioTrack");
mCblkMemory.clear();
return;
@@ -959,9 +959,9 @@
if (isOffloaded()) {
complete = true;
} else if (isDirect() || isFastTrack()) { // these do not go through linear map
- complete = framesWritten >= mPresentationCompleteFrames;
+ complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
} else { // Normal tracks, OutputTracks, and PatchTracks
- complete = framesWritten >= mPresentationCompleteFrames
+ complete = framesWritten >= (int64_t) mPresentationCompleteFrames
&& mAudioTrackServerProxy->isDrained();
}
@@ -1016,7 +1016,7 @@
if (isTerminated() || mState == PAUSED ||
((framesReady() == 0) && ((mSharedBuffer != 0) ||
(mState == STOPPED)))) {
- ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
+ ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %zu",
mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
event->cancel();
return INVALID_OPERATION;
@@ -1137,7 +1137,7 @@
mOutBuffer.frameCount = 0;
playbackThread->mTracks.add(this);
ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
- "frameCount %u, mChannelMask 0x%08x",
+ "frameCount %zu, mChannelMask 0x%08x",
mCblk, mBuffer,
frameCount, mChannelMask);
// since client and server are in the same process,
@@ -1246,7 +1246,7 @@
mBufferQueue.removeAt(0);
free(pInBuffer->mBuffer);
delete pInBuffer;
- ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
+ ALOGV("OutputTrack::write() %p thread %p released overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
} else {
break;
@@ -1265,7 +1265,7 @@
pInBuffer->raw = pInBuffer->mBuffer;
memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
mBufferQueue.add(pInBuffer);
- ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
+ ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
} else {
ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
diff --git a/services/audioflinger/audio-resampler/Android.mk b/services/audioflinger/audio-resampler/Android.mk
index ba37b19..bb2807c 100644
--- a/services/audioflinger/audio-resampler/Android.mk
+++ b/services/audioflinger/audio-resampler/Android.mk
@@ -11,4 +11,6 @@
LOCAL_SHARED_LIBRARIES := libutils liblog
+LOCAL_CFLAGS += -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 6182de0..3505e0f 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -23,6 +23,8 @@
LOCAL_MODULE := resampler_tests
LOCAL_MODULE_TAGS := tests
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_NATIVE_TEST)
#
@@ -61,4 +63,6 @@
LOCAL_CXX_STL := libc++
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 8218edd..8b45adc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -45,13 +45,14 @@
libmedia_helper \
libaudiopolicycomponents
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicyservice
LOCAL_CFLAGS += -fvisibility=hidden
include $(BUILD_SHARED_LIBRARY)
-
ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
include $(CLEAR_VARS)
@@ -101,6 +102,8 @@
LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicymanagerdefault
include $(BUILD_SHARED_LIBRARY)
@@ -122,6 +125,8 @@
$(TOPDIR)frameworks/av/services/audiopolicy/common/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicymanager
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 5c81410..3b4ae6b 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -58,6 +58,8 @@
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH)/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE := libaudiopolicycomponents
include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
index 7e1e24d..4ab7cf0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
@@ -50,7 +50,7 @@
};
class AudioSourceCollection :
- public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
+ public DefaultKeyedVector< audio_io_handle_t, sp<AudioSourceDescriptor> >
{
public:
status_t dump(int fd) const;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index d4992b0..6dacaa4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -29,7 +29,7 @@
AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
- mProfile(profile), mPatchHandle(0), mId(0)
+ mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -48,7 +48,7 @@
audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
{
if (mProfile == 0) {
- return 0;
+ return AUDIO_MODULE_HANDLE_NONE;
}
return mProfile->getModuleHandle();
}
@@ -157,7 +157,7 @@
return mSessions.removeSession(session);
}
-audio_port_handle_t AudioInputDescriptor::getPatchHandle() const
+audio_patch_handle_t AudioInputDescriptor::getPatchHandle() const
{
return mPatchHandle;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index c5fee50..79bbc54 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,7 +34,7 @@
AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
AudioPolicyClientInterface *clientInterface)
: mPort(port), mDevice(AUDIO_DEVICE_NONE),
- mClientInterface(clientInterface), mPatchHandle(0), mId(0)
+ mClientInterface(clientInterface), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
{
// clear usage count for all stream types
for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 9c28e8f..b8c0550 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -31,7 +31,7 @@
mHandle(static_cast<audio_patch_handle_t>(android_atomic_inc(&mNextUniqueId))),
mPatch(*patch),
mUid(uid),
- mAfPatchHandle(0)
+ mAfPatchHandle(AUDIO_PATCH_HANDLE_NONE)
{
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 4af3d54..7ee98b6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "APM::AudioPolicyMix"
+#define LOG_TAG "APM_AudioPolicyMix"
//#define LOG_NDEBUG 0
#include "AudioPolicyMix.h"
@@ -107,6 +107,7 @@
status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
sp<SwAudioOutputDescriptor> &desc)
{
+ ALOGV("getOutputForAttr() querying %zu mixes:", size());
desc = 0;
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
@@ -129,7 +130,8 @@
// iterate over all mix criteria to list what rules this mix contains
for (size_t j = 0; j < mix->mCriteria.size(); j++) {
- ALOGV("getOutputForAttr: inspecting mix %zu of %zu", i, mix->mCriteria.size());
+ ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
+ i, j, mix->mCriteria.size());
// if there is an address match, prioritize that match
if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 19b179e..17ed537 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -45,7 +45,7 @@
audio_module_handle_t AudioPort::getModuleHandle() const
{
if (mModule == 0) {
- return 0;
+ return AUDIO_MODULE_HANDLE_NONE;
}
return mModule->mHandle;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index cf7c8fc..fe03429 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -245,7 +245,7 @@
// without the test?
// This has been demonstrated to NOT be true (at start up)
// ALOG_ASSERT(mModule != NULL);
- dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_IO_HANDLE_NONE;
+ dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_MODULE_HANDLE_NONE;
strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index b7c7879..2d67bd2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -27,7 +27,7 @@
HwModule::HwModule(const char *name, uint32_t halVersion)
: mName(String8(name)),
- mHandle(0),
+ mHandle(AUDIO_MODULE_HANDLE_NONE),
mHalVersion(halVersion)
{
}
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index e6b5f85..6dba75b 100755
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -35,6 +35,7 @@
$(call include-path-for, audio-utils) \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE := libaudiopolicyengineconfigurable
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index 6348648..0e44f2c 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -30,6 +30,8 @@
libparameter \
liblog \
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_STATIC_LIBRARIES := libpfw_utility
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 096f913..f4283a8 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -18,6 +18,8 @@
LOCAL_STATIC_LIBRARIES := \
libmedia_helper \
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicypfwwrapper
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index bb12714..85d1822 100755
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -29,6 +29,7 @@
$(call include-path-for, bionic) \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE := libaudiopolicyenginedefault
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index f8ba3f2..f2224fd 100755
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -231,20 +231,20 @@
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
- const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
- const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+ DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+ DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
- return getDeviceForStrategyInt(strategy, (DeviceVector&)availableOutputDevices,
+ return getDeviceForStrategyInt(strategy, availableOutputDevices,
availableInputDevices, outputs);
}
audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector &availableOutputDevices,
- const DeviceVector &availableInputDevices,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
const SwAudioOutputCollection &outputs) const
{
uint32_t device = AUDIO_DEVICE_NONE;
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index ed93d1c..606ad28 100755
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -126,8 +126,8 @@
routing_strategy getStrategyForUsage(audio_usage_t usage);
audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector &availableOutputDevices,
- const DeviceVector &availableInputDevices,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
const SwAudioOutputCollection &outputs) const;
audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
audio_mode_t mPhoneState; /**< current phone state. */
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index b2b014a..d25dabd 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AudioPolicyManager"
+#define LOG_TAG "APM_AudioPolicyManager"
//#define LOG_NDEBUG 0
//#define VERY_VERBOSE_LOGGING
@@ -2051,7 +2051,7 @@
String8 address = mixes[i].mDeviceAddress;
if (mPolicyMixes.registerMix(address, mixes[i], 0 /*output desc*/) != NO_ERROR) {
- ALOGE(" Error regisering mix %zu for address %s", i, address.string());
+ ALOGE(" Error registering mix %zu for address %s", i, address.string());
res = INVALID_OPERATION;
break;
}
@@ -2076,21 +2076,25 @@
address.string(), "remote-submix");
}
} else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
- ALOGV("registerPolicyMixes() mix %zu of %zu is RENDER", i, mixes.size());
String8 address = mixes[i].mDeviceAddress;
-
audio_devices_t device = mixes[i].mDeviceType;
+ ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
+ i, mixes.size(), device, address.string());
+ bool foundOutput = false;
for (size_t j = 0 ; j < mOutputs.size() ; j++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
if ((patch != 0) && (patch->mPatch.num_sinks != 0)
&& (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
&& (patch->mPatch.sinks[0].ext.device.type == device)
- && (patch->mPatch.sinks[0].ext.device.address == address)) {
+ && (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
if (mPolicyMixes.registerMix(address, mixes[i], desc) != NO_ERROR) {
res = INVALID_OPERATION;
+ } else {
+ foundOutput = true;
}
break;
}
@@ -2098,7 +2102,12 @@
if (res != NO_ERROR) {
ALOGE(" Error registering mix %zu for device 0x%X addr %s",
- i,device, address.string());
+ i, device, address.string());
+ res = INVALID_OPERATION;
+ break;
+ } else if (!foundOutput) {
+ ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
+ i, device, address.string());
res = INVALID_OPERATION;
break;
}
@@ -2403,7 +2412,7 @@
return INVALID_OPERATION;
}
} else {
- *handle = 0;
+ *handle = AUDIO_PATCH_HANDLE_NONE;
}
if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
@@ -4683,7 +4692,7 @@
sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, delayMs);
ALOGV("resetOutputDevice() releaseAudioPatch returned %d", status);
- outputDesc->setPatchHandle(0);
+ outputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
removeAudioPatch(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
@@ -4769,7 +4778,7 @@
sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
ALOGV("resetInputDevice() releaseAudioPatch returned %d", status);
- inputDesc->setPatchHandle(0);
+ inputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
removeAudioPatch(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
@@ -5202,23 +5211,30 @@
// Modify the list of surround sound formats supported.
void AudioPolicyManager::filterSurroundFormats(FormatVector &formats) {
+ // TODO Change the ALOGIs to ALOGVs in this function after the feature is verified.
+
+ // TODO Set this based on Config properties.
+ const bool alwaysForceAC3 = true;
audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
ALOGI("%s: forced use = %d", __FUNCTION__, forceUse);
// Analyze original support for various formats.
- bool supportsRawSurround = false;
+ bool supportsAC3 = false;
+ bool supportsOtherSurround = false;
bool supportsIEC61937 = false;
for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
audio_format_t format = formats[formatIndex];
- ALOGI("%s: original formats: #%x", __FUNCTION__, format);
+ ALOGI("%s: original formats: 0x%08x", __FUNCTION__, format);
switch (format) {
case AUDIO_FORMAT_AC3:
+ supportsAC3 = true;
+ break;
case AUDIO_FORMAT_E_AC3:
case AUDIO_FORMAT_DTS:
case AUDIO_FORMAT_DTS_HD:
- supportsRawSurround = true;
+ supportsOtherSurround = true;
break;
case AUDIO_FORMAT_IEC61937:
supportsIEC61937 = true;
@@ -5227,55 +5243,67 @@
break;
}
}
- ALOGI("%s: supportsRawSurround = %d, supportsIEC61937 = %d",
- __FUNCTION__, supportsRawSurround, supportsIEC61937);
+ ALOGI("%s: original, supportsAC3 = %d, supportsOtherSurround = %d, supportsIEC61937 = %d",
+ __FUNCTION__, supportsAC3, supportsOtherSurround, supportsIEC61937);
// Modify formats based on surround preferences.
// If NEVER, remove support for surround formats.
- if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER)
- && (supportsRawSurround || supportsIEC61937)) {
- // Remove surround sound related formats.
- for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
- audio_format_t format = formats[formatIndex];
- switch(format) {
- case AUDIO_FORMAT_AC3:
- case AUDIO_FORMAT_E_AC3:
- case AUDIO_FORMAT_DTS:
- case AUDIO_FORMAT_DTS_HD:
- case AUDIO_FORMAT_IEC61937:
- ALOGI("%s: remove #%x", __FUNCTION__, format);
- formats.removeAt(formatIndex);
- break;
- default:
- formatIndex++; // keep it
- break;
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+ if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
+ // Remove surround sound related formats.
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+ audio_format_t format = formats[formatIndex];
+ switch(format) {
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_HD:
+ case AUDIO_FORMAT_IEC61937:
+ ALOGI("%s: remove format 0x%08x", __FUNCTION__, format);
+ formats.removeAt(formatIndex);
+ break;
+ default:
+ formatIndex++; // keep it
+ break;
+ }
}
+ supportsAC3 = false;
+ supportsOtherSurround = false;
+ supportsIEC61937 = false;
}
- supportsRawSurround = false;
- supportsIEC61937 = false;
- }
- // If ALWAYS, add support for raw surround formats if all are missing.
- // This assumes that if any of these formats are reported by the HAL
- // then the report is valid and should not be modified.
- if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS)
- && !supportsRawSurround) {
- formats.add(AUDIO_FORMAT_AC3);
- formats.add(AUDIO_FORMAT_E_AC3);
- formats.add(AUDIO_FORMAT_DTS);
- formats.add(AUDIO_FORMAT_DTS_HD);
- supportsRawSurround = true;
- }
- // Add support for IEC61937 if raw surround supported.
- // The HAL could do this but add it here, just in case.
- if (supportsRawSurround && !supportsIEC61937) {
- formats.add(AUDIO_FORMAT_IEC61937);
- // supportsIEC61937 = true;
+ } else { // AUTO or ALWAYS
+ // Most TVs support AC3 even if they do not report it in the EDID.
+ if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
+ && !supportsAC3) {
+ formats.add(AUDIO_FORMAT_AC3);
+ supportsAC3 = true;
+ }
+
+ // If ALWAYS, add support for raw surround formats if all are missing.
+ // This assumes that if any of these formats are reported by the HAL
+ // then the report is valid and should not be modified.
+ if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS)
+ && !supportsOtherSurround) {
+ formats.add(AUDIO_FORMAT_E_AC3);
+ formats.add(AUDIO_FORMAT_DTS);
+ formats.add(AUDIO_FORMAT_DTS_HD);
+ supportsOtherSurround = true;
+ }
+
+ // Add support for IEC61937 if any raw surround supported.
+ // The HAL could do this but add it here, just in case.
+ if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
+ formats.add(AUDIO_FORMAT_IEC61937);
+ supportsIEC61937 = true;
+ }
}
// Just for debugging.
for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
audio_format_t format = formats[formatIndex];
- ALOGI("%s: final formats: #%x", __FUNCTION__, format);
+ ALOGI("%s: final formats: 0x%08x", __FUNCTION__, format);
}
+ ALOGI("%s: final, supportsAC3 = %d, supportsOtherSurround = %d, supportsIEC61937 = %d",
+ __FUNCTION__, supportsAC3, supportsOtherSurround, supportsIEC61937);
}
void AudioPolicyManager::updateAudioProfiles(audio_io_handle_t ioHandle,
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 08f9cc1..dbcc070 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -30,7 +30,7 @@
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
ALOGW("%s: could not get AudioFlinger", __func__);
- return 0;
+ return AUDIO_MODULE_HANDLE_NONE;
}
return af->loadHwModule(name);
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
index 580d740..09a931f 100644
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
@@ -111,7 +111,7 @@
uint32_t *pLatencyMs,
audio_output_flags_t flags)
{
- return open_output((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+ return open_output(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask,
pLatencyMs, flags, NULL);
}
@@ -219,7 +219,7 @@
audio_channel_mask_t *pChannelMask,
audio_in_acoustics_t acoustics __unused)
{
- return open_input((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+ return open_input(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask);
}
audio_io_handle_t aps_open_input_on_module(void *service __unused,
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index af8fc74..0c88dad 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -912,7 +912,8 @@
}
Status CameraService::validateConnectLocked(const String8& cameraId,
- const String8& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid) const {
+ const String8& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid,
+ /*out*/int& originalClientPid) const {
int callingPid = getCallingPid();
int callingUid = getCallingUid();
@@ -954,6 +955,7 @@
// Only use passed in clientPid to check permission. Use calling PID as the client PID that's
// connected to camera service directly.
+ originalClientPid = clientPid;
clientPid = callingPid;
if (!mModule) {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index e29b01c..11b1351 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -488,8 +488,14 @@
virtual void onFirstRef();
// Check if we can connect, before we acquire the service lock.
+ // The returned originalClientPid is the PID of the original process that wants to connect to
+ // camera.
+ // The returned clientPid is the PID of the client that directly connects to camera.
+ // originalClientPid and clientPid are usually the same except when the application uses
+ // mediaserver to connect to camera (using MediaRecorder to connect to camera). In that case,
+ // clientPid is the PID of mediaserver and originalClientPid is the PID of the application.
binder::Status validateConnectLocked(const String8& cameraId, const String8& clientName8,
- /*inout*/int& clientUid, /*inout*/int& clientPid) const;
+ /*inout*/int& clientUid, /*inout*/int& clientPid, /*out*/int& originalClientPid) const;
// Handle active client evictions, and update service state.
// Only call with with mServiceLock held.
@@ -819,6 +825,8 @@
String8 clientName8(clientPackageName);
+ int originalClientPid = 0;
+
ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
"Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
(halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
@@ -840,7 +848,7 @@
// Enforce client permissions and do basic sanity checks
if(!(ret = validateConnectLocked(cameraId, clientName8,
- /*inout*/clientUid, /*inout*/clientPid)).isOk()) {
+ /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
return ret;
}
@@ -857,7 +865,7 @@
sp<BasicClient> clientTmp = nullptr;
std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
- if ((err = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
+ if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
/*out*/&partial)) != NO_ERROR) {
switch (err) {
@@ -909,9 +917,30 @@
if ((err = client->initialize(mModule)) != OK) {
ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
- strerror(-err), err);
+ // Errors could be from the HAL module open call or from AppOpsManager
+ switch(err) {
+ case BAD_VALUE:
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Camera \"%s\" is already open", cameraId.string());
+ case -EUSERS:
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Too many cameras already open, cannot open camera \"%s\"",
+ cameraId.string());
+ case PERMISSION_DENIED:
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to open camera \"%s\"", cameraId.string());
+ case -EACCES:
+ return STATUS_ERROR_FMT(ERROR_DISABLED,
+ "Camera \"%s\" disabled by policy", cameraId.string());
+ case -ENODEV:
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
+ strerror(-err), err);
+ }
}
// Update shim paremeters for legacy clients
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 37f4c8f..1086340 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -479,6 +479,12 @@
void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
Mutex::Autolock lock(mLock);
if (checkPidAndHardware() != NO_ERROR) return;
+ if (mem == nullptr) {
+ android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26164272",
+ IPCThreadState::self()->getCallingUid(), nullptr, 0);
+ return;
+ }
+
mHardware->releaseRecordingFrame(mem);
}
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 61e1442..e3d6906 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -41,6 +41,7 @@
mNewAEState(false),
mNewFrameReceived(false),
mNewCaptureReceived(false),
+ mNewCaptureErrorCnt(0),
mShutterNotified(false),
mHalNotifiedShutter(false),
mShutterCaptureId(-1),
@@ -131,7 +132,7 @@
}
void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
- sp<MemoryBase> captureBuffer) {
+ sp<MemoryBase> captureBuffer, bool captureError) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
Mutex::Autolock l(mInputMutex);
@@ -139,6 +140,11 @@
mCaptureBuffer = captureBuffer;
if (!mNewCaptureReceived) {
mNewCaptureReceived = true;
+ if (captureError) {
+ mNewCaptureErrorCnt++;
+ } else {
+ mNewCaptureErrorCnt = 0;
+ }
mNewCaptureSignal.signal();
}
}
@@ -623,6 +629,17 @@
break;
}
}
+ if (mNewCaptureReceived) {
+ if (mNewCaptureErrorCnt > kMaxRetryCount) {
+ ALOGW("Exceeding multiple retry limit of %d due to buffer drop", kMaxRetryCount);
+ return DONE;
+ } else if (mNewCaptureErrorCnt > 0) {
+ ALOGW("Capture error happened, retry %d...", mNewCaptureErrorCnt);
+ mNewCaptureReceived = false;
+ return STANDARD_CAPTURE;
+ }
+ }
+
if (mTimeoutCount <= 0) {
ALOGW("Timed out waiting for capture to complete");
return DONE;
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index b05207e..a7c61d2 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -69,7 +69,7 @@
virtual void onResultAvailable(const CaptureResult &result);
// Notifications from the JPEG processor
- void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
+ void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer, bool captureError);
void dump(int fd, const Vector<String16>& args);
@@ -94,6 +94,7 @@
Condition mNewFrameSignal;
bool mNewCaptureReceived;
+ int32_t mNewCaptureErrorCnt;
nsecs_t mCaptureTimestamp;
sp<MemoryBase> mCaptureBuffer;
Condition mNewCaptureSignal;
@@ -110,6 +111,7 @@
static const int kMaxTimeoutsForPrecaptureStart = 10; // 1 sec
static const int kMaxTimeoutsForPrecaptureEnd = 20; // 2 sec
static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec
+ static const int kMaxRetryCount = 3; // 3 retries in case of buffer drop
wp<Camera2Client> mClient;
wp<ZslProcessor> mZslProcessor;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index e97618c..ffe96fc 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -42,7 +42,8 @@
mDevice(client->getCameraDevice()),
mSequencer(sequencer),
mId(client->getCameraId()),
- mCaptureAvailable(false),
+ mCaptureDone(false),
+ mCaptureSuccess(false),
mCaptureStreamId(NO_STREAM) {
}
@@ -53,9 +54,26 @@
void JpegProcessor::onFrameAvailable(const BufferItem& /*item*/) {
Mutex::Autolock l(mInputMutex);
- if (!mCaptureAvailable) {
- mCaptureAvailable = true;
- mCaptureAvailableSignal.signal();
+ ALOGV("%s", __FUNCTION__);
+ if (!mCaptureDone) {
+ mCaptureDone = true;
+ mCaptureSuccess = true;
+ mCaptureDoneSignal.signal();
+ }
+}
+
+void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+ // Intentionally left empty
+}
+
+void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mInputMutex);
+ ALOGV("%s", __FUNCTION__);
+
+ if (bufferInfo.mError) {
+ mCaptureDone = true;
+ mCaptureSuccess = false;
+ mCaptureDoneSignal.signal();
}
}
@@ -154,6 +172,12 @@
return res;
}
+ res = device->addBufferListenerForStream(mCaptureStreamId, this);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't add buffer listener: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
}
return OK;
}
@@ -192,24 +216,26 @@
bool JpegProcessor::threadLoop() {
status_t res;
+ bool captureSuccess = false;
{
Mutex::Autolock l(mInputMutex);
- while (!mCaptureAvailable) {
- res = mCaptureAvailableSignal.waitRelative(mInputMutex,
+
+ while (!mCaptureDone) {
+ res = mCaptureDoneSignal.waitRelative(mInputMutex,
kWaitDuration);
if (res == TIMED_OUT) return true;
}
- mCaptureAvailable = false;
+
+ captureSuccess = mCaptureSuccess;
+ mCaptureDone = false;
}
- do {
- res = processNewCapture();
- } while (res == OK);
+ res = processNewCapture(captureSuccess);
return true;
}
-status_t JpegProcessor::processNewCapture() {
+status_t JpegProcessor::processNewCapture(bool captureSuccess) {
ATRACE_CALL();
status_t res;
sp<Camera2Heap> captureHeap;
@@ -217,7 +243,7 @@
CpuConsumer::LockedBuffer imgBuffer;
- {
+ if (captureSuccess) {
Mutex::Autolock l(mInputMutex);
if (mCaptureStreamId == NO_STREAM) {
ALOGW("%s: Camera %d: No stream is available", __FUNCTION__, mId);
@@ -269,7 +295,7 @@
sp<CaptureSequencer> sequencer = mSequencer.promote();
if (sequencer != 0) {
- sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer);
+ sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer, !captureSuccess);
}
return OK;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index ac6f5c7..7187ad9 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -41,7 +41,8 @@
* Still image capture output image processing
*/
class JpegProcessor:
- public Thread, public CpuConsumer::FrameAvailableListener {
+ public Thread, public CpuConsumer::FrameAvailableListener,
+ public camera3::Camera3StreamBufferListener {
public:
JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~JpegProcessor();
@@ -49,6 +50,10 @@
// CpuConsumer listener implementation
void onFrameAvailable(const BufferItem& item);
+ // Camera3StreamBufferListener implementation
+ void onBufferAcquired(const BufferInfo& bufferInfo) override;
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+
status_t updateStream(const Parameters ¶ms);
status_t deleteStream();
int getStreamId() const;
@@ -61,8 +66,9 @@
int mId;
mutable Mutex mInputMutex;
- bool mCaptureAvailable;
- Condition mCaptureAvailableSignal;
+ bool mCaptureDone;
+ bool mCaptureSuccess;
+ Condition mCaptureDoneSignal;
enum {
NO_STREAM = -1
@@ -75,7 +81,7 @@
virtual bool threadLoop();
- status_t processNewCapture();
+ status_t processNewCapture(bool captureSuccess);
size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 316cfda..51c8148 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -342,7 +342,11 @@
}
status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
- if (err != OK) {
+ if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Unsupported set of inputs/outputs provided",
+ mCameraId);
+ } else if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
"Camera %d: Error configuring streams: %s (%d)",
mCameraId, strerror(-err), err);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index ccb3bc8..d570d4b 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -296,6 +296,12 @@
virtual status_t tearDown(int streamId) = 0;
/**
+ * Add buffer listener for a particular stream in the device.
+ */
+ virtual status_t addBufferListenerForStream(int streamId,
+ wp<camera3::Camera3StreamBufferListener> listener) = 0;
+
+ /**
* Prepare stream by preallocating up to maxCount buffers for it asynchronously.
* Calls notifyPrepared() once allocation is complete.
*/
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 331f10d..1caf157 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1652,6 +1652,26 @@
return stream->tearDown();
}
+status_t Camera3Device::addBufferListenerForStream(int streamId,
+ wp<Camera3StreamBufferListener> listener) {
+ ATRACE_CALL();
+ ALOGV("%s: Camera %d: Adding buffer listener for stream %d", __FUNCTION__, mId, streamId);
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ sp<Camera3StreamInterface> stream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ CLOGE("Stream %d does not exist", streamId);
+ return BAD_VALUE;
+ }
+
+ stream = mOutputStreams.editValueAt(outputStreamIdx);
+ stream->addBufferListener(listener);
+
+ return OK;
+}
+
uint32_t Camera3Device::getDeviceVersion() {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ba092d0..96ca7b7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -146,6 +146,9 @@
virtual status_t tearDown(int streamId);
+ virtual status_t addBufferListenerForStream(int streamId,
+ wp<camera3::Camera3StreamBufferListener> listener);
+
virtual status_t prepare(int maxCount, int streamId);
virtual uint32_t getDeviceVersion();
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 7dab2e3..f781ded 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -116,6 +116,7 @@
bufferFound = true;
bufferItem = tmp;
mBuffersInFlight.erase(it);
+ break;
}
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 50f7a91..a4714a7 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -560,7 +560,7 @@
}
void Camera3Stream::fireBufferListenersLocked(
- const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
+ const camera3_stream_buffer& buffer, bool acquired, bool output) {
List<wp<Camera3StreamBufferListener> >::iterator it, end;
// TODO: finish implementing
@@ -568,6 +568,7 @@
Camera3StreamBufferListener::BufferInfo info =
Camera3StreamBufferListener::BufferInfo();
info.mOutput = output;
+ info.mError = (buffer.status == CAMERA3_BUFFER_STATUS_ERROR);
// TODO: rest of fields
for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 62ea6c0..2db333d 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -34,6 +34,7 @@
uint32_t mScalingMode;
int64_t mTimestamp;
uint64_t mFrameNumber;
+ bool mError;
};
// Buffer was acquired by the HAL
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
index 4be96d5..0afaa15 100644
--- a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
+++ b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
@@ -40,6 +40,7 @@
statfs64: 1
sched_setscheduler: 1
fstatat64: 1
+ugetrlimit: 1
# for attaching to debuggerd on process crash
sigaction: 1
@@ -47,3 +48,4 @@
socket: 1
connect: 1
fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
index 5bbd4e3..cc9a580 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
@@ -32,6 +32,7 @@
gettid: 1
rt_sigprocmask: 1
sched_yield: 1
+ugetrlimit: 1
# for attaching to debuggerd on process crash
sigaction: 1
@@ -39,3 +40,4 @@
socket: 1
connect: 1
fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
index 3d258c7..516ca60 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
@@ -29,6 +29,8 @@
rt_sigreturn: 1
faccessat: 1
sched_setscheduler: 1
+ugetrlimit: 1
+getrlimit: 1
# for attaching to debuggerd on process crash
socketcall: 1
@@ -36,3 +38,4 @@
tgkill: 1
rt_sigprocmask: 1
fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 03438bf..88f98cf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -6,10 +6,12 @@
LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libnbaio
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libmedialogservice
-LOCAL_32_BIT_ONLY := true
-
LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index e1235b8..64534bf 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -96,8 +96,11 @@
if (binder != NULL) {
sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder);
for (size_t i = 0; i < resources.size(); ++i) {
- service->notifyResourceGranted(pid, String16(asString(resources[i].mType)),
- String16(asString(resources[i].mSubType)), resources[i].mValue);
+ if (resources[i].mSubType == MediaResource::kAudioCodec) {
+ service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_AUDIO_CODEC);
+ } else if (resources[i].mSubType == MediaResource::kVideoCodec) {
+ service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC);
+ }
}
}
}
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 6aae31d..f5d74d3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -33,6 +33,8 @@
LOCAL_CFLAGS += -Wall -Wextra -Werror
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libradioservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ecc49ae..e8e18b8 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -38,6 +38,8 @@
LOCAL_C_INCLUDES += \
$(TOPDIR)frameworks/av/services/audioflinger
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libsoundtriggerservice
include $(BUILD_SHARED_LIBRARY)