Merge "LiveSession: added onSwitchDown" into lmp-dev
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index a3cc396..72e51f9 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -430,7 +430,7 @@
* - NO_ERROR: successful operation
* - BAD_VALUE: position is NULL
*/
- status_t getPosition(uint32_t *position) const;
+ status_t getPosition(uint32_t *position);
/* For static buffer mode only, this returns the current playback position in frames
* relative to start of buffer. It is analogous to the position units used by
@@ -581,6 +581,7 @@
* if you need a high resolution mapping between frame position and presentation time,
* consider implementing that at application level, based on the low resolution timestamps.
* Returns NO_ERROR if timestamp is valid.
+ * The timestamp parameter is undefined on return if status is not NO_ERROR.
*/
status_t getTimestamp(AudioTimestamp& timestamp);
@@ -639,7 +640,7 @@
// caller must hold lock on mLock for all _l methods
- status_t createTrack_l(size_t epoch);
+ status_t createTrack_l();
// can only be called when mState != STATE_ACTIVE
void flush_l();
@@ -659,6 +660,9 @@
bool isDirect_l() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ // increment mPosition by the delta in mServer, and return the new value of mPosition
+ uint32_t updateAndGetPosition_l();
+
// Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
@@ -731,6 +735,18 @@
bool mMarkerReached;
uint32_t mNewPosition; // in frames
uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
+ uint32_t mServer; // in frames, last known mProxy->getPosition()
+ // which is count of frames consumed by server,
+ // reset by new IAudioTrack,
+ // whether it is reset by stop() is TBD
+ uint32_t mPosition; // in frames, like mServer except continues
+ // monotonically after new IAudioTrack,
+ // and could be easily widened to uint64_t
+ uint32_t mReleased; // in frames, count of frames released to server
+ // but not necessarily consumed by server,
+ // reset by stop() but continues monotonically
+ // after new IAudioTrack to restore mPosition,
+ // and could be easily widened to uint64_t
audio_output_flags_t mFlags;
// const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
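The three new counters above follow one pattern: read a raw 32-bit counter from the server, take the signed difference against the last value read, and accumulate that delta into a client-side position that survives stop() and IAudioTrack re-creation. A minimal standalone sketch of that wrap-safe accumulation (plain C++ with illustrative names, not AudioTrack code):

// Illustration only: simplified stand-in for the mServer/mPosition scheme above.
#include <cstdint>
#include <cstdio>

struct PositionTracker {
    uint32_t server = 0;      // last raw server-side frame count read
    uint32_t position = 0;    // client-side position; keeps counting across restarts

    uint32_t update(uint32_t newServer) {
        int32_t delta = (int32_t)(newServer - server);  // wrap-safe signed difference
        server = newServer;
        if (delta < 0) {
            delta = 0;        // ignore apparent retrograde motion, as the patch does
        }
        return position += (uint32_t)delta;
    }
};

int main() {
    PositionTracker t;
    t.server = 0xFFFFFF00u;                        // pretend the counter was last read just below wrap
    printf("%u\n", (unsigned)t.update(0x100u));    // prints 512: the delta is computed across the wrap
    return 0;
}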
diff --git a/media/libmedia/CharacterEncodingDetector.h b/include/media/CharacterEncodingDetector.h
similarity index 96%
rename from media/libmedia/CharacterEncodingDetector.h
rename to include/media/CharacterEncodingDetector.h
index 7b5ed86..deaa377 100644
--- a/media/libmedia/CharacterEncodingDetector.h
+++ b/include/media/CharacterEncodingDetector.h
@@ -43,7 +43,7 @@
const UCharsetMatch *getPreferred(
const char *input, size_t len,
const UCharsetMatch** ucma, size_t matches,
- bool *goodmatch);
+ bool *goodmatch, int *highestmatch);
bool isFrequent(const uint16_t *values, uint32_t c);
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 5c8a484..619ac78 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -88,7 +88,7 @@
/* Send parameters to the audio hardware */
virtual status_t setParameters(const String8& keyValuePairs) = 0;
- /* Return NO_ERROR if timestamp is valid */
+ /* Return NO_ERROR if timestamp is valid. timestamp is undefined otherwise. */
virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0;
/* Signal the playback thread for a change in control block */
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 253c557..f061d22 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -54,7 +54,8 @@
CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
- CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2004,
+ CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
+ CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
};
/**
diff --git a/media/libmedia/StringArray.h b/include/media/StringArray.h
similarity index 100%
rename from media/libmedia/StringArray.h
rename to include/media/StringArray.h
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 2442219..9cc208e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -278,6 +278,7 @@
bool mPrepareSync;
status_t mPrepareStatus;
audio_stream_type_t mStreamType;
+ Parcel* mAudioAttributesParcel;
bool mLoop;
float mLeftVolume;
float mRightVolume;
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
index 5213bdc..d555279 100644
--- a/include/media/mediascanner.h
+++ b/include/media/mediascanner.h
@@ -122,7 +122,6 @@
protected:
// default encoding from MediaScanner::mLocale
String8 mLocale;
- CharacterEncodingDetector *mEncodingDetector;
};
}; // namespace android
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index be0c15b..d422576 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -227,7 +227,7 @@
// Returns NO_ERROR if a timestamp is available. The timestamp includes the total number
// of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
- // as of this presentation count.
+ // as of this presentation count. The timestamp parameter is undefined if an error is returned.
virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
protected:
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index eb31c77..da4c20c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -234,7 +234,7 @@
status_t setComponentRole(bool isEncoder, const char *mime);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
- status_t configureTunneledVideoPlayback(int64_t audioHwSync,
+ status_t configureTunneledVideoPlayback(int32_t audioHwSync,
const sp<ANativeWindow> &nativeWindow);
status_t setVideoPortFormatType(
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 7540e07..2e663ec 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -58,20 +58,22 @@
// drm/drm_framework_common.h
DRM_ERROR_BASE = -2000,
- ERROR_DRM_UNKNOWN = DRM_ERROR_BASE,
- ERROR_DRM_NO_LICENSE = DRM_ERROR_BASE - 1,
- ERROR_DRM_LICENSE_EXPIRED = DRM_ERROR_BASE - 2,
- ERROR_DRM_SESSION_NOT_OPENED = DRM_ERROR_BASE - 3,
- ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 4,
- ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5,
- ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6,
- ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7,
- ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8,
- ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9,
- ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10,
+ ERROR_DRM_UNKNOWN = DRM_ERROR_BASE,
+ ERROR_DRM_NO_LICENSE = DRM_ERROR_BASE - 1,
+ ERROR_DRM_LICENSE_EXPIRED = DRM_ERROR_BASE - 2,
+ ERROR_DRM_SESSION_NOT_OPENED = DRM_ERROR_BASE - 3,
+ ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED = DRM_ERROR_BASE - 4,
+ ERROR_DRM_DECRYPT = DRM_ERROR_BASE - 5,
+ ERROR_DRM_CANNOT_HANDLE = DRM_ERROR_BASE - 6,
+ ERROR_DRM_TAMPER_DETECTED = DRM_ERROR_BASE - 7,
+ ERROR_DRM_NOT_PROVISIONED = DRM_ERROR_BASE - 8,
+ ERROR_DRM_DEVICE_REVOKED = DRM_ERROR_BASE - 9,
+ ERROR_DRM_RESOURCE_BUSY = DRM_ERROR_BASE - 10,
+ ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION = DRM_ERROR_BASE - 11,
+ ERROR_DRM_LAST_USED_ERRORCODE = DRM_ERROR_BASE - 11,
- ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500,
- ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999,
+ ERROR_DRM_VENDOR_MAX = DRM_ERROR_BASE - 500,
+ ERROR_DRM_VENDOR_MIN = DRM_ERROR_BASE - 999,
// Heartbeat Error Codes
HEARTBEAT_ERROR_BASE = -3000,
@@ -100,7 +102,7 @@
// returns true if err is a recognized DRM error code
static inline bool isCryptoError(status_t err) {
- return (ERROR_DRM_RESOURCE_BUSY <= err && err <= ERROR_DRM_UNKNOWN)
+ return (ERROR_DRM_LAST_USED_ERRORCODE <= err && err <= ERROR_DRM_UNKNOWN)
|| (ERROR_DRM_VENDOR_MIN <= err && err <= ERROR_DRM_VENDOR_MAX);
}
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index 940fc55..4d76b64 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -56,8 +56,6 @@
KeyedVector<uint32_t, sp<AMessage> > mReplies;
- status_t postMessage_l(const sp<AMessage> &msg, int64_t delayUs);
-
DISALLOW_EVIL_CONSTRUCTORS(ALooperRoster);
};
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 5846d6b..a9e235b 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -137,7 +137,9 @@
Rect rectValue;
} u;
const char *mName;
+ size_t mNameLength;
Type mType;
+ void setName(const char *name, size_t len);
};
enum {
@@ -147,12 +149,14 @@
size_t mNumItems;
Item *allocateItem(const char *name);
- void freeItem(Item *item);
+ void freeItemValue(Item *item);
const Item *findItem(const char *name, Type type) const;
void setObjectInternal(
const char *name, const sp<RefBase> &obj, Type type);
+ size_t findItemIndex(const char *name, size_t len) const;
+
DISALLOW_EVIL_CONSTRUCTORS(AMessage);
};
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3be0651..e012116 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,15 +70,16 @@
LOCAL_STATIC_LIBRARIES += libinstantssq
-LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE:= libmedia
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/av/include/media/ \
$(TOP)/frameworks/av/media/libstagefright \
- external/icu/icu4c/source/common \
- external/icu/icu4c/source/i18n \
+ $(TOP)/external/icu/icu4c/source/common \
+ $(TOP)/external/icu/icu4c/source/i18n \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 3486d21..1742fbe 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -582,9 +582,13 @@
}
binder->linkToDeath(gAudioPolicyServiceClient);
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
- gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
gLock.unlock();
+ // Registering the client takes the AudioPolicyService lock.
+ // Don't hold the AudioSystem lock at the same time.
+ gAudioPolicyService->registerClient(gAudioPolicyServiceClient);
} else {
+ // There exists a benign race condition where gAudioPolicyService
+ // is set, but gAudioPolicyServiceClient is not yet registered.
gLock.unlock();
}
return gAudioPolicyService;
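The reordering above exists so that the AudioSystem lock is never held while registerClient() takes the AudioPolicyService lock. The general shape, as a standalone sketch with stand-in names (not the actual binder code): finish the local bookkeeping, drop the local mutex, then make the call that acquires the other component's lock.

// Illustration of the lock-ordering pattern only; gLocalLock/gRemoteLock are stand-ins.
#include <mutex>

std::mutex gLocalLock;    // analogous to AudioSystem's gLock
std::mutex gRemoteLock;   // analogous to the lock taken inside registerClient()

void registerWithRemote() {
    std::lock_guard<std::mutex> lk(gRemoteLock);  // the callee takes its own lock
    // ... remote-side bookkeeping ...
}

void getService() {
    gLocalLock.lock();
    // ... cache the service handle, install the death notifier ...
    gLocalLock.unlock();   // release the local lock first
    registerWithRemote();  // never holds both locks at the same time
}

int main() {
    getService();
    return 0;
}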
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d87e6f5..ff7da83 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -398,7 +398,7 @@
}
// create the IAudioTrack
- status = createTrack_l(0 /*epoch*/);
+ status = createTrack_l();
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -417,6 +417,9 @@
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
+ mServer = 0;
+ mPosition = 0;
+ mReleased = 0;
AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
mSequence = 1;
mObservedSequence = mSequence;
@@ -443,14 +446,16 @@
} else {
mState = STATE_ACTIVE;
}
+ (void) updateAndGetPosition_l();
if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
// reset current position as seen by client to 0
- mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+ mPosition = 0;
+ mReleased = 0;
// force refresh of remaining frames by processAudioBuffer() as last
// write before stop could be partial.
mRefreshRemaining = true;
}
- mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+ mNewPosition = mPosition + mUpdatePeriod;
int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
sp<AudioTrackThread> t = mAudioTrackThread;
@@ -709,7 +714,7 @@
{
// FIXME If setting a loop also sets position to start of loop, then
// this is correct. Otherwise it should be removed.
- mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+ mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}
@@ -751,7 +756,7 @@
}
AutoMutex lock(mLock);
- mNewPosition = mProxy->getPosition() + updatePeriod;
+ mNewPosition = updateAndGetPosition_l() + updatePeriod;
mUpdatePeriod = updatePeriod;
return NO_ERROR;
@@ -791,7 +796,7 @@
if (mState == STATE_ACTIVE) {
return INVALID_OPERATION;
}
- mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+ mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
mLoopPeriod = 0;
// FIXME Check whether loops and setting position are incompatible in old code.
// If we use setLoop for both purposes we lose the capability to set the position while looping.
@@ -800,7 +805,7 @@
return NO_ERROR;
}
-status_t AudioTrack::getPosition(uint32_t *position) const
+status_t AudioTrack::getPosition(uint32_t *position)
{
if (position == NULL) {
return BAD_VALUE;
@@ -823,8 +828,8 @@
*position = dspFrames;
} else {
// IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
- *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
- mProxy->getPosition();
+ *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
+ 0 : updateAndGetPosition_l();
}
return NO_ERROR;
}
@@ -881,7 +886,7 @@
// -------------------------------------------------------------------------
// must be called with mLock held
-status_t AudioTrack::createTrack_l(size_t epoch)
+status_t AudioTrack::createTrack_l()
{
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -1184,7 +1189,6 @@
mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
mProxy->setSendLevel(mSendLevel);
mProxy->setSampleRate(mSampleRate);
- mProxy->setEpoch(epoch);
mProxy->setMinimum(mNotificationFramesAct);
mDeathNotifier = new DeathNotifier(this);
@@ -1319,6 +1323,7 @@
buffer.mRaw = audioBuffer->raw;
AutoMutex lock(mLock);
+ mReleased += stepCount;
mInUnderrun = false;
mProxy->releaseBuffer(&buffer);
@@ -1531,7 +1536,7 @@
}
// Get current position of server
- size_t position = mProxy->getPosition();
+ size_t position = updateAndGetPosition_l();
// Manage marker callback
bool markerReached = false;
@@ -1796,14 +1801,18 @@
return DEAD_OBJECT;
}
- // if the new IAudioTrack is created, createTrack_l() will modify the
+ // save the old static buffer position
+ size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+
+ // If a new IAudioTrack is successfully created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
- // It will also delete the strong references on previous IAudioTrack and IMemory
+ // It will also delete the strong references on previous IAudioTrack and IMemory.
+ // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
+ result = createTrack_l();
// take the frames that will be lost by track recreation into account in saved position
- size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
- size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
- result = createTrack_l(position /*epoch*/);
+ (void) updateAndGetPosition_l();
+ mPosition = mReleased;
if (result == NO_ERROR) {
// continue playback from last known position, but
@@ -1838,6 +1847,27 @@
return result;
}
+uint32_t AudioTrack::updateAndGetPosition_l()
+{
+ // This is the sole place to read server consumed frames
+ uint32_t newServer = mProxy->getPosition();
+ int32_t delta = newServer - mServer;
+ mServer = newServer;
+ // TODO There is controversy about whether there can be "negative jitter" in server position.
+ // This should be investigated further, and if possible, it should be addressed.
+ // A more definite failure mode is infrequent polling by client.
+ // One could call (void) updateAndGetPosition_l() in releaseBuffer(),
+ // so that mReleased and mPosition stay in lock-step as much as possible.
+ // That should ensure delta never goes negative for infrequent polling
+ // unless the server has more than 2^31 frames in its buffer,
+ // in which case the use of uint32_t for these counters has bigger issues.
+ if (delta < 0) {
+ ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
+ delta = 0;
+ }
+ return mPosition += (uint32_t) delta;
+}
+
status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
AutoMutex lock(mLock);
@@ -1854,9 +1884,34 @@
if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
return INVALID_OPERATION;
}
+ // The presented frame count must always lag behind the consumed frame count.
+ // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
status_t status = mAudioTrack->getTimestamp(timestamp);
if (status == NO_ERROR) {
- timestamp.mPosition += mProxy->getEpoch();
+ // Update the mapping between local consumed (mPosition) and server consumed (mServer)
+ (void) updateAndGetPosition_l();
+ // Server consumed (mServer) and presented both use the same server time base,
+ // and server consumed is always >= presented.
+ // The delta between these represents the number of frames in the buffer pipeline.
+ // If this delta is greater than the client position, the frames being presented still
+ // predate the client's position zero, i.e. the first frame the client wrote has not been
+ // presented yet, so we can't report a valid timestamp.
+ if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
+ return INVALID_OPERATION;
+ }
+ // Convert timestamp position from server time base to client time base.
+ // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
+ // But if we change it to 64-bit then this could fail.
+ // If (mPosition - mServer) can be negative, then we should use:
+ // (int32_t)(mPosition - mServer)
+ timestamp.mPosition += mPosition - mServer;
+ // Immediately after a call to updateAndGetPosition_l(), mPosition and
+ // mServer both represent the same frame position. mPosition is
+ // in client's point of view, and mServer is in server's point of
+ // view. So the difference between them is the "fudge factor"
+ // between client and server views due to stop() and/or new
+ // IAudioTrack. And timestamp.mPosition is initially in server's
+ // point of view, so we need to apply the same fudge factor to it.
}
return status;
}
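A worked example of the conversion above, with illustrative numbers: suppose the track was stopped and restarted, so the client position was reset to zero while the server kept counting.

// Illustrative numbers only; mirrors the server-to-client time base conversion above.
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t mServer   = 50000;  // frames consumed, server time base (not reset by restart)
    uint32_t mPosition = 2000;   // frames consumed, client time base (reset to 0 on restart)
    uint32_t presented = 49500;  // timestamp position as reported, server time base

    // 500 frames sit between "consumed by server" and "presented"; if that exceeded the
    // 2000 frames the client has played, no frame of this client epoch has been presented yet.
    if ((uint32_t)(mServer - presented) > mPosition) {
        printf("timestamp not valid yet\n");
        return 0;
    }
    uint32_t clientPresented = presented + (mPosition - mServer);  // apply the fudge factor
    printf("presented, client time base: %u frames\n", (unsigned)clientPresented);  // prints 1500
    return 0;
}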
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 7d1ddfd..41994dc 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "CharacterEncodingDector"
#include <utils/Log.h>
-#include "CharacterEncodingDetector.h"
+#include <CharacterEncodingDetector.h>
#include "CharacterEncodingDetectorTables.h"
#include "utils/Vector.h"
@@ -118,10 +118,12 @@
int32_t matches;
const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status);
bool goodmatch = true;
+ int highest = 0;
const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf),
- ucma, matches, &goodmatch);
+ ucma, matches, &goodmatch, &highest);
- if (!goodmatch && strlen(buf) < 20) {
+ ALOGV("goodmatch: %s, highest: %d", goodmatch ? "true" : "false", highest);
+ if (!goodmatch && (highest < 15 || strlen(buf) < 20)) {
ALOGV("not a good match, trying with more data");
// This string might be too short for ICU to do anything useful with.
// (real world example: "Björk" in ISO-8859-1 might be detected as GB18030, because
@@ -146,9 +148,10 @@
ucsdet_setText(csd, buf, strlen(buf), &status);
ucma = ucsdet_detectAll(csd, &matches, &status);
bestCombinedMatch = getPreferred(buf, strlen(buf),
- ucma, matches, &goodmatch);
- if (!goodmatch) {
+ ucma, matches, &goodmatch, &highest);
+ if (!goodmatch && highest <= 15) {
ALOGV("still not a good match after adding printable tags");
+ bestCombinedMatch = NULL;
}
} else {
ALOGV("no printable tags to add");
@@ -157,6 +160,8 @@
if (bestCombinedMatch != NULL) {
combinedenc = ucsdet_getName(bestCombinedMatch, &status);
+ } else {
+ combinedenc = "ISO-8859-1";
}
}
@@ -199,10 +204,17 @@
if (strcmp(enc,"UTF-8") != 0) {
// only convert if the source encoding isn't already UTF-8
ALOGV("@@@ using converter %s for %s", enc, mNames.getEntry(i));
+ status = U_ZERO_ERROR;
UConverter *conv = ucnv_open(enc, &status);
if (U_FAILURE(status)) {
- ALOGE("could not create UConverter for %s", enc);
- continue;
+ ALOGW("could not create UConverter for %s (%d), falling back to ISO-8859-1",
+ enc, status);
+ status = U_ZERO_ERROR;
+ conv = ucnv_open("ISO-8859-1", &status);
+ if (U_FAILURE(status)) {
+ ALOGW("could not create UConverter for ISO-8859-1 either");
+ continue;
+ }
}
// convert from native encoding to UTF-8
@@ -224,7 +236,16 @@
} else {
// zero terminate
*target = 0;
- mValues.setEntry(i, buffer);
+ // strip trailing spaces
+ while (--target > buffer && *target == ' ') {
+ *target = 0;
+ }
+ // skip leading spaces
+ char *start = buffer;
+ while (*start == ' ') {
+ start++;
+ }
+ mValues.setEntry(i, start);
}
delete[] buffer;
@@ -261,7 +282,7 @@
const UCharsetMatch *CharacterEncodingDetector::getPreferred(
const char *input, size_t len,
const UCharsetMatch** ucma, size_t nummatches,
- bool *goodmatch) {
+ bool *goodmatch, int *highestmatch) {
*goodmatch = false;
Vector<const UCharsetMatch*> matches;
@@ -316,11 +337,17 @@
}
ALOGV("%zu: %s %d", i, encname, confidence);
+ status = U_ZERO_ERROR;
UConverter *conv = ucnv_open(encname, &status);
+ int demerit = 0;
+ if (U_FAILURE(status)) {
+ ALOGV("failed to open %s: %d", encname, status);
+ confidence = 0;
+ demerit += 1000;
+ }
const char *source = input;
const char *sourceLimit = input + len;
status = U_ZERO_ERROR;
- int demerit = 0;
int frequentchars = 0;
int totalchars = 0;
while (true) {
@@ -337,7 +364,8 @@
if (c < 0x20 || (c >= 0x7f && c <= 0x009f)) {
ALOGV("control character %x", c);
demerit += 100;
- } else if ((c >= 0xa0 && c <= 0xbe) // symbols, superscripts
+ } else if ((c == 0xa0) // no-break space
+ || (c >= 0xa2 && c <= 0xbe) // symbols, superscripts
|| (c == 0xd7) || (c == 0xf7) // multiplication and division signs
|| (c >= 0x2000 && c <= 0x209f)) { // punctuation, superscripts
ALOGV("unlikely character %x", c);
@@ -408,10 +436,14 @@
} else {
ALOGV("runner up: '%s' w/ %d confidence",
ucsdet_getName(matches[runnerupidx], &status), runnerup);
+ if (runnerup < 0) {
+ runnerup = 0;
+ }
if ((highest - runnerup) > 15) {
*goodmatch = true;
}
}
+ *highestmatch = highest;
return matches[highestidx];
}
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index d2e181b..e2e6042 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -87,6 +87,7 @@
{"highspeed480p", CAMCORDER_QUALITY_HIGH_SPEED_480P},
{"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
{"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
+ {"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
};
#if LOG_NDEBUG
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index 1661f04..9f803cb 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -25,14 +25,10 @@
namespace android {
-MediaScannerClient::MediaScannerClient()
- : mEncodingDetector(NULL)
-{
+MediaScannerClient::MediaScannerClient() {
}
-MediaScannerClient::~MediaScannerClient()
-{
- delete mEncodingDetector;
+MediaScannerClient::~MediaScannerClient() {
}
void MediaScannerClient::setLocale(const char* locale)
@@ -40,31 +36,16 @@
mLocale = locale; // not currently used
}
-void MediaScannerClient::beginFile()
-{
- delete mEncodingDetector;
- mEncodingDetector = new CharacterEncodingDetector();
+void MediaScannerClient::beginFile() {
}
status_t MediaScannerClient::addStringTag(const char* name, const char* value)
{
- mEncodingDetector->addTag(name, value);
+ handleStringTag(name, value);
return OK;
}
-void MediaScannerClient::endFile()
-{
- mEncodingDetector->detectAndConvert();
-
- int size = mEncodingDetector->size();
- if (size) {
- for (int i = 0; i < size; i++) {
- const char *name;
- const char *value;
- mEncodingDetector->getTag(i, &name, &value);
- handleStringTag(name, value);
- }
- }
+void MediaScannerClient::endFile() {
}
} // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 6cd377a..9611ac7 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -50,6 +50,7 @@
mListener = NULL;
mCookie = NULL;
mStreamType = AUDIO_STREAM_MUSIC;
+ mAudioAttributesParcel = NULL;
mCurrentPosition = -1;
mSeekPosition = -1;
mCurrentState = MEDIA_PLAYER_IDLE;
@@ -68,6 +69,10 @@
MediaPlayer::~MediaPlayer()
{
ALOGV("destructor");
+ if (mAudioAttributesParcel != NULL) {
+ delete mAudioAttributesParcel;
+ mAudioAttributesParcel = NULL;
+ }
AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
disconnect();
IPCThreadState::self()->flushCommands();
@@ -237,6 +242,9 @@
{
if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
mPlayer->setAudioStreamType(mStreamType);
+ if (mAudioAttributesParcel != NULL) {
+ mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
+ }
mCurrentState = MEDIA_PLAYER_PREPARING;
return mPlayer->prepareAsync();
}
@@ -662,8 +670,17 @@
if (mPlayer != NULL) {
return mPlayer->setParameter(key, request);
}
- ALOGV("setParameter: no active player");
- return INVALID_OPERATION;
+ switch (key) {
+ case KEY_PARAMETER_AUDIO_ATTRIBUTES:
+ // no player, save the marshalled audio attributes
+ if (mAudioAttributesParcel != NULL) { delete mAudioAttributesParcel; }
+ mAudioAttributesParcel = new Parcel();
+ mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
+ return OK;
+ default:
+ ALOGV("setParameter: no active player");
+ return INVALID_OPERATION;
+ }
}
status_t MediaPlayer::getParameter(int key, Parcel *reply)
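The setParameter() change above stashes the marshalled audio attributes when no player exists yet and replays them in prepareAsync(). A stripped-down sketch of that stash-and-replay pattern (stand-in types, not the MediaPlayer API):

// Stash-and-replay illustration only; Settings/Player/Controller are stand-ins.
#include <cstdio>
#include <memory>
#include <string>

struct Settings { std::string payload; };          // stands in for the marshalled Parcel

struct Player {                                     // stands in for the remote player
    void apply(const Settings& s) { printf("applied: %s\n", s.payload.c_str()); }
};

struct Controller {
    std::unique_ptr<Player> player;
    std::unique_ptr<Settings> pending;              // saved until a player exists

    void setSettings(const Settings& s) {
        if (player) { player->apply(s); return; }   // player exists: forward immediately
        pending.reset(new Settings(s));             // otherwise keep a copy for later
    }
    void prepare() {
        player.reset(new Player());
        if (pending) { player->apply(*pending); }   // replay the saved settings
    }
};

int main() {
    Controller c;
    c.setSettings({"usage=media, content=music"});  // arrives before the player is created
    c.prepare();                                    // prints: applied: usage=media, content=music
    return 0;
}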
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 0c7e590c..adc066d 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -28,6 +28,7 @@
libcamera_client \
libcrypto \
libcutils \
+ libdrmframework \
liblog \
libdl \
libgui \
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index dacb144..3e0fc0d 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -60,7 +60,7 @@
return OK;
}
-player_type MediaPlayerFactory::getDefaultPlayerType() {
+static player_type getDefaultPlayerType() {
char value[PROPERTY_VALUE_MAX];
if (property_get("media.stagefright.use-awesome", value, NULL)
&& (!strcmp("1", value) || !strcasecmp("true", value))) {
@@ -181,16 +181,19 @@
int64_t offset,
int64_t /*length*/,
float /*curScore*/) {
- char buf[20];
- lseek(fd, offset, SEEK_SET);
- read(fd, buf, sizeof(buf));
- lseek(fd, offset, SEEK_SET);
+ if (getDefaultPlayerType() == STAGEFRIGHT_PLAYER) {
+ char buf[20];
+ lseek(fd, offset, SEEK_SET);
+ read(fd, buf, sizeof(buf));
+ lseek(fd, offset, SEEK_SET);
- uint32_t ident = *((uint32_t*)buf);
+ uint32_t ident = *((uint32_t*)buf);
- // Ogg vorbis?
- if (ident == 0x5367674f) // 'OggS'
- return 1.0;
+ // Ogg vorbis?
+ if (ident == 0x5367674f) // 'OggS'
+ return 1.0;
+ }
return 0.0;
}
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index 5ddde19..55ff918 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -71,7 +71,6 @@
static status_t registerFactory_l(IFactory* factory,
player_type type);
- static player_type getDefaultPlayerType();
static Mutex sLock;
static tFactoryMap sFactoryMap;
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index b5bd988..c8cb7ed 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -204,6 +204,8 @@
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | content_type |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | flags |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | kAudioAttributesMarshallTagFlattenTags | // ignore tags if not found
@@ -219,6 +221,7 @@
{
attributes->usage = (audio_usage_t) parcel.readInt32();
attributes->content_type = (audio_content_type_t) parcel.readInt32();
+ attributes->source = (audio_source_t) parcel.readInt32();
attributes->flags = (audio_flags_mask_t) parcel.readInt32();
const bool hasFlattenedTag = (parcel.readInt32() == kAudioAttributesMarshallTagFlattenTags);
if (hasFlattenedTag) {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e2bcb1e..b904aa8 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -183,11 +183,7 @@
return BAD_VALUE;
}
- if (ve == VIDEO_ENCODER_DEFAULT) {
- mVideoEncoder = VIDEO_ENCODER_H263;
- } else {
- mVideoEncoder = ve;
- }
+ mVideoEncoder = ve;
return OK;
}
@@ -1033,6 +1029,7 @@
if (mAudioSource != AUDIO_SOURCE_CNT) {
source = createAudioSource();
} else {
+ setDefaultVideoEncoderIfNecessary();
sp<MediaSource> mediaSource;
status_t err = setupMediaSource(&mediaSource);
@@ -1074,6 +1071,7 @@
if (mVideoSource < VIDEO_SOURCE_LIST_END) {
if (mVideoEncoder != VIDEO_ENCODER_H264) {
+ ALOGE("MPEG2TS recording only supports H.264 encoding!");
return ERROR_UNSUPPORTED;
}
@@ -1108,6 +1106,12 @@
void StagefrightRecorder::clipVideoFrameRate() {
ALOGV("clipVideoFrameRate: encoder %d", mVideoEncoder);
+ if (mFrameRate == -1) {
+ mFrameRate = mEncoderProfiles->getCamcorderProfileParamByName(
+ "vid.fps", mCameraId, CAMCORDER_QUALITY_LOW);
+ ALOGW("Using default video fps %d", mFrameRate);
+ }
+
int minFrameRate = mEncoderProfiles->getVideoEncoderParamByName(
"enc.vid.fps.min", mVideoEncoder);
int maxFrameRate = mEncoderProfiles->getVideoEncoderParamByName(
@@ -1243,6 +1247,27 @@
}
}
+void StagefrightRecorder::setDefaultVideoEncoderIfNecessary() {
+ if (mVideoEncoder == VIDEO_ENCODER_DEFAULT) {
+ if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
+ // default to VP8 for WEBM recording
+ mVideoEncoder = VIDEO_ENCODER_VP8;
+ } else {
+ // pick the default encoder for CAMCORDER_QUALITY_LOW
+ int videoCodec = mEncoderProfiles->getCamcorderProfileParamByName(
+ "vid.codec", mCameraId, CAMCORDER_QUALITY_LOW);
+
+ if (videoCodec > VIDEO_ENCODER_DEFAULT &&
+ videoCodec < VIDEO_ENCODER_LIST_END) {
+ mVideoEncoder = (video_encoder)videoCodec;
+ } else {
+ // default to H.264 if camcorder profile not available
+ mVideoEncoder = VIDEO_ENCODER_H264;
+ }
+ }
+ }
+}
+
status_t StagefrightRecorder::checkAudioEncoderCapabilities() {
clipAudioBitRate();
clipAudioSampleRate();
@@ -1562,6 +1587,7 @@
}
if (mVideoSource < VIDEO_SOURCE_LIST_END) {
+ setDefaultVideoEncoderIfNecessary();
sp<MediaSource> mediaSource;
err = setupMediaSource(&mediaSource);
@@ -1721,7 +1747,7 @@
// Default parameters
mOutputFormat = OUTPUT_FORMAT_THREE_GPP;
mAudioEncoder = AUDIO_ENCODER_AMR_NB;
- mVideoEncoder = VIDEO_ENCODER_H263;
+ mVideoEncoder = VIDEO_ENCODER_DEFAULT;
mVideoWidth = 176;
mVideoHeight = 144;
mFrameRate = -1;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 9062f30..54c38d3 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -178,6 +178,7 @@
void clipAudioSampleRate();
void clipNumberOfAudioChannels();
void setDefaultProfileIfNecessary();
+ void setDefaultVideoEncoderIfNecessary();
StagefrightRecorder(const StagefrightRecorder &);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f0f4e45..ec1a9a0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,8 @@
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
#include "../../libstagefright/include/WVMExtractor.h"
@@ -49,20 +51,28 @@
mIsWidevine(false),
mUIDValid(uidValid),
mUID(uid),
+ mDrmManagerClient(NULL),
mMetaDataSize(-1ll),
mBitrate(-1ll),
- mPollBufferingGeneration(0) {
+ mPollBufferingGeneration(0),
+ mPendingReadBufferTypes(0) {
resetDataSource();
DataSource::RegisterDefaultSniffers();
}
void NuPlayer::GenericSource::resetDataSource() {
+ mAudioTimeUs = 0;
+ mVideoTimeUs = 0;
mHTTPService.clear();
mUri.clear();
mUriHeaders.clear();
mFd = -1;
mOffset = 0;
mLength = 0;
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mDecryptHandle = NULL;
+ mDrmManagerClient = NULL;
+ mStarted = false;
}
status_t NuPlayer::GenericSource::setDataSource(
@@ -130,6 +140,10 @@
return UNKNOWN_ERROR;
}
+ if (extractor->getDrmFlag()) {
+ checkDrmStatus(mDataSource);
+ }
+
sp<MetaData> fileMeta = extractor->getMetaData();
if (fileMeta != NULL) {
int64_t duration;
@@ -203,6 +217,28 @@
return OK;
}
+void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
+ dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
+ if (mDecryptHandle != NULL) {
+ CHECK(mDrmManagerClient);
+ if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
+ sp<AMessage> msg = dupNotify();
+ msg->setInt32("what", kWhatDrmNoLicense);
+ msg->post();
+ }
+ }
+}
+
+int64_t NuPlayer::GenericSource::getLastReadPosition() {
+ if (mAudioTrack.mSource != NULL) {
+ return mAudioTimeUs;
+ } else if (mVideoTrack.mSource != NULL) {
+ return mVideoTimeUs;
+ } else {
+ return 0;
+ }
+}
+
status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
if (mIsWidevine && !audio) {
@@ -284,7 +320,14 @@
}
if (mVideoTrack.mSource != NULL) {
- notifyVideoSizeChanged(getFormat(false /* audio */));
+ sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+ sp<AMessage> msg = new AMessage;
+ err = convertMetaDataToMessage(meta, &msg);
+ if (err != OK) {
+ notifyPreparedAndCleanup(err);
+ return;
+ }
+ notifyVideoSizeChanged(msg);
}
notifyFlagsChanged(
@@ -388,7 +431,7 @@
mAudioTrack.mPackets =
new AnotherPacketSource(mAudioTrack.mSource->getFormat());
- readBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
}
if (mVideoTrack.mSource != NULL) {
@@ -396,8 +439,37 @@
mVideoTrack.mPackets =
new AnotherPacketSource(mVideoTrack.mSource->getFormat());
- readBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
+
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::stop() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::pause() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::resume() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
+ if (mDecryptHandle != NULL) {
+ mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
+ }
+ mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);
+ mTimedTextTrack.mPackets = new AnotherPacketSource(NULL);
}
status_t NuPlayer::GenericSource::feedMoreTSData() {
@@ -554,6 +626,37 @@
}
break;
}
+
+ case kWhatGetFormat:
+ {
+ onGetFormatMeta(msg);
+ break;
+ }
+
+ case kWhatGetSelectedTrack:
+ {
+ onGetSelectedTrack(msg);
+ break;
+ }
+
+ case kWhatSelectTrack:
+ {
+ onSelectTrack(msg);
+ break;
+ }
+
+ case kWhatSeek:
+ {
+ onSeek(msg);
+ break;
+ }
+
+ case kWhatReadBuffer:
+ {
+ onReadBuffer(msg);
+ break;
+ }
+
default:
Source::onMessageReceived(msg);
break;
@@ -629,6 +732,34 @@
}
sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
+ sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+ msg->setInt32("audio", audio);
+
+ sp<AMessage> response;
+ void *format;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findPointer("format", &format));
+ return (MetaData *)format;
+ } else {
+ return NULL;
+ }
+}
+
+void NuPlayer::GenericSource::onGetFormatMeta(sp<AMessage> msg) const {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ sp<AMessage> response = new AMessage;
+ sp<MetaData> format = doGetFormatMeta(audio);
+ response->setPointer("format", format.get());
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
if (source == NULL) {
@@ -648,7 +779,7 @@
if (mIsWidevine && !audio) {
// try to read a buffer as we may not have been able to the last time
- readBuffer(MEDIA_TRACK_TYPE_VIDEO, -1ll);
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
status_t finalResult;
@@ -659,18 +790,7 @@
status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
if (!track->mPackets->hasBufferAvailable(&finalResult)) {
- readBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO, -1ll);
- }
-
- if (mSubtitleTrack.mSource == NULL && mTimedTextTrack.mSource == NULL) {
- return result;
- }
-
- if (mSubtitleTrack.mSource != NULL) {
- CHECK(mSubtitleTrack.mPackets != NULL);
- }
- if (mTimedTextTrack.mSource != NULL) {
- CHECK(mTimedTextTrack.mPackets != NULL);
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
}
if (result != OK) {
@@ -764,6 +884,35 @@
}
ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
+ sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+ msg->setInt32("type", type);
+
+ sp<AMessage> response;
+ int32_t index;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("index", &index));
+ return index;
+ } else {
+ return -1;
+ }
+}
+
+void NuPlayer::GenericSource::onGetSelectedTrack(sp<AMessage> msg) const {
+ int32_t tmpType;
+ CHECK(msg->findInt32("type", &tmpType));
+ media_track_type type = (media_track_type)tmpType;
+
+ sp<AMessage> response = new AMessage;
+ ssize_t index = doGetSelectedTrack(type);
+ response->setInt32("index", index);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
const Track *track = NULL;
switch (type) {
case MEDIA_TRACK_TYPE_VIDEO:
@@ -791,6 +940,34 @@
status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select) {
ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+ sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+ msg->setInt32("trackIndex", trackIndex);
+ msg->setInt32("select", trackIndex);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer::GenericSource::onSelectTrack(sp<AMessage> msg) {
+ int32_t trackIndex, select;
+ CHECK(msg->findInt32("trackIndex", &trackIndex));
+ CHECK(msg->findInt32("select", &select));
+
+ sp<AMessage> response = new AMessage;
+ status_t err = doSelectTrack(trackIndex, select);
+ response->setInt32("err", err);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select) {
if (trackIndex >= mSources.size()) {
return BAD_INDEX;
}
@@ -861,6 +1038,32 @@
}
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+ sp<AMessage> msg = new AMessage(kWhatSeek, id());
+ msg->setInt64("seekTimeUs", seekTimeUs);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer::GenericSource::onSeek(sp<AMessage> msg) {
+ int64_t seekTimeUs;
+ CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+
+ sp<AMessage> response = new AMessage;
+ status_t err = doSeek(seekTimeUs);
+ response->setInt32("err", err);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
if (mVideoTrack.mSource != NULL) {
int64_t actualTimeUs;
readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
@@ -872,6 +1075,10 @@
readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
}
+ setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
+ if (!mStarted) {
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ }
return OK;
}
@@ -941,6 +1148,31 @@
return ab;
}
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
+ Mutex::Autolock _l(mReadBufferLock);
+
+ if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
+ mPendingReadBufferTypes |= (1 << trackType);
+ sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+ msg->setInt32("trackType", trackType);
+ msg->post();
+ }
+}
+
+void NuPlayer::GenericSource::onReadBuffer(sp<AMessage> msg) {
+ int32_t tmpType;
+ CHECK(msg->findInt32("trackType", &tmpType));
+ media_track_type trackType = (media_track_type)tmpType;
+ {
+ // only protect the variable change, as readBuffer may
+ // take considerable time. This may result in one extra
+ // read being processed, but that is benign.
+ Mutex::Autolock _l(mReadBufferLock);
+ mPendingReadBufferTypes &= ~(1 << trackType);
+ }
+ readBuffer(trackType);
+}
+
void NuPlayer::GenericSource::readBuffer(
media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
Track *track;
@@ -989,6 +1221,14 @@
options.clearSeekTo();
if (err == OK) {
+ int64_t timeUs;
+ CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ mAudioTimeUs = timeUs;
+ } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ mVideoTimeUs = timeUs;
+ }
+
// formatChange && seeking: track whose source is changed during selection
// formatChange && !seeking: track whose source is not changed during selection
// !formatChange: normal seek
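getFormatMeta(), getSelectedTrack(), selectTrack() and seekTo() above all take the same route: pack the arguments into an AMessage, post it to the source's looper, and block on the reply, so the real work always runs on one thread (postReadBuffer() additionally coalesces duplicate requests with the mPendingReadBufferTypes bitmask). A standalone sketch of that request/reply shape using only the standard library, with std::async standing in for the looper post:

// Request/reply illustration only; std::async stands in for the ALooper/AMessage round trip.
#include <cstdio>
#include <future>

static int doSeek(long long seekTimeUs) {  // runs on the "handler" side
    // ... perform the seek against the extractor ...
    (void)seekTimeUs;
    return 0;                              // OK
}

static int seekTo(long long seekTimeUs) {  // public entry point, callable from any thread
    std::future<int> reply =
        std::async(std::launch::async, doSeek, seekTimeUs);  // "post" the request
    return reply.get();                    // block until the handler posts its reply
}

int main() {
    printf("seek returned %d\n", seekTo(1000000));
    return 0;
}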
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 663bfae..c70c48e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -27,6 +27,8 @@
namespace android {
+class DecryptHandle;
+class DrmManagerClient;
struct AnotherPacketSource;
struct ARTSPController;
struct DataSource;
@@ -49,6 +51,9 @@
virtual void prepareAsync();
virtual void start();
+ virtual void stop();
+ virtual void pause();
+ virtual void resume();
virtual status_t feedMoreTSData();
@@ -79,6 +84,11 @@
kWhatSendTimedTextData,
kWhatChangeAVSource,
kWhatPollBuffering,
+ kWhatGetFormat,
+ kWhatGetSelectedTrack,
+ kWhatSelectTrack,
+ kWhatSeek,
+ kWhatReadBuffer,
};
Vector<sp<MediaSource> > mSources;
@@ -90,7 +100,9 @@
};
Track mAudioTrack;
+ int64_t mAudioTimeUs;
Track mVideoTrack;
+ int64_t mVideoTimeUs;
Track mSubtitleTrack;
Track mTimedTextTrack;
@@ -111,22 +123,42 @@
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<WVMExtractor> mWVMExtractor;
+ DrmManagerClient *mDrmManagerClient;
+ sp<DecryptHandle> mDecryptHandle;
+ bool mStarted;
String8 mContentType;
AString mSniffedMIME;
off64_t mMetaDataSize;
int64_t mBitrate;
int32_t mPollBufferingGeneration;
+ uint32_t mPendingReadBufferTypes;
+ mutable Mutex mReadBufferLock;
sp<ALooper> mLooper;
void resetDataSource();
status_t initFromDataSource();
+ void checkDrmStatus(const sp<DataSource>& dataSource);
+ int64_t getLastReadPosition();
+ void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
status_t prefillCacheIfNecessary();
void notifyPreparedAndCleanup(status_t err);
+ void onGetFormatMeta(sp<AMessage> msg) const;
+ sp<MetaData> doGetFormatMeta(bool audio) const;
+
+ void onGetSelectedTrack(sp<AMessage> msg) const;
+ ssize_t doGetSelectedTrack(media_track_type type) const;
+
+ void onSelectTrack(sp<AMessage> msg);
+ status_t doSelectTrack(size_t trackIndex, bool select);
+
+ void onSeek(sp<AMessage> msg);
+ status_t doSeek(int64_t seekTimeUs);
+
void onPrepareAsync();
void fetchTextData(
@@ -142,6 +174,8 @@
media_track_type trackType,
int64_t *actualTimeUs = NULL);
+ void postReadBuffer(media_track_type trackType);
+ void onReadBuffer(sp<AMessage> msg);
void readBuffer(
media_track_type trackType,
int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 2b7457b..df3e992 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -145,6 +145,7 @@
NuPlayer::NuPlayer()
: mUIDValid(false),
mSourceFlags(0),
+ mCurrentPositionUs(0),
mVideoIsAVC(false),
mOffloadAudio(false),
mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
@@ -540,6 +541,14 @@
static_cast<NativeWindowWrapper *>(obj.get())));
if (obj != NULL) {
+ if (mStarted && mVideoDecoder != NULL) {
+ // Issue a seek to refresh the video screen only if started; otherwise
+ // the extractor may not yet be started and will assert.
+ // If the video decoder is not set (perhaps this is audio-only),
+ // do not perform a seek, as it is not needed.
+ mDeferredActions.push_back(new SeekAction(mCurrentPositionUs));
+ }
+
// If there is a new surface texture, instantiate decoders
// again if possible.
mDeferredActions.push_back(
@@ -860,6 +869,7 @@
} else if (what == Renderer::kWhatPosition) {
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
+ mCurrentPositionUs = positionUs;
CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));
@@ -984,6 +994,8 @@
ALOGV("both audio and video are flushed now.");
+ mPendingAudioAccessUnit.clear();
+
if (mTimeDiscontinuityPending) {
mRenderer->signalTimeDiscontinuity();
mTimeDiscontinuityPending = false;
@@ -1232,7 +1244,8 @@
CHECK(msg->findMessage("reply", &reply));
if ((audio && mFlushingAudio != NONE)
- || (!audio && mFlushingVideo != NONE)) {
+ || (!audio && mFlushingVideo != NONE)
+ || mSource == NULL) {
reply->setInt32("err", INFO_DISCONTINUITY);
reply->post();
return OK;
@@ -1240,14 +1253,47 @@
sp<ABuffer> accessUnit;
+ // Aggregate smaller buffers into a larger buffer.
+ // The goal is to reduce power consumption.
+ // Unfortunately this does not work with the software AAC decoder.
+ // TODO optimize buffer size for power consumption
+ // The offload read buffer size is 32 KB but 24 KB uses less power.
+ const int kAudioBigBufferSizeBytes = 24 * 1024;
+ bool doBufferAggregation = (audio && mOffloadAudio);
+ sp<ABuffer> biggerBuffer;
+ bool needMoreData = false;
+ int numSmallBuffers = 0;
+ bool gotTime = false;
+
bool dropAccessUnit;
do {
- status_t err = mSource->dequeueAccessUnit(audio, &accessUnit);
+ status_t err;
+ // Did we save an accessUnit earlier because of a discontinuity?
+ if (audio && (mPendingAudioAccessUnit != NULL)) {
+ accessUnit = mPendingAudioAccessUnit;
+ mPendingAudioAccessUnit.clear();
+ err = mPendingAudioErr;
+ ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit");
+ } else {
+ err = mSource->dequeueAccessUnit(audio, &accessUnit);
+ }
if (err == -EWOULDBLOCK) {
- return err;
+ if (biggerBuffer == NULL) {
+ return err;
+ } else {
+ break; // Reply with data that we already have.
+ }
} else if (err != OK) {
if (err == INFO_DISCONTINUITY) {
+ if (biggerBuffer != NULL) {
+ // We already have some data so save this for later.
+ mPendingAudioErr = err;
+ mPendingAudioAccessUnit = accessUnit;
+ accessUnit.clear();
+ ALOGD("feedDecoderInputData() save discontinuity for later");
+ break;
+ }
int32_t type;
CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
@@ -1352,7 +1398,52 @@
dropAccessUnit = true;
++mNumFramesDropped;
}
- } while (dropAccessUnit);
+
+ size_t smallSize = accessUnit->size();
+ needMoreData = false;
+ if (doBufferAggregation && (biggerBuffer == NULL)
+ // Don't bother if only room for a few small buffers.
+ && (smallSize < (kAudioBigBufferSizeBytes / 3))) {
+ // Create a larger buffer for combining smaller buffers from the extractor.
+ biggerBuffer = new ABuffer(kAudioBigBufferSizeBytes);
+ biggerBuffer->setRange(0, 0); // start empty
+ }
+
+ if (biggerBuffer != NULL) {
+ int64_t timeUs;
+ bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
+ // Will the smaller buffer fit?
+ size_t bigSize = biggerBuffer->size();
+ size_t roomLeft = biggerBuffer->capacity() - bigSize;
+ // Should we save this small buffer for the next big buffer?
+ // If the first small buffer did not have a timestamp then save
+ // any buffer that does have a timestamp until the next big buffer.
+ if ((smallSize > roomLeft)
+ || (!gotTime && (numSmallBuffers > 0) && smallTimestampValid)) {
+ mPendingAudioErr = err;
+ mPendingAudioAccessUnit = accessUnit;
+ accessUnit.clear();
+ } else {
+ // Append small buffer to the bigger buffer.
+ memcpy(biggerBuffer->base() + bigSize, accessUnit->data(), smallSize);
+ bigSize += smallSize;
+ biggerBuffer->setRange(0, bigSize);
+
+ // Keep looping until we run out of room in the biggerBuffer.
+ needMoreData = true;
+
+ // Grab time from first small buffer if available.
+ if ((numSmallBuffers == 0) && smallTimestampValid) {
+ biggerBuffer->meta()->setInt64("timeUs", timeUs);
+ gotTime = true;
+ }
+
+ ALOGV("feedDecoderInputData() #%d, smallSize = %zu, bigSize = %zu, capacity = %zu",
+ numSmallBuffers, smallSize, bigSize, biggerBuffer->capacity());
+ numSmallBuffers++;
+ }
+ }
+ } while (dropAccessUnit || needMoreData);
// ALOGV("returned a valid buffer of %s data", audio ? "audio" : "video");
@@ -1368,7 +1459,13 @@
mCCDecoder->decode(accessUnit);
}
- reply->setBuffer("buffer", accessUnit);
+ if (biggerBuffer != NULL) {
+ ALOGV("feedDecoderInputData() reply with aggregated buffer, %d", numSmallBuffers);
+ reply->setBuffer("buffer", biggerBuffer);
+ } else {
+ reply->setBuffer("buffer", accessUnit);
+ }
+
reply->post();
return OK;
@@ -1536,6 +1633,10 @@
ALOGE_IF(mFlushingVideo != NONE,
"video flushDecoder() is called in state %d", mFlushingVideo);
mFlushingVideo = newStatus;
+
+ if (mCCDecoder != NULL) {
+ mCCDecoder->flush();
+ }
}
}
@@ -1661,6 +1762,14 @@
seekTimeUs,
seekTimeUs / 1E6);
+ if (mSource == NULL) {
+ // This happens when reset occurs right before the loop mode
+ // asynchronously seeks to the start of the stream.
+ LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL,
+ "mSource is NULL and decoders not NULL audio(%p) video(%p)",
+ mAudioDecoder.get(), mVideoDecoder.get());
+ return;
+ }
mSource->seekTo(seekTimeUs);
++mTimedTextGeneration;
@@ -1723,6 +1832,9 @@
++mScanSourcesGeneration;
mScanSourcesPending = false;
+ ++mAudioDecoderGeneration;
+ ++mVideoDecoderGeneration;
+
if (mRendererLooper != NULL) {
if (mRenderer != NULL) {
mRendererLooper->unregisterHandler(mRenderer->id());
@@ -1915,6 +2027,12 @@
break;
}
+ case Source::kWhatDrmNoLicense:
+ {
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
+ break;
+ }
+
default:
TRESPASS();
}
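The aggregation loop added to feedDecoderInputData() copies small extractor buffers into one buffer of roughly 24 KB before handing it to the offloaded audio decoder, so the device wakes up less often. The core of that idea, as a standalone sketch with illustrative sizes:

// Aggregation illustration only; sizes and buffer types are simplified stand-ins.
#include <cstdio>
#include <vector>

static const size_t kBigBufferSize = 24 * 1024;   // larger buffer handed to the decoder

int main() {
    std::vector<unsigned char> big;
    big.reserve(kBigBufferSize);

    // Ten 4000-byte "access units" standing in for small buffers from the extractor.
    std::vector<std::vector<unsigned char> > small(10, std::vector<unsigned char>(4000, 0xab));

    size_t accepted = 0;
    for (size_t i = 0; i < small.size(); ++i) {
        if (big.size() + small[i].size() > kBigBufferSize) {
            break;  // would overflow: ship what we have, keep this unit pending for next time
        }
        big.insert(big.end(), small[i].begin(), small[i].end());  // append the small buffer
        ++accepted;
    }
    printf("aggregated %zu buffers into %zu bytes\n", accepted, big.size());  // 6 buffers, 24000 bytes
    return 0;
}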
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 511d752..89ae11c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -121,6 +121,7 @@
sp<Source> mSource;
uint32_t mSourceFlags;
sp<NativeWindowWrapper> mNativeWindow;
+ int64_t mCurrentPositionUs;
sp<MediaPlayerBase::AudioSink> mAudioSink;
sp<Decoder> mVideoDecoder;
bool mVideoIsAVC;
@@ -157,6 +158,9 @@
// notion of time has changed.
bool mTimeDiscontinuityPending;
+ sp<ABuffer> mPendingAudioAccessUnit;
+ status_t mPendingAudioErr;
+
FlushStatus mFlushingAudio;
FlushStatus mFlushingVideo;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index d1aac50..163a0b5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -220,6 +220,8 @@
void NuPlayer::Decoder::handleError(int32_t err)
{
+ mCodec->release();
+
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatError);
notify->setInt32("err", err);
@@ -593,7 +595,18 @@
{
if (!isStaleReply(msg)) {
onInputBufferFilled(msg);
+ } else {
+ /* release any MediaBuffer passed in the stale buffer */
+ sp<ABuffer> buffer;
+ MediaBuffer *mediaBuffer = NULL;
+ if (msg->findBuffer("buffer", &buffer) &&
+ buffer->meta()->findPointer(
+ "mediaBuffer", (void **)&mediaBuffer) &&
+ mediaBuffer != NULL) {
+ mediaBuffer->release();
+ }
}
+
break;
}
@@ -714,72 +727,28 @@
return seamless;
}
-struct NuPlayer::CCDecoder::CCData {
+struct CCData {
CCData(uint8_t type, uint8_t data1, uint8_t data2)
: mType(type), mData1(data1), mData2(data2) {
}
+ bool getChannel(size_t *channel) const {
+ if (mData1 >= 0x10 && mData1 <= 0x1f) {
+ *channel = (mData1 >= 0x18 ? 1 : 0) + (mType ? 2 : 0);
+ return true;
+ }
+ return false;
+ }
uint8_t mType;
uint8_t mData1;
uint8_t mData2;
};
-NuPlayer::CCDecoder::CCDecoder(const sp<AMessage> ¬ify)
- : mNotify(notify),
- mTrackCount(0),
- mSelectedTrack(-1) {
-}
-
-size_t NuPlayer::CCDecoder::getTrackCount() const {
- return mTrackCount;
-}
-
-sp<AMessage> NuPlayer::CCDecoder::getTrackInfo(size_t index) const {
- CHECK(index == 0);
-
- sp<AMessage> format = new AMessage();
-
- format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
- format->setString("language", "und");
- format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
- format->setInt32("auto", 1);
- format->setInt32("default", 1);
- format->setInt32("forced", 0);
-
- return format;
-}
-
-status_t NuPlayer::CCDecoder::selectTrack(size_t index, bool select) {
- CHECK(index < mTrackCount);
-
- if (select) {
- if (mSelectedTrack == (ssize_t)index) {
- ALOGE("track %zu already selected", index);
- return BAD_VALUE;
- }
- ALOGV("selected track %zu", index);
- mSelectedTrack = index;
- } else {
- if (mSelectedTrack != (ssize_t)index) {
- ALOGE("track %zu is not selected", index);
- return BAD_VALUE;
- }
- ALOGV("unselected track %zu", index);
- mSelectedTrack = -1;
- }
-
- return OK;
-}
-
-bool NuPlayer::CCDecoder::isSelected() const {
- return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)mTrackCount;
-}
-
-bool NuPlayer::CCDecoder::isNullPad(CCData *cc) const {
+static bool isNullPad(CCData *cc) {
return cc->mData1 < 0x10 && cc->mData2 < 0x10;
}
-void NuPlayer::CCDecoder::dumpBytePair(const sp<ABuffer> &ccBuf) const {
+static void dumpBytePair(const sp<ABuffer> &ccBuf) {
size_t offset = 0;
AString out;
@@ -841,6 +810,78 @@
ALOGI("%s", out.c_str());
}
+NuPlayer::CCDecoder::CCDecoder(const sp<AMessage> ¬ify)
+ : mNotify(notify),
+ mCurrentChannel(0),
+ mSelectedTrack(-1) {
+ for (size_t i = 0; i < sizeof(mTrackIndices)/sizeof(mTrackIndices[0]); ++i) {
+ mTrackIndices[i] = -1;
+ }
+}
+
+size_t NuPlayer::CCDecoder::getTrackCount() const {
+ return mFoundChannels.size();
+}
+
+sp<AMessage> NuPlayer::CCDecoder::getTrackInfo(size_t index) const {
+ if (!isTrackValid(index)) {
+ return NULL;
+ }
+
+ sp<AMessage> format = new AMessage();
+
+ format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
+ format->setString("language", "und");
+ format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
+ //CC1, field 0 channel 0
+ bool isDefaultAuto = (mFoundChannels[index] == 0);
+ format->setInt32("auto", isDefaultAuto);
+ format->setInt32("default", isDefaultAuto);
+ format->setInt32("forced", 0);
+
+ return format;
+}
+
+status_t NuPlayer::CCDecoder::selectTrack(size_t index, bool select) {
+ if (!isTrackValid(index)) {
+ return BAD_VALUE;
+ }
+
+ if (select) {
+ if (mSelectedTrack == (ssize_t)index) {
+ ALOGE("track %zu already selected", index);
+ return BAD_VALUE;
+ }
+ ALOGV("selected track %zu", index);
+ mSelectedTrack = index;
+ } else {
+ if (mSelectedTrack != (ssize_t)index) {
+ ALOGE("track %zu is not selected", index);
+ return BAD_VALUE;
+ }
+ ALOGV("unselected track %zu", index);
+ mSelectedTrack = -1;
+ }
+
+ return OK;
+}
+
+bool NuPlayer::CCDecoder::isSelected() const {
+ return mSelectedTrack >= 0 && mSelectedTrack < (int32_t) getTrackCount();
+}
+
+bool NuPlayer::CCDecoder::isTrackValid(size_t index) const {
+ return index < getTrackCount();
+}
+
+int32_t NuPlayer::CCDecoder::getTrackIndex(size_t channel) const {
+ if (channel < sizeof(mTrackIndices)/sizeof(mTrackIndices[0])) {
+ return mTrackIndices[channel];
+ }
+ return -1;
+}
+
+// returns true if a new CC track is found
bool NuPlayer::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
int64_t timeUs;
CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
@@ -850,7 +891,7 @@
return false;
}
- bool hasCC = false;
+ bool trackAdded = false;
NALBitReader br(sei->data() + 1, sei->size() - 1);
// sei_message()
@@ -885,8 +926,6 @@
&& itu_t_t35_provider_code == 0x0031
&& user_identifier == 'GA94'
&& user_data_type_code == 0x3) {
- hasCC = true;
-
// MPEG_cc_data()
// ATSC A/53 Part 4: 6.2.3.1
br.skipBits(1); //process_em_data_flag
@@ -916,6 +955,12 @@
&& (cc_type == 0 || cc_type == 1)) {
CCData cc(cc_type, cc_data_1, cc_data_2);
if (!isNullPad(&cc)) {
+ size_t channel;
+ if (cc.getChannel(&channel) && getTrackIndex(channel) < 0) {
+ mTrackIndices[channel] = mFoundChannels.size();
+ mFoundChannels.push_back(channel);
+ trackAdded = true;
+ }
memcpy(ccBuf->data() + ccBuf->size(),
(void *)&cc, sizeof(cc));
ccBuf->setRange(0, ccBuf->size() + sizeof(CCData));
@@ -938,13 +983,33 @@
br.skipBits(payload_size * 8);
}
- return hasCC;
+ return trackAdded;
+}
+
+sp<ABuffer> NuPlayer::CCDecoder::filterCCBuf(
+ const sp<ABuffer> &ccBuf, size_t index) {
+ sp<ABuffer> filteredCCBuf = new ABuffer(ccBuf->size());
+ filteredCCBuf->setRange(0, 0);
+
+ size_t cc_count = ccBuf->size() / sizeof(CCData);
+ const CCData* cc_data = (const CCData*)ccBuf->data();
+ for (size_t i = 0; i < cc_count; ++i) {
+ size_t channel;
+ if (cc_data[i].getChannel(&channel)) {
+ mCurrentChannel = channel;
+ }
+ if (mCurrentChannel == mFoundChannels[index]) {
+ memcpy(filteredCCBuf->data() + filteredCCBuf->size(),
+ (void *)&cc_data[i], sizeof(CCData));
+ filteredCCBuf->setRange(0, filteredCCBuf->size() + sizeof(CCData));
+ }
+ }
+
+ return filteredCCBuf;
}
void NuPlayer::CCDecoder::decode(const sp<ABuffer> &accessUnit) {
- if (extractFromSEI(accessUnit) && mTrackCount == 0) {
- mTrackCount++;
-
+ if (extractFromSEI(accessUnit)) {
ALOGI("Found CEA-608 track");
sp<AMessage> msg = mNotify->dup();
msg->setInt32("what", kWhatTrackAdded);
@@ -954,13 +1019,18 @@
}
void NuPlayer::CCDecoder::display(int64_t timeUs) {
+ if (!isTrackValid(mSelectedTrack)) {
+ ALOGE("Could not find current track(index=%d)", mSelectedTrack);
+ return;
+ }
+
ssize_t index = mCCMap.indexOfKey(timeUs);
if (index < 0) {
ALOGV("cc for timestamp %" PRId64 " not found", timeUs);
return;
}
- sp<ABuffer> &ccBuf = mCCMap.editValueAt(index);
+ sp<ABuffer> ccBuf = filterCCBuf(mCCMap.valueAt(index), mSelectedTrack);
if (ccBuf->size() > 0) {
#if 0
@@ -981,5 +1051,9 @@
mCCMap.removeItemsAt(0, index + 1);
}
+void NuPlayer::CCDecoder::flush() {
+ mCCMap.clear();
+}
+
} // namespace android
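
Note on the channel mapping introduced above: CCData::getChannel() treats the first byte of a CEA-608 pair in the control-code range 0x10-0x1f as a channel selector; 0x18-0x1f picks the second channel of the field, and cc_type distinguishes field 1 from field 2, giving channels 0-3 (CC1-CC4). A minimal standalone sketch of that mapping (illustrative only; the sample byte is made up):

    // Illustrative only: mirrors CCData::getChannel() from the patch above.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // type is the cc_type bit (0 = field 1, 1 = field 2); data1 is the first
    // byte of the closed-caption pair.
    static bool ccChannel(uint8_t type, uint8_t data1, size_t *channel) {
        if (data1 >= 0x10 && data1 <= 0x1f) {
            // 0x10-0x17 selects the first channel of the field, 0x18-0x1f the second.
            *channel = (data1 >= 0x18 ? 1 : 0) + (type ? 2 : 0);
            return true;   // 0..3 correspond to CC1..CC4
        }
        return false;      // not a control code; the current channel stays in effect
    }

    int main() {
        size_t ch;
        if (ccChannel(1 /* field 2 */, 0x1c /* hypothetical control byte */, &ch)) {
            printf("channel %zu (CC%zu)\n", ch, ch + 1);   // prints "channel 3 (CC4)"
        }
        return 0;
    }
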
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 67bddb8..cc1bdff 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -126,18 +126,20 @@
bool isSelected() const;
void decode(const sp<ABuffer> &accessUnit);
void display(int64_t timeUs);
+ void flush();
private:
- struct CCData;
-
sp<AMessage> mNotify;
KeyedVector<int64_t, sp<ABuffer> > mCCMap;
- size_t mTrackCount;
+ size_t mCurrentChannel;
int32_t mSelectedTrack;
+ int32_t mTrackIndices[4];
+ Vector<size_t> mFoundChannels;
- bool isNullPad(CCData *cc) const;
- void dumpBytePair(const sp<ABuffer> &ccBuf) const;
+ bool isTrackValid(size_t index) const;
+ int32_t getTrackIndex(size_t channel) const;
bool extractFromSEI(const sp<ABuffer> &accessUnit);
+ sp<ABuffer> filterCCBuf(const sp<ABuffer> &ccBuf, size_t index);
DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
};
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index c9be0dd..ab7906a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -115,9 +115,12 @@
notify->post();
mPendingBuffers++;
- sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
- message->setInt32("generation", mBufferGeneration);
- message->post();
+ // if we are at the cap, the already-pending buffers will drive further requestABuffer calls as they are consumed
+ if (mPendingBuffers < kMaxPendingBuffers) {
+ sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
+ message->setInt32("generation", mBufferGeneration);
+ message->post();
+ }
return;
}
@@ -155,9 +158,7 @@
void NuPlayer::DecoderPassThrough::onBufferConsumed(int32_t size) {
mPendingBuffers--;
mCachedBytes -= size;
- sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
- message->setInt32("generation", mBufferGeneration);
- message->post();
+ requestABuffer();
}
void NuPlayer::DecoderPassThrough::onFlush() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index c4bbcdf..7dd54c1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -560,8 +560,10 @@
void NuPlayerDriver::notifyPosition(int64_t positionUs) {
Mutex::Autolock autoLock(mLock);
- mPositionUs = positionUs;
- mNotifyTimeRealUs = ALooper::GetNowUs();
+ if (isPlaying()) {
+ mPositionUs = positionUs;
+ mNotifyTimeRealUs = ALooper::GetNowUs();
+ }
}
void NuPlayerDriver::notifySeekComplete() {
@@ -624,11 +626,14 @@
switch (msg) {
case MEDIA_PLAYBACK_COMPLETE:
{
- if (mLooping) {
- mLock.unlock();
- mPlayer->seekToAsync(0);
- mLock.lock();
- break;
+ if (mState != STATE_RESET_IN_PROGRESS) {
+ if (mLooping) {
+ mPlayer->seekToAsync(0);
+ break;
+ }
+
+ mPlayer->pause();
+ mState = STATE_PAUSED;
}
// fall through
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index bf6b3df..067784b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -58,7 +58,8 @@
mVideoRenderingStartGeneration(0),
mAudioRenderingStartGeneration(0),
mLastPositionUpdateUs(-1ll),
- mVideoLateByUs(0ll) {
+ mVideoLateByUs(0ll),
+ mVideoSampleReceived(false) {
}
NuPlayer::Renderer::~Renderer() {
@@ -315,7 +316,7 @@
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
Mutex::Autolock autoLock(mLock);
- if (!offloadingAudio()) {
+ if (!offloadingAudio() || mPaused) {
return 0;
}
@@ -491,7 +492,9 @@
}
void NuPlayer::Renderer::postDrainVideoQueue() {
- if (mDrainVideoQueuePending || mSyncQueues || mPaused) {
+ if (mDrainVideoQueuePending
+ || mSyncQueues
+ || (mPaused && mVideoSampleReceived)) {
return;
}
@@ -570,16 +573,22 @@
realTimeUs = mediaTimeUs - mAnchorTimeMediaUs + mAnchorTimeRealUs;
}
- mVideoLateByUs = ALooper::GetNowUs() - realTimeUs;
- bool tooLate = (mVideoLateByUs > 40000);
+ bool tooLate = false;
- if (tooLate) {
- ALOGV("video late by %lld us (%.2f secs)",
- mVideoLateByUs, mVideoLateByUs / 1E6);
+ if (!mPaused) {
+ mVideoLateByUs = ALooper::GetNowUs() - realTimeUs;
+ tooLate = (mVideoLateByUs > 40000);
+
+ if (tooLate) {
+ ALOGV("video late by %lld us (%.2f secs)",
+ mVideoLateByUs, mVideoLateByUs / 1E6);
+ } else {
+ ALOGV("rendering video at media time %.2f secs",
+ (mFlags & FLAG_REAL_TIME ? realTimeUs :
+ (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
+ }
} else {
- ALOGV("rendering video at media time %.2f secs",
- (mFlags & FLAG_REAL_TIME ? realTimeUs :
- (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
+ mVideoLateByUs = 0ll;
}
entry->mNotifyConsumed->setInt32("render", !tooLate);
@@ -587,12 +596,15 @@
mVideoQueue.erase(mVideoQueue.begin());
entry = NULL;
- if (!mVideoRenderingStarted) {
- mVideoRenderingStarted = true;
- notifyVideoRenderingStart();
- }
+ mVideoSampleReceived = true;
- notifyIfMediaRenderingStarted();
+ if (!mPaused) {
+ if (!mVideoRenderingStarted) {
+ mVideoRenderingStarted = true;
+ notifyVideoRenderingStart();
+ }
+ notifyIfMediaRenderingStarted();
+ }
notifyPosition();
}
@@ -791,6 +803,7 @@
prepareForMediaRenderingStart();
}
+ mVideoSampleReceived = false;
notifyFlushComplete(audio);
}
@@ -880,13 +893,16 @@
}
void NuPlayer::Renderer::onPause() {
- CHECK(!mPaused);
-
+ if (mPaused) {
+ ALOGW("Renderer::onPause() called while already paused!");
+ return;
+ }
{
Mutex::Autolock autoLock(mLock);
++mAudioQueueGeneration;
++mVideoQueueGeneration;
prepareForMediaRenderingStart();
+ mPaused = true;
}
mDrainAudioQueuePending = false;
@@ -898,8 +914,6 @@
ALOGV("now paused audio queue has %d entries, video has %d entries",
mAudioQueue.size(), mVideoQueue.size());
-
- mPaused = true;
}
void NuPlayer::Renderer::onResume() {
@@ -911,9 +925,9 @@
mAudioSink->start();
}
+ Mutex::Autolock autoLock(mLock);
mPaused = false;
- Mutex::Autolock autoLock(mLock);
if (!mAudioQueue.empty()) {
postDrainAudioQueue_l();
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 8da6458..5c7d2d7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -119,6 +119,7 @@
bool mSyncQueues;
bool mPaused;
+ bool mVideoSampleReceived;
bool mVideoRenderingStarted;
int32_t mVideoRenderingStartGeneration;
int32_t mAudioRenderingStartGeneration;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 45657c2..7ccf3b1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -51,6 +51,7 @@
kWhatSubtitleData,
kWhatTimedTextData,
kWhatQueueDecoderShutdown,
+ kWhatDrmNoLicense,
};
// The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e4e463a..9b03b71 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1245,13 +1245,13 @@
tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
- int64_t audioHwSync = 0;
- if (!msg->findInt64("audio-hw-sync", &audioHwSync)) {
+ int32_t audioHwSync = 0;
+ if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
ALOGW("No Audio HW Sync provided for video tunnel");
}
err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
if (err != OK) {
- ALOGE("configureTunneledVideoPlayback(%" PRId64 ",%p) failed!",
+ ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
audioHwSync, nativeWindow.get());
return err;
}
@@ -1898,7 +1898,7 @@
}
status_t ACodec::configureTunneledVideoPlayback(
- int64_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+ int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
native_handle_t* sidebandHandle;
status_t err = mOMX->configureVideoTunnelMode(
@@ -2313,7 +2313,6 @@
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval;
- CHECK(ret > 1);
return ret;
}
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index be9af5e..193f8a7 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -62,6 +62,7 @@
avc_utils.cpp \
LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/include/media/ \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
@@ -70,6 +71,8 @@
$(TOP)/external/openssl/include \
$(TOP)/external/libvpx/libwebm \
$(TOP)/system/netd/include \
+ $(TOP)/external/icu/icu4c/source/common \
+ $(TOP)/external/icu/icu4c/source/i18n \
LOCAL_SHARED_LIBRARIES := \
libbinder \
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 0064293..1729f93 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -2810,7 +2810,6 @@
{
if (objectType == AOT_SBR || objectType == AOT_PS) {
- const int32_t extensionSamplingFrequency = br.getBits(4);
objectType = br.getBits(5);
if (objectType == AOT_ESCAPE) {
@@ -2828,9 +2827,30 @@
const int32_t coreCoderDelay = br.getBits(14);
}
- const int32_t extensionFlag = br.getBits(1);
+ int32_t extensionFlag = -1;
+ if (br.numBitsLeft() > 0) {
+ extensionFlag = br.getBits(1);
+ } else {
+ switch (objectType) {
+ // 14496-3 4.5.1.1 extensionFlag
+ case AOT_AAC_LC:
+ extensionFlag = 0;
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_SCAL:
+ case AOT_ER_BSAC:
+ case AOT_ER_AAC_LD:
+ extensionFlag = 1;
+ break;
+ default:
+ TRESPASS();
+ break;
+ }
+ ALOGW("csd missing extension flag; assuming %d for object type %u.",
+ extensionFlag, objectType);
+ }
- if (numChannels == 0 ) {
+ if (numChannels == 0) {
int32_t channelsEffectiveNum = 0;
int32_t channelsNum = 0;
const int32_t ElementInstanceTag = br.getBits(4);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 76f730f..0bfc6e4 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -270,7 +270,20 @@
}
sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
+ status_t err = PostAndAwaitResponse(msg, &response);
+
+ if (err != OK && err != INVALID_OPERATION) {
+ // MediaCodec now sets its state to UNINITIALIZED upon any fatal error.
+ // To maintain backward compatibility, do a reset() to put the codec
+ // back into the INITIALIZED state.
+ // But don't reset if err is INVALID_OPERATION, which means the
+ // configure failure was due to being called in the wrong state.
+
+ ALOGE("configure failed with err 0x%08x, resetting...", err);
+ reset();
+ }
+
+ return err;
}
status_t MediaCodec::createInputSurface(
@@ -733,13 +746,15 @@
case CONFIGURING:
{
- setState(INITIALIZED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : INITIALIZED);
break;
}
case STARTING:
{
- setState(CONFIGURED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : CONFIGURED);
break;
}
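
The comment block added to configure() above documents a compatibility contract: a fatal error now drops MediaCodec to UNINITIALIZED internally, and configure() calls reset() so existing callers still get back a codec they can reconfigure. A caller-side sketch of that contract (illustrative only; codec, surface, buildFormat() and buildFallbackFormat() are hypothetical, and the snippet assumes the framework MediaCodec/AMessage headers):

    // If configure() fails with a fatal error, the codec was already reset back
    // to the INITIALIZED state, so the caller can simply configure() again with
    // different parameters instead of recreating the codec.
    sp<AMessage> format = buildFormat();                                  // hypothetical helper
    status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
    if (err != OK && err != INVALID_OPERATION) {
        err = codec->configure(buildFallbackFormat(), surface, NULL, 0);  // hypothetical helper
    }
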
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 78758da..a8806c8 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -994,7 +994,6 @@
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval - 1;
- CHECK(ret > 1);
return ret;
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 8cc41e7..101fc8a 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
#include <media/stagefright/MediaDefs.h>
+#include <CharacterEncodingDetector.h>
namespace android {
@@ -450,33 +451,59 @@
struct Map {
int from;
int to;
+ const char *name;
};
static const Map kMap[] = {
- { kKeyMIMEType, METADATA_KEY_MIMETYPE },
- { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER },
- { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER },
- { kKeyAlbum, METADATA_KEY_ALBUM },
- { kKeyArtist, METADATA_KEY_ARTIST },
- { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST },
- { kKeyAuthor, METADATA_KEY_AUTHOR },
- { kKeyComposer, METADATA_KEY_COMPOSER },
- { kKeyDate, METADATA_KEY_DATE },
- { kKeyGenre, METADATA_KEY_GENRE },
- { kKeyTitle, METADATA_KEY_TITLE },
- { kKeyYear, METADATA_KEY_YEAR },
- { kKeyWriter, METADATA_KEY_WRITER },
- { kKeyCompilation, METADATA_KEY_COMPILATION },
- { kKeyLocation, METADATA_KEY_LOCATION },
+ { kKeyMIMEType, METADATA_KEY_MIMETYPE, NULL },
+ { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER, "tracknumber" },
+ { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER, "discnumber" },
+ { kKeyAlbum, METADATA_KEY_ALBUM, "album" },
+ { kKeyArtist, METADATA_KEY_ARTIST, "artist" },
+ { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST, "albumartist" },
+ { kKeyAuthor, METADATA_KEY_AUTHOR, NULL },
+ { kKeyComposer, METADATA_KEY_COMPOSER, "composer" },
+ { kKeyDate, METADATA_KEY_DATE, NULL },
+ { kKeyGenre, METADATA_KEY_GENRE, "genre" },
+ { kKeyTitle, METADATA_KEY_TITLE, "title" },
+ { kKeyYear, METADATA_KEY_YEAR, "year" },
+ { kKeyWriter, METADATA_KEY_WRITER, "writer" },
+ { kKeyCompilation, METADATA_KEY_COMPILATION, "compilation" },
+ { kKeyLocation, METADATA_KEY_LOCATION, NULL },
};
+
static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+ CharacterEncodingDetector *detector = new CharacterEncodingDetector();
+
for (size_t i = 0; i < kNumMapEntries; ++i) {
const char *value;
if (meta->findCString(kMap[i].from, &value)) {
- mMetaData.add(kMap[i].to, String8(value));
+ if (kMap[i].name) {
+ // add to charset detector
+ detector->addTag(kMap[i].name, value);
+ } else {
+ // directly add to output list
+ mMetaData.add(kMap[i].to, String8(value));
+ }
}
}
+ detector->detectAndConvert();
+ int size = detector->size();
+ if (size) {
+ for (int i = 0; i < size; i++) {
+ const char *name;
+ const char *value;
+ detector->getTag(i, &name, &value);
+ for (size_t j = 0; j < kNumMapEntries; ++j) {
+ if (kMap[j].name && !strcmp(kMap[j].name, name)) {
+ mMetaData.add(kMap[j].to, String8(value));
+ }
+ }
+ }
+ }
+ delete detector;
+
const void *data;
uint32_t type;
size_t dataSize;
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index da50c56..1fdb244 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -338,7 +338,7 @@
status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
String16("TimedEventQueue"),
- String16("media"));
+ String16("media")); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
if (status == NO_ERROR) {
mWakeLockToken = binder;
@@ -363,7 +363,7 @@
CHECK(mWakeLockToken != 0);
if (mPowerManager != 0) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
}
mWakeLockToken.clear();
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
index 23d5ff1..cfa9ca5 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
@@ -67,10 +67,6 @@
kNumBuffers = 2,
};
- enum {
- kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
- };
-
// OMX input buffer's timestamp and flags
typedef struct {
int64_t mTimeUs;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index cc4ea8f..c59a1b9 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -56,10 +56,6 @@
kNumBuffers = 2,
};
- enum {
- kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
- };
-
// OMX input buffer's timestamp and flags
typedef struct {
int64_t mTimeUs;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 423a057..828577a 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -23,9 +23,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
-#include "vpx/vpx_decoder.h"
-#include "vpx/vpx_codec.h"
-#include "vpx/vp8dx.h"
namespace android {
@@ -41,7 +38,8 @@
NULL /* profileLevels */, 0 /* numProfileLevels */,
320 /* width */, 240 /* height */, callbacks, appData, component),
mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
- mCtx(NULL) {
+ mCtx(NULL),
+ mImg(NULL) {
initPorts(kNumBuffers, 768 * 1024 /* inputBufferSize */,
kNumBuffers,
codingType == OMX_VIDEO_CodingVP8 ? MEDIA_MIMETYPE_VIDEO_VP8 : MEDIA_MIMETYPE_VIDEO_VP9);
@@ -118,35 +116,30 @@
}
}
- if (vpx_codec_decode(
- (vpx_codec_ctx_t *)mCtx,
- inHeader->pBuffer + inHeader->nOffset,
- inHeader->nFilledLen,
- NULL,
- 0)) {
- ALOGE("on2 decoder failed to decode frame.");
+ if (mImg == NULL) {
+ if (vpx_codec_decode(
+ (vpx_codec_ctx_t *)mCtx,
+ inHeader->pBuffer + inHeader->nOffset,
+ inHeader->nFilledLen,
+ NULL,
+ 0)) {
+ ALOGE("on2 decoder failed to decode frame.");
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ vpx_codec_iter_t iter = NULL;
+ mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
}
- vpx_codec_iter_t iter = NULL;
- vpx_image_t *img = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
+ if (mImg != NULL) {
+ CHECK_EQ(mImg->fmt, IMG_FMT_I420);
- if (img != NULL) {
- CHECK_EQ(img->fmt, IMG_FMT_I420);
-
- uint32_t width = img->d_w;
- uint32_t height = img->d_h;
-
- if (width != mWidth || height != mHeight) {
- mWidth = width;
- mHeight = height;
-
- updatePortDefinitions();
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
+ uint32_t width = mImg->d_w;
+ uint32_t height = mImg->d_h;
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, width, height);
+ if (portWillReset) {
return;
}
@@ -155,31 +148,16 @@
outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
outHeader->nTimeStamp = inHeader->nTimeStamp;
- const uint8_t *srcLine = (const uint8_t *)img->planes[PLANE_Y];
uint8_t *dst = outHeader->pBuffer;
- for (size_t i = 0; i < img->d_h; ++i) {
- memcpy(dst, srcLine, img->d_w);
+ const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
+ size_t srcYStride = mImg->stride[PLANE_Y];
+ size_t srcUStride = mImg->stride[PLANE_U];
+ size_t srcVStride = mImg->stride[PLANE_V];
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
- srcLine += img->stride[PLANE_Y];
- dst += img->d_w;
- }
-
- srcLine = (const uint8_t *)img->planes[PLANE_U];
- for (size_t i = 0; i < img->d_h / 2; ++i) {
- memcpy(dst, srcLine, img->d_w / 2);
-
- srcLine += img->stride[PLANE_U];
- dst += img->d_w / 2;
- }
-
- srcLine = (const uint8_t *)img->planes[PLANE_V];
- for (size_t i = 0; i < img->d_h / 2; ++i) {
- memcpy(dst, srcLine, img->d_w / 2);
-
- srcLine += img->stride[PLANE_V];
- dst += img->d_w / 2;
- }
-
+ mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
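
The restructuring above decodes an input buffer only when no decoded image is pending (mImg == NULL) and then drains it via vpx_codec_get_frame(); caching the image lets the same input survive an early return when the output port is about to reset. A minimal standalone libvpx sketch of the decode-then-drain pattern (illustrative, not the component code):

    // Decode one compressed buffer and return the first decoded image, or NULL.
    #include <cstdint>
    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"

    static vpx_image_t *decodeOne(vpx_codec_ctx_t *ctx,
                                  const uint8_t *data, unsigned int size) {
        if (vpx_codec_decode(ctx, data, size, NULL /* user_priv */, 0 /* deadline */)) {
            return NULL;                          // decode error
        }
        vpx_codec_iter_t iter = NULL;
        return vpx_codec_get_frame(ctx, &iter);   // NULL if no frame is ready yet
    }
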
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index cd5eb28..8f68693 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -20,6 +20,10 @@
#include "SoftVideoDecoderOMXComponent.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8dx.h"
+
namespace android {
struct SoftVPX : public SoftVideoDecoderOMXComponent {
@@ -47,6 +51,8 @@
void *mCtx;
+ vpx_image_t *mImg;
+
status_t initDecoder();
DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index c5a83d1..5b4c954 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -91,10 +91,6 @@
const char *name, OMX_INDEXTYPE *index);
private:
- enum {
- kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
- };
-
enum TemporalReferences {
// For 1 layer case: reference all (last, golden, and alt ref), but only
// update last.
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index a7bde97..cf3c3e3 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -58,7 +58,6 @@
320 /* width */, 240 /* height */, callbacks, appData, component),
mHandle(NULL),
mInputBufferCount(0),
- mPictureSize(mWidth * mHeight * 3 / 2),
mFirstPicture(NULL),
mFirstPictureId(-1),
mPicId(0),
@@ -118,7 +117,7 @@
}
H264SwDecRet ret = H264SWDEC_PIC_RDY;
- bool portSettingsChanged = false;
+ bool portWillReset = false;
while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
&& outQueue.size() == kNumOutputBuffers) {
@@ -161,17 +160,13 @@
H264SwDecInfo decoderInfo;
CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK);
- if (handlePortSettingChangeEvent(&decoderInfo)) {
- portSettingsChanged = true;
- }
-
- if (decoderInfo.croppingFlag &&
- handleCropRectEvent(&decoderInfo.cropParams)) {
- portSettingsChanged = true;
- }
+ bool cropChanged = handleCropChange(decoderInfo);
+ handlePortSettingsChange(
+ &portWillReset, decoderInfo.picWidth, decoderInfo.picHeight,
+ cropChanged);
}
} else {
- if (portSettingsChanged) {
+ if (portWillReset) {
if (H264SwDecNextPicture(mHandle, &decodedPicture, 0)
== H264SWDEC_PIC_RDY) {
@@ -199,8 +194,7 @@
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
- if (portSettingsChanged) {
- portSettingsChanged = false;
+ if (portWillReset) {
return;
}
@@ -215,44 +209,33 @@
}
}
-bool SoftAVC::handlePortSettingChangeEvent(const H264SwDecInfo *info) {
- if (mWidth != info->picWidth || mHeight != info->picHeight) {
- mWidth = info->picWidth;
- mHeight = info->picHeight;
- mPictureSize = mWidth * mHeight * 3 / 2;
- updatePortDefinitions();
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
- return true;
+bool SoftAVC::handleCropChange(const H264SwDecInfo& decInfo) {
+ if (!decInfo.croppingFlag) {
+ return false;
}
- return false;
-}
-
-bool SoftAVC::handleCropRectEvent(const CropParams *crop) {
- if (mCropLeft != crop->cropLeftOffset ||
- mCropTop != crop->cropTopOffset ||
- mCropWidth != crop->cropOutWidth ||
- mCropHeight != crop->cropOutHeight) {
- mCropLeft = crop->cropLeftOffset;
- mCropTop = crop->cropTopOffset;
- mCropWidth = crop->cropOutWidth;
- mCropHeight = crop->cropOutHeight;
-
- notify(OMX_EventPortSettingsChanged, 1,
- OMX_IndexConfigCommonOutputCrop, NULL);
-
- return true;
+ const CropParams& crop = decInfo.cropParams;
+ if (mCropLeft == crop.cropLeftOffset &&
+ mCropTop == crop.cropTopOffset &&
+ mCropWidth == crop.cropOutWidth &&
+ mCropHeight == crop.cropOutHeight) {
+ return false;
}
- return false;
+
+ mCropLeft = crop.cropLeftOffset;
+ mCropTop = crop.cropTopOffset;
+ mCropWidth = crop.cropOutWidth;
+ mCropHeight = crop.cropOutHeight;
+ return true;
}
void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) {
CHECK(mFirstPicture == NULL);
mFirstPictureId = picId;
- mFirstPicture = new uint8_t[mPictureSize];
- memcpy(mFirstPicture, data, mPictureSize);
+ uint32_t pictureSize = mWidth * mHeight * 3 / 2;
+ mFirstPicture = new uint8_t[pictureSize];
+ memcpy(mFirstPicture, data, pictureSize);
}
void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) {
@@ -263,9 +246,17 @@
OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId);
outHeader->nTimeStamp = header->nTimeStamp;
outHeader->nFlags = header->nFlags;
- outHeader->nFilledLen = mPictureSize;
- memcpy(outHeader->pBuffer + outHeader->nOffset,
- data, mPictureSize);
+ outHeader->nFilledLen = mWidth * mHeight * 3 / 2;
+
+ uint8_t *dst = outHeader->pBuffer + outHeader->nOffset;
+ const uint8_t *srcY = data;
+ const uint8_t *srcU = srcY + mWidth * mHeight;
+ const uint8_t *srcV = srcU + mWidth * mHeight / 4;
+ size_t srcYStride = mWidth;
+ size_t srcUStride = mWidth / 2;
+ size_t srcVStride = srcUStride;
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
mPicToHeaderMap.removeItem(picId);
delete header;
outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index ee69926..253a406 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -55,8 +55,6 @@
size_t mInputBufferCount;
- uint32_t mPictureSize;
-
uint8_t *mFirstPicture;
int32_t mFirstPictureId;
@@ -75,8 +73,7 @@
void drainAllOutputBuffers(bool eos);
void drainOneOutputBuffer(int32_t picId, uint8_t *data);
void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
- bool handleCropRectEvent(const CropParams* crop);
- bool handlePortSettingChangeEvent(const H264SwDecInfo *info);
+ bool handleCropChange(const H264SwDecInfo& decInfo);
DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
};
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 9b930bc..c97be28 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,18 +16,89 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
- <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" />
- <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
- <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-983040" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-139264" />
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
</Decoders>
<Encoders>
- <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
- <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+ <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level2 -->
+ <Limit name="size" min="2x2" max="896x896" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-11880" />
+ <Limit name="bitrate" range="1-2000000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
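
As an illustrative check of the limits declared above (using the 16x16 block size they declare), a 1280x720 AVC stream at 30 fps needs (1280/16) * (720/16) * 30 = 108,000 blocks per second, well within the 1-983040 range for OMX.google.h264.decoder, while a 2048x2048 stream at 30 fps needs 128 * 128 * 30 = 491,520.
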
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index ebf9d8d..88b1c92 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -68,14 +68,14 @@
ALooper::ALooper()
: mRunningLocally(false) {
+ // Clean up stale AHandlers. Doing this here instead of in the destructor avoids
+ // the side effect of the unregister call recursively deleting objects.
+ gLooperRoster.unregisterStaleHandlers();
}
ALooper::~ALooper() {
stop();
-
- // Since this looper is "dead" (or as good as dead by now),
- // have ALooperRoster unregister any handlers still registered for it.
- gLooperRoster.unregisterStaleHandlers();
+ // stale AHandlers are now cleaned up in the constructor of the next ALooper to come along
}
void ALooper::setName(const char *name) {
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 0f44b52..e0dc768 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -99,35 +99,13 @@
status_t ALooperRoster::postMessage(
const sp<AMessage> &msg, int64_t delayUs) {
- Mutex::Autolock autoLock(mLock);
- return postMessage_l(msg, delayUs);
-}
-status_t ALooperRoster::postMessage_l(
- const sp<AMessage> &msg, int64_t delayUs) {
- ssize_t index = mHandlers.indexOfKey(msg->target());
-
- if (index < 0) {
- ALOGW("failed to post message '%s'. Target handler not registered.",
- msg->debugString().c_str());
- return -ENOENT;
- }
-
- const HandlerInfo &info = mHandlers.valueAt(index);
-
- sp<ALooper> looper = info.mLooper.promote();
+ sp<ALooper> looper = findLooper(msg->target());
if (looper == NULL) {
- ALOGW("failed to post message. "
- "Target handler %d still registered, but object gone.",
- msg->target());
-
- mHandlers.removeItemsAt(index);
return -ENOENT;
}
-
looper->post(msg, delayUs);
-
return OK;
}
@@ -181,18 +159,23 @@
status_t ALooperRoster::postAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response) {
+ sp<ALooper> looper = findLooper(msg->target());
+
+ if (looper == NULL) {
+ ALOGW("failed to post message. "
+ "Target handler %d still registered, but object gone.",
+ msg->target());
+ response->clear();
+ return -ENOENT;
+ }
+
Mutex::Autolock autoLock(mLock);
uint32_t replyID = mNextReplyID++;
msg->setInt32("replyID", replyID);
- status_t err = postMessage_l(msg, 0 /* delayUs */);
-
- if (err != OK) {
- response->clear();
- return err;
- }
+ looper->post(msg, 0 /* delayUs */);
ssize_t index;
while ((index = mReplies.indexOfKey(replyID)) < 0) {
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index d268aa4..bc3e3fb 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -14,6 +14,11 @@
* limitations under the License.
*/
+#define LOG_TAG "AMessage"
+//#define LOG_NDEBUG 0
+//#define DUMP_STATS
+#include <cutils/log.h>
+
#include "AMessage.h"
#include <ctype.h>
@@ -60,12 +65,14 @@
void AMessage::clear() {
for (size_t i = 0; i < mNumItems; ++i) {
Item *item = &mItems[i];
- freeItem(item);
+ delete[] item->mName;
+ item->mName = NULL;
+ freeItemValue(item);
}
mNumItems = 0;
}
-void AMessage::freeItem(Item *item) {
+void AMessage::freeItemValue(Item *item) {
switch (item->mType) {
case kTypeString:
{
@@ -88,25 +95,85 @@
}
}
-AMessage::Item *AMessage::allocateItem(const char *name) {
- name = AAtomizer::Atomize(name);
+#ifdef DUMP_STATS
+#include <utils/Mutex.h>
- size_t i = 0;
- while (i < mNumItems && mItems[i].mName != name) {
- ++i;
+Mutex gLock;
+static int32_t gFindItemCalls = 1;
+static int32_t gDupCalls = 1;
+static int32_t gAverageNumItems = 0;
+static int32_t gAverageNumChecks = 0;
+static int32_t gAverageNumMemChecks = 0;
+static int32_t gAverageDupItems = 0;
+static int32_t gLastChecked = -1;
+
+static void reportStats() {
+ int32_t time = (ALooper::GetNowUs() / 1000);
+ if (time / 1000 != gLastChecked / 1000) {
+ gLastChecked = time;
+ ALOGI("called findItemIx %zu times (for len=%.1f i=%.1f/%.1f mem) dup %zu times (for len=%.1f)",
+ gFindItemCalls,
+ gAverageNumItems / (float)gFindItemCalls,
+ gAverageNumChecks / (float)gFindItemCalls,
+ gAverageNumMemChecks / (float)gFindItemCalls,
+ gDupCalls,
+ gAverageDupItems / (float)gDupCalls);
+ gFindItemCalls = gDupCalls = 1;
+ gAverageNumItems = gAverageNumChecks = gAverageNumMemChecks = gAverageDupItems = 0;
+ gLastChecked = time;
}
+}
+#endif
+inline size_t AMessage::findItemIndex(const char *name, size_t len) const {
+#ifdef DUMP_STATS
+ size_t memchecks = 0;
+#endif
+ size_t i = 0;
+ for (; i < mNumItems; i++) {
+ if (len != mItems[i].mNameLength) {
+ continue;
+ }
+#ifdef DUMP_STATS
+ ++memchecks;
+#endif
+ if (!memcmp(mItems[i].mName, name, len)) {
+ break;
+ }
+ }
+#ifdef DUMP_STATS
+ {
+ Mutex::Autolock _l(gLock);
+ ++gFindItemCalls;
+ gAverageNumItems += mNumItems;
+ gAverageNumMemChecks += memchecks;
+ gAverageNumChecks += i;
+ reportStats();
+ }
+#endif
+ return i;
+}
+
+// assumes item's name was uninitialized or NULL
+void AMessage::Item::setName(const char *name, size_t len) {
+ mNameLength = len;
+ mName = new char[len + 1];
+ memcpy((void*)mName, name, len + 1);
+}
+
+AMessage::Item *AMessage::allocateItem(const char *name) {
+ size_t len = strlen(name);
+ size_t i = findItemIndex(name, len);
Item *item;
if (i < mNumItems) {
item = &mItems[i];
- freeItem(item);
+ freeItemValue(item);
} else {
CHECK(mNumItems < kMaxNumItems);
i = mNumItems++;
item = &mItems[i];
-
- item->mName = name;
+ item->setName(name, len);
}
return item;
@@ -114,31 +181,18 @@
const AMessage::Item *AMessage::findItem(
const char *name, Type type) const {
- name = AAtomizer::Atomize(name);
-
- for (size_t i = 0; i < mNumItems; ++i) {
+ size_t i = findItemIndex(name, strlen(name));
+ if (i < mNumItems) {
const Item *item = &mItems[i];
+ return item->mType == type ? item : NULL;
- if (item->mName == name) {
- return item->mType == type ? item : NULL;
- }
}
-
return NULL;
}
bool AMessage::contains(const char *name) const {
- name = AAtomizer::Atomize(name);
-
- for (size_t i = 0; i < mNumItems; ++i) {
- const Item *item = &mItems[i];
-
- if (item->mName == name) {
- return true;
- }
- }
-
- return false;
+ size_t i = findItemIndex(name, strlen(name));
+ return i < mNumItems;
}
#define BASIC_TYPE(NAME,FIELDNAME,TYPENAME) \
@@ -297,11 +351,20 @@
sp<AMessage> msg = new AMessage(mWhat, mTarget);
msg->mNumItems = mNumItems;
+#ifdef DUMP_STATS
+ {
+ Mutex::Autolock _l(gLock);
+ ++gDupCalls;
+ gAverageDupItems += mNumItems;
+ reportStats();
+ }
+#endif
+
for (size_t i = 0; i < mNumItems; ++i) {
const Item *from = &mItems[i];
Item *to = &msg->mItems[i];
- to->mName = from->mName;
+ to->setName(from->mName, from->mNameLength);
to->mType = from->mType;
switch (from->mType) {
@@ -472,11 +535,11 @@
sp<AMessage> msg = new AMessage(what);
msg->mNumItems = static_cast<size_t>(parcel.readInt32());
-
for (size_t i = 0; i < msg->mNumItems; ++i) {
Item *item = &msg->mItems[i];
- item->mName = AAtomizer::Atomize(parcel.readCString());
+ const char *name = parcel.readCString();
+ item->setName(name, strlen(name));
item->mType = static_cast<Type>(parcel.readInt32());
switch (item->mType) {
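
With this change item names are no longer shared atomized pointers; each item stores its own copy plus a length, and findItemIndex() compares lengths before falling back to memcmp. The observable behavior is unchanged: lookups match on string content. A minimal usage sketch (illustrative; assumes the usual foundation headers such as AMessage.h and ADebug.h plus <cstdio>):

    // Lookups are by name content, so a caller-built string matches even though
    // it is a different char* than the name stored inside the message.
    sp<AMessage> msg = new AMessage();
    msg->setInt32("sample-rate", 44100);

    char name[32];
    snprintf(name, sizeof(name), "%s-%s", "sample", "rate");   // "sample-rate"
    int32_t rate;
    CHECK(msg->findInt32(name, &rate));                        // found: rate == 44100
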
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index f98af70..83481bc 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -498,7 +498,7 @@
break;
}
- onCheckBandwidth();
+ onCheckBandwidth(msg);
break;
}
@@ -935,14 +935,22 @@
}
}
- // Consider only 80% of the available bandwidth usable.
- bandwidthBps = (bandwidthBps * 8) / 10;
-
// Pick the highest bandwidth stream below or equal to estimated bandwidth.
index = mBandwidthItems.size() - 1;
- while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
- > (size_t)bandwidthBps) {
+ while (index > 0) {
+ // consider only 80% of the available bandwidth, but if we are switching up,
+ // be even more conservative (70%) to avoid overestimating and immediately
+ // switching back.
+ size_t adjustedBandwidthBps = bandwidthBps;
+ if (index > mCurBandwidthIndex) {
+ adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10;
+ } else {
+ adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10;
+ }
+ if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) {
+ break;
+ }
--index;
}
}
@@ -1410,6 +1418,7 @@
// All fetchers have now been started, the configuration change
// has completed.
+ cancelCheckBandwidthEvent();
scheduleCheckBandwidthEvent();
ALOGV("XXX configuration change completed.");
@@ -1546,20 +1555,16 @@
}
}
-void LiveSession::onCheckBandwidth() {
+void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) {
size_t bandwidthIndex = getBandwidthIndex();
if (canSwitchBandwidthTo(bandwidthIndex)) {
changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
} else {
- scheduleCheckBandwidthEvent();
+ // There is nothing to do now, so come back and check again 10 seconds later.
+ // If we DO change configuration, once that completes it'll schedule a new
+ // check bandwidth event with an incremented mCheckBandwidthGeneration.
+ msg->post(10000000ll);
}
-
- // Handling the kWhatCheckBandwidth even here does _not_ automatically
- // schedule another one on return, only an explicit call to
- // scheduleCheckBandwidthEvent will do that.
- // This ensures that only one configuration change is ongoing at any
- // one time, once that completes it'll schedule another check bandwidth
- // event.
}
void LiveSession::postPrepared(status_t err) {
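
A worked example of the asymmetric margins in getBandwidthIndex() above: with variants at 500 kbps, 1 Mbps and 2 Mbps, an estimate of 1.35 Mbps while playing the 500 kbps variant is scaled by 70% to 0.945 Mbps for the upward comparison, so no switch happens; the estimate has to exceed roughly 1.43 Mbps (1 Mbps / 0.7) before the 1 Mbps variant is chosen. Once playing at 1 Mbps, the 80% margin applies to that variant, so the estimate only has to stay above 1.25 Mbps (1 Mbps / 0.8) to avoid switching back down.
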
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index a68faf0..8a800da 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -262,7 +262,7 @@
void cancelBandwidthSwitch();
bool canSwitchBandwidthTo(size_t bandwidthIndex);
- void onCheckBandwidth();
+ void onCheckBandwidth(const sp<AMessage> &msg);
void finishDisconnect();
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 80cb2d0..3ef0f06 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -490,11 +490,11 @@
mStreamTypeMask = streamTypeMask;
- mStartTimeUs = startTimeUs;
mSegmentStartTimeUs = segmentStartTimeUs;
mDiscontinuitySeq = startDiscontinuitySeq;
- if (mStartTimeUs >= 0ll) {
+ if (startTimeUs >= 0) {
+ mStartTimeUs = startTimeUs;
mSeqNumber = -1;
mStartup = true;
mPrepared = false;
@@ -737,12 +737,6 @@
const int32_t lastSeqNumberInPlaylist =
firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
- if (mStartup && mSeqNumber >= 0
- && (mSeqNumber < firstSeqNumberInPlaylist || mSeqNumber > lastSeqNumberInPlaylist)) {
- // in case we guessed wrong during reconfiguration, try fetching the latest content.
- mSeqNumber = lastSeqNumberInPlaylist;
- }
-
if (mDiscontinuitySeq < 0) {
mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
}
@@ -754,6 +748,9 @@
if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
// If this is a live session, start 3 segments from the end on connect
mSeqNumber = lastSeqNumberInPlaylist - 3;
+ if (mSeqNumber < firstSeqNumberInPlaylist) {
+ mSeqNumber = firstSeqNumberInPlaylist;
+ }
} else {
mSeqNumber = getSeqNumberForTime(mStartTimeUs);
mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
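
A worked example of the clamp added above: a short live playlist advertising sequence numbers 10 through 12 previously produced mSeqNumber = 12 - 3 = 9, a segment that is not in the playlist; with the clamp the fetcher starts at 10, the first available segment.
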
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index f8c61eb..591b38e 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -58,6 +58,11 @@
} mTransition;
};
+ enum {
+ kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
+ kPrepareForAdaptivePlaybackIndex,
+ };
+
void addPort(const OMX_PARAM_PORTDEFINITIONTYPE &def);
virtual OMX_ERRORTYPE internalGetParameter(
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index 7f200dd..4a6ab63 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -55,12 +55,22 @@
virtual OMX_ERRORTYPE getConfig(
OMX_INDEXTYPE index, OMX_PTR params);
+ virtual OMX_ERRORTYPE getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index);
+
void initPorts(OMX_U32 numInputBuffers,
OMX_U32 inputBufferSize,
OMX_U32 numOutputBuffers,
const char *mimeType);
- virtual void updatePortDefinitions();
+ virtual void updatePortDefinitions(bool updateCrop = true);
+
+ void handlePortSettingsChange(
+ bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged = false);
+
+ void copyYV12FrameToOutputBuffer(
+ uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride);
enum {
kInputPortIndex = 0,
@@ -68,6 +78,8 @@
kMaxPortIndex = 1,
};
+ bool mIsAdaptive;
+ uint32_t mAdaptiveMaxWidth, mAdaptiveMaxHeight;
uint32_t mWidth, mHeight;
uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 1c383f7..e533fdd 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -22,6 +22,7 @@
#include "include/SoftVideoDecoderOMXComponent.h"
+#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -50,6 +51,9 @@
OMX_PTR appData,
OMX_COMPONENTTYPE **component)
: SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mIsAdaptive(false),
+ mAdaptiveMaxWidth(0),
+ mAdaptiveMaxHeight(0),
mWidth(width),
mHeight(height),
mCropLeft(0),
@@ -119,16 +123,18 @@
updatePortDefinitions();
}
-void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
+void SoftVideoDecoderOMXComponent::updatePortDefinitions(bool updateCrop) {
OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
def->format.video.nFrameWidth = mWidth;
def->format.video.nFrameHeight = mHeight;
def->format.video.nStride = def->format.video.nFrameWidth;
def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+ def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
+
def = &editPortInfo(kOutputPortIndex)->mDef;
- def->format.video.nFrameWidth = mWidth;
- def->format.video.nFrameHeight = mHeight;
+ def->format.video.nFrameWidth = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+ def->format.video.nFrameHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
def->format.video.nStride = def->format.video.nFrameWidth;
def->format.video.nSliceHeight = def->format.video.nFrameHeight;
@@ -136,10 +142,77 @@
(def->format.video.nFrameWidth *
def->format.video.nFrameHeight * 3) / 2;
- mCropLeft = 0;
- mCropTop = 0;
- mCropWidth = mWidth;
- mCropHeight = mHeight;
+ if (updateCrop) {
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = mWidth;
+ mCropHeight = mHeight;
+ }
+}
+
+void SoftVideoDecoderOMXComponent::handlePortSettingsChange(
+ bool *portWillReset, uint32_t width, uint32_t height, bool cropChanged) {
+ *portWillReset = false;
+ bool sizeChanged = (width != mWidth || height != mHeight);
+
+ if (sizeChanged || cropChanged) {
+ mWidth = width;
+ mHeight = height;
+
+ bool updateCrop = !cropChanged;
+ if ((sizeChanged && !mIsAdaptive)
+ || width > mAdaptiveMaxWidth
+ || height > mAdaptiveMaxHeight) {
+ if (mIsAdaptive) {
+ if (width > mAdaptiveMaxWidth) {
+ mAdaptiveMaxWidth = width;
+ }
+ if (height > mAdaptiveMaxHeight) {
+ mAdaptiveMaxHeight = height;
+ }
+ }
+ updatePortDefinitions(updateCrop);
+ notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ *portWillReset = true;
+ } else {
+ updatePortDefinitions(updateCrop);
+ notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+ OMX_IndexConfigCommonOutputCrop, NULL);
+ }
+ }
+}
+
+void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
+ uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride) {
+ size_t dstYStride = mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+ size_t dstUVStride = dstYStride / 2;
+ size_t dstHeight = mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+
+ for (size_t i = 0; i < dstHeight; ++i) {
+ if (i < mHeight) {
+ memcpy(dst, srcY, mWidth);
+ srcY += srcYStride;
+ }
+ dst += dstYStride;
+ }
+
+ for (size_t i = 0; i < dstHeight / 2; ++i) {
+ if (i < mHeight / 2) {
+ memcpy(dst, srcU, mWidth / 2);
+ srcU += srcUStride;
+ }
+ dst += dstUVStride;
+ }
+
+ for (size_t i = 0; i < dstHeight / 2; ++i) {
+ if (i < mHeight / 2) {
+ memcpy(dst, srcV, mWidth / 2);
+ srcV += srcVStride;
+ }
+ dst += dstUVStride;
+ }
}
OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
@@ -199,7 +272,10 @@
OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
- switch (index) {
+ // Include extension index OMX_INDEXEXTTYPE.
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
@@ -230,6 +306,24 @@
return OMX_ErrorNone;
}
+ case kPrepareForAdaptivePlaybackIndex:
+ {
+ const PrepareForAdaptivePlaybackParams* adaptivePlaybackParams =
+ (const PrepareForAdaptivePlaybackParams *)params;
+ mIsAdaptive = adaptivePlaybackParams->bEnable;
+ if (mIsAdaptive) {
+ mAdaptiveMaxWidth = adaptivePlaybackParams->nMaxFrameWidth;
+ mAdaptiveMaxHeight = adaptivePlaybackParams->nMaxFrameHeight;
+ mWidth = mAdaptiveMaxWidth;
+ mHeight = mAdaptiveMaxHeight;
+ } else {
+ mAdaptiveMaxWidth = 0;
+ mAdaptiveMaxHeight = 0;
+ }
+ updatePortDefinitions();
+ return OMX_ErrorNone;
+ }
+
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
@@ -259,6 +353,16 @@
}
}
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index) {
+ if (!strcmp(name, "OMX.google.android.index.prepareForAdaptivePlayback")) {
+ *(int32_t*)index = kPrepareForAdaptivePlaybackIndex;
+ return OMX_ErrorNone;
+ }
+
+ return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
void SoftVideoDecoderOMXComponent::onReset() {
mOutputPortSettingsChange = NONE;
}
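
A worked example of the adaptive sizing above: with PrepareForAdaptivePlaybackParams of nMaxFrameWidth = 1920 and nMaxFrameHeight = 1088, updatePortDefinitions() sizes each output buffer at 1920 * 1088 * 3 / 2 = 3,133,440 bytes even while a 640x480 stream is playing, and copyYV12FrameToOutputBuffer() writes the active 640x480 picture into the top-left of that buffer using a luma destination stride of 1920 (chroma stride 960), leaving the rows beyond the active height untouched.
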
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1f77b2f..1843722 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1941,9 +1941,8 @@
TEE_SINK_NEW, // copy input using a new pipe
TEE_SINK_OLD, // copy input using an existing pipe
} kind;
- NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common),
- audio_channel_count_from_in_mask(
- inStream->common.get_channels(&inStream->common)));
+ NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
+ audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
if (!mTeeSinkInputEnabled) {
kind = TEE_SINK_NO;
} else if (!Format_isValid(format)) {
@@ -2700,24 +2699,26 @@
// if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd
int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
if (teeFd >= 0) {
+ // FIXME use libsndfile
char wavHeader[44];
memcpy(wavHeader,
"RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0",
sizeof(wavHeader));
NBAIO_Format format = teeSource->format();
unsigned channelCount = Format_channelCount(format);
- ALOG_ASSERT(channelCount <= FCC_2);
uint32_t sampleRate = Format_sampleRate(format);
+ size_t frameSize = Format_frameSize(format);
wavHeader[22] = channelCount; // number of channels
wavHeader[24] = sampleRate; // sample rate
wavHeader[25] = sampleRate >> 8;
- wavHeader[32] = channelCount * 2; // block alignment
+ wavHeader[32] = frameSize; // block alignment
+ wavHeader[33] = frameSize >> 8;
write(teeFd, wavHeader, sizeof(wavHeader));
size_t total = 0;
bool firstRead = true;
+#define TEE_SINK_READ 1024 // frames per I/O operation
+ void *buffer = malloc(TEE_SINK_READ * frameSize);
for (;;) {
-#define TEE_SINK_READ 1024
- short buffer[TEE_SINK_READ * FCC_2];
size_t count = TEE_SINK_READ;
ssize_t actual = teeSource->read(buffer, count,
AudioBufferProvider::kInvalidPTS);
@@ -2730,14 +2731,17 @@
break;
}
ALOG_ASSERT(actual <= (ssize_t)count);
- write(teeFd, buffer, actual * channelCount * sizeof(short));
+ write(teeFd, buffer, actual * frameSize);
total += actual;
}
+ free(buffer);
lseek(teeFd, (off_t) 4, SEEK_SET);
- uint32_t temp = 44 + total * channelCount * sizeof(short) - 8;
+ uint32_t temp = 44 + total * frameSize - 8;
+ // FIXME not big-endian safe
write(teeFd, &temp, sizeof(temp));
lseek(teeFd, (off_t) 40, SEEK_SET);
- temp = total * channelCount * sizeof(short);
+ temp = total * frameSize;
+ // FIXME not big-endian safe
write(teeFd, &temp, sizeof(temp));
close(teeFd);
if (fd >= 0) {
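
For reference on the header patching above: the dump writes a canonical 44-byte PCM WAV header, in which byte 32 is the block-align field (the frame size, now taken from Format_frameSize() instead of assuming 16-bit stereo), offset 40 holds the data-chunk length (frames * frame size), and offset 4 holds the RIFF chunk size (44 + data length - 8). As a worked example, a 10-second 44.1 kHz 16-bit stereo capture has a 4-byte frame size, a 441,000 * 4 = 1,764,000 byte data chunk, and a RIFF size field of 1,764,036.
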
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 9e15293..2678cbf 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -420,7 +420,7 @@
// if non-NULL, then duplicate write() to this non-blocking sink
NBAIO_Sink* teeSink;
if ((teeSink = current->mTeeSink) != NULL) {
- (void) teeSink->write(mMixerBuffer, frameCount);
+ (void) teeSink->write(buffer, frameCount);
}
// FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
// but this code should be modified to handle both non-blocking and blocking sinks
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 2d0a25f..7544052 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -593,10 +593,10 @@
status = BAD_VALUE;
break;
}
- status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->release_audio_patch(hwDevice, mPatches[index]->mHalHandle);
+ status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
}
} else {
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
@@ -632,7 +632,7 @@
}
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- status = thread->sendReleaseAudioPatchConfigEvent(mPatches[index]->mHalHandle);
+ status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
AudioParameter param;
param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index f721d5c..97b1753 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -662,12 +662,14 @@
binder,
getWakeLockTag(),
String16("media"),
- uid);
+ uid,
+ true /* FIXME force oneway contrary to .aidl */);
} else {
status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
getWakeLockTag(),
- String16("media"));
+ String16("media"),
+ true /* FIXME force oneway contrary to .aidl */);
}
if (status == NO_ERROR) {
mWakeLockToken = binder;
@@ -687,7 +689,8 @@
if (mWakeLockToken != 0) {
ALOGV("releaseWakeLock_l() %s", mName);
if (mPowerManager != 0) {
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0,
+ true /* FIXME force oneway contrary to .aidl */);
}
mWakeLockToken.clear();
}
@@ -723,7 +726,8 @@
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
status_t status;
- status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
+ status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
+ true /* FIXME force oneway contrary to .aidl */);
ALOGV("acquireWakeLock_l() %s status %d", mName, status);
}
}
@@ -3662,6 +3666,10 @@
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
+ if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+ mEffectBufferValid = true;
+ }
+
// sink or mix buffer must be cleared if all tracks are connected to an
// effect chain as in this case the mixer will not write to the sink or mix buffer
// and track effects will accumulate into it
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c5ab832..6cbab04 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -182,7 +182,7 @@
#ifdef TEE_SINK
if (mTeeSinkTrackEnabled) {
- NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
+ NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
if (Format_isValid(pipeFormat)) {
Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
size_t numCounterOffers = 0;
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index cc0e965..c45acd0 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -98,8 +98,12 @@
inputDesc = new EffectVector(audioSession);
mInputs.add(input, inputDesc);
} else {
+ // EffectVector already exists; just increase its reference count
inputDesc = mInputs.valueAt(idx);
}
+ inputDesc->mRefCount++;
+
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -133,10 +137,14 @@
return status;
}
EffectVector *inputDesc = mInputs.valueAt(index);
- setProcessorEnabled(inputDesc, false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
- ALOGV("releaseInputEffects(): all effects released");
+ inputDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
+ if (inputDesc->mRefCount == 0) {
+ setProcessorEnabled(inputDesc, false);
+ delete inputDesc;
+ mInputs.removeItemsAt(index);
+ ALOGV("releaseInputEffects(): all effects released");
+ }
return status;
}
@@ -223,8 +231,12 @@
procDesc = new EffectVector(audioSession);
mOutputSessions.add(audioSession, procDesc);
} else {
+ // EffectVector already exists; just increase its reference count
procDesc = mOutputSessions.valueAt(idx);
}
+ procDesc->mRefCount++;
+
+ ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -262,12 +274,16 @@
}
EffectVector *procDesc = mOutputSessions.valueAt(index);
- setProcessorEnabled(procDesc, false);
- procDesc->mEffects.clear();
- delete procDesc;
- mOutputSessions.removeItemsAt(index);
- ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
- audioSession);
+ procDesc->mRefCount--;
+ ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+ if (procDesc->mRefCount == 0) {
+ setProcessorEnabled(procDesc, false);
+ procDesc->mEffects.clear();
+ delete procDesc;
+ mOutputSessions.removeItemsAt(index);
+ ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
+ audioSession);
+ }
return status;
}
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
index 351cb1a..dbe0d0e 100644
--- a/services/audiopolicy/AudioPolicyEffects.h
+++ b/services/audiopolicy/AudioPolicyEffects.h
@@ -131,9 +131,11 @@
// class to store vector of AudioEffects
class EffectVector {
public:
- EffectVector(int session) : mSessionId(session) {}
+ EffectVector(int session) : mSessionId(session), mRefCount(0) {}
/*virtual*/ ~EffectVector() {}
const int mSessionId;
+ // AudioPolicyManager keeps mLock, no need for lock on reference count here
+ int mRefCount;
Vector< sp<AudioEffect> >mEffects;
};
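The mRefCount field above keeps input and output session effects alive as long as any client still uses the session. A minimal standalone sketch of that pattern (illustrative only; std::map stands in for the service's KeyedVector, and the caller is assumed to hold the policy lock as noted above):

#include <map>

struct SessionEffects {
    int refCount = 0;      // incremented by add*, decremented by release*
    // ... effects attached to this session would live here ...
};

static std::map<int, SessionEffects> sSessions;   // keyed by input/session id

void addRef(int key)  { sSessions[key].refCount++; }

void release(int key) {
    auto it = sSessions.find(key);
    if (it == sSessions.end()) return;
    if (--it->second.refCount == 0) {
        // only the last user actually disables and frees the effects
        sSessions.erase(it);
    }
}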
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 06dd22c..22c4e04 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -64,6 +64,7 @@
const StringToEnum sDeviceNameToEnumTable[] = {
STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+ STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
@@ -485,7 +486,9 @@
// request to reuse existing output stream if one is already opened to reach the RX device
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(rxDevice, mOutputs);
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
@@ -524,7 +527,9 @@
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
// request to reuse existing output stream if one is already opened to reach the TX
// path output device
if (output != AUDIO_IO_HANDLE_NONE) {
@@ -1016,7 +1021,9 @@
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
- output = selectOutput(outputs, flags);
+ // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
+ flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+ output = selectOutput(outputs, flags, format);
}
ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
"format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
@@ -1027,7 +1034,8 @@
}
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
- audio_output_flags_t flags)
+ audio_output_flags_t flags,
+ audio_format_t format)
{
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously build by getOutputsForDevice()).
@@ -1050,6 +1058,17 @@
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
if (!outputDesc->isDuplicated()) {
+ // if a valid format is specified, skip output if not compatible
+ if (format != AUDIO_FORMAT_INVALID) {
+ if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if (format != outputDesc->mFormat) {
+ continue;
+ }
+ } else if (!audio_is_linear_pcm(format)) {
+ continue;
+ }
+ }
+
int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
if (commonFlags > maxCommonFlags) {
outputFlags = outputs[i];
@@ -1297,21 +1316,23 @@
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
bool isSoundTrigger = false;
+ audio_source_t halInputSource = inputSource;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
ALOGV("SoundTrigger capture on session %d input %d", session, input);
+ } else {
+ halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
-
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
&input,
&config,
&device,
String8(""),
- inputSource,
+ halInputSource,
flags);
// only accept input with the exact requested set of parameters
@@ -2305,7 +2326,9 @@
mOutputs);
// if the sink device is reachable via an opened output stream, request to go via
// this output stream by adding a second source to the patch description
- audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE);
+ audio_io_handle_t output = selectOutput(outputs,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isDuplicated()) {
@@ -3802,6 +3825,14 @@
break;
}
}
+
+ /* Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
+ and doesn't really need to. */
+ if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+ devices |= AUDIO_DEVICE_OUT_SPEAKER;
+ devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+ }
+
return devices;
}
@@ -3904,12 +3935,20 @@
// the isStreamActive() method only informs about the activity of a stream, not
// if it's for local playback. Note also that we use the same delay between both tests
device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+ //user "safe" speaker if available instead of normal speaker to avoid triggering
+ //other acoustic safety mechanisms for notification
+ if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+ device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
} else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
// while media is playing (or has recently played), use the same device
device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
} else {
// when media is not playing anymore, fall back on the sonification behavior
device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
+ //user "safe" speaker if available instead of normal speaker to avoid triggering
+ //other acoustic safety mechanisms for notification
+ if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+ device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
}
break;
@@ -4317,6 +4356,20 @@
mpClientInterface->onAudioPatchListUpdate();
}
}
+
+ // inform all input as well
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
+ if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+ AudioParameter inputCmd = AudioParameter();
+ ALOGV("%s: inform input %d of device:%d", __func__,
+ inputDescriptor->mIoHandle, device);
+ inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+ mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+ inputCmd.toString(),
+ delayMs);
+ }
+ }
}
// update stream volumes according to new device
@@ -4618,13 +4671,24 @@
// - one A2DP device + another device: happens with duplicated output. In this case
// retain the device on the A2DP output as the other must not correspond to an active
// selection if not the speaker.
+ // - HDMI-CEC system audio mode only output: pick the first available device in the priority order below.
if (device & AUDIO_DEVICE_OUT_SPEAKER) {
device = AUDIO_DEVICE_OUT_SPEAKER;
+ } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+ device = AUDIO_DEVICE_OUT_HDMI_ARC;
+ } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+ device = AUDIO_DEVICE_OUT_AUX_LINE;
+ } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+ device = AUDIO_DEVICE_OUT_SPDIF;
} else {
device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
}
}
+ /* SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control */
+ if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
+ device = AUDIO_DEVICE_OUT_SPEAKER;
+
ALOGW_IF(popcount(device) != 1,
"getDeviceForVolume() invalid device combination: %08x",
device);
@@ -6016,14 +6080,26 @@
return 0;
}
+ // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink
+ if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+ uint32_t samplingRate = UINT_MAX;
+ for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+ if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
+ samplingRate = mSamplingRates[i];
+ }
+ }
+ return (samplingRate == UINT_MAX) ? 0 : samplingRate;
+ }
+
uint32_t samplingRate = 0;
uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
// For mixed output and inputs, use max mixer sampling rates. Do not
// limit sampling rate otherwise
- if ((mType != AUDIO_PORT_TYPE_MIX) ||
- ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+ if (mType != AUDIO_PORT_TYPE_MIX) {
maxRate = UINT_MAX;
}
for (size_t i = 0; i < mSamplingRates.size(); i ++) {
@@ -6040,16 +6116,35 @@
if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
return AUDIO_CHANNEL_NONE;
}
-
audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
+
+ // For direct outputs, pick the minimum channel count: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink
+ if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+ (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+ uint32_t channelCount = UINT_MAX;
+ for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+ uint32_t cnlCount;
+ if (mUseInChannelMask) {
+ cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+ } else {
+ cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+ }
+ if ((cnlCount < channelCount) && (cnlCount > 0)) {
+ channelMask = mChannelMasks[i];
+ channelCount = cnlCount;
+ }
+ }
+ return channelMask;
+ }
+
uint32_t channelCount = 0;
uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
// For mixed output and inputs, use max mixer channel count. Do not
// limit channel count otherwise
- if ((mType != AUDIO_PORT_TYPE_MIX) ||
- ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)))) {
+ if (mType != AUDIO_PORT_TYPE_MIX) {
maxCount = UINT_MAX;
}
for (size_t i = 0; i < mChannelMasks.size(); i ++) {
@@ -6061,6 +6156,7 @@
}
if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
channelMask = mChannelMasks[i];
+ channelCount = cnlCount;
}
}
return channelMask;
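Several hunks above thread a format argument into selectOutput(). A minimal standalone sketch of the compatibility rule that argument adds (an assumed free function, not the AudioPolicyManager member; audio_is_linear_pcm() comes from system/audio.h):

#include <system/audio.h>

// An output is a candidate for the requested format when:
//  - no format constraint was given (AUDIO_FORMAT_INVALID), or
//  - it is a direct output opened with exactly that format, or
//  - it is a mixed output and the format is linear PCM (the mixer
//    cannot handle compressed formats).
static bool outputAcceptsFormat(bool isDirectOutput,
                                audio_format_t outputFormat,
                                audio_format_t requestedFormat) {
    if (requestedFormat == AUDIO_FORMAT_INVALID) {
        return true;
    }
    if (isDirectOutput) {
        return requestedFormat == outputFormat;
    }
    return audio_is_linear_pcm(requestedFormat);
}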
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 57e015e..da0d95d 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -713,7 +713,8 @@
uint32_t delayMs);
audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
- audio_output_flags_t flags);
+ audio_output_flags_t flags,
+ audio_format_t format);
// samplingRate parameter is an in/out and so may be modified
sp<IOProfile> getInputProfile(audio_devices_t device,
uint32_t& samplingRate,
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index 7f14960..50bb8c7 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -765,7 +765,16 @@
sp<AudioCommand> command2 = mAudioCommands[i];
// commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands
if (command2->mTime <= command->mTime) break;
- if (command2->mCommand != command->mCommand) continue;
+
+ // create audio patch or release audio patch commands are equivalent
+ // with regard to filtering
+ if ((command->mCommand == CREATE_AUDIO_PATCH) ||
+ (command->mCommand == RELEASE_AUDIO_PATCH)) {
+ if ((command2->mCommand != CREATE_AUDIO_PATCH) &&
+ (command2->mCommand != RELEASE_AUDIO_PATCH)) {
+ continue;
+ }
+ } else if (command2->mCommand != command->mCommand) continue;
switch (command->mCommand) {
case SET_PARAMETERS: {
@@ -817,6 +826,31 @@
// command status as the command is now delayed
delayMs = 1;
} break;
+
+ case CREATE_AUDIO_PATCH:
+ case RELEASE_AUDIO_PATCH: {
+ audio_patch_handle_t handle;
+ if (command->mCommand == CREATE_AUDIO_PATCH) {
+ handle = ((CreateAudioPatchData *)command->mParam.get())->mHandle;
+ } else {
+ handle = ((ReleaseAudioPatchData *)command->mParam.get())->mHandle;
+ }
+ audio_patch_handle_t handle2;
+ if (command2->mCommand == CREATE_AUDIO_PATCH) {
+ handle2 = ((CreateAudioPatchData *)command2->mParam.get())->mHandle;
+ } else {
+ handle2 = ((ReleaseAudioPatchData *)command2->mParam.get())->mHandle;
+ }
+ if (handle != handle2) break;
+ ALOGV("Filtering out %s audio patch command for handle %d",
+ (command->mCommand == CREATE_AUDIO_PATCH) ? "create" : "release", handle);
+ removedCommands.add(command2);
+ command->mTime = command2->mTime;
+ // force delayMs to non 0 so that code below does not request to wait for
+ // command status as the command is now delayed
+ delayMs = 1;
+ } break;
+
case START_TONE:
case STOP_TONE:
default:
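The new CREATE_AUDIO_PATCH/RELEASE_AUDIO_PATCH case above lets a newer patch command supersede an older one still waiting in the queue for the same handle. A minimal standalone sketch of that filtering rule (illustrative, simplified types; the real code also preserves the queue's time ordering and reuses the older command's timestamp):

#include <vector>

enum Cmd { CREATE_PATCH, RELEASE_PATCH, OTHER };
struct Command { Cmd cmd; int patchHandle; };

// Drop queued create/release commands made obsolete by a newer command
// targeting the same patch handle.
static void filterObsolete(std::vector<Command>& queue, const Command& incoming) {
    if (incoming.cmd != CREATE_PATCH && incoming.cmd != RELEASE_PATCH) return;
    for (auto it = queue.begin(); it != queue.end(); ) {
        bool patchFamily = (it->cmd == CREATE_PATCH || it->cmd == RELEASE_PATCH);
        if (patchFamily && it->patchHandle == incoming.patchHandle) {
            it = queue.erase(it);   // superseded by the newer command
        } else {
            ++it;
        }
    }
}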
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 2f485b9..e184d97 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -34,6 +34,7 @@
api1/client2/JpegProcessor.cpp \
api1/client2/CallbackProcessor.cpp \
api1/client2/ZslProcessor.cpp \
+ api1/client2/ZslProcessorInterface.cpp \
api1/client2/BurstCapture.cpp \
api1/client2/JpegCompressor.cpp \
api1/client2/CaptureSequencer.cpp \
@@ -47,6 +48,7 @@
device3/Camera3InputStream.cpp \
device3/Camera3OutputStream.cpp \
device3/Camera3ZslStream.cpp \
+ device3/Camera3DummyStream.cpp \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index bc40971..6f4a507 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -434,6 +434,9 @@
mCallbackProcessor->deleteStream();
mZslProcessor->deleteStream();
+ // Remove all ZSL stream state before disconnect; needed to work around b/15408128.
+ mZslProcessor->disconnect();
+
ALOGV("Camera %d: Disconnecting device", mCameraId);
mDevice->disconnect();
@@ -1088,6 +1091,22 @@
res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
outputStreams);
+ // try to reconfigure jpeg to video size if configureStreams failed
+ if (res == BAD_VALUE) {
+
+ ALOGV("%s: Camera %d: configure still size to video size before recording"
+ , __FUNCTION__, mCameraId);
+ params.overrideJpegSizeByVideoSize();
+ res = updateProcessorStream(mJpegProcessor, params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't configure still image size to video size: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+ res = mStreamingProcessor->startStream(StreamingProcessor::RECORD,
+ outputStreams);
+ }
+
if (res != OK) {
ALOGE("%s: Camera %d: Unable to start recording stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -1127,6 +1146,7 @@
mCameraService->playSound(CameraService::SOUND_RECORDING);
+ l.mParameters.recoverOverriddenJpegSize();
res = startPreviewL(l.mParameters, true);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to return to preview",
@@ -1271,6 +1291,9 @@
return OK;
}
+ if (l.mParameters.zslMode) {
+ mZslProcessor->clearZslQueue();
+ }
}
syncWithDevice();
@@ -1359,8 +1382,14 @@
SharedParameters::Lock l(mParameters);
+ Parameters::focusMode_t focusModeBefore = l.mParameters.focusMode;
res = l.mParameters.set(params);
if (res != OK) return res;
+ Parameters::focusMode_t focusModeAfter = l.mParameters.focusMode;
+
+ if (l.mParameters.zslMode && focusModeAfter != focusModeBefore) {
+ mZslProcessor->clearZslQueue();
+ }
res = updateRequests(l.mParameters);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index abe1235..33bdaa3 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -938,7 +938,20 @@
}
previewBuffer = mPreviewBuffer;
- memcpy(previewBuffer->base(), (uint8_t *)heap->base() + offset, size);
+ void* previewBufferBase = previewBuffer->base();
+ void* heapBase = heap->base();
+
+ if (heapBase == MAP_FAILED) {
+ ALOGE("%s: Failed to mmap heap for preview frame.", __FUNCTION__);
+ mLock.unlock();
+ return;
+ } else if (previewBufferBase == MAP_FAILED) {
+ ALOGE("%s: Failed to mmap preview buffer for preview frame.", __FUNCTION__);
+ mLock.unlock();
+ return;
+ }
+
+ memcpy(previewBufferBase, (uint8_t *) heapBase + offset, size);
sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
if (frame == 0) {
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index cb9aca6..9849f4d 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -445,11 +445,18 @@
if (mNewAEState) {
if (!mAeInPrecapture) {
// Waiting to see PRECAPTURE state
- if (mAETriggerId == mTriggerId &&
- mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
- ALOGV("%s: Got precapture start", __FUNCTION__);
- mAeInPrecapture = true;
- mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ if (mAETriggerId == mTriggerId) {
+ if (mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ ALOGV("%s: Got precapture start", __FUNCTION__);
+ mAeInPrecapture = true;
+ mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ } else if (mAEState == ANDROID_CONTROL_AE_STATE_CONVERGED ||
+ mAEState == ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED) {
+ // It is legal to transition to CONVERGED or FLASH_REQUIRED
+ // directly after a trigger.
+ ALOGV("%s: AE is already in good state, start capture", __FUNCTION__);
+ return STANDARD_CAPTURE;
+ }
}
} else {
// Waiting to see PRECAPTURE state end
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index e7f9a78..8d00590 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -249,6 +249,9 @@
// TODO: Pick maximum
pictureWidth = availableJpegSizes[0].width;
pictureHeight = availableJpegSizes[0].height;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureSizeOverriden = false;
params.setPictureSize(pictureWidth,
pictureHeight);
@@ -1381,8 +1384,8 @@
// PICTURE_SIZE
newParams.getPictureSize(&validatedParams.pictureWidth,
&validatedParams.pictureHeight);
- if (validatedParams.pictureWidth == pictureWidth ||
- validatedParams.pictureHeight == pictureHeight) {
+ if (validatedParams.pictureWidth != pictureWidth ||
+ validatedParams.pictureHeight != pictureHeight) {
Vector<Size> availablePictureSizes = getAvailableJpegSizes();
for (i = 0; i < availablePictureSizes.size(); i++) {
if ((availablePictureSizes[i].width ==
@@ -1798,6 +1801,7 @@
/** Update internal parameters */
*this = validatedParams;
+ updateOverriddenJpegSize();
/** Update external parameters calculated from the internal ones */
@@ -2115,6 +2119,52 @@
return OK;
}
+status_t Parameters::overrideJpegSizeByVideoSize() {
+ if (pictureSizeOverriden) {
+ ALOGV("Picture size has been overridden. Skip overriding");
+ return OK;
+ }
+
+ pictureSizeOverriden = true;
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+ // This change of picture size is invisible to app layer.
+ // Do not update app visible params
+ return OK;
+}
+
+status_t Parameters::updateOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip checking");
+ return OK;
+ }
+
+ pictureWidthLastSet = pictureWidth;
+ pictureHeightLastSet = pictureHeight;
+
+ if (pictureWidth <= videoWidth && pictureHeight <= videoHeight) {
+ // Picture size now fits within the video size. No need to override anymore
+ return recoverOverriddenJpegSize();
+ }
+
+ pictureWidth = videoWidth;
+ pictureHeight = videoHeight;
+
+ return OK;
+}
+
+status_t Parameters::recoverOverriddenJpegSize() {
+ if (!pictureSizeOverriden) {
+ ALOGV("Picture size has not been overridden. Skip recovering");
+ return OK;
+ }
+ pictureSizeOverriden = false;
+ pictureWidth = pictureWidthLastSet;
+ pictureHeight = pictureHeightLastSet;
+ return OK;
+}
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
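The three helpers added above form a small override/recover cycle around video snapshot. A minimal standalone sketch of that state machine (the struct itself is illustrative; field names mirror the patch):

// Picture-size override lifecycle used around recording:
//   startRecording  -> overrideByVideoSize()   (only when RECORD stream config failed)
//   setParameters   -> refresh or drop the override after Parameters::set
//   stopRecording   -> recover()
struct JpegSizeOverride {
    int pictureWidth = 0, pictureHeight = 0;    // effective JPEG size
    int lastSetWidth = 0, lastSetHeight = 0;    // size the app last requested
    bool overridden = false;

    void overrideByVideoSize(int videoW, int videoH) {
        if (overridden) return;                 // already overridden, keep it
        overridden = true;
        lastSetWidth = pictureWidth;  lastSetHeight = pictureHeight;
        pictureWidth = videoW;        pictureHeight = videoH;
    }

    void recover() {
        if (!overridden) return;
        overridden = false;
        pictureWidth = lastSetWidth;  pictureHeight = lastSetHeight;
    }
};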
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index d9d33c4..5e6e6ab 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -52,6 +52,9 @@
int previewTransform; // set by CAMERA_CMD_SET_DISPLAY_ORIENTATION
int pictureWidth, pictureHeight;
+ // Store the picture size before it is overridden by the video snapshot
+ int pictureWidthLastSet, pictureHeightLastSet;
+ bool pictureSizeOverriden;
int32_t jpegThumbSize[2];
uint8_t jpegQuality, jpegThumbQuality;
@@ -253,6 +256,12 @@
// Add/update JPEG entries in metadata
status_t updateRequestJpeg(CameraMetadata *request) const;
+ /* Helper functions to override jpeg size for video snapshot */
+ // Override jpeg size by video size. Called during startRecording.
+ status_t overrideJpegSizeByVideoSize();
+ // Recover overridden jpeg size. Called during stopRecording.
+ status_t recoverOverriddenJpegSize();
+
// Calculate the crop region rectangle based on current stream sizes
struct CropRegion {
float left;
@@ -348,6 +357,12 @@
// Get max size (from the size array) that matches the given aspect ratio.
Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
+ // Helper function for overriding jpeg size for video snapshot
+ // Check if overridden jpeg size needs to be updated after Parameters::set.
+ // The behavior of this function is tailored to the implementation of Parameters::set.
+ // Do not use this function for other purposes.
+ status_t updateOverriddenJpegSize();
+
struct StreamConfiguration {
int32_t format;
int32_t width;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 8fb876e..bb72206 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -48,6 +48,7 @@
mDevice(client->getCameraDevice()),
mSequencer(sequencer),
mId(client->getCameraId()),
+ mDeleted(false),
mZslBufferAvailable(false),
mZslStreamId(NO_STREAM),
mZslReprocessStreamId(NO_STREAM),
@@ -62,7 +63,7 @@
ZslProcessor::~ZslProcessor() {
ALOGV("%s: Exit", __FUNCTION__);
- deleteStream();
+ disconnect();
}
void ZslProcessor::onFrameAvailable() {
@@ -153,7 +154,7 @@
mId, strerror(-res), res);
return res;
}
- if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
+ if (mDeleted || currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
res = device->deleteReprocessStream(mZslReprocessStreamId);
if (res != OK) {
@@ -175,6 +176,8 @@
}
}
+ mDeleted = false;
+
if (mZslStreamId == NO_STREAM) {
// Create stream for HAL production
// TODO: Sort out better way to select resolution for ZSL
@@ -209,6 +212,14 @@
status_t ZslProcessor::deleteStream() {
ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ // WAR(b/15408128): do not delete stream unless client is being disconnected.
+ mDeleted = true;
+ return OK;
+}
+
+status_t ZslProcessor::disconnect() {
+ ATRACE_CALL();
status_t res;
Mutex::Autolock l(mInputMutex);
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index f4cf0c8..b6533cf 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -67,6 +67,7 @@
status_t updateStream(const Parameters ¶ms);
status_t deleteStream();
+ status_t disconnect();
int getStreamId() const;
status_t pushToReprocess(int32_t requestId);
@@ -86,6 +87,8 @@
wp<CaptureSequencer> mSequencer;
int mId;
+ bool mDeleted;
+
mutable Mutex mInputMutex;
bool mZslBufferAvailable;
Condition mZslBufferAvailableSignal;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 2d31275..fa65b74 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -44,6 +44,7 @@
sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
Thread(false),
+ mLatestClearedBufferTimestamp(0),
mState(RUNNING),
mClient(client),
mSequencer(sequencer),
@@ -107,7 +108,6 @@
ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
return;
}
- (void)timestamp;
entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
if (entry.count == 0) {
@@ -120,6 +120,9 @@
if (mState != RUNNING) return;
+ // Corresponding buffer has been cleared. No need to push into mFrameList
+ if (timestamp <= mLatestClearedBufferTimestamp) return;
+
mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
}
@@ -392,7 +395,7 @@
if (mZslStream != 0) {
// clear result metadata list first.
clearZslResultQueueLocked();
- return mZslStream->clearInputRingBuffer();
+ return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
}
return OK;
}
@@ -454,6 +457,23 @@
}
}
+bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
+ switch (afMode) {
+ case ANDROID_CONTROL_AF_MODE_AUTO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+ case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+ case ANDROID_CONTROL_AF_MODE_MACRO:
+ return false;
+ break;
+ case ANDROID_CONTROL_AF_MODE_OFF:
+ case ANDROID_CONTROL_AF_MODE_EDOF:
+ return true;
+ default:
+ ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+ return false;
+ }
+}
+
nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
/**
* Find the smallest timestamp we know about so far
@@ -499,8 +519,16 @@
continue;
}
- // Check AF state if device has focuser
- if (mHasFocuser) {
+ entry = frame.find(ANDROID_CONTROL_AF_MODE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF mode field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afMode = entry.data.u8[0];
+
+ // Check AF state if device has focuser and focus mode isn't fixed
+ if (mHasFocuser && !isFixedFocusMode(afMode)) {
// Make sure the candidate frame has good focus.
entry = frame.find(ANDROID_CONTROL_AF_STATE);
if (entry.count == 0) {
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index daa352b..2975f7c 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -82,6 +82,7 @@
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ nsecs_t mLatestClearedBufferTimestamp;
enum {
RUNNING,
@@ -132,6 +133,8 @@
void dumpZslQueue(int id) const;
nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+
+ bool isFixedFocusMode(uint8_t afMode) const;
};
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
new file mode 100644
index 0000000..9efeaba
--- /dev/null
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ZslProcessorInterface.h"
+
+namespace android {
+namespace camera2 {
+
+status_t ZslProcessorInterface::disconnect() {
+ return OK;
+}
+
+}; //namespace camera2
+}; //namespace android
+
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
index 183c0c2..9e266e7 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
@@ -19,6 +19,8 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
namespace android {
namespace camera2 {
@@ -37,6 +39,9 @@
// Delete the underlying CameraDevice streams
virtual status_t deleteStream() = 0;
+ // Clear any additional state necessary before the CameraDevice is disconnected
+ virtual status_t disconnect();
+
/**
* Submits a ZSL capture request (id = requestId)
*
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9b51b99..fafe349 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
+#include "device3/Camera3DummyStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -181,6 +182,7 @@
mHal3Device = device;
mStatus = STATUS_UNCONFIGURED;
mNextStreamId = 0;
+ mDummyStreamId = NO_STREAM;
mNeedConfig = true;
mPauseStateNotify = false;
@@ -599,10 +601,18 @@
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
res = configureStreamsLocked();
+ // Stream configuration failed due to unsupported configuration.
+ // Device is back to the unconfigured state. Client might try other configurations
+ if (res == BAD_VALUE && mStatus == STATUS_UNCONFIGURED) {
+ CLOGE("No streams configured");
+ return NULL;
+ }
+ // Stream configuration failed for some other reason. Fatal.
if (res != OK) {
SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res);
return NULL;
}
+ // Stream configuration succeeded, but the resulting configuration has no streams.
if (mStatus == STATUS_UNCONFIGURED) {
CLOGE("No streams configured");
return NULL;
@@ -1418,6 +1428,15 @@
return OK;
}
+ // Workaround for device HAL v3.2 or older spec bug: configuring zero streams
+ // requires adding a dummy stream instead.
+ // TODO: Bug: 17321404 for fixing the HAL spec and removing this workaround.
+ if (mOutputStreams.size() == 0) {
+ addDummyStreamLocked();
+ } else {
+ tryRemoveDummyStreamLocked();
+ }
+
// Start configuring the streams
ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
@@ -1540,7 +1559,7 @@
mNeedConfig = false;
- if (config.num_streams > 0) {
+ if (mDummyStreamId == NO_STREAM) {
mStatus = STATUS_CONFIGURED;
} else {
mStatus = STATUS_UNCONFIGURED;
@@ -1554,6 +1573,69 @@
return OK;
}
+status_t Camera3Device::addDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId != NO_STREAM) {
+ // Should never be adding a second dummy stream when one is already
+ // active
+ SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+
+ sp<Camera3OutputStreamInterface> dummyStream =
+ new Camera3DummyStream(mNextStreamId);
+
+ res = mOutputStreams.add(mNextStreamId, dummyStream);
+ if (res < 0) {
+ SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ mDummyStreamId = mNextStreamId;
+ mNextStreamId++;
+
+ return OK;
+}
+
+status_t Camera3Device::tryRemoveDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId == NO_STREAM) return OK;
+ if (mOutputStreams.size() == 1) return OK;
+
+ ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+
+ // Ok, have a dummy stream and there's at least one other output stream,
+ // so remove the dummy
+
+ sp<Camera3StreamInterface> deletedStream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+ return INVALID_OPERATION;
+ }
+
+ deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
+ mOutputStreams.removeItemsAt(outputStreamIdx);
+
+ // Free up the stream endpoint so that it can be used by some other stream
+ res = deletedStream->disconnect();
+ if (res != OK) {
+ SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+ // fall through since we want to still list the stream as deleted.
+ }
+ mDeletedStreams.add(deletedStream);
+ mDummyStreamId = NO_STREAM;
+
+ return res;
+}
+
void Camera3Device::setErrorState(const char *fmt, ...) {
Mutex::Autolock l(mLock);
va_list args;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e3c98ef..b99ed7e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -151,6 +151,8 @@
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
static const ssize_t kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
+ // Constant to use for stream ID when one doesn't exist
+ static const int NO_STREAM = -1;
// A lock to enforce serialization on the input/configure side
// of the public interface.
@@ -196,6 +198,8 @@
int mNextStreamId;
bool mNeedConfig;
+ int mDummyStreamId;
+
// Whether to send state updates upstream
// Pause when doing transparent reconfiguration
bool mPauseStateNotify;
@@ -291,6 +295,17 @@
status_t configureStreamsLocked();
/**
+ * Add a dummy stream to the current stream set as a workaround for
+ * not allowing 0 streams in the camera HAL spec.
+ */
+ status_t addDummyStreamLocked();
+
+ /**
+ * Remove a dummy stream if the current config includes real streams.
+ */
+ status_t tryRemoveDummyStreamLocked();
+
+ /**
* Set device into an error state due to some fatal failure, and set an
* error message to indicate why. Only the first call's message will be
* used. The message is also sent to the log.
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
new file mode 100644
index 0000000..6656b09
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DummyStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3DummyStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3DummyStream::Camera3DummyStream(int id) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
+ /*maxSize*/0, DUMMY_FORMAT) {
+
+}
+
+Camera3DummyStream::~Camera3DummyStream() {
+
+}
+
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Dummy\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3DummyStream::setTransform(int transform) {
+ ATRACE_CALL();
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::configureQueueLocked() {
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::disconnectLocked() {
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+ *usage = DUMMY_USAGE;
+ return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
new file mode 100644
index 0000000..3e42623
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+namespace camera3 {
+
+/**
+ * A dummy output stream class, to be used as a placeholder when no valid
+ * streams are configured by the client.
+ * This is necessary because camera HAL v3.2 or older disallows configuring
+ * 0 output streams, while the public camera2 API allows for it.
+ */
+class Camera3DummyStream :
+ public Camera3IOStreamBase,
+ public Camera3OutputStreamInterface {
+
+ public:
+ /**
+ * Set up a dummy stream; doesn't actually connect to anything, and uses
+ * a default dummy format and size.
+ */
+ Camera3DummyStream(int id);
+
+ virtual ~Camera3DummyStream();
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ status_t setTransform(int transform);
+
+ protected:
+
+ /**
+ * Note that we release the lock briefly in this function
+ */
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+
+ virtual status_t disconnectLocked();
+
+ private:
+
+ // Default dummy stream parameters; 320x240 is a size all devices are required
+ // to support, and the usage flags mimic what a SurfaceView would request.
+ static const int DUMMY_WIDTH = 320;
+ static const int DUMMY_HEIGHT = 240;
+ static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp);
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3DummyStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 29ce38c..3c0e908 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -233,8 +233,7 @@
camera3_stream::usage = oldUsage;
camera3_stream::max_buffers = oldMaxBuffers;
- mState = STATE_CONSTRUCTED;
-
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 92bf81b..81330ea 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -315,20 +315,24 @@
return OK;
}
-status_t Camera3ZslStream::clearInputRingBuffer() {
+status_t Camera3ZslStream::clearInputRingBuffer(nsecs_t* latestTimestamp) {
Mutex::Autolock l(mLock);
- return clearInputRingBufferLocked();
+ return clearInputRingBufferLocked(latestTimestamp);
}
-status_t Camera3ZslStream::clearInputRingBufferLocked() {
+status_t Camera3ZslStream::clearInputRingBufferLocked(nsecs_t* latestTimestamp) {
+
+ if (latestTimestamp) {
+ *latestTimestamp = mProducer->getLatestTimestamp();
+ }
mInputBufferQueue.clear();
return mProducer->clear();
}
status_t Camera3ZslStream::disconnectLocked() {
- clearInputRingBufferLocked();
+ clearInputRingBufferLocked(NULL);
return Camera3OutputStream::disconnectLocked();
}
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index d89c38d..5323a49 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -59,8 +59,10 @@
/**
* Clears the buffers that can be used by enqueueInputBufferByTimestamp
+ * latestTimestamp will be filled with the largest timestamp of buffers
+ * being cleared, or 0 if no buffers were cleared.
*/
- status_t clearInputRingBuffer();
+ status_t clearInputRingBuffer(nsecs_t* latestTimestamp);
protected:
@@ -100,7 +102,7 @@
// Disconnect the Camera3ZslStream specific bufferQueues.
virtual status_t disconnectLocked();
- status_t clearInputRingBufferLocked();
+ status_t clearInputRingBufferLocked(nsecs_t* latestTimestamp);
}; // class Camera3ZslStream
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index e4ec5fd..f8562ec 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -41,7 +41,8 @@
uint32_t consumerUsage,
int bufferCount) :
ConsumerBase(consumer),
- mBufferCount(bufferCount)
+ mBufferCount(bufferCount),
+ mLatestTimestamp(0)
{
mConsumer->setConsumerUsageBits(consumerUsage);
mConsumer->setMaxAcquiredBufferCount(bufferCount);
@@ -152,6 +153,14 @@
return OK;
}
+nsecs_t RingBufferConsumer::getLatestTimestamp() {
+ Mutex::Autolock _l(mMutex);
+ if (mBufferItemList.size() == 0) {
+ return 0;
+ }
+ return mLatestTimestamp;
+}
+
void RingBufferConsumer::pinBufferLocked(const BufferItem& item) {
List<RingBufferItem>::iterator it, end;
@@ -302,6 +311,13 @@
item.mTimestamp,
mBufferItemList.size(), mBufferCount);
+ if (item.mTimestamp < mLatestTimestamp) {
+ BI_LOGE("Timestamp decreases from %" PRId64 " to %" PRId64,
+ mLatestTimestamp, item.mTimestamp);
+ }
+
+ mLatestTimestamp = item.mTimestamp;
+
item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer;
} // end of mMutex lock
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index a03736d..da97a11 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -159,6 +159,9 @@
// Release all the non-pinned buffers in the ring buffer
status_t clear();
+ // Return 0 if RingBuffer is empty, otherwise return timestamp of latest buffer.
+ nsecs_t getLatestTimestamp();
+
private:
// Override ConsumerBase::onFrameAvailable
@@ -180,6 +183,9 @@
// List of acquired buffers in our ring buffer
List<RingBufferItem> mBufferItemList;
const int mBufferCount;
+
+ // Timestamp of latest buffer
+ nsecs_t mLatestTimestamp;
};
} // namespace android
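Taken together, the RingBufferConsumer timestamp tracking and the new clearInputRingBuffer() out-parameter let ZslProcessor3 drop result metadata that refers to buffers already flushed from the ZSL queue. A minimal standalone sketch of that flow (assumed names; not the actual ZslProcessor3 API):

#include <stdint.h>

typedef int64_t nsecs_t;

struct ZslMetadataFilter {
    nsecs_t latestCleared = 0;

    // clearZslQueue(): remember the newest timestamp among the cleared buffers
    // (0 when the ring buffer was already empty).
    void onRingBufferCleared(nsecs_t latestTimestampCleared) {
        latestCleared = latestTimestampCleared;
    }

    // onResultAvailable(): result metadata older than the cleared buffers has
    // no buffer left to pair with, so it is simply not pushed into the frame list.
    bool shouldKeepResult(nsecs_t resultTimestamp) const {
        return resultTimestamp > latestCleared;
    }
};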
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 2502e0d..b5aaee3 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -249,7 +249,7 @@
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
default:
- return eventMemory;
+ return eventMemory;
}
size_t size = event->data_offset + event->data_size;
@@ -653,7 +653,6 @@
{
ALOGV("onCallbackEvent type %d", event->mType);
- AutoMutex lock(mLock);
sp<IMemory> eventMemory = event->mMemory;
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
@@ -668,34 +667,53 @@
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
(struct sound_trigger_recognition_event *)eventMemory->pointer();
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(recognitionEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ if (model->mState != Model::STATE_ACTIVE) {
+ ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+ return;
+ }
- sp<Model> model = getModel(recognitionEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ recognitionEvent->capture_session = model->mCaptureSession;
+ model->mState = Model::STATE_IDLE;
+ client = mClient;
}
- if (model->mState != Model::STATE_ACTIVE) {
- ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
- return;
+ if (client != 0) {
+ client->onRecognitionEvent(eventMemory);
}
-
- recognitionEvent->capture_session = model->mCaptureSession;
- mClient->onRecognitionEvent(eventMemory);
- model->mState = Model::STATE_IDLE;
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
(struct sound_trigger_model_event *)eventMemory->pointer();
-
- sp<Model> model = getModel(soundmodelEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(soundmodelEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ client = mClient;
}
- mClient->onSoundModelEvent(eventMemory);
+ if (client != 0) {
+ client->onSoundModelEvent(eventMemory);
+ }
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- mClient->onServiceStateChange(eventMemory);
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+ if (client != 0) {
+ client->onServiceStateChange(eventMemory);
+ }
} break;
default:
LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);