Merge "When fast flag is denied, retry with new I/O handle" into nyc-dev
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 58d9b43..0a447e7 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -38,6 +38,7 @@
parcel->readInt32(&precaptureTriggerId);
parcel->readInt64(&frameNumber);
parcel->readInt32(&partialResultCount);
+ parcel->readInt32(&errorStreamId);
return OK;
}
@@ -54,6 +55,7 @@
parcel->writeInt32(precaptureTriggerId);
parcel->writeInt64(frameNumber);
parcel->writeInt32(partialResultCount);
+ parcel->writeInt32(errorStreamId);
return OK;
}
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index ff0e3d3..45e4518 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -64,6 +64,12 @@
int32_t partialResultCount;
/**
+ * For buffer drop errors, the stream ID for the stream that lost a buffer.
+ * Otherwise -1.
+ */
+ int32_t errorStreamId;
+
+ /**
* Constructor initializes object as invalid by setting requestId to be -1.
*/
CaptureResultExtras()
@@ -72,7 +78,8 @@
afTriggerId(0),
precaptureTriggerId(0),
frameNumber(0),
- partialResultCount(0) {
+ partialResultCount(0),
+ errorStreamId(-1) {
}
/**
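
Note: a minimal sketch of consuming the new field on the client side; the
handler name is illustrative, not part of this change (requires <inttypes.h>
for PRId64):

    // On a buffer-drop error, errorStreamId names the affected stream;
    // for every other result it keeps its default of -1.
    void onCaptureError(const CaptureResultExtras &extras) {
        if (extras.errorStreamId != -1) {
            ALOGW("frame %" PRId64 ": buffer lost on stream %d",
                    extras.frameNumber, extras.errorStreamId);
        }
    }
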
diff --git a/include/camera/ndk/NdkCameraMetadataTags.h b/include/camera/ndk/NdkCameraMetadataTags.h
index d7035a7..a1d3bf7 100644
--- a/include/camera/ndk/NdkCameraMetadataTags.h
+++ b/include/camera/ndk/NdkCameraMetadataTags.h
@@ -647,6 +647,8 @@
ACAMERA_CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO,
ACAMERA_CONTROL_SCENE_MODE_HDR,
ACAMERA_CONTROL_SCENE_MODE_FACE_PRIORITY_LOW_LIGHT,
+ ACAMERA_CONTROL_SCENE_MODE_DEVICE_CUSTOM_START = 100,
+ ACAMERA_CONTROL_SCENE_MODE_DEVICE_CUSTOM_END = 127,
} acamera_metadata_enum_android_control_scene_mode_t;
// ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 69dc062..4de55a2 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -836,6 +836,8 @@
// check sample rate and speed is compatible with AudioTrack
bool isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+ void restartIfDisabled();
+
// Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 3f211bf..19c7955 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -121,7 +121,7 @@
// This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
// well as on success.
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type = NULL) = 0;
@@ -186,6 +186,7 @@
INTERNAL_OPTION_MAX_FPS, // data is float
INTERNAL_OPTION_START_TIME, // data is an int64_t
INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
+ INTERNAL_OPTION_COLOR_ASPECTS, // data is ColorAspects
};
virtual status_t setInternalOption(
node_id node,
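
Note: callers now pass an initial dataspace when creating the input surface.
A sketch against the new signature; the omx/node handles, port index, and the
BT.709 choice are illustrative:

    sp<IGraphicBufferProducer> producer;
    MetadataBufferType type = kMetadataBufferTypeInvalid;
    // the dataspace travels with the surface, so buffers are tagged
    // before the first frame is queued
    status_t err = omx->createInputSurface(
            node, kPortIndexInput, HAL_DATASPACE_BT709, &producer, &type);
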
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
index 20f2cad..1957a45 100644
--- a/include/media/MediaResource.h
+++ b/include/media/MediaResource.h
@@ -23,17 +23,24 @@
namespace android {
-extern const char kResourceSecureCodec[];
-extern const char kResourceNonSecureCodec[];
-extern const char kResourceAudioCodec[];
-extern const char kResourceVideoCodec[];
-extern const char kResourceGraphicMemory[];
-
class MediaResource {
public:
+ enum Type {
+ kUnspecified = 0,
+ kSecureCodec,
+ kNonSecureCodec,
+ kGraphicMemory
+ };
+
+ enum SubType {
+ kUnspecifiedSubType = 0,
+ kAudioCodec,
+ kVideoCodec
+ };
+
MediaResource();
- MediaResource(String8 type, uint64_t value);
- MediaResource(String8 type, String8 subType, uint64_t value);
+ MediaResource(Type type, uint64_t value);
+ MediaResource(Type type, SubType subType, uint64_t value);
void readFromParcel(const Parcel &parcel);
void writeToParcel(Parcel *parcel) const;
@@ -43,11 +50,30 @@
bool operator==(const MediaResource &other) const;
bool operator!=(const MediaResource &other) const;
- String8 mType;
- String8 mSubType;
+ Type mType;
+ SubType mSubType;
uint64_t mValue;
};
+inline static const char *asString(MediaResource::Type i, const char *def = "??") {
+ switch (i) {
+ case MediaResource::kUnspecified: return "unspecified";
+ case MediaResource::kSecureCodec: return "secure-codec";
+ case MediaResource::kNonSecureCodec: return "non-secure-codec";
+ case MediaResource::kGraphicMemory: return "graphic-memory";
+ default: return def;
+ }
+}
+
+inline static const char *asString(MediaResource::SubType i, const char *def = "??") {
+ switch (i) {
+ case MediaResource::kUnspecifiedSubType: return "unspecified";
+ case MediaResource::kAudioCodec: return "audio-codec";
+ case MediaResource::kVideoCodec: return "video-codec";
+ default: return def;
+ }
+}
+
}; // namespace android
#endif // ANDROID_MEDIA_RESOURCE_H
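
Note: with the string constants gone, callers construct typed resources.
A minimal sketch:

    // one secure video decoder instance
    MediaResource res(MediaResource::kSecureCodec, MediaResource::kVideoCodec, 1);
    // toString() renders through the new asString() helpers:
    // "secure-codec/video-codec:1"
    ALOGV("%s", res.toString().string());
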
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 761f182..2365323 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -242,8 +242,10 @@
IOMX::node_id mNode;
sp<MemoryDealer> mDealer[2];
+ bool mUsingNativeWindow;
sp<ANativeWindow> mNativeWindow;
int mNativeWindowUsageBits;
+ sp<AMessage> mConfigFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
sp<AMessage> mBaseOutputFormat;
@@ -255,7 +257,7 @@
List<sp<AMessage> > mDeferredQueue;
- bool mSentFormat;
+ sp<AMessage> mLastOutputFormat;
bool mIsVideo;
bool mIsEncoder;
bool mFatalError;
@@ -343,21 +345,72 @@
status_t setSupportedOutputFormat(bool getLegacyFlexibleFormat);
status_t setupVideoDecoder(
- const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers,
+ const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers, bool haveSwRenderer,
sp<AMessage> &outputformat);
status_t setupVideoEncoder(
- const char *mime, const sp<AMessage> &msg, sp<AMessage> &outputformat);
+ const char *mime, const sp<AMessage> &msg,
+ sp<AMessage> &outputformat, sp<AMessage> &inputformat);
status_t setVideoFormatOnPort(
OMX_U32 portIndex,
int32_t width, int32_t height,
OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate = -1.0);
- status_t setColorAspects(
- OMX_U32 portIndex, int32_t width, int32_t height, const sp<AMessage> &msg,
- sp<AMessage> &format);
- status_t getColorAspects(OMX_U32 portIndex, sp<AMessage> &format);
+ // gets index or sets it to 0 on error. Returns error from codec.
+ status_t initDescribeColorAspectsIndex();
+
+ // sets |params|. If |readBack| is true, it re-gets them afterwards if set succeeded.
+ // returns the codec error.
+ status_t setCodecColorAspects(DescribeColorAspectsParams &params, bool readBack = false);
+
+ // gets |params|; returns the codec error. |params| should not change on error.
+ status_t getCodecColorAspects(DescribeColorAspectsParams &params);
+
+ // gets dataspace guidance from codec and platform. |params| should be set up with the color
+ // aspects to use. If |tryCodec| is true, the codec is queried first. If it succeeds, we
+ // return OK. Otherwise, we fall back to the platform guidance and return the codec error;
+ // though, we return OK if the codec failed with UNSUPPORTED, as codec guidance is optional.
+ status_t getDataSpace(
+ DescribeColorAspectsParams &params, android_dataspace *dataSpace /* nonnull */,
+ bool tryCodec);
+
+ // sets color aspects for the decoder for certain |width/height| based on |configFormat|, and
+ // set resulting color config into |outputFormat|. If |usingNativeWindow| is true, we use
+ // video defaults if config is unspecified. Returns error from the codec.
+ status_t setColorAspectsForVideoDecoder(
+ int32_t width, int32_t height, bool usingNativeWindow,
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat);
+
+ // gets color aspects for the decoder for certain |width/height| based on |configFormat|, and
+ // set resulting color config into |outputFormat|. If |dataSpace| is non-null, it requests
+ // dataspace guidance from the codec and platform and sets it into |dataSpace|. Returns the
+ // error from the codec.
+ status_t getColorAspectsAndDataSpaceForVideoDecoder(
+ int32_t width, int32_t height, const sp<AMessage> &configFormat,
+ sp<AMessage> &outputFormat, android_dataspace *dataSpace);
+
+ // sets color aspects for the video encoder assuming bytebuffer mode for certain |configFormat|
+ // and sets resulting color config into |outputFormat|. For mediarecorder, also sets the dataspace
+ // into |inputFormat|. Returns the error from the codec.
+ status_t setColorAspectsForVideoEncoder(
+ const sp<AMessage> &configFormat,
+ sp<AMessage> &outputFormat, sp<AMessage> &inputFormat);
+
+ // sets color aspects for the video encoder in surface mode. This basically sets the default
+ // video values for unspecified aspects and sets the dataspace to use in the input format.
+ // Also sets the dataspace into |dataSpace|.
+ // Returns any codec errors during this configuration, except for optional steps.
+ status_t setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+ android_dataspace *dataSpace /* nonnull */);
+
+ // gets color aspects for the video encoder input port and sets them into the |format|.
+ // Returns any codec errors.
+ status_t getInputColorAspectsForVideoEncoder(sp<AMessage> &format);
+
+ // updates the encoder output format with |aspects| defaulting to |dataSpace| for
+ // unspecified values.
+ void onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects);
typedef struct drcParams {
int32_t drcCut;
@@ -443,7 +496,10 @@
void notifyOfRenderedFrames(
bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
- void sendFormatChange(const sp<AMessage> &reply);
+ void onOutputFormatChanged();
+ void addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify);
+ void sendFormatChange();
+
status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
void signalError(
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 2bb1291..fe579b7 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -394,7 +394,7 @@
bool isExecuting() const;
uint64_t getGraphicBufferSize();
- void addResource(const String8 &type, const String8 &subtype, uint64_t value);
+ void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
bool hasPendingBuffer(int portIndex);
bool hasPendingBuffer();
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index e3f3f5e..5f10487 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -115,8 +115,8 @@
bool mStopping;
bool mDoMoreWorkPending;
bool mSetEncoderFormat;
- int mEncoderFormat;
- int mEncoderDataSpace;
+ int32_t mEncoderFormat;
+ int32_t mEncoderDataSpace;
sp<AMessage> mEncoderActivityNotify;
sp<IGraphicBufferProducer> mGraphicBufferProducer;
sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
diff --git a/include/media/stagefright/foundation/ALookup.h b/include/media/stagefright/foundation/ALookup.h
index 571eda2..5a68806 100644
--- a/include/media/stagefright/foundation/ALookup.h
+++ b/include/media/stagefright/foundation/ALookup.h
@@ -27,10 +27,14 @@
struct ALookup {
ALookup(std::initializer_list<std::pair<T, U>> list);
- bool lookup(const T& from, U *to);
- bool rlookup(const U& from, T *to);
- inline bool map(const T& from, U *to) { return lookup(from, to); }
- inline bool map(const U& from, T *to) { return rlookup(from, to); }
+ bool lookup(const T& from, U *to) const;
+ bool rlookup(const U& from, T *to) const;
+
+ template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+ inline bool map(const T& from, V *to) const { return lookup(from, to); }
+
+ template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+ inline bool map(const V& from, T *to) const { return rlookup(from, to); }
private:
std::vector<std::pair<T, U>> mTable;
@@ -42,7 +46,7 @@
}
template<typename T, typename U>
-bool ALookup<T, U>::lookup(const T& from, U *to) {
+bool ALookup<T, U>::lookup(const T& from, U *to) const {
for (auto elem : mTable) {
if (elem.first == from) {
*to = elem.second;
@@ -53,7 +57,7 @@
}
template<typename T, typename U>
-bool ALookup<T, U>::rlookup(const U& from, T *to) {
+bool ALookup<T, U>::rlookup(const U& from, T *to) const {
for (auto elem : mTable) {
if (elem.second == from) {
*to = elem.first;
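
Note: the added const qualifiers allow tables to be declared static const, and
the enable_if guard keeps the two map() directions unambiguous whenever T != U.
A sketch with illustrative types:

    enum Fruit { kApple, kBanana };
    static const ALookup<Fruit, int> sCodes {
        { kApple, 1 },
        { kBanana, 2 },
    };

    int code;
    sCodes.map(kApple, &code); // forward: resolves to lookup(), code == 1
    Fruit fruit;
    sCodes.map(2, &fruit);     // reverse: resolves to rlookup(), fruit == kBanana

When T == U both map() overloads are disabled by SFINAE, and callers must name
lookup() or rlookup() explicitly.
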
diff --git a/include/media/stagefright/foundation/ColorUtils.h b/include/media/stagefright/foundation/ColorUtils.h
index b95c80b..f01a210 100644
--- a/include/media/stagefright/foundation/ColorUtils.h
+++ b/include/media/stagefright/foundation/ColorUtils.h
@@ -129,11 +129,46 @@
static status_t convertCodecColorAspectsToPlatformAspects(
const ColorAspects &aspects, int32_t *range, int32_t *standard, int32_t *transfer);
- // updates unspecified range, standard and transfer values to their defaults
- static void setDefaultPlatformColorAspectsIfNeeded(
- int32_t &range, int32_t &standard, int32_t &transfer, int32_t width, int32_t height);
+ // converts Other values to Unspecified
+ static void convertCodecColorAspectsToIsoAspects(
+ const ColorAspects &aspects,
+ int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange);
+ // converts unsupported values to Other
+ static void convertIsoColorAspectsToCodecAspects(
+ int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+ ColorAspects &aspects);
+
+ // updates Unspecified color aspects to their defaults based on the video size
static void setDefaultCodecColorAspectsIfNeeded(
ColorAspects &aspects, int32_t width, int32_t height);
+
+ // returns the closest dataSpace for the given color |aspects|. If |mayExpand| is true, it allows
+ // returning a larger dataSpace that contains the color space given by |aspects| and is better
+ // suited to blending. This requires implicit color space conversion on the part of the device.
+ static android_dataspace getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand);
+
+ // converts |dataSpace| to a V0 enum, and returns true if dataSpace is an aspect-only value
+ static bool convertDataSpaceToV0(android_dataspace &dataSpace);
+
+ // compares |aspects| to |orig|. Returns |true| if any aspects have changed, except if they
+ // changed to Unspecified value. It also sets the changed values to Unspecified in |aspect|.
+ static bool checkIfAspectsChangedAndUnspecifyThem(
+ ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects = false);
+
+ // finds the color config in |format|, defaulting missing fields to 0.
+ static void getColorConfigFromFormat(
+ const sp<AMessage> &format, int *range, int *standard, int *transfer);
+
+ // copies existing color config from |source| to |target|.
+ static void copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target);
+
+ // finds the color config in |format| as ColorAspects, defaulting missing fields to 0.
+ static void getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects);
+
+ // writes |aspects| into |format|. If |force| is false, Unspecified values are not
+ // written.
+ static void setColorAspectsIntoFormat(
+ const ColorAspects &aspects, sp<AMessage> &format, bool force = false);
};
inline static const char *asString(android::ColorUtils::ColorStandard i, const char *def = "??") {
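
Note: a sketch of how the new helpers chain together in a decoder-style flow,
assuming sp<AMessage> configFormat/outputFormat and int32_t width/height:

    ColorAspects aspects;
    ColorUtils::getColorAspectsFromFormat(configFormat, aspects);  // 0 = unset
    ColorUtils::setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
    ColorUtils::setColorAspectsIntoFormat(aspects, outputFormat);

    // choose a dataspace; true permits expanding to a blend-friendly space
    android_dataspace ds =
            ColorUtils::getDataSpaceForColorAspects(aspects, true /* mayExpand */);
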
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ea8a78e..2dfb850 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -268,6 +268,8 @@
// DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
// -EINTR Call has been interrupted. Look around to see why, and then perhaps try again.
// NO_INIT Shared memory is corrupt.
+ // NOT_ENOUGH_DATA Server has disabled the track because of underrun: restart the track
+ // if still in active state.
// Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
status_t obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
struct timespec *elapsed = NULL);
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 423273d..ab18c27 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1532,6 +1532,10 @@
}
oldSequence = newSequence;
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ }
+
// Keep the extra references
proxy = mProxy;
iMem = mCblkMemory;
@@ -1554,8 +1558,7 @@
buffer.mFrameCount = audioBuffer->frameCount;
// FIXME starts the requested timeout and elapsed over from scratch
status = proxy->obtainBuffer(&buffer, requested, elapsed);
-
- } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+ } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
audioBuffer->size = buffer.mFrameCount * mFrameSize;
@@ -1588,13 +1591,16 @@
mProxy->releaseBuffer(&buffer);
// restart track if it was disabled by audioflinger due to previous underrun
- if (mState == STATE_ACTIVE) {
- audio_track_cblk_t* cblk = mCblk;
- if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
- ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
- // FIXME ignoring status
- mAudioTrack->start();
- }
+ restartIfDisabled();
+}
+
+void AudioTrack::restartIfDisabled()
+{
+ int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
+ ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
+ // FIXME ignoring status
+ mAudioTrack->start();
}
}
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 1d15495..6b6865b 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -129,6 +129,11 @@
status = DEAD_OBJECT;
goto end;
}
+ if (flags & CBLK_DISABLED) {
+ ALOGV("Track disabled");
+ status = NOT_ENOUGH_DATA;
+ goto end;
+ }
// check for obtainBuffer interrupted by client
if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
ALOGV("obtainBuffer() interrupted by client");
@@ -425,7 +430,8 @@
status = DEAD_OBJECT;
goto end;
}
- if (flags & CBLK_STREAM_END_DONE) {
+ // a track is not supposed to underrun at this stage but consider it done
+ if (flags & (CBLK_STREAM_END_DONE | CBLK_DISABLED)) {
ALOGV("stream end received");
status = NO_ERROR;
goto end;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 1770fb8..8376c0a 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -18,8 +18,6 @@
#define LOG_TAG "BpMediaSource"
#include <utils/Log.h>
-#include <utils/CallStack.h>
-
#include <inttypes.h>
#include <stdint.h>
#include <sys/types.h>
@@ -111,16 +109,9 @@
BpMediaSource(const sp<IBinder>& impl)
: BpInterface<IMediaSource>(impl)
{
- mStarted = false;
}
virtual status_t start(MetaData *params) {
- if (mStarted) {
- ALOGD("Source was started previously from:");
- mStartStack.log(LOG_TAG);
- ALOGD("Now from:");
- CallStack stack(LOG_TAG);
- }
ALOGV("start");
Parcel data, reply;
data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
@@ -128,10 +119,6 @@
params->writeToParcel(data);
}
status_t ret = remote()->transact(START, data, &reply);
- if (ret == NO_ERROR) {
- mStarted = true;
- mStartStack.update();
- }
if (ret == NO_ERROR && params) {
ALOGW("ignoring potentially modified MetaData from start");
ALOGW("input:");
@@ -144,7 +131,6 @@
}
virtual status_t stop() {
- mStarted = false;
ALOGV("stop");
Parcel data, reply;
data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
@@ -219,8 +205,7 @@
// NuPlayer passes pointers-to-metadata around, so we use this to keep the metadata alive
// XXX: could we use this for caching, or does metadata change on the fly?
sp<MetaData> mMetaData;
- bool mStarted;
- CallStack mStartStack;
+
};
IMPLEMENT_META_INTERFACE(MediaSource, "android.media.IMediaSource");
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index e6b4347..ea4a966 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -24,6 +24,7 @@
#include <binder/Parcel.h>
#include <media/IOMX.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/openmax/OMX_IndexExt.h>
namespace android {
@@ -313,13 +314,14 @@
}
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
Parcel data, reply;
status_t err;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
data.writeInt32(port_index);
+ data.writeInt32(dataSpace);
err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
if (err != OK) {
ALOGW("binder transaction failed: %d", err);
@@ -716,7 +718,8 @@
void *params = NULL;
size_t pageSize = 0;
size_t allocSize = 0;
- if (code != SET_INTERNAL_OPTION && size < 8) {
+ if ((index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits && size < 4) ||
+ (code != SET_INTERNAL_OPTION && size < 8)) {
// we expect the structure to contain at least the size and
// version, 8 bytes total
ALOGE("b/27207275 (%zu)", size);
@@ -738,7 +741,9 @@
} else {
err = NOT_ENOUGH_DATA;
OMX_U32 declaredSize = *(OMX_U32*)params;
- if (code != SET_INTERNAL_OPTION && declaredSize > size) {
+ if (code != SET_INTERNAL_OPTION &&
+ index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
+ declaredSize > size) {
// the buffer says it's bigger than it actually is
ALOGE("b/27207275 (%u/%zu)", declaredSize, size);
android_errorWriteLog(0x534e4554, "27207275");
@@ -908,10 +913,11 @@
node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
+ android_dataspace dataSpace = (android_dataspace)data.readInt32();
sp<IGraphicBufferProducer> bufferProducer;
MetadataBufferType type = kMetadataBufferTypeInvalid;
- status_t err = createInputSurface(node, port_index, &bufferProducer, &type);
+ status_t err = createInputSurface(node, port_index, dataSpace, &bufferProducer, &type);
if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
android_errorWriteLog(0x534e4554, "26324358");
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index 40ec0cb..e636a50 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -21,38 +21,36 @@
namespace android {
-const char kResourceSecureCodec[] = "secure-codec";
-const char kResourceNonSecureCodec[] = "non-secure-codec";
-const char kResourceAudioCodec[] = "audio-codec";
-const char kResourceVideoCodec[] = "video-codec";
-const char kResourceGraphicMemory[] = "graphic-memory";
+MediaResource::MediaResource()
+ : mType(kUnspecified),
+ mSubType(kUnspecifiedSubType),
+ mValue(0) {}
-MediaResource::MediaResource() : mValue(0) {}
-
-MediaResource::MediaResource(String8 type, uint64_t value)
+MediaResource::MediaResource(Type type, uint64_t value)
: mType(type),
+ mSubType(kUnspecifiedSubType),
mValue(value) {}
-MediaResource::MediaResource(String8 type, String8 subType, uint64_t value)
+MediaResource::MediaResource(Type type, SubType subType, uint64_t value)
: mType(type),
mSubType(subType),
mValue(value) {}
void MediaResource::readFromParcel(const Parcel &parcel) {
- mType = parcel.readString8();
- mSubType = parcel.readString8();
+ mType = static_cast<Type>(parcel.readInt32());
+ mSubType = static_cast<SubType>(parcel.readInt32());
mValue = parcel.readUint64();
}
void MediaResource::writeToParcel(Parcel *parcel) const {
- parcel->writeString8(mType);
- parcel->writeString8(mSubType);
+ parcel->writeInt32(static_cast<int32_t>(mType));
+ parcel->writeInt32(static_cast<int32_t>(mSubType));
parcel->writeUint64(mValue);
}
String8 MediaResource::toString() const {
String8 str;
- str.appendFormat("%s/%s:%llu", mType.string(), mSubType.string(), (unsigned long long)mValue);
+ str.appendFormat("%s/%s:%llu", asString(mType), asString(mSubType), (unsigned long long)mValue);
return str;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 26362ec..1b248db 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1571,6 +1571,9 @@
if (cameraSource == NULL) {
flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
+ } else {
+ // require dataspace setup even if not using surface input
+ format->setInt32("android._using-recorder", 1);
}
sp<MediaCodecSource> encoder = MediaCodecSource::Create(
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index def9e25..e0fe7e3 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -488,8 +488,8 @@
ACodec::ACodec()
: mQuirks(0),
mNode(0),
+ mUsingNativeWindow(false),
mNativeWindowUsageBits(0),
- mSentFormat(false),
mIsVideo(false),
mIsEncoder(false),
mFatalError(false),
@@ -1646,8 +1646,9 @@
encoder = false;
}
- sp<AMessage> inputFormat = new AMessage();
- sp<AMessage> outputFormat = mNotify->dup(); // will use this for kWhatOutputFormatChanged
+ sp<AMessage> inputFormat = new AMessage;
+ sp<AMessage> outputFormat = new AMessage;
+ mConfigFormat = msg;
mIsEncoder = encoder;
@@ -1765,6 +1766,7 @@
sp<RefBase> obj;
bool haveNativeWindow = msg->findObject("native-window", &obj)
&& obj != NULL && video && !encoder;
+ mUsingNativeWindow = haveNativeWindow;
mLegacyAdaptiveExperiment = false;
if (video && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
@@ -1941,9 +1943,9 @@
}
if (encoder) {
- err = setupVideoEncoder(mime, msg, outputFormat);
+ err = setupVideoEncoder(mime, msg, outputFormat, inputFormat);
} else {
- err = setupVideoDecoder(mime, msg, haveNativeWindow, outputFormat);
+ err = setupVideoDecoder(mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
if (err != OK) {
@@ -2004,7 +2006,8 @@
// fallback is not supported for protected playback
err = PERMISSION_DENIED;
} else if (err == OK) {
- err = setupVideoDecoder(mime, msg, false, outputFormat);
+ err = setupVideoDecoder(
+ mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
}
}
@@ -2198,6 +2201,8 @@
}
mBaseOutputFormat = outputFormat;
+ // trigger a kWhatOutputFormatChanged msg on first buffer
+ mLastOutputFormat.clear();
err = getPortFormat(kPortIndexInput, inputFormat);
if (err == OK) {
@@ -3012,7 +3017,7 @@
status_t ACodec::setupVideoDecoder(
const char *mime, const sp<AMessage> &msg, bool haveNativeWindow,
- sp<AMessage> &outputFormat) {
+ bool usingSwRenderer, sp<AMessage> &outputFormat) {
int32_t width, height;
if (!msg->findInt32("width", &width)
|| !msg->findInt32("height", &height)) {
@@ -3075,113 +3080,262 @@
return err;
}
- err = setColorAspects(
- kPortIndexOutput, width, height, msg, outputFormat);
- if (err != OK) {
- ALOGI("Falling back to presets as component does not describe color aspects.");
+ err = setColorAspectsForVideoDecoder(
+ width, height, haveNativeWindow | usingSwRenderer, msg, outputFormat);
+ if (err == ERROR_UNSUPPORTED) { // support is optional
err = OK;
}
+ return err;
+}
+
+status_t ACodec::initDescribeColorAspectsIndex() {
+ status_t err = mOMX->getExtensionIndex(
+ mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
+ if (err != OK) {
+ mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
+ }
+ return err;
+}
+
+status_t ACodec::setCodecColorAspects(DescribeColorAspectsParams &params, bool verify) {
+ status_t err = ERROR_UNSUPPORTED;
+ if (mDescribeColorAspectsIndex) {
+ err = mOMX->setConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+ }
+ ALOGV("[%s] setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+
+ if (verify && err == OK) {
+ err = getCodecColorAspects(params);
+ }
+
+ ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex,
+ "[%s] getting color aspects failed even though codec advertises support",
+ mComponentName.c_str());
+ return err;
+}
+
+status_t ACodec::setColorAspectsForVideoDecoder(
+ int32_t width, int32_t height, bool usingNativeWindow,
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexOutput;
+
+ getColorAspectsFromFormat(configFormat, params.sAspects);
+ if (usingNativeWindow) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ // The default aspects will be set back to the output format during the
+ // getFormat phase of configure(). Set non-Unspecified values back into the
+ // format, in case component does not support this enumeration.
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+ }
+
+ (void)initDescribeColorAspectsIndex();
+
+ // communicate color aspects to codec
+ return setCodecColorAspects(params);
+}
+
+status_t ACodec::getCodecColorAspects(DescribeColorAspectsParams &params) {
+ status_t err = ERROR_UNSUPPORTED;
+ if (mDescribeColorAspectsIndex) {
+ err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+ }
+ ALOGV("[%s] got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+ if (params.bRequestingDataSpace) {
+ ALOGV("for dataspace %#x", params.nDataSpace);
+ }
+ if (err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex
+ && !params.bRequestingDataSpace && !params.bDataSpaceChanged) {
+ ALOGW("[%s] getting color aspects failed even though codec advertises support",
+ mComponentName.c_str());
+ }
+ return err;
+}
+
+status_t ACodec::getInputColorAspectsForVideoEncoder(sp<AMessage> &format) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexInput;
+ status_t err = getCodecColorAspects(params);
+ if (err == OK) {
+ // we only set encoder input aspects if codec supports them
+ setColorAspectsIntoFormat(params.sAspects, format, true /* force */);
+ }
+ return err;
+}
+
+status_t ACodec::getDataSpace(
+ DescribeColorAspectsParams &params, android_dataspace *dataSpace /* nonnull */,
+ bool tryCodec) {
+ status_t err = OK;
+ if (tryCodec) {
+ // request dataspace guidance from codec.
+ params.bRequestingDataSpace = OMX_TRUE;
+ err = getCodecColorAspects(params);
+ params.bRequestingDataSpace = OMX_FALSE;
+ if (err == OK && params.nDataSpace != HAL_DATASPACE_UNKNOWN) {
+ *dataSpace = (android_dataspace)params.nDataSpace;
+ return err;
+ } else if (err == ERROR_UNSUPPORTED) {
+ // ignore not-implemented error for dataspace requests
+ err = OK;
+ }
+ }
+
+ // this returns legacy versions if available
+ *dataSpace = getDataSpaceForColorAspects(params.sAspects, true /* mayexpand */);
+ ALOGV("[%s] using color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "and dataspace %#x",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ *dataSpace);
+ return err;
+}
+
+
+status_t ACodec::getColorAspectsAndDataSpaceForVideoDecoder(
+ int32_t width, int32_t height, const sp<AMessage> &configFormat, sp<AMessage> &outputFormat,
+ android_dataspace *dataSpace) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexOutput;
+
+ // reset default format and get resulting format
+ getColorAspectsFromFormat(configFormat, params.sAspects);
+ if (dataSpace != NULL) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ }
+ status_t err = setCodecColorAspects(params, true /* readBack */);
+
+ // we always set specified aspects for decoders
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+
+ if (dataSpace != NULL) {
+ status_t res = getDataSpace(params, dataSpace, err == OK /* tryCodec */);
+ if (err == OK) {
+ err = res;
+ }
+ }
return err;
}
-status_t ACodec::setColorAspects(
- OMX_U32 portIndex, int32_t width, int32_t height, const sp<AMessage> &msg,
- sp<AMessage> &format) {
- DescribeColorAspectsParams params;
- InitOMXParams(&params);
- params.nPortIndex = portIndex;
-
- // 0 values are unspecified
- int32_t range = 0, standard = 0, transfer = 0;
- if (portIndex == kPortIndexInput) {
- // Encoders allow overriding default aspects with 0 if specified by format. Decoders do not.
- setDefaultPlatformColorAspectsIfNeeded(range, standard, transfer, width, height);
- }
- (void)msg->findInt32("color-range", &range);
- (void)msg->findInt32("color-standard", &standard);
- (void)msg->findInt32("color-transfer", &transfer);
-
- if (convertPlatformColorAspectsToCodecAspects(
- range, standard, transfer, params.sAspects) != OK) {
- ALOGW("[%s] Ignoring illegal color aspects(range=%d, standard=%d, transfer=%d)",
- mComponentName.c_str(), range, standard, transfer);
- // Invalid values were converted to unspecified !params!, but otherwise were not changed
- // For encoders, we leave these as is. For decoders, we will use default values.
- }
-
- // set defaults for decoders.
- if (portIndex != kPortIndexInput) {
- setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
- convertCodecColorAspectsToPlatformAspects(params.sAspects, &range, &standard, &transfer);
- }
-
- // save updated values to base output format (encoder input format will read back actually
- // supported values by the codec)
- if (range != 0) {
- format->setInt32("color-range", range);
- }
- if (standard != 0) {
- format->setInt32("color-standard", standard);
- }
- if (transfer != 0) {
- format->setInt32("color-transfer", transfer);
- }
-
- // communicate color aspects to codec
- status_t err = mOMX->getExtensionIndex(
- mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
- if (err != OK) {
- mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
- return err;
- }
-
- return mOMX->setConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
-}
-
-status_t ACodec::getColorAspects(OMX_U32 portIndex, sp<AMessage> &format) {
- if (!mDescribeColorAspectsIndex) {
- return ERROR_UNSUPPORTED;
- }
+// initial video encoder setup for bytebuffer mode
+status_t ACodec::setColorAspectsForVideoEncoder(
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
+ // copy config to output format as this is not exposed via getFormat
+ copyColorConfig(configFormat, outputFormat);
DescribeColorAspectsParams params;
InitOMXParams(&params);
- params.nPortIndex = portIndex;
- ColorAspects &aspects = params.sAspects;
- aspects.mRange = ColorAspects::RangeUnspecified;
- aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
- aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
- aspects.mTransfer = ColorAspects::TransferUnspecified;
+ params.nPortIndex = kPortIndexInput;
+ getColorAspectsFromFormat(configFormat, params.sAspects);
- status_t err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
- if (err != OK) {
- return err;
+ (void)initDescribeColorAspectsIndex();
+
+ int32_t usingRecorder;
+ if (configFormat->findInt32("android._using-recorder", &usingRecorder) && usingRecorder) {
+ android_dataspace dataSpace = HAL_DATASPACE_BT709;
+ int32_t width, height;
+ if (configFormat->findInt32("width", &width)
+ && configFormat->findInt32("height", &height)) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ status_t err = getDataSpace(
+ params, &dataSpace, mDescribeColorAspectsIndex /* tryCodec */);
+ if (err != OK) {
+ return err;
+ }
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+ }
+ inputFormat->setInt32("android._dataspace", (int32_t)dataSpace);
}
- // keep non-standard codec values in extension ranges
- int32_t range, standard, transfer;
- if (convertCodecColorAspectsToPlatformAspects(
- params.sAspects, &range, &standard, &transfer) != OK) {
- ALOGW("[%s] Ignoring invalid color aspects(range=%u, primaries=%u, coeffs=%u, transfer=%u)",
- mComponentName.c_str(),
- aspects.mRange, aspects.mPrimaries, aspects.mMatrixCoeffs, aspects.mTransfer);
- }
-
- // save specified values to format
- if (range != 0) {
- format->setInt32("color-range", range);
- }
- if (standard != 0) {
- format->setInt32("color-standard", standard);
- }
- if (transfer != 0) {
- format->setInt32("color-transfer", transfer);
+ // communicate color aspects to codec, but do not allow change of the platform aspects
+ ColorAspects origAspects = params.sAspects;
+ for (int triesLeft = 2; --triesLeft >= 0; ) {
+ status_t err = setCodecColorAspects(params, true /* readBack */);
+ if (err != OK
+ || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ params.sAspects, origAspects, true /* usePlatformAspects */)) {
+ return err;
+ }
+ ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+ mComponentName.c_str());
}
return OK;
}
+// subsequent initial video encoder setup for surface mode
+status_t ACodec::setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+ android_dataspace *dataSpace /* nonnull */) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexInput;
+ ColorAspects &aspects = params.sAspects;
+
+ // reset default format and store resulting format into both input and output formats
+ getColorAspectsFromFormat(mConfigFormat, aspects);
+ int32_t width, height;
+ if (mInputFormat->findInt32("width", &width) && mInputFormat->findInt32("height", &height)) {
+ setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
+ }
+ setColorAspectsIntoFormat(aspects, mInputFormat);
+ setColorAspectsIntoFormat(aspects, mOutputFormat);
+
+ // communicate color aspects to codec, but do not allow any change
+ ColorAspects origAspects = aspects;
+ status_t err = OK;
+ for (int triesLeft = 2; mDescribeColorAspectsIndex && --triesLeft >= 0; ) {
+ err = setCodecColorAspects(params, true /* readBack */);
+ if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(aspects, origAspects)) {
+ break;
+ }
+ ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+ mComponentName.c_str());
+ }
+
+ *dataSpace = HAL_DATASPACE_BT709;
+ aspects = origAspects; // restore desired color aspects
+ status_t res = getDataSpace(
+ params, dataSpace, err == OK && mDescribeColorAspectsIndex /* tryCodec */);
+ if (err == OK) {
+ err = res;
+ }
+ mInputFormat->setInt32("android._dataspace", (int32_t)*dataSpace);
+ mInputFormat->setBuffer(
+ "android._color-aspects", ABuffer::CreateAsCopy(&aspects, sizeof(aspects)));
+
+ // update input format with codec supported color aspects (basically set unsupported
+ // aspects to Unspecified)
+ if (err == OK) {
+ (void)getInputColorAspectsForVideoEncoder(mInputFormat);
+ }
+
+ ALOGV("set default color aspects, updated input format to %s, output format to %s",
+ mInputFormat->debugString(4).c_str(), mOutputFormat->debugString(4).c_str());
+
+ return err;
+}
+
status_t ACodec::setupVideoEncoder(
- const char *mime, const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
+ const char *mime, const sp<AMessage> &msg,
+ sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
int32_t tmp;
if (!msg->findInt32("color-format", &tmp)) {
return INVALID_OPERATION;
@@ -3354,9 +3508,8 @@
// Set up color aspects on input, but propagate them to the output format, as they will
// not be read back from encoder.
- err = setColorAspects(
- kPortIndexInput, width, height, msg, outputFormat);
- if (err != OK) {
+ err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
+ if (err == ERROR_UNSUPPORTED) {
ALOGI("[%s] cannot encode color aspects. Ignoring.", mComponentName.c_str());
err = OK;
}
@@ -3693,13 +3846,6 @@
h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level);
}
- // XXX
- if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
- ALOGW("Use baseline profile instead of %d for AVC recording",
- h264type.eProfile);
- h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
- }
-
if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
@@ -3717,6 +3863,23 @@
h264type.bDirect8x8Inference = OMX_FALSE;
h264type.bDirectSpatialTemporal = OMX_FALSE;
h264type.nCabacInitIdc = 0;
+ } else if (h264type.eProfile == OMX_VIDEO_AVCProfileMain ||
+ h264type.eProfile == OMX_VIDEO_AVCProfileHigh) {
+ h264type.nSliceHeaderSpacing = 0;
+ h264type.bUseHadamard = OMX_TRUE;
+ h264type.nRefFrames = 2;
+ h264type.nBFrames = 1;
+ h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+ h264type.nAllowedPictureTypes =
+ OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
+ h264type.nRefIdx10ActiveMinus1 = 0;
+ h264type.nRefIdx11ActiveMinus1 = 0;
+ h264type.bEntropyCodingCABAC = OMX_TRUE;
+ h264type.bWeightedPPrediction = OMX_TRUE;
+ h264type.bconstIpred = OMX_TRUE;
+ h264type.bDirect8x8Inference = OMX_TRUE;
+ h264type.bDirectSpatialTemporal = OMX_TRUE;
+ h264type.nCabacInitIdc = 1;
}
if (h264type.nBFrames != 0) {
@@ -4329,47 +4492,58 @@
}
}
- if (portIndex != kPortIndexOutput) {
- // TODO: also get input crop
- break;
+ int32_t width = (int32_t)videoDef->nFrameWidth;
+ int32_t height = (int32_t)videoDef->nFrameHeight;
+
+ if (portIndex == kPortIndexOutput) {
+ OMX_CONFIG_RECTTYPE rect;
+ InitOMXParams(&rect);
+ rect.nPortIndex = portIndex;
+
+ if (mOMX->getConfig(
+ mNode,
+ (portIndex == kPortIndexOutput ?
+ OMX_IndexConfigCommonOutputCrop :
+ OMX_IndexConfigCommonInputCrop),
+ &rect, sizeof(rect)) != OK) {
+ rect.nLeft = 0;
+ rect.nTop = 0;
+ rect.nWidth = videoDef->nFrameWidth;
+ rect.nHeight = videoDef->nFrameHeight;
+ }
+
+ if (rect.nLeft < 0 ||
+ rect.nTop < 0 ||
+ rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
+ rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
+ ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
+ rect.nLeft, rect.nTop,
+ rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
+ videoDef->nFrameWidth, videoDef->nFrameHeight);
+ return BAD_VALUE;
+ }
+
+ notify->setRect(
+ "crop",
+ rect.nLeft,
+ rect.nTop,
+ rect.nLeft + rect.nWidth - 1,
+ rect.nTop + rect.nHeight - 1);
+
+ width = rect.nWidth;
+ height = rect.nHeight;
+
+ android_dataspace dataSpace = HAL_DATASPACE_UNKNOWN;
+ (void)getColorAspectsAndDataSpaceForVideoDecoder(
+ width, height, mConfigFormat, notify,
+ mUsingNativeWindow ? &dataSpace : NULL);
+ if (mUsingNativeWindow) {
+ notify->setInt32("android._dataspace", dataSpace);
+ }
+ } else {
+ (void)getInputColorAspectsForVideoEncoder(notify);
}
- (void)getColorAspects(portIndex, notify);
-
- OMX_CONFIG_RECTTYPE rect;
- InitOMXParams(&rect);
- rect.nPortIndex = portIndex;
-
- if (mOMX->getConfig(
- mNode,
- (portIndex == kPortIndexOutput ?
- OMX_IndexConfigCommonOutputCrop :
- OMX_IndexConfigCommonInputCrop),
- &rect, sizeof(rect)) != OK) {
- rect.nLeft = 0;
- rect.nTop = 0;
- rect.nWidth = videoDef->nFrameWidth;
- rect.nHeight = videoDef->nFrameHeight;
- }
-
- if (rect.nLeft < 0 ||
- rect.nTop < 0 ||
- rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
- rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
- ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
- rect.nLeft, rect.nTop,
- rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
- videoDef->nFrameWidth, videoDef->nFrameHeight);
- return BAD_VALUE;
- }
-
- notify->setRect(
- "crop",
- rect.nLeft,
- rect.nTop,
- rect.nLeft + rect.nWidth - 1,
- rect.nTop + rect.nHeight - 1);
-
break;
}
@@ -4702,29 +4876,85 @@
return OK;
}
-void ACodec::sendFormatChange(const sp<AMessage> &reply) {
- sp<AMessage> notify = mBaseOutputFormat->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
+void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
+ // aspects are normally communicated in ColorAspects
+ int32_t range, standard, transfer;
+ convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
- if (getPortFormat(kPortIndexOutput, notify) != OK) {
+ // if some aspects are unspecified, use dataspace fields
+ if (range == 0) {
+ range = (dataSpace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
+ }
+ if (standard == 0) {
+ standard = (dataSpace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
+ }
+ if (transfer == 0) {
+ transfer = (dataSpace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
+ }
+
+ mOutputFormat = mOutputFormat->dup(); // trigger an output format changed event
+ if (range != 0) {
+ mOutputFormat->setInt32("color-range", range);
+ }
+ if (standard != 0) {
+ mOutputFormat->setInt32("color-standard", standard);
+ }
+ if (transfer != 0) {
+ mOutputFormat->setInt32("color-transfer", transfer);
+ }
+
+ ALOGD("dataspace changed to %#x (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "(R:%d(%s), S:%d(%s), T:%d(%s))",
+ dataSpace,
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
+void ACodec::onOutputFormatChanged() {
+ // store new output format
+ mOutputFormat = mBaseOutputFormat->dup();
+
+ if (getPortFormat(kPortIndexOutput, mOutputFormat) != OK) {
ALOGE("[%s] Failed to get port format to send format change", mComponentName.c_str());
return;
}
- AString mime;
- CHECK(notify->findString("mime", &mime));
+ if (mTunneled) {
+ sendFormatChange();
+ }
+}
- int32_t left, top, right, bottom;
- if (mime == MEDIA_MIMETYPE_VIDEO_RAW &&
- mNativeWindow != NULL &&
- notify->findRect("crop", &left, &top, &right, &bottom)) {
- // notify renderer of the crop change
+void ACodec::addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify) {
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ if (mime == MEDIA_MIMETYPE_VIDEO_RAW && mNativeWindow != NULL) {
+ // notify renderer of the crop change and dataspace change
// NOTE: native window uses extended right-bottom coordinate
- reply->setRect("crop", left, top, right + 1, bottom + 1);
- } else if (mime == MEDIA_MIMETYPE_AUDIO_RAW &&
- (mEncoderDelay || mEncoderPadding)) {
+ int32_t left, top, right, bottom;
+ if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ notify->setRect("crop", left, top, right + 1, bottom + 1);
+ }
+
+ int32_t dataSpace;
+ if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+ notify->setInt32("dataspace", dataSpace);
+ }
+ }
+}
+
+void ACodec::sendFormatChange() {
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ if (mime == MEDIA_MIMETYPE_AUDIO_RAW && (mEncoderDelay || mEncoderPadding)) {
int32_t channelCount;
- CHECK(notify->findInt32("channel-count", &channelCount));
+ CHECK(mOutputFormat->findInt32("channel-count", &channelCount));
if (mSkipCutBuffer != NULL) {
size_t prevbufsize = mSkipCutBuffer->size();
if (prevbufsize != 0) {
@@ -4734,9 +4964,13 @@
mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
}
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatOutputFormatChanged);
+ notify->setMessage("format", mOutputFormat);
notify->post();
- mSentFormat = true;
+ // mLastOutputFormat is not used when tunneled; doing this just to stay consistent
+ mLastOutputFormat = mOutputFormat;
}
void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
@@ -5037,6 +5271,17 @@
bool ACodec::BaseState::onOMXEvent(
OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
+ if (event == OMX_EventDataSpaceChanged) {
+ ColorAspects aspects;
+ aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
+ aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
+ aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
+ aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+
+ mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
+ return true;
+ }
+
if (event != OMX_EventError) {
ALOGV("[%s] EVENT(%d, 0x%08x, 0x%08x)",
mCodec->mComponentName.c_str(), event, data1, data2);
@@ -5440,9 +5685,11 @@
sp<AMessage> reply =
new AMessage(kWhatOutputBufferDrained, mCodec);
- if (!mCodec->mSentFormat && rangeLength > 0) {
- mCodec->sendFormatChange(reply);
+ if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
+ mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
+ mCodec->sendFormatChange();
}
+
if (mCodec->usingMetadataOnEncoderOutput()) {
native_handle_t *handle = NULL;
VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)info->mData->data();
@@ -5540,6 +5787,13 @@
ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
}
+ int32_t dataSpace;
+ if (msg->findInt32("dataspace", &dataSpace)) {
+ status_t err = native_window_set_buffers_data_space(
+ mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
+ ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
+ }
+
int32_t render;
if (mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
@@ -5663,6 +5917,7 @@
mDeathNotifier.clear();
}
+ mCodec->mUsingNativeWindow = false;
mCodec->mNativeWindow.clear();
mCodec->mNativeWindowUsageBits = 0;
mCodec->mNode = 0;
@@ -6095,6 +6350,17 @@
"using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
}
+ sp<ABuffer> colorAspectsBuffer;
+ if (mCodec->mInputFormat->findBuffer("android._color-aspects", &colorAspectsBuffer)) {
+ err = mCodec->mOMX->setInternalOption(
+ mCodec->mNode, kPortIndexInput, IOMX::INTERNAL_OPTION_COLOR_ASPECTS,
+ colorAspectsBuffer->base(), colorAspectsBuffer->capacity());
+ if (err != OK) {
+ ALOGE("[%s] Unable to configure color aspects (err %d)",
+ mCodec->mComponentName.c_str(), err);
+ return err;
+ }
+ }
return OK;
}
@@ -6105,9 +6371,17 @@
sp<AMessage> notify = mCodec->mNotify->dup();
notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+ android_dataspace dataSpace;
+ status_t err =
+ mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+ notify->setMessage("input-format", mCodec->mInputFormat);
+ notify->setMessage("output-format", mCodec->mOutputFormat);
+
sp<IGraphicBufferProducer> bufferProducer;
- status_t err = mCodec->mOMX->createInputSurface(
- mCodec->mNode, kPortIndexInput, &bufferProducer, &mCodec->mInputMetadataType);
+ if (err == OK) {
+ err = mCodec->mOMX->createInputSurface(
+ mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer, &mCodec->mInputMetadataType);
+ }
if (err == OK) {
err = setupInputSurface();
@@ -6138,11 +6412,20 @@
CHECK(msg->findObject("input-surface", &obj));
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
- status_t err = mCodec->mOMX->setInputSurface(
- mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
- &mCodec->mInputMetadataType);
+ android_dataspace dataSpace;
+ status_t err =
+ mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+ notify->setMessage("input-format", mCodec->mInputFormat);
+ notify->setMessage("output-format", mCodec->mOutputFormat);
if (err == OK) {
+ err = mCodec->mOMX->setInputSurface(
+ mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
+ &mCodec->mInputMetadataType);
+ }
+
+ if (err == OK) {
+ surface->getBufferConsumer()->setDefaultBufferDataSpace(dataSpace);
err = setupInputSurface();
}
@@ -6684,6 +6967,8 @@
{
CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
+ mCodec->onOutputFormatChanged();
+
if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
mCodec->mMetadataBuffersToSubmit = 0;
CHECK_EQ(mCodec->mOMX->sendCommand(
@@ -6694,15 +6979,8 @@
mCodec->freeOutputBuffersNotOwnedByComponent();
mCodec->changeState(mCodec->mOutputPortSettingsChangedState);
- } else if (data2 == OMX_IndexConfigCommonOutputCrop
- || data2 == OMX_IndexConfigAndroidIntraRefresh) {
- mCodec->mSentFormat = false;
-
- if (mCodec->mTunneled) {
- sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
- mCodec->sendFormatChange(dummy);
- }
- } else {
+ } else if (data2 != OMX_IndexConfigCommonOutputCrop
+ && data2 != OMX_IndexConfigAndroidIntraRefresh) {
ALOGV("[%s] OMX_EventPortSettingsChanged 0x%08x",
mCodec->mComponentName.c_str(), data2);
}
@@ -6829,13 +7107,6 @@
return false;
}
- mCodec->mSentFormat = false;
-
- if (mCodec->mTunneled) {
- sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
- mCodec->sendFormatChange(dummy);
- }
-
ALOGV("[%s] Output port now reenabled.", mCodec->mComponentName.c_str());
if (mCodec->mExecutingState->active()) {
@@ -6894,7 +7165,7 @@
ALOGV("[%s] Now Executing->Idle", mCodec->mComponentName.c_str());
mComponentNowIdle = false;
- mCodec->mSentFormat = false;
+ mCodec->mLastOutputFormat.clear();
}
bool ACodec::ExecutingToIdleState::onOMXEvent(
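
Note: for reference, the data1/data2 layout the new OMX_EventDataSpaceChanged
handler above decodes, seen from the component side (notifyEvent is
illustrative):

    // data1 carries the new android_dataspace; data2 packs the ColorAspects as
    // (Range << 24) | (Primaries << 16) | (MatrixCoeffs << 8) | Transfer
    OMX_U32 data2 = (aspects.mRange << 24) | (aspects.mPrimaries << 16)
            | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer;
    notifyEvent(OMX_EventDataSpaceChanged, (OMX_U32)dataSpace, data2);
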
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 39f73c3..59f839c 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -774,10 +774,10 @@
// apply encoder color format if specified
if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
- ALOGV("Using encoder format: %#x", mEncoderFormat);
+ ALOGI("Using encoder format: %#x", mEncoderFormat);
}
if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
- ALOGV("Using encoder data space: %#x", mEncoderDataSpace);
+ ALOGI("Using encoder data space: %#x", mEncoderDataSpace);
}
}
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index d5a869d..322eab9 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -34,6 +34,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
@@ -2083,6 +2084,21 @@
break;
}
+ case FOURCC('c', 'o', 'l', 'r'):
+ {
+ *offset += chunk_size;
+ // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
+ // ignore otherwise
+ if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+ status_t err = parseColorInfo(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ break;
+ }
+
case FOURCC('t', 'i', 't', 'l'):
case FOURCC('p', 'e', 'r', 'f'):
case FOURCC('a', 'u', 't', 'h'):
@@ -2663,6 +2679,49 @@
return OK;
}
+status_t MPEG4Extractor::parseColorInfo(off64_t offset, size_t size) {
+ if (size < 4 || size == SIZE_MAX || mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(offset, buffer, size) != (ssize_t)size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ int32_t type = U32_AT(&buffer[0]);
+ if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
+ || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+ int32_t primaries = U16_AT(&buffer[4]);
+ int32_t transfer = U16_AT(&buffer[6]);
+ int32_t coeffs = U16_AT(&buffer[8]);
+ bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+
+ ColorAspects aspects;
+ ColorUtils::convertIsoColorAspectsToCodecAspects(
+ primaries, transfer, coeffs, fullRange, aspects);
+
+ // only store the first color specification
+ if (!mLastTrack->meta->hasData(kKeyColorPrimaries)) {
+ mLastTrack->meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+ mLastTrack->meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
+ mLastTrack->meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+ mLastTrack->meta->setInt32(kKeyColorRange, aspects.mRange);
+ }
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ return OK;
+}
+
status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
if (size < 4 || size == SIZE_MAX) {
return ERROR_MALFORMED;
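
Note: the 'colr' payload parseColorInfo() reads above, for reference (offsets
into the box payload; the full-range byte exists only for 'nclx'):

    // offset  size  field
    //   0      4    colour_type: 'nclx' (size >= 11) or 'nclc' (size >= 10)
    //   4      2    colour_primaries
    //   6      2    transfer_characteristics
    //   8      2    matrix_coefficients
    //  10      1    full_range_flag in the top bit ('nclx' only; buffer[10] & 128)
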
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index ef0e17f..7c03886 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -30,6 +30,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
@@ -371,6 +372,7 @@
void writeVmhdBox();
void writeHdlrBox();
void writeTkhdBox(uint32_t now);
+ void writeColrBox();
void writeMp4aEsdsBox();
void writeMp4vEsdsBox();
void writeAudioFourCCBox();
@@ -2353,6 +2355,7 @@
if (buffer->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecConfig)
&& isCodecConfig) {
CHECK(!mGotAllCodecSpecificData);
+ mMeta = mSource->getFormat(); // get output format after format change
if (mIsAvc) {
status_t err = makeAVCCodecSpecificData(
@@ -2960,9 +2963,32 @@
}
writePaspBox();
+ writeColrBox();
mOwner->endBox(); // mp4v, s263 or avc1
}
+void MPEG4Writer::Track::writeColrBox() {
+ ColorAspects aspects;
+ memset(&aspects, 0, sizeof(aspects));
+ // TRICKY: using | instead of || because we want to execute all findInt32-s
+ if (mMeta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries)
+ | mMeta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer)
+ | mMeta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs)
+ | mMeta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange)) {
+ int32_t primaries, transfer, coeffs;
+ bool fullRange;
+ ColorUtils::convertCodecColorAspectsToIsoAspects(
+ aspects, &primaries, &transfer, &coeffs, &fullRange);
+ mOwner->beginBox("colr");
+ mOwner->writeFourcc("nclx");
+ mOwner->writeInt16(primaries);
+ mOwner->writeInt16(transfer);
+ mOwner->writeInt16(coeffs);
+ mOwner->writeInt8(fullRange ? 128 : 0);
+ mOwner->endBox(); // colr
+ }
+}
+
void MPEG4Writer::Track::writeAudioFourCCBox() {
const char *mime;
bool success = mMeta->findCString(kKeyMIMEType, &mime);
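
Why writeColrBox() uses | rather than ||: short-circuiting would skip the remaining findInt32() calls (and leave their out-parameters untouched) as soon as one key is found. A standalone demonstration of the difference:

    #include <cstdio>

    static bool f(int *out) { *out = 1; return true; }
    static bool g(int *out) { *out = 2; return true; }

    int main() {
        int a = 0, b = 0;
        if (f(&a) || g(&b)) { /* short-circuit: g() never ran, b is still 0 */ }
        printf("||: a=%d b=%d\n", a, b);   // ||: a=1 b=0
        a = b = 0;
        if (f(&a) | g(&b)) { /* both operands evaluated, b is 2 */ }
        printf("|:  a=%d b=%d\n", a, b);   // |:  a=1 b=2
        return 0;
    }
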
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index fbdf56f..e8cd58a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -399,9 +399,11 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type =
+ secureCodec ? MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -468,13 +470,14 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = (mFlags & kFlagIsSecure) ?
- kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+ MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -553,7 +556,8 @@
return size;
}
-void MediaCodec::addResource(const String8 &type, const String8 &subtype, uint64_t value) {
+void MediaCodec::addResource(
+ MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
Vector<MediaResource> resources;
resources.push_back(MediaResource(type, subtype, value));
mResourceManagerService->addResource(
@@ -565,13 +569,14 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = (mFlags & kFlagIsSecure) ?
- kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+ MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -1228,18 +1233,18 @@
mFlags &= ~kFlagUsesSoftwareRenderer;
}
- String8 resourceType;
+ MediaResource::Type resourceType;
if (mComponentName.endsWith(".secure")) {
mFlags |= kFlagIsSecure;
- resourceType = String8(kResourceSecureCodec);
+ resourceType = MediaResource::kSecureCodec;
} else {
mFlags &= ~kFlagIsSecure;
- resourceType = String8(kResourceNonSecureCodec);
+ resourceType = MediaResource::kNonSecureCodec;
}
if (mIsVideo) {
// audio codec is currently ignored.
- addResource(resourceType, String8(kResourceVideoCodec), 1);
+ addResource(resourceType, MediaResource::kVideoCodec, 1);
}
(new AMessage)->postReply(mReplyID);
@@ -1261,7 +1266,10 @@
CHECK(msg->findMessage("input-format", &mInputFormat));
CHECK(msg->findMessage("output-format", &mOutputFormat));
-
+ ALOGV("[%s] configured as input format: %s, output format: %s",
+ mComponentName.c_str(),
+ mInputFormat->debugString(4).c_str(),
+ mOutputFormat->debugString(4).c_str());
int32_t usingSwRenderer;
if (mOutputFormat->findInt32("using-sw-renderer", &usingSwRenderer)
&& usingSwRenderer) {
@@ -1280,6 +1288,12 @@
if (!msg->findInt32("err", &err)) {
sp<RefBase> obj;
msg->findObject("input-surface", &obj);
+ CHECK(msg->findMessage("input-format", &mInputFormat));
+ CHECK(msg->findMessage("output-format", &mOutputFormat));
+ ALOGV("[%s] input surface created as input format: %s, output format: %s",
+ mComponentName.c_str(),
+ mInputFormat->debugString(4).c_str(),
+ mOutputFormat->debugString(4).c_str());
CHECK(obj != NULL);
response->setObject("input-surface", obj);
mHaveInputSurface = true;
@@ -1376,10 +1390,9 @@
// allocating input buffers, so this is a good
// indication that now all buffers are allocated.
if (mIsVideo) {
- String8 subtype;
addResource(
- String8(kResourceGraphicMemory),
- subtype,
+ MediaResource::kGraphicMemory,
+ MediaResource::kUnspecifiedSubType,
getGraphicBufferSize());
}
setState(STARTED);
@@ -1394,21 +1407,34 @@
case CodecBase::kWhatOutputFormatChanged:
{
- ALOGV("codec output format changed");
+ CHECK(msg->findMessage("format", &mOutputFormat));
+
+ ALOGV("[%s] output format changed to: %s",
+ mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
if (mSoftRenderer == NULL &&
mSurface != NULL &&
(mFlags & kFlagUsesSoftwareRenderer)) {
AString mime;
- CHECK(msg->findString("mime", &mime));
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ // TODO: propagate color aspects to software renderer to allow better
+ // color conversion to RGB. For now, just mark dataspace for YUV
+ // rendering.
+ int32_t dataSpace;
+ if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+ ALOGD("[%s] setting dataspace on output surface to #%x",
+ mComponentName.c_str(), dataSpace);
+ int err = native_window_set_buffers_data_space(
+ mSurface.get(), (android_dataspace)dataSpace);
+ ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+ }
if (mime.startsWithIgnoreCase("video/")) {
mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
}
}
- mOutputFormat = msg;
-
if (mFlags & kFlagIsEncoder) {
// Before we announce the format change we should
// collect codec specific data and amend the output
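
The MediaCodec changes above replace stringly-typed resource names with the typed MediaResource enums. A hedged sketch of the new call-site pattern; the struct below is a stand-in inferred from the constructor usage in this diff, not the real class:

    #include <cstdint>
    #include <vector>

    struct MediaResource {
        enum Type { kUnspecified = 0, kSecureCodec, kNonSecureCodec, kGraphicMemory };
        enum SubType { kUnspecifiedSubType = 0, kAudioCodec, kVideoCodec };
        MediaResource(Type t, SubType s, uint64_t v) : type(t), subType(s), value(v) {}
        MediaResource(Type t, uint64_t v)
                : type(t), subType(kUnspecifiedSubType), value(v) {}
        Type type; SubType subType; uint64_t value;
    };

    std::vector<MediaResource> buildCodecResources(bool secure, bool isVideo) {
        std::vector<MediaResource> resources;
        resources.emplace_back(
                secure ? MediaResource::kSecureCodec : MediaResource::kNonSecureCodec,
                isVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec, 1);
        // Buffer size is unknown at this point; 1 is fine because reclaim
        // does not yet consider the requester's buffer size.
        resources.emplace_back(MediaResource::kGraphicMemory, 1);
        return resources;
    }
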
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 5f9a1c0..b5f7b12 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -40,8 +40,9 @@
namespace android {
-const int kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
-const int kDefaultSwVideoEncoderDataSpace = HAL_DATASPACE_BT709;
+const int32_t kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+const int32_t kDefaultHwVideoEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+const int32_t kDefaultVideoEncoderDataSpace = HAL_DATASPACE_BT709;
const int kStopTimeoutUs = 300000; // allow 300 ms for shutting down encoder
@@ -515,13 +516,19 @@
sp<AMessage> inputFormat;
int32_t usingSwReadOften;
mSetEncoderFormat = false;
- if (mEncoder->getInputFormat(&inputFormat) == OK
- && inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
- && usingSwReadOften) {
- // this is a SW encoder; signal source to allocate SW readable buffers
+ if (mEncoder->getInputFormat(&inputFormat) == OK) {
mSetEncoderFormat = true;
- mEncoderFormat = kDefaultSwVideoEncoderFormat;
- mEncoderDataSpace = kDefaultSwVideoEncoderDataSpace;
+ if (inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
+ && usingSwReadOften) {
+ // this is a SW encoder; signal source to allocate SW readable buffers
+ mEncoderFormat = kDefaultSwVideoEncoderFormat;
+ } else {
+ mEncoderFormat = kDefaultHwVideoEncoderFormat;
+ }
+ if (!inputFormat->findInt32("android._dataspace", &mEncoderDataSpace)) {
+ mEncoderDataSpace = kDefaultVideoEncoderDataSpace;
+ }
+ ALOGV("setting dataspace %#x, format %#x", mEncoderDataSpace, mEncoderFormat);
}
err = mEncoder->start();
@@ -774,6 +781,13 @@
mAvailEncoderInputIndices.push_back(index);
feedEncoderInputBuffers();
+ } else if (cbID == MediaCodec::CB_OUTPUT_FORMAT_CHANGED) {
+ status_t err = mEncoder->getOutputFormat(&mOutputFormat);
+ if (err != OK) {
+ signalEOS(err);
+ break;
+ }
+ convertMessageToMetaData(mOutputFormat, mMeta);
} else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
int32_t index;
size_t offset;
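
The reworked MediaCodecSource block now always picks an encoder format once getInputFormat() succeeds: a CPU-readable pixel format when the codec reports "using-sw-read-often", otherwise the implementation-defined HW format, with the dataspace taken from "android._dataspace" when present and BT.709 as the fallback. A small restatement of that decision rule, with std::optional standing in for the AMessage lookups:

    #include <cstdint>
    #include <optional>

    struct EncoderInputConfig { int32_t format; int32_t dataSpace; };

    EncoderInputConfig pickEncoderInput(
            std::optional<int32_t> usingSwReadOften,   // "using-sw-read-often"
            std::optional<int32_t> inputDataSpace,     // "android._dataspace"
            int32_t swFormat, int32_t hwFormat, int32_t defaultDataSpace) {
        EncoderInputConfig cfg;
        // SW encoders read buffers on the CPU, so they want a CPU-readable format.
        cfg.format = (usingSwReadOften && *usingSwReadOften) ? swFormat : hwFormat;
        // Prefer the dataspace the codec reported; otherwise fall back to BT.709.
        cfg.dataSpace = inputDataSpace ? *inputDataSpace : defaultDataSpace;
        return cfg;
    }
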
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 8e72405..a523d0e 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -112,7 +112,7 @@
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
virtual status_t createPersistentInputSurface(
@@ -388,10 +388,10 @@
}
status_t MuxOMX::createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
status_t err = getOMX(node)->createInputSurface(
- node, port_index, bufferProducer, type);
+ node, port_index, dataSpace, bufferProducer, type);
return err;
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 45fb785..7027780 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -215,6 +215,7 @@
if (err != OK) {
ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
decoder->release();
+ source->stop();
return NULL;
}
@@ -223,6 +224,7 @@
if (err != OK) {
ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
decoder->release();
+ source->stop();
return NULL;
}
@@ -328,7 +330,6 @@
if (err != OK || size <= 0 || outputFormat == NULL) {
ALOGE("Failed to decode thumbnail frame");
source->stop();
- decoder->stop();
decoder->release();
return NULL;
}
@@ -401,7 +402,6 @@
videoFrameBuffer.clear();
source->stop();
decoder->releaseOutputBuffer(index);
- decoder->stop();
decoder->release();
if (err != OK) {
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 3fcca07..6ec8c41 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -208,6 +208,7 @@
mEnableAltRef = DEFAULT_ENABLE_ALT_REF;
mEncSpeed = DEFAULT_ENC_SPEED;
mIntra4x4 = DEFAULT_INTRA4x4;
+ mConstrainedIntraFlag = DEFAULT_CONSTRAINED_INTRA;
mAIRMode = DEFAULT_AIR;
mAIRRefreshPeriod = DEFAULT_AIR_REFRESH_PERIOD;
mPSNREnable = DEFAULT_PSNR_ENABLE;
@@ -305,6 +306,7 @@
s_ipe_params_ip.u4_enable_intra_4x4 = mIntra4x4;
s_ipe_params_ip.u4_enc_speed_preset = mEncSpeed;
+ s_ipe_params_ip.u4_constrained_intra_pred = mConstrainedIntraFlag;
s_ipe_params_ip.u4_timestamp_high = -1;
s_ipe_params_ip.u4_timestamp_low = -1;
@@ -1018,6 +1020,7 @@
}
mIInterval = avcType->nPFrames + avcType->nBFrames;
+ mConstrainedIntraFlag = avcType->bconstIpred;
if (OMX_VIDEO_AVCLoopFilterDisable == avcType->eLoopFilterMode)
mDisableDeblkLevel = 4;
@@ -1027,7 +1030,6 @@
|| avcType->nRefIdx10ActiveMinus1 != 0
|| avcType->nRefIdx11ActiveMinus1 != 0
|| avcType->bWeightedPPrediction != OMX_FALSE
- || avcType->bconstIpred != OMX_FALSE
|| avcType->bDirect8x8Inference != OMX_FALSE
|| avcType->bDirectSpatialTemporal != OMX_FALSE
|| avcType->nCabacInitIdc != 0) {
@@ -1062,7 +1064,8 @@
return OMX_ErrorUndefined;
}
- intraRefreshParams->nRefreshPeriod = mAIRRefreshPeriod;
+ intraRefreshParams->nRefreshPeriod =
+ (mAIRMode == IVE_AIR_MODE_NONE) ? 0 : mAIRRefreshPeriod;
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 232c6e0..cf6f899 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -95,8 +95,7 @@
#define DEFAULT_SOC SOC_GENERIC
#define DEFAULT_INTRA4x4 0
#define STRLENGTH 500
-
-
+#define DEFAULT_CONSTRAINED_INTRA 0
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -182,6 +181,7 @@
bool mReconEnable;
bool mPSNREnable;
bool mEntropyMode;
+ bool mConstrainedIntraFlag;
IVE_SPEED_CONFIG mEncSpeed;
uint8_t *mConversionBuffers[MAX_CONVERSION_BUFFERS];
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index e92c192..bbc4d26 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -363,6 +363,16 @@
}
}
+ // TODO: propagate color aspects to software renderer to allow better
+ // color conversion to RGB. For now, just mark dataspace for YUV rendering.
+ android_dataspace dataSpace;
+ if (format->findInt32("android._dataspace", (int32_t *)&dataSpace) && dataSpace != mDataSpace) {
+ ALOGD("setting dataspace on output surface to #%x", dataSpace);
+ if ((err = native_window_set_buffers_data_space(mNativeWindow.get(), dataSpace))) {
+ ALOGW("failed to set dataspace on surface (%d)", err);
+ }
+ mDataSpace = dataSpace;
+ }
if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf, -1)) != 0) {
ALOGW("Surface::queueBuffer returned error %d", err);
} else {
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index 2b86b0e..99031ca 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -29,6 +29,7 @@
typedef ColorAspects CA;
typedef ColorUtils CU;
+const static
ALookup<CU::ColorRange, CA::Range> sRanges{
{
{ CU::kColorRangeLimited, CA::RangeLimited },
@@ -37,6 +38,7 @@
}
};
+const static
ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandards {
{
{ CU::kColorStandardUnspecified, { CA::PrimariesUnspecified, CA::MatrixUnspecified } },
@@ -56,6 +58,7 @@
}
};
+const static
ALookup<CU::ColorTransfer, CA::Transfer> sTransfers{
{
{ CU::kColorTransferUnspecified, CA::TransferUnspecified },
@@ -243,31 +246,95 @@
}
}
+const static
+ALookup<int32_t, ColorAspects::Primaries> sIsoPrimaries {
+ {
+ { 1, ColorAspects::PrimariesBT709_5 },
+ { 2, ColorAspects::PrimariesUnspecified },
+ { 4, ColorAspects::PrimariesBT470_6M },
+ { 5, ColorAspects::PrimariesBT601_6_625 },
+ { 6, ColorAspects::PrimariesBT601_6_525 /* main */},
+ { 7, ColorAspects::PrimariesBT601_6_525 },
+ // -- ITU T.832 201201 ends here
+ { 8, ColorAspects::PrimariesGenericFilm },
+ { 9, ColorAspects::PrimariesBT2020 },
+ { 10, ColorAspects::PrimariesOther /* XYZ */ },
+ }
+};
+
+const static
+ALookup<int32_t, ColorAspects::Transfer> sIsoTransfers {
+ {
+ { 1, ColorAspects::TransferSMPTE170M /* main */},
+ { 2, ColorAspects::TransferUnspecified },
+ { 4, ColorAspects::TransferGamma22 },
+ { 5, ColorAspects::TransferGamma28 },
+ { 6, ColorAspects::TransferSMPTE170M },
+ { 7, ColorAspects::TransferSMPTE240M },
+ { 8, ColorAspects::TransferLinear },
+ { 9, ColorAspects::TransferOther /* log 100:1 */ },
+ { 10, ColorAspects::TransferOther /* log 316:1 */ },
+ { 11, ColorAspects::TransferXvYCC },
+ { 12, ColorAspects::TransferBT1361 },
+ { 13, ColorAspects::TransferSRGB },
+ // -- ITU T.832 201201 ends here
+ { 14, ColorAspects::TransferSMPTE170M },
+ { 15, ColorAspects::TransferSMPTE170M },
+ { 16, ColorAspects::TransferST2084 },
+ { 17, ColorAspects::TransferST428 },
+ }
+};
+
+const static
+ALookup<int32_t, ColorAspects::MatrixCoeffs> sIsoMatrixCoeffs {
+ {
+ { 0, ColorAspects::MatrixOther },
+ { 1, ColorAspects::MatrixBT709_5 },
+ { 2, ColorAspects::MatrixUnspecified },
+ { 4, ColorAspects::MatrixBT470_6M },
+ { 6, ColorAspects::MatrixBT601_6 /* main */ },
+ { 5, ColorAspects::MatrixBT601_6 },
+ { 7, ColorAspects::MatrixSMPTE240M },
+ { 8, ColorAspects::MatrixOther /* YCgCo */ },
+ // -- ITU T.832 201201 ends here
+ { 9, ColorAspects::MatrixBT2020 },
+ { 10, ColorAspects::MatrixBT2020Constant },
+ }
+};
+
// static
-void ColorUtils::setDefaultPlatformColorAspectsIfNeeded(
- int32_t &range, int32_t &standard, int32_t &transfer,
- int32_t width, int32_t height) {
- if (range == ColorUtils::kColorRangeUnspecified) {
- range = ColorUtils::kColorRangeLimited;
+void ColorUtils::convertCodecColorAspectsToIsoAspects(
+ const ColorAspects &aspects,
+ int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange) {
+ if (aspects.mPrimaries == ColorAspects::PrimariesOther ||
+ !sIsoPrimaries.map(aspects.mPrimaries, primaries)) {
+ CHECK(sIsoPrimaries.map(ColorAspects::PrimariesUnspecified, primaries));
}
+ if (aspects.mTransfer == ColorAspects::TransferOther ||
+ !sIsoTransfers.map(aspects.mTransfer, transfer)) {
+ CHECK(sIsoTransfers.map(ColorAspects::TransferUnspecified, transfer));
+ }
+ if (aspects.mMatrixCoeffs == ColorAspects::MatrixOther ||
+ !sIsoMatrixCoeffs.map(aspects.mMatrixCoeffs, coeffs)) {
+ CHECK(sIsoMatrixCoeffs.map(ColorAspects::MatrixUnspecified, coeffs));
+ }
+ *fullRange = aspects.mRange == ColorAspects::RangeFull;
+}
- if (standard == ColorUtils::kColorStandardUnspecified) {
- // Default to BT2020, BT709 or BT601 based on size. Allow 2.35:1 aspect ratio. Limit BT601
- // to PAL or smaller, BT2020 to 4K or larger, leaving BT709 for all resolutions in between.
- if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
- standard = ColorUtils::kColorStandardBT2020;
- } else if ((width <= 720 && height > 480) || (height <= 720 && width > 480)) {
- standard = ColorUtils::kColorStandardBT601_625;
- } else if ((width <= 720 && height <= 480) || (height <= 720 && width <= 480)) {
- standard = ColorUtils::kColorStandardBT601_525;
- } else {
- standard = ColorUtils::kColorStandardBT709;
- }
+// static
+void ColorUtils::convertIsoColorAspectsToCodecAspects(
+ int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+ ColorAspects &aspects) {
+ if (!sIsoPrimaries.map(primaries, &aspects.mPrimaries)) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
}
-
- if (transfer == ColorUtils::kColorTransferUnspecified) {
- transfer = ColorUtils::kColorTransferSMPTE_170M;
+ if (!sIsoTransfers.map(transfer, &aspects.mTransfer)) {
+ aspects.mTransfer = ColorAspects::TransferUnspecified;
}
+ if (!sIsoMatrixCoeffs.map(coeffs, &aspects.mMatrixCoeffs)) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ }
+ aspects.mRange = fullRange ? ColorAspects::RangeFull : ColorAspects::RangeLimited;
}
// static
@@ -308,5 +375,208 @@
}
}
+// TODO: move this into a Video HAL
+ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandardFallbacks {
+ {
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT601_6 } },
+ { CU::kColorStandardBT709, { CA::PrimariesBT709_5, CA::MatrixSMPTE240M } },
+ { CU::kColorStandardBT709, { CA::PrimariesBT709_5, CA::MatrixBT2020 } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT709_5, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT2020Constant,
+ { CA::PrimariesBT470_6M, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT601_6_625, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_625, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT2020Constant,
+ { CA::PrimariesGenericFilm, CA::MatrixBT2020Constant } },
+ }
+};
+
+ALookup<CU::ColorStandard, CA::Primaries> sStandardPrimariesFallbacks {
+ {
+ { CU::kColorStandardFilm, CA::PrimariesGenericFilm },
+ { CU::kColorStandardBT470M, CA::PrimariesBT470_6M },
+ { CU::kColorStandardBT2020, CA::PrimariesBT2020 },
+ { CU::kColorStandardBT601_525_Unadjusted, CA::PrimariesBT601_6_525 },
+ { CU::kColorStandardBT601_625_Unadjusted, CA::PrimariesBT601_6_625 },
+ }
+};
+
+static ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
+ {
+ { HAL_DATASPACE_SRGB, HAL_DATASPACE_V0_SRGB },
+ { HAL_DATASPACE_BT709, HAL_DATASPACE_V0_BT709 },
+ { HAL_DATASPACE_SRGB_LINEAR, HAL_DATASPACE_V0_SRGB_LINEAR },
+ { HAL_DATASPACE_BT601_525, HAL_DATASPACE_V0_BT601_525 },
+ { HAL_DATASPACE_BT601_625, HAL_DATASPACE_V0_BT601_625 },
+ { HAL_DATASPACE_JFIF, HAL_DATASPACE_V0_JFIF },
+ }
+};
+
+bool ColorUtils::convertDataSpaceToV0(android_dataspace &dataSpace) {
+ (void)sLegacyDataSpaceToV0.lookup(dataSpace, &dataSpace);
+ return (dataSpace & 0xC000FFFF) == 0;
+}
+
+bool ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects) {
+ // remove changed aspects (change them to Unspecified)
+ bool changed = false;
+ if (aspects.mRange && aspects.mRange != orig.mRange) {
+ aspects.mRange = ColorAspects::RangeUnspecified;
+ changed = true;
+ }
+ if (aspects.mPrimaries && aspects.mPrimaries != orig.mPrimaries) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+ if (usePlatformAspects) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ }
+ changed = true;
+ }
+ if (aspects.mMatrixCoeffs && aspects.mMatrixCoeffs != orig.mMatrixCoeffs) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ if (usePlatformAspects) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+ }
+ changed = true;
+ }
+ if (aspects.mTransfer && aspects.mTransfer != orig.mTransfer) {
+ aspects.mTransfer = ColorAspects::TransferUnspecified;
+ changed = true;
+ }
+ return changed;
+}
+
+// static
+android_dataspace ColorUtils::getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand) {
+ // This platform implementation never expands color space (e.g. returns an expanded
+ // dataspace to use where the codec does in-the-background color space conversion)
+ mayExpand = false;
+
+ if (aspects.mRange == ColorAspects::RangeUnspecified
+ || aspects.mPrimaries == ColorAspects::PrimariesUnspecified
+ || aspects.mMatrixCoeffs == ColorAspects::MatrixUnspecified
+ || aspects.mTransfer == ColorAspects::TransferUnspecified) {
+ ALOGW("expected specified color aspects (%u:%u:%u:%u)",
+ aspects.mRange, aspects.mPrimaries, aspects.mMatrixCoeffs, aspects.mTransfer);
+ }
+
+ // default to video range and transfer
+ ColorRange range = kColorRangeLimited;
+ ColorTransfer transfer = kColorTransferSMPTE_170M;
+ (void)sRanges.map(aspects.mRange, &range);
+ (void)sTransfers.map(aspects.mTransfer, &transfer);
+
+ ColorStandard standard = kColorStandardBT709;
+ auto pair = std::make_pair(aspects.mPrimaries, aspects.mMatrixCoeffs);
+ if (!sStandards.map(pair, &standard)) {
+ if (!sStandardFallbacks.map(pair, &standard)) {
+ (void)sStandardPrimariesFallbacks.map(aspects.mPrimaries, &standard);
+
+ if (aspects.mMatrixCoeffs == ColorAspects::MatrixBT2020Constant) {
+ range = kColorRangeFull;
+ }
+ }
+ }
+
+ android_dataspace dataSpace = (android_dataspace)(
+ (range << HAL_DATASPACE_RANGE_SHIFT) | (standard << HAL_DATASPACE_STANDARD_SHIFT) |
+ (transfer << HAL_DATASPACE_TRANSFER_SHIFT));
+ (void)sLegacyDataSpaceToV0.rlookup(dataSpace, &dataSpace);
+
+ if (!mayExpand) {
+ // update codec aspects based on dataspace
+ convertPlatformColorAspectsToCodecAspects(range, standard, transfer, aspects);
+ }
+ return dataSpace;
+}
+
+// static
+void ColorUtils::getColorConfigFromFormat(
+ const sp<AMessage> &format, int32_t *range, int32_t *standard, int32_t *transfer) {
+ if (!format->findInt32("color-range", range)) {
+ *range = kColorRangeUnspecified;
+ }
+ if (!format->findInt32("color-standard", standard)) {
+ *standard = kColorStandardUnspecified;
+ }
+ if (!format->findInt32("color-transfer", transfer)) {
+ *transfer = kColorTransferUnspecified;
+ }
+}
+
+// static
+void ColorUtils::copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target) {
+ // 0 values are unspecified
+ int32_t value;
+ if (source->findInt32("color-range", &value)) {
+ target->setInt32("color-range", value);
+ }
+ if (source->findInt32("color-standard", &value)) {
+ target->setInt32("color-standard", value);
+ }
+ if (source->findInt32("color-transfer", &value)) {
+ target->setInt32("color-transfer", value);
+ }
+}
+
+// static
+void ColorUtils::getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects) {
+ int32_t range, standard, transfer;
+ getColorConfigFromFormat(format, &range, &standard, &transfer);
+
+ if (convertPlatformColorAspectsToCodecAspects(
+ range, standard, transfer, aspects) != OK) {
+ ALOGW("Ignoring illegal color aspects(R:%d(%s), S:%d(%s), T:%d(%s))",
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+ // Invalid values were converted to unspecified values in params, but otherwise were not changed
+ // For encoders, we leave these as is. For decoders, we will use default values.
+ }
+ ALOGV("Got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "from format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
+// static
+void ColorUtils::setColorAspectsIntoFormat(
+ const ColorAspects &aspects, sp<AMessage> &format, bool force) {
+ int32_t range = 0, standard = 0, transfer = 0;
+ convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
+ // save set values to base output format
+ // (encoder input format will read back actually supported values by the codec)
+ if (range != 0 || force) {
+ format->setInt32("color-range", range);
+ }
+ if (standard != 0 || force) {
+ format->setInt32("color-standard", standard);
+ }
+ if (transfer != 0 || force) {
+ format->setInt32("color-transfer", transfer);
+ }
+ ALOGV("Setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "into format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
} // namespace android
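
getDataSpaceForColorAspects() above assembles the dataspace by shifting range, standard and transfer into the android_dataspace aspect fields. A standalone sketch of that packing; the shift positions (STANDARD at bit 16, TRANSFER at bit 22, RANGE at bit 27) are the stock android_dataspace layout, stated here as an assumption, see system/graphics.h:

    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
        kStandardShift = 16,
        kTransferShift = 22,
        kRangeShift    = 27,
    };

    static uint32_t packDataSpace(uint32_t range, uint32_t standard, uint32_t transfer) {
        return (range << kRangeShift) | (standard << kStandardShift)
             | (transfer << kTransferShift);
    }

    int main() {
        // e.g. limited range (2), BT.709 standard (1), SMPTE 170M transfer (3)
        uint32_t ds = packDataSpace(2, 1, 3);
        printf("dataspace = %#x\n", ds);  // 0x10c10000
        // convertDataSpaceToV0() treats a dataspace as V0 when the legacy low
        // 16 bits and the two top (reserved) bits are all clear:
        printf("isV0 = %d\n", (ds & 0xC000FFFF) == 0);
        return 0;
    }
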
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 3e8fb7c..18b14e1 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -110,6 +110,7 @@
status_t readMetaData();
status_t parseChunk(off64_t *offset, int depth);
status_t parseITunesMetaData(off64_t offset, size_t size);
+ status_t parseColorInfo(off64_t offset, size_t size);
status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
void parseID3v2MetaData(off64_t offset);
status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index c715939..9726741 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -94,7 +94,7 @@
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index b1cb91d..25c3773 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -80,7 +80,8 @@
OMX::buffer_id buffer);
status_t createInputSurface(
- OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer,
+ OMX_U32 portIndex, android_dataspace dataSpace,
+ sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type);
static status_t createPersistentInputSurface(
@@ -93,6 +94,8 @@
status_t signalEndOfInputStream();
+ void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
+
status_t allocateSecureBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data, native_handle_t **native_handle);
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 757b308..258511a 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -54,6 +54,7 @@
int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
+ android_dataspace mDataSpace;
FrameRenderTracker mRenderTracker;
SoftwareRenderer(const SoftwareRenderer &);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index acdc4b0..995e50e 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -20,12 +20,16 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
+
#include "GraphicBufferSource.h"
+#include "OMXUtils.h"
#include <OMX_Core.h>
#include <OMX_IndexExt.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
@@ -39,6 +43,8 @@
static const bool EXTRA_CHECK = true;
+static const OMX_U32 kPortIndexInput = 0;
+
GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
const wp<IGraphicBufferConsumer> &consumer,
const wp<ConsumerListener>& consumerListener) :
@@ -117,6 +123,7 @@
mNodeInstance(nodeInstance),
mExecuting(false),
mSuspended(false),
+ mLastDataSpace(HAL_DATASPACE_UNKNOWN),
mIsPersistent(false),
mConsumer(consumer),
mNumFramesAvailable(0),
@@ -189,6 +196,8 @@
return;
}
+ memset(&mColorAspects, 0, sizeof(mColorAspects));
+
CHECK(mInitCheck == NO_ERROR);
}
@@ -215,6 +224,8 @@
mNumFramesAvailable, mCodecBuffers.size());
CHECK(!mExecuting);
mExecuting = true;
+ mLastDataSpace = HAL_DATASPACE_UNKNOWN;
+ ALOGV("clearing last dataSpace");
// Start by loading up as many buffers as possible. We want to do this,
// rather than just submit the first buffer, to avoid a degenerate case:
@@ -495,6 +506,76 @@
}
}
+void GraphicBufferSource::onDataSpaceChanged_l(
+ android_dataspace dataSpace, android_pixel_format pixelFormat) {
+ ALOGD("got buffer with new dataSpace #%x", dataSpace);
+ mLastDataSpace = dataSpace;
+
+ if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
+ ColorAspects aspects = mColorAspects; // initially requested aspects
+
+ // request color aspects to encode
+ OMX_INDEXTYPE index;
+ status_t err = mNodeInstance->getExtensionIndex(
+ "OMX.google.android.index.describeColorAspects", &index);
+ if (err == OK) {
+ // V0 dataspace
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ params.nDataSpace = mLastDataSpace;
+ params.nPixelFormat = pixelFormat;
+ params.bDataSpaceChanged = OMX_TRUE;
+ params.sAspects = mColorAspects;
+
+ err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ aspects = params.sAspects;
+ ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+ } else {
+ params.sAspects = aspects;
+ err = OK;
+ }
+ params.bDataSpaceChanged = OMX_FALSE;
+ for (int triesLeft = 2; --triesLeft >= 0; ) {
+ status_t err = mNodeInstance->setConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
+ }
+ if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ params.sAspects, aspects)) {
+ // if we can't set or get color aspects, still communicate dataspace to client
+ break;
+ }
+
+ ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+ }
+ }
+
+ ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ err, asString(err));
+
+ // signal client that the dataspace has changed; this will update the output format
+ // TODO: we should tie this to an output buffer somehow, and signal the change
+ // just before the output buffer is returned to the client, but there are many
+ // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
+ mNodeInstance->signalEvent(
+ OMX_EventDataSpaceChanged, dataSpace,
+ (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+ | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+ }
+}
+
bool GraphicBufferSource::fillCodecBuffer_l() {
CHECK(mExecuting && mNumFramesAvailable > 0);
@@ -534,6 +615,12 @@
mBufferSlot[item.mSlot] = item.mGraphicBuffer;
}
+ if (item.mDataSpace != mLastDataSpace) {
+ onDataSpaceChanged_l(
+ item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
+ }
+
err = UNKNOWN_ERROR;
// only submit sample if start time is unspecified, or sample
@@ -925,6 +1012,13 @@
ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
}
+void GraphicBufferSource::setDefaultDataSpace(android_dataspace dataSpace) {
+ // no need for mutex as we are not yet running
+ ALOGD("setting dataspace: %#x", dataSpace);
+ mConsumer->setDefaultBufferDataSpace(dataSpace);
+ mLastDataSpace = dataSpace;
+}
+
status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
int64_t repeatAfterUs) {
Mutex::Autolock autoLock(mMutex);
@@ -974,19 +1068,29 @@
(skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
}
-status_t GraphicBufferSource::setTimeLapseUs(int64_t* data) {
+status_t GraphicBufferSource::setTimeLapseConfig(const TimeLapseConfig &config) {
Mutex::Autolock autoLock(mMutex);
- if (mExecuting || data[0] <= 0ll || data[1] <= 0ll) {
+ if (mExecuting || config.mTimePerFrameUs <= 0ll || config.mTimePerCaptureUs <= 0ll) {
return INVALID_OPERATION;
}
- mTimePerFrameUs = data[0];
- mTimePerCaptureUs = data[1];
+ mTimePerFrameUs = config.mTimePerFrameUs;
+ mTimePerCaptureUs = config.mTimePerCaptureUs;
return OK;
}
+void GraphicBufferSource::setColorAspects(const ColorAspects &aspects) {
+ Mutex::Autolock autoLock(mMutex);
+ mColorAspects = aspects;
+ ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer));
+}
+
void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatRepeatLastFrame:
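
onDataSpaceChanged_l() above packs the resolved color aspects into the second argument of OMX_EventDataSpaceChanged, one byte per field. A standalone sketch of that packing and the matching unpack a receiver would perform (field order taken from the diff):

    #include <cstdint>
    #include <cstdio>

    static uint32_t packAspects(uint32_t range, uint32_t primaries,
                                uint32_t matrix, uint32_t transfer) {
        return (range << 24) | (primaries << 16) | (matrix << 8) | transfer;
    }

    int main() {
        uint32_t arg2 = packAspects(2, 1, 1, 3);
        printf("packed = %#x\n", arg2);  // 0x2010103
        // Receiver side: peel each field back out of its byte.
        printf("range=%u primaries=%u matrix=%u transfer=%u\n",
               (arg2 >> 24) & 0xff, (arg2 >> 16) & 0xff,
               (arg2 >> 8) & 0xff, arg2 & 0xff);
        return 0;
    }
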
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 7150684..c8b0e62 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -23,6 +23,7 @@
#include <utils/RefBase.h>
#include <OMX_Core.h>
+#include <VideoAPI.h>
#include "../include/OMXNodeInstance.h"
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
@@ -73,6 +74,9 @@
return mProducer;
}
+ // Sets the default buffer data space
+ void setDefaultDataSpace(android_dataspace dataSpace);
+
// This is called when OMX transitions to OMX_StateExecuting, which means
// we can start handing it buffers. If we already have buffers of data
// sitting in the BufferQueue, this will send them to the codec.
@@ -130,17 +134,23 @@
// When set, the max frame rate fed to the encoder will be capped at maxFps.
status_t setMaxFps(float maxFps);
+ struct TimeLapseConfig {
+ int64_t mTimePerFrameUs; // the time (us) between two frames for playback
+ int64_t mTimePerCaptureUs; // the time (us) between two frames for capture
+ };
+
// Sets the time lapse (or slow motion) parameters.
- // data[0] is the time (us) between two frames for playback
- // data[1] is the time (us) between two frames for capture
// When set, the sample's timestamp will be modified to playback framerate,
// and capture timestamp will be modified to capture rate.
- status_t setTimeLapseUs(int64_t* data);
+ status_t setTimeLapseConfig(const TimeLapseConfig &config);
// Sets the start time us (in system time), samples before which should
// be dropped and not submitted to encoder
void setSkipFramesBeforeUs(int64_t startTimeUs);
+ // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
+ void setColorAspects(const ColorAspects &aspects);
+
protected:
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
@@ -238,6 +248,9 @@
bool repeatLatestBuffer_l();
int64_t getTimestamp(const BufferItem &item);
+ // called when the data space of the input buffer changes
+ void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
+
// Lock, covers all member variables.
mutable Mutex mMutex;
@@ -252,6 +265,9 @@
bool mSuspended;
+ // Last dataspace seen
+ android_dataspace mLastDataSpace;
+
// Our BufferQueue interfaces. mProducer is passed to the producer through
// getIGraphicBufferProducer, and mConsumer is used internally to retrieve
// the buffers queued by the producer.
@@ -321,6 +337,7 @@
int64_t mPrevFrameUs;
MetadataBufferType mMetadataBufferType;
+ ColorAspects mColorAspects;
void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 970f6f5..759648b 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -484,7 +484,7 @@
}
status_t OMX::createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
OMXNodeInstance *instance = findInstance(node);
@@ -493,7 +493,7 @@
}
return instance->createInputSurface(
- port_index, bufferProducer, type);
+ port_index, dataSpace, bufferProducer, type);
}
status_t OMX::createPersistentInputSurface(
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 82d5ba3..6b7a871 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -933,7 +933,8 @@
}
status_t OMXNodeInstance::createInputSurface(
- OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
+ OMX_U32 portIndex, android_dataspace dataSpace,
+ sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
if (bufferProducer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -946,6 +947,8 @@
return err;
}
+ mGraphicBufferSource->setDefaultDataSpace(dataSpace);
+
*bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
return OK;
}
@@ -988,6 +991,10 @@
return createGraphicBufferSource(portIndex, bufferConsumer, type);
}
+void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
+ mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
+}
+
status_t OMXNodeInstance::signalEndOfInputStream() {
// For non-Surface input, the MediaCodec should convert the call to a
// pair of requests (dequeue input buffer, queue input buffer with EOS
@@ -1359,6 +1366,16 @@
}
}
+template<typename T>
+static bool getInternalOption(
+ const void *data, size_t size, T *out) {
+ if (size != sizeof(T)) {
+ return false;
+ }
+ *out = *(T*)data;
+ return true;
+}
+
status_t OMXNodeInstance::setInternalOption(
OMX_U32 portIndex,
IOMX::InternalOptionType type,
@@ -1373,6 +1390,7 @@
case IOMX::INTERNAL_OPTION_MAX_FPS:
case IOMX::INTERNAL_OPTION_START_TIME:
case IOMX::INTERNAL_OPTION_TIME_LAPSE:
+ case IOMX::INTERNAL_OPTION_COLOR_ASPECTS:
{
const sp<GraphicBufferSource> &bufferSource =
getGraphicBufferSource();
@@ -1383,58 +1401,63 @@
}
if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
- if (size != sizeof(bool)) {
+ bool suspend;
+ if (!getInternalOption(data, size, &suspend)) {
return INVALID_OPERATION;
}
- bool suspend = *(bool *)data;
CLOG_CONFIG(setInternalOption, "suspend=%d", suspend);
bufferSource->suspend(suspend);
- } else if (type ==
- IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY){
- if (size != sizeof(int64_t)) {
+ } else if (type == IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY) {
+ int64_t delayUs;
+ if (!getInternalOption(data, size, &delayUs)) {
return INVALID_OPERATION;
}
- int64_t delayUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "delayUs=%lld", (long long)delayUs);
return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
- } else if (type ==
- IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP){
- if (size != sizeof(int64_t)) {
+ } else if (type == IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP) {
+ int64_t maxGapUs;
+ if (!getInternalOption(data, size, &maxGapUs)) {
return INVALID_OPERATION;
}
- int64_t maxGapUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
return bufferSource->setMaxTimestampGapUs(maxGapUs);
} else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
- if (size != sizeof(float)) {
+ float maxFps;
+ if (!getInternalOption(data, size, &maxFps)) {
return INVALID_OPERATION;
}
- float maxFps = *(float *)data;
CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
return bufferSource->setMaxFps(maxFps);
} else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
- if (size != sizeof(int64_t)) {
+ int64_t skipFramesBeforeUs;
+ if (!getInternalOption(data, size, &skipFramesBeforeUs)) {
return INVALID_OPERATION;
}
- int64_t skipFramesBeforeUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "beforeUs=%lld", (long long)skipFramesBeforeUs);
bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
- } else { // IOMX::INTERNAL_OPTION_TIME_LAPSE
- if (size != sizeof(int64_t) * 2) {
+ } else if (type == IOMX::INTERNAL_OPTION_TIME_LAPSE) {
+ GraphicBufferSource::TimeLapseConfig config;
+ if (!getInternalOption(data, size, &config)) {
return INVALID_OPERATION;
}
- int64_t timePerFrameUs = ((int64_t *)data)[0];
- int64_t timePerCaptureUs = ((int64_t *)data)[1];
CLOG_CONFIG(setInternalOption, "perFrameUs=%lld perCaptureUs=%lld",
- (long long)timePerFrameUs, (long long)timePerCaptureUs);
+ (long long)config.mTimePerFrameUs, (long long)config.mTimePerCaptureUs);
- bufferSource->setTimeLapseUs((int64_t *)data);
+ return bufferSource->setTimeLapseConfig(config);
+ } else if (type == IOMX::INTERNAL_OPTION_COLOR_ASPECTS) {
+ ColorAspects aspects;
+ if (!getInternalOption(data, size, &aspects)) {
+ return INVALID_OPERATION;
+ }
+
+ CLOG_CONFIG(setInternalOption, "setting color aspects");
+ bufferSource->setColorAspects(aspects);
}
return OK;
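
The getInternalOption() template added above collapses the repeated size-check-then-cast pattern. A standalone sketch of the same idea; this variant copies with memcpy instead of dereferencing *(T*)data, which also avoids alignment assumptions about the incoming blob:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    template <typename T>
    static bool getInternalOption(const void *data, size_t size, T *out) {
        if (size != sizeof(T)) {
            return false;  // reject blobs of the wrong size up front
        }
        memcpy(out, data, sizeof(T));
        return true;
    }

    int main() {
        int64_t delayUs = 0;
        const int64_t payload = 33333;
        // matching size: parses, out-param is filled in
        printf("ok=%d delayUs=%lld\n",
               getInternalOption(&payload, sizeof(payload), &delayUs),
               (long long)delayUs);
        // wrong size: rejected, out-param untouched
        printf("ok=%d\n", getInternalOption(&payload, sizeof(int32_t), &delayUs));
        return 0;
    }
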
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 5fcc0fe..27149ed 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -139,18 +139,18 @@
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
- patch->num_sources, patch->num_sinks, *handle);
status_t status = NO_ERROR;
audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+ if (handle == NULL || patch == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
+ patch->num_sources, patch->num_sinks, *handle);
if (audioflinger == 0) {
return NO_INIT;
}
- if (handle == NULL || patch == NULL) {
- return BAD_VALUE;
- }
if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
(patch->num_sinks == 0 && patch->num_sources != 2) ||
patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index fa61af2..6e0c46d 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -110,10 +110,13 @@
// audioHalFrames is derived from output latency
// FIXME parameters not needed, could get them from the thread
bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
+ void signalClientFlag(int32_t flag);
public:
void triggerEvents(AudioSystem::sync_event_t type);
void invalidate();
+ void disable();
+
bool isInvalid() const { return mIsInvalid; }
int fastIndex() const { return mFastIndex; }
@@ -200,6 +203,8 @@
uint32_t waitTimeMs);
void clearBufferQueue();
+ void restartIfDisabled();
+
// Maximum number of pending buffers allocated by OutputTrack::write()
static const uint8_t kMaxOverFlowBuffers = 10;
@@ -224,6 +229,10 @@
IAudioFlinger::track_flags_t flags);
virtual ~PatchTrack();
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
+
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
@@ -236,6 +245,8 @@
void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
private:
+ void restartIfDisabled();
+
sp<ClientProxy> mProxy;
PatchProxyBufferProvider* mPeerProxy;
struct timespec mPeerTimeout;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 96c5f59..f206e96 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1113,7 +1113,7 @@
status_t status;
status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
true /* FIXME force oneway contrary to .aidl */);
- ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
+ ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
}
}
@@ -3961,7 +3961,7 @@
}
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &track->mCblk->mFlags);
+ track->disable();
// remove from active list, but state remains ACTIVE [confusing but true]
isActive = false;
break;
@@ -4322,7 +4322,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
// If one track is not ready, mark the mixer also not ready if:
// - the mixer was ready during previous round OR
// - no other track is ready
@@ -4869,7 +4869,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
} else if (last) {
ALOGW("pause because of UNDERRUN, framesReady = %zu,"
"minFrames = %u, mFormat = %#x",
@@ -5423,7 +5423,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
} else if (last){
mixerStatus = MIXER_TRACKS_ENABLED;
}
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 8b49062..0c51e81 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1027,13 +1027,23 @@
void AudioFlinger::PlaybackThread::Track::invalidate()
{
+ signalClientFlag(CBLK_INVALID);
+ mIsInvalid = true;
+}
+
+void AudioFlinger::PlaybackThread::Track::disable()
+{
+ signalClientFlag(CBLK_DISABLED);
+}
+
+void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
+{
// FIXME should use proxy, and needs work
audio_track_cblk_t* cblk = mCblk;
- android_atomic_or(CBLK_INVALID, &cblk->mFlags);
+ android_atomic_or(flag, &cblk->mFlags);
android_atomic_release_store(0x40000000, &cblk->mFutex);
// client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
(void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
- mIsInvalid = true;
}
void AudioFlinger::PlaybackThread::Track::signal()
@@ -1199,7 +1209,7 @@
mOutBuffer.frameCount = pInBuffer->frameCount;
nsecs_t startTime = systemTime();
status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
- if (status != NO_ERROR) {
+ if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
mThread.unsafe_get(), status);
outputBufferFull = true;
@@ -1211,6 +1221,10 @@
} else {
waitTimeLeftMs = 0;
}
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ continue;
+ }
}
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
@@ -1220,6 +1234,7 @@
buf.mFrameCount = outFrames;
buf.mRaw = NULL;
mClientProxy->releaseBuffer(&buf);
+ restartIfDisabled();
pInBuffer->frameCount -= outFrames;
pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
mOutBuffer.frameCount -= outFrames;
@@ -1293,6 +1308,13 @@
mBufferQueue.clear();
}
+void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
+{
+ int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ if (mActive && (flags & CBLK_DISABLED)) {
+ start();
+ }
+}
AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
audio_stream_type_t streamType,
@@ -1322,6 +1344,17 @@
{
}
+status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
+ int triggerSession)
+{
+ status_t status = Track::start(event, triggerSession);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ return status;
+}
+
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
AudioBufferProvider::Buffer* buffer)
@@ -1352,17 +1385,31 @@
status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
const struct timespec *timeOut)
{
- return mProxy->obtainBuffer(buffer, timeOut);
+ status_t status = NO_ERROR;
+ static const int32_t kMaxTries = 5;
+ int32_t tryCounter = kMaxTries;
+ do {
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ }
+ status = mProxy->obtainBuffer(buffer, timeOut);
+ } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
+ return status;
}
void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
{
mProxy->releaseBuffer(buffer);
+ restartIfDisabled();
+ android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
+{
if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
start();
}
- android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
}
// ----------------------------------------------------------------------------
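
Both restartIfDisabled() implementations above lean on android_atomic_and() returning the previous flag word, so only the caller that actually cleared CBLK_DISABLED performs the restart, even under contention. The same clear-and-test idiom with std::atomic (the bit value is a stand-in; the real one lives in the cblk header):

    #include <atomic>
    #include <cstdio>

    constexpr int32_t kCblkDisabled = 1 << 3;  // stand-in bit value

    static std::atomic<int32_t> flags{kCblkDisabled};

    static void restartIfDisabled() {
        // fetch_and returns the previous flags, so exactly one caller
        // observes the DISABLED bit set and performs the restart.
        int32_t prev = flags.fetch_and(~kCblkDisabled);
        if (prev & kCblkDisabled) {
            printf("was disabled -> restart track\n");
        } else {
            printf("already enabled -> nothing to do\n");
        }
    }

    int main() {
        restartIfDisabled();  // first call clears the bit and restarts
        restartIfDisabled();  // second call sees it already cleared
        return 0;
    }
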
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 787f53f..cf7c8fc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -50,15 +50,11 @@
{
// Devices are considered equal if they:
// - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
- // - have the same address or one device does not specify the address
- // - have the same channel mask or one device does not specify the channel mask
+ // - have the same address
if (other == 0) {
return false;
}
- return (mDeviceType == other->mDeviceType) &&
- (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
- (mChannelMask == 0 || other->mChannelMask == 0 ||
- mChannelMask == other->mChannelMask);
+ return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
}
void DeviceVector::refreshTypes()
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index dd2a60a..b7c7879 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -310,12 +310,6 @@
if (!deviceList.isEmpty()) {
return deviceList.itemAt(0);
}
- deviceList = hwModule->getDeclaredDevices().getDevicesFromType(device);
- if (!deviceList.isEmpty()) {
- deviceList.itemAt(0)->setName(String8(device_name));
- deviceList.itemAt(0)->mAddress = address;
- return deviceList.itemAt(0);
- }
}
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 21107a1..c3b1529 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -459,10 +459,7 @@
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, false, true);
}
@@ -538,10 +535,7 @@
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, true, true);
}
@@ -1796,10 +1790,8 @@
mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
// initialize other private stream volumes which follow this one
- routing_strategy strategy = getStrategy(stream);
- for (int curStream = 0; curStream < AUDIO_STREAM_CNT; curStream++) {
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- if (!strategiesMatchForvolume(strategy, curStrategy)) {
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
@@ -1832,10 +1824,8 @@
}
// update other private stream volumes which follow this one
- routing_strategy strategy = getStrategy(stream);
- for (int curStream = 0; curStream < AUDIO_STREAM_CNT; curStream++) {
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- if (!strategiesMatchForvolume(strategy, curStrategy)) {
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
@@ -1847,11 +1837,11 @@
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
- for (int curStream = 0; curStream < AUDIO_STREAM_CNT; curStream++) {
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- if (!strategiesMatchForvolume(strategy, curStrategy)) {
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
+ routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, true /*fromCache*/);
// it is possible that the requested device is not selected by the strategy
// (e.g an explicit audio patch is active causing getDevicesForStream()
@@ -1970,15 +1960,12 @@
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
bool active = false;
- routing_strategy strategy = getStrategy(stream);
- for (int curStream = 0; curStream < AUDIO_STREAM_CNT && !active; curStream++) {
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- if (!strategiesMatchForvolume(strategy, curStrategy)) {
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
}
-
return active;
}
@@ -2734,10 +2721,7 @@
// invalidate all tracks in this strategy to force re connection.
// Otherwise select new device on the output mix.
if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
if (getStrategy((audio_stream_type_t)stream) == strategy) {
mpClientInterface->invalidateStream((audio_stream_type_t)stream);
}
@@ -4097,10 +4081,7 @@
}
}
// Move tracks associated to this strategy from previous output to new output
- for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
if (getStrategy((audio_stream_type_t)i) == strategy) {
mpClientInterface->invalidateStream((audio_stream_type_t)i);
}
@@ -4251,11 +4232,11 @@
return device;
}
-bool AudioPolicyManager::strategiesMatchForvolume(routing_strategy strategy1,
- routing_strategy strategy2) {
- return ((strategy1 == strategy2) ||
- ((strategy1 == STRATEGY_ACCESSIBILITY) && (strategy2 == STRATEGY_MEDIA)) ||
- ((strategy1 == STRATEGY_MEDIA) && (strategy2 == STRATEGY_ACCESSIBILITY)));
+bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
+ audio_stream_type_t stream2) {
+ return ((stream1 == stream2) ||
+ ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
+ ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
}
uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
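streamsMatchForvolume() defines the "shares volume state" relation directly on stream types, which is what lets every loop in this file iterate streams rather than strategies. A standalone sketch of the relation and a check of the symmetry the loops rely on; the enum values are illustrative:

    #include <cassert>

    // Illustrative subset of audio_stream_type_t; real values live in system/audio.h.
    enum audio_stream_type_t {
        AUDIO_STREAM_MUSIC = 3,
        AUDIO_STREAM_ACCESSIBILITY = 10,
    };

    // A stream matches itself, and ACCESSIBILITY pairs with MUSIC so that
    // accessibility prompts follow the media volume.
    static bool streamsMatchForvolume(audio_stream_type_t s1, audio_stream_type_t s2) {
        return (s1 == s2) ||
               ((s1 == AUDIO_STREAM_ACCESSIBILITY) && (s2 == AUDIO_STREAM_MUSIC)) ||
               ((s1 == AUDIO_STREAM_MUSIC) && (s2 == AUDIO_STREAM_ACCESSIBILITY));
    }

    int main() {
        // The relation is reflexive and symmetric.
        assert(streamsMatchForvolume(AUDIO_STREAM_MUSIC, AUDIO_STREAM_MUSIC));
        assert(streamsMatchForvolume(AUDIO_STREAM_MUSIC, AUDIO_STREAM_ACCESSIBILITY));
        assert(streamsMatchForvolume(AUDIO_STREAM_ACCESSIBILITY, AUDIO_STREAM_MUSIC));
        return 0;
    }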
@@ -4270,17 +4251,17 @@
return AUDIO_DEVICE_NONE;
}
audio_devices_t devices = AUDIO_DEVICE_NONE;
- routing_strategy strategy = getStrategy(stream);
- for (int curStrategy = 0; curStrategy < NUM_STRATEGIES; curStrategy++) {
- if (!strategiesMatchForvolume(strategy, (routing_strategy)curStrategy)) {
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
+ routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curDevices =
getDeviceForStrategy((routing_strategy)curStrategy, true /*fromCache*/);
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
- if (isStrategyActive(outputDesc, (routing_strategy)curStrategy)) {
+ if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
curDevices |= outputDesc->device();
}
}
@@ -4897,10 +4878,7 @@
{
ALOGVV("applyStreamVolumes() for device %08x", device);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
checkAndSetVolume((audio_stream_type_t)stream,
mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
outputDesc,
@@ -4918,10 +4896,7 @@
{
ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
strategy, on, outputDesc->getId());
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
if (getStrategy((audio_stream_type_t)stream) == strategy) {
setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
}
@@ -5101,10 +5076,7 @@
if ((sysTime == 0) && (inPastMs != 0)) {
sysTime = systemTime();
}
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int i = 0; i < (int)AUDIO_STREAM_FOR_POLICY_CNT; i++) {
if (((getStrategy((audio_stream_type_t)i) == strategy) ||
(NUM_STRATEGIES == strategy)) &&
outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
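Every hunk in this file replaces the AUDIO_STREAM_CNT loop plus an explicit AUDIO_STREAM_PATCH skip with the tighter AUDIO_STREAM_FOR_POLICY_CNT bound, so the internal stream falls outside the loop range instead of being special-cased. A sketch of the idiom, with an illustrative enum layout mirroring the spirit of system/audio.h:

    #include <cstdio>

    // Illustrative layout: AUDIO_STREAM_PATCH is the only stream past the
    // policy cutoff, so the bound replaces the explicit `continue`.
    enum audio_stream_type_t {
        AUDIO_STREAM_VOICE_CALL = 0,
        AUDIO_STREAM_MUSIC,
        AUDIO_STREAM_REROUTING,
        AUDIO_STREAM_PATCH,
        AUDIO_STREAM_CNT,
        AUDIO_STREAM_FOR_POLICY_CNT = AUDIO_STREAM_PATCH,
    };

    int main() {
        // Old shape: loop to AUDIO_STREAM_CNT and `continue` on AUDIO_STREAM_PATCH.
        // New shape: the internal stream simply falls outside the loop range.
        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
            std::printf("apply policy to stream %d\n", stream);
        }
        return 0;
    }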
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d6e48ab..1ef896f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -506,8 +506,8 @@
void clearAudioSources(uid_t uid);
- static bool strategiesMatchForvolume(routing_strategy strategy1,
- routing_strategy strategy2);
+ static bool streamsMatchForvolume(audio_stream_type_t stream1,
+ audio_stream_type_t stream2);
uid_t mUidCached;
AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 6490682..4d12015 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -53,7 +53,13 @@
// Check if lens is fixed-focus
if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ } else {
+ m3aState.afMode = ANDROID_CONTROL_AF_MODE_AUTO;
}
+ m3aState.awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ m3aState.aeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ m3aState.afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ m3aState.awbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
}
}
@@ -253,80 +259,99 @@
if (frameNumber <= mLast3AFrameNumber) {
ALOGV("%s: Already sent 3A for frame number %d, skipping",
__FUNCTION__, frameNumber);
+
+ // Remove any pending entry for this frame number from mPending3AStates.
+ mPending3AStates.removeItem(frameNumber);
return OK;
}
- mLast3AFrameNumber = frameNumber;
+ AlgState pendingState;
- // Get 3A states from result metadata
+ ssize_t index = mPending3AStates.indexOfKey(frameNumber);
+ if (index != NAME_NOT_FOUND) {
+ pendingState = mPending3AStates.valueAt(index);
+ }
+
+ // Update 3A states from the result.
bool gotAllStates = true;
- AlgState new3aState;
-
// TODO: Also use AE mode, AE trigger ID
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+ &pendingState.afMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
- &new3aState.afMode, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+ &pendingState.awbMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
- &new3aState.awbMode, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+ &pendingState.aeState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
- &new3aState.aeState, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+ &pendingState.afState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
- &new3aState.afState, frameNumber, cameraId);
-
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
- &new3aState.awbState, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+ &pendingState.awbState, frameNumber, cameraId);
if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- new3aState.afTriggerId = frame.mResultExtras.afTriggerId;
- new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+ pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
+ pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
} else {
- gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID,
- &new3aState.afTriggerId, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<int32_t>(metadata,
+ ANDROID_CONTROL_AF_TRIGGER_ID, &pendingState.afTriggerId, frameNumber, cameraId);
- gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &new3aState.aeTriggerId, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<int32_t>(metadata,
+ ANDROID_CONTROL_AE_PRECAPTURE_ID, &pendingState.aeTriggerId, frameNumber, cameraId);
}
- if (!gotAllStates) return BAD_VALUE;
+ if (!gotAllStates) {
+ // If not all states have been received yet, save the pending state in mPending3AStates.
+ if (index == NAME_NOT_FOUND) {
+ mPending3AStates.add(frameNumber, pendingState);
+ } else {
+ mPending3AStates.replaceValueAt(index, pendingState);
+ }
+ return NOT_ENOUGH_DATA;
+ }
- if (new3aState.aeState != m3aState.aeState) {
+ // Once all 3A states are received, notify the client about 3A changes.
+ if (pendingState.aeState != m3aState.aeState) {
ALOGV("%s: Camera %d: AE state %d->%d",
__FUNCTION__, cameraId,
- m3aState.aeState, new3aState.aeState);
- client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
+ m3aState.aeState, pendingState.aeState);
+ client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
}
- if (new3aState.afState != m3aState.afState ||
- new3aState.afMode != m3aState.afMode ||
- new3aState.afTriggerId != m3aState.afTriggerId) {
+ if (pendingState.afState != m3aState.afState ||
+ pendingState.afMode != m3aState.afMode ||
+ pendingState.afTriggerId != m3aState.afTriggerId) {
ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
__FUNCTION__, cameraId,
- m3aState.afState, new3aState.afState,
- m3aState.afMode, new3aState.afMode,
- m3aState.afTriggerId, new3aState.afTriggerId);
- client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
+ m3aState.afState, pendingState.afState,
+ m3aState.afMode, pendingState.afMode,
+ m3aState.afTriggerId, pendingState.afTriggerId);
+ client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
}
- if (new3aState.awbState != m3aState.awbState ||
- new3aState.awbMode != m3aState.awbMode) {
+ if (pendingState.awbState != m3aState.awbState ||
+ pendingState.awbMode != m3aState.awbMode) {
ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
__FUNCTION__, cameraId,
- m3aState.awbState, new3aState.awbState,
- m3aState.awbMode, new3aState.awbMode);
- client->notifyAutoWhitebalance(new3aState.awbState,
- new3aState.aeTriggerId);
+ m3aState.awbState, pendingState.awbState,
+ m3aState.awbMode, pendingState.awbMode);
+ client->notifyAutoWhitebalance(pendingState.awbState,
+ pendingState.aeTriggerId);
}
- m3aState = new3aState;
+ if (index != NAME_NOT_FOUND) {
+ mPending3AStates.removeItemsAt(index);
+ }
+
+ m3aState = pendingState;
+ mLast3AFrameNumber = frameNumber;
return OK;
}
template<typename Src, typename T>
-bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+bool FrameProcessor::updatePendingState(const CameraMetadata& result, int32_t tag,
T* value, int32_t frameNumber, int cameraId) {
camera_metadata_ro_entry_t entry;
if (value == NULL) {
@@ -335,9 +360,14 @@
return false;
}
+ // Already got the value for this tag.
+ if (*value != static_cast<T>(NOT_SET)) {
+ return true;
+ }
+
entry = result.find(tag);
if (entry.count == 0) {
- ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+ ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
__FUNCTION__, cameraId,
get_camera_metadata_tag_name(tag), frameNumber);
return false;
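The FrameProcessor.cpp changes above stop demanding every 3A field in a single result: each partial result merges its fields into a pending per-frame entry keyed by frame number, NOT_SET marks fields still missing, and the client is notified only once the entry is complete. A condensed sketch of that accumulate-until-complete pattern over std::map; the names are hypothetical stand-ins:

    #include <cstdio>
    #include <map>

    static const int NOT_SET = -1;

    struct AlgState {
        int afMode  = NOT_SET;
        int aeState = NOT_SET;
        bool complete() const { return afMode != NOT_SET && aeState != NOT_SET; }
    };

    std::map<int, AlgState> pending;  // frame number -> partially filled 3A state

    // Merge one partial result; returns true once the frame's state is complete.
    bool process(int frame, const int* afMode, const int* aeState) {
        AlgState& s = pending[frame];
        if (s.afMode == NOT_SET && afMode) s.afMode = *afMode;     // keep first value seen
        if (s.aeState == NOT_SET && aeState) s.aeState = *aeState;
        if (!s.complete()) return false;  // NOT_ENOUGH_DATA in the patch
        std::printf("frame %d: notify af=%d ae=%d\n", frame, s.afMode, s.aeState);
        pending.erase(frame);             // done with this frame's entry
        return true;
    }

    int main() {
        int af = 1, ae = 2;
        process(100, &af, nullptr);  // first partial: AF only, nothing sent
        process(100, nullptr, &ae);  // second partial completes the frame
        return 0;
    }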
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 68cf55b..a5b81a7 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -43,6 +43,8 @@
~FrameProcessor();
private:
+ static const int32_t NOT_SET = -1;
+
wp<Camera2Client> mClient;
bool mSynthesize3ANotify;
@@ -63,7 +65,7 @@
// Helper for process3aState
template<typename Src, typename T>
- bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+ bool updatePendingState(const CameraMetadata& result, int32_t tag, T* value,
int32_t frameNumber, int cameraId);
@@ -81,15 +83,20 @@
// These defaults need to match those in Parameters.cpp
AlgState() :
- afMode(ANDROID_CONTROL_AF_MODE_AUTO),
- awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
- aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
- afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
- awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
- afTriggerId(0),
- aeTriggerId(0) {
+ afMode((camera_metadata_enum_android_control_af_mode)NOT_SET),
+ awbMode((camera_metadata_enum_android_control_awb_mode)NOT_SET),
+ aeState((camera_metadata_enum_android_control_ae_state)NOT_SET),
+ afState((camera_metadata_enum_android_control_af_state)NOT_SET),
+ awbState((camera_metadata_enum_android_control_awb_state)NOT_SET),
+ afTriggerId(NOT_SET),
+ aeTriggerId(NOT_SET) {
}
- } m3aState;
+ };
+
+ AlgState m3aState;
+
+ // Frame number -> pending 3A states for which not all data have been received yet.
+ KeyedVector<int32_t, AlgState> mPending3AStates;
// Whether the partial result is enabled for this device
bool mUsePartialResult;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 0692e5a..316cfda 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -710,6 +710,11 @@
request != NULL) {
request->swap(metadata);
+ } else if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Template ID %d is invalid or not supported: %s (%d)",
+ mCameraId, templateId, strerror(-err), err);
+
} else {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
"Camera %d: Error creating default request for template %d: %s (%d)",
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index b44cbd2..05c5323 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1239,6 +1239,13 @@
CameraMetadata *request) {
ATRACE_CALL();
ALOGV("%s: for template %d", __FUNCTION__, templateId);
+
+ if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) {
+ android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26866110",
+ IPCThreadState::self()->getCallingUid(), nullptr, 0);
+ return BAD_VALUE;
+ }
+
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
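The guard added to createDefaultRequest() rejects out-of-range template IDs from binder callers before they reach the HAL, and records the rejection against bug 26866110 with the calling UID. A minimal sketch of the validate-then-log shape, with fprintf standing in for android_errorWriteWithInfoLog and illustrative bounds:

    #include <cstdio>

    // Illustrative bounds; the real ones come from the camera3 HAL header.
    enum { CAMERA3_TEMPLATE_PREVIEW = 1, CAMERA3_TEMPLATE_COUNT = 7 };
    enum status_t { OK = 0, BAD_VALUE = -22 };

    // Range-check a binder-supplied template ID before it reaches the HAL.
    status_t createDefaultRequest(int templateId, int callingUid) {
        if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) {
            // The patch calls android_errorWriteWithInfoLog(..., "26866110", uid, ...)
            // here so the rejection is visible in the event log.
            std::fprintf(stderr, "rejecting template %d from uid %d\n",
                         templateId, callingUid);
            return BAD_VALUE;
        }
        return OK;  // safe to forward to the HAL
    }

    int main() {
        return (createDefaultRequest(0, 10006) == BAD_VALUE &&
                createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW, 10006) == OK) ? 0 : 1;
    }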
@@ -2051,176 +2058,6 @@
return OK;
}
-/**
- * Check if all 3A fields are ready, and send off a partial 3A-only result
- * to the output frame queue
- */
-bool Camera3Device::processPartial3AResult(
- uint32_t frameNumber,
- const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
-
- // Check if all 3A states are present
- // The full list of fields is
- // android.control.afMode
- // android.control.awbMode
- // android.control.aeState
- // android.control.awbState
- // android.control.afState
- // android.control.afTriggerID
- // android.control.aePrecaptureID
- // TODO: Add android.control.aeMode
-
- bool gotAllStates = true;
-
- uint8_t afMode;
- uint8_t awbMode;
- uint8_t aeState;
- uint8_t afState;
- uint8_t awbState;
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
- &afMode, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
- &awbMode, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
- &aeState, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
- &afState, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
- &awbState, frameNumber);
-
- if (!gotAllStates) return false;
-
- ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
- "AF state %d, AE state %d, AWB state %d, "
- "AF trigger %d, AE precapture trigger %d",
- __FUNCTION__, mId, frameNumber, resultExtras.requestId,
- afMode, awbMode,
- afState, aeState, awbState,
- resultExtras.afTriggerId, resultExtras.precaptureTriggerId);
-
- // Got all states, so construct a minimal result to send
- // In addition to the above fields, this means adding in
- // android.request.frameCount
- // android.request.requestId
- // android.quirks.partialResult (for HAL version below HAL3.2)
-
- const size_t kMinimal3AResultEntries = 10;
-
- Mutex::Autolock l(mOutputLock);
-
- CaptureResult captureResult;
- captureResult.mResultExtras = resultExtras;
- captureResult.mMetadata = CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0);
- // TODO: change this to sp<CaptureResult>. This will need other changes, including,
- // but not limited to CameraDeviceBase::getNextResult
- CaptureResult& min3AResult =
- *mResultQueue.insert(mResultQueue.end(), captureResult);
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_FRAME_COUNT,
- // TODO: This is problematic casting. Need to fix CameraMetadata.
- reinterpret_cast<int32_t*>(&frameNumber), frameNumber)) {
- return false;
- }
-
- int32_t requestId = resultExtras.requestId;
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_ID,
- &requestId, frameNumber)) {
- return false;
- }
-
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
- &partialResult, frameNumber)) {
- return false;
- }
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
- &afMode, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_MODE,
- &awbMode, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_STATE,
- &aeState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_STATE,
- &afState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_STATE,
- &awbState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID,
- &resultExtras.afTriggerId, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &resultExtras.precaptureTriggerId, frameNumber)) {
- return false;
- }
-
- // We only send the aggregated partial when all 3A related metadata are available
- // For both API1 and API2.
- // TODO: we probably should pass through all partials to API2 unconditionally.
- mResultSignal.signal();
-
- return true;
-}
-
-template<typename T>
-bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
- T* value, uint32_t frameNumber) {
- (void) frameNumber;
-
- camera_metadata_ro_entry_t entry;
-
- entry = result.find(tag);
- if (entry.count == 0) {
- ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
- mId, frameNumber, get_camera_metadata_tag_name(tag));
- return false;
- }
-
- if (sizeof(T) == sizeof(uint8_t)) {
- *value = entry.data.u8[0];
- } else if (sizeof(T) == sizeof(int32_t)) {
- *value = entry.data.i32[0];
- } else {
- ALOGE("%s: Unexpected type", __FUNCTION__);
- return false;
- }
- return true;
-}
-
-template<typename T>
-bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
- const T* value, uint32_t frameNumber) {
- if (result.update(tag, value, 1) != NO_ERROR) {
- mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
- SET_ERR("Frame %d: Failed to set %s in partial metadata",
- frameNumber, get_camera_metadata_tag_name(tag));
- return false;
- }
- return true;
-}
-
void Camera3Device::returnOutputBuffers(
const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
nsecs_t timestamp) {
@@ -2288,6 +2125,48 @@
}
}
+void Camera3Device::insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ if (result == nullptr) return;
+
+ if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
+ (int32_t*)&frameNumber, 1) != OK) {
+ SET_ERR("Failed to set frame number %d in metadata", frameNumber);
+ return;
+ }
+
+ if (result->mMetadata.update(ANDROID_REQUEST_ID, &result->mResultExtras.requestId, 1) != OK) {
+ SET_ERR("Failed to set request ID in metadata for frame %d", frameNumber);
+ return;
+ }
+
+ overrideResultForPrecaptureCancel(&result->mMetadata, aeTriggerCancelOverride);
+
+ // Valid result, insert into queue
+ List<CaptureResult>::iterator queuedResult =
+ mResultQueue.insert(mResultQueue.end(), CaptureResult(*result));
+ ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
+ ", burstId = %" PRId32, __FUNCTION__,
+ queuedResult->mResultExtras.requestId,
+ queuedResult->mResultExtras.frameNumber,
+ queuedResult->mResultExtras.burstId);
+
+ mResultSignal.signal();
+}
+
+
+void Camera3Device::sendPartialCaptureResult(const camera_metadata_t * partialResult,
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ Mutex::Autolock l(mOutputLock);
+
+ CaptureResult captureResult;
+ captureResult.mResultExtras = resultExtras;
+ captureResult.mMetadata = partialResult;
+
+ insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
+}
+
void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
@@ -2323,16 +2202,6 @@
captureResult.mResultExtras = resultExtras;
captureResult.mMetadata = pendingMetadata;
- if (captureResult.mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
- (int32_t*)&frameNumber, 1) != OK) {
- SET_ERR("Failed to set frame# in metadata (%d)",
- frameNumber);
- return;
- } else {
- ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
- __FUNCTION__, mId, frameNumber);
- }
-
// Append any previous partials to form a complete result
if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
captureResult.mMetadata.append(collectedPartialResult);
@@ -2341,26 +2210,14 @@
captureResult.mMetadata.sort();
// Check that there's a timestamp in the result metadata
- camera_metadata_entry entry =
- captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
+ camera_metadata_entry entry = captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
if (entry.count == 0) {
SET_ERR("No timestamp provided by HAL for frame %d!",
frameNumber);
return;
}
- overrideResultForPrecaptureCancel(&captureResult.mMetadata, aeTriggerCancelOverride);
-
- // Valid result, insert into queue
- List<CaptureResult>::iterator queuedResult =
- mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult));
- ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
- ", burstId = %" PRId32, __FUNCTION__,
- queuedResult->mResultExtras.requestId,
- queuedResult->mResultExtras.frameNumber,
- queuedResult->mResultExtras.burstId);
-
- mResultSignal.signal();
+ insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
}
/**
@@ -2437,7 +2294,7 @@
}
isPartialResult = (result->partial_result < mNumPartialResults);
if (isPartialResult) {
- request.partialResult.collectedResult.append(result->result);
+ request.collectedPartialResult.append(result->result);
}
} else {
camera_metadata_ro_entry_t partialResultEntry;
@@ -2450,21 +2307,17 @@
// A partial result. Flag this as such, and collect this
// set of metadata into the in-flight entry.
isPartialResult = true;
- request.partialResult.collectedResult.append(
+ request.collectedPartialResult.append(
result->result);
- request.partialResult.collectedResult.erase(
+ request.collectedPartialResult.erase(
ANDROID_QUIRKS_PARTIAL_RESULT);
}
}
if (isPartialResult) {
- // Fire off a 3A-only result if possible
- if (!request.partialResult.haveSent3A) {
- request.partialResult.haveSent3A =
- processPartial3AResult(frameNumber,
- request.partialResult.collectedResult,
- request.resultExtras);
- }
+ // Send partial capture result
+ sendPartialCaptureResult(result->result, request.resultExtras, frameNumber,
+ request.aeTriggerCancelOverride);
}
}
@@ -2479,9 +2332,9 @@
return;
}
if (mUsePartialResult &&
- !request.partialResult.collectedResult.isEmpty()) {
+ !request.collectedPartialResult.isEmpty()) {
collectedPartialResult.acquire(
- request.partialResult.collectedResult);
+ request.collectedPartialResult);
}
request.haveResultMetadata = true;
}
@@ -2524,7 +2377,7 @@
if (result->result != NULL && !isPartialResult) {
if (shutterTimestamp == 0) {
request.pendingMetadata = result->result;
- request.partialResult.collectedResult = collectedPartialResult;
+ request.collectedPartialResult = collectedPartialResult;
} else {
CameraMetadata metadata;
metadata = result->result;
@@ -2642,6 +2495,7 @@
resultExtras.frameNumber);
}
}
+ resultExtras.errorStreamId = streamId;
if (listener != NULL) {
listener->notifyError(errorCode, resultExtras);
} else {
@@ -2702,7 +2556,7 @@
// send pending result and buffers
sendCaptureResult(r.pendingMetadata, r.resultExtras,
- r.partialResult.collectedResult, msg.frame_number,
+ r.collectedPartialResult, msg.frame_number,
r.hasInputBuffer, r.aeTriggerCancelOverride);
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
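Within Camera3Device.cpp, the queue-insertion boilerplate (frame-number and request-ID stamping, the AE-cancel override, and the signal) now lives once in insertResultLocked(), with sendPartialCaptureResult() and sendCaptureResult() both funneling through it under mOutputLock. A simplified sketch of that funnel; the types below are stand-ins, not the real CaptureResult:

    #include <condition_variable>
    #include <cstdint>
    #include <list>
    #include <mutex>

    // Simplified stand-in for the real CaptureResult.
    struct CaptureResult {
        uint32_t frameNumber;
        bool partial;
    };

    std::mutex outputLock;                  // plays the role of mOutputLock
    std::list<CaptureResult> resultQueue;   // plays the role of mResultQueue
    std::condition_variable resultSignal;   // plays the role of mResultSignal

    // Single insertion point; the _Locked suffix documents that callers
    // must already hold outputLock.
    void insertResultLocked(CaptureResult result, uint32_t frameNumber) {
        result.frameNumber = frameNumber;   // stamp shared metadata exactly once
        resultQueue.push_back(result);
        resultSignal.notify_one();          // wake the consumer waiting for results
    }

    void sendPartialCaptureResult(uint32_t frameNumber) {
        std::lock_guard<std::mutex> l(outputLock);
        insertResultLocked(CaptureResult{0, true}, frameNumber);
    }

    void sendCaptureResult(uint32_t frameNumber) {
        std::lock_guard<std::mutex> l(outputLock);
        insertResultLocked(CaptureResult{0, false}, frameNumber);
    }

    int main() {
        sendPartialCaptureResult(42);  // partial results now reach the queue too
        sendCaptureResult(42);
        return 0;
    }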
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index bee69ee..5b1c87e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -648,6 +648,10 @@
// receives the shutter event.
CameraMetadata pendingMetadata;
+ // The metadata of the partial results that the framework has received from the
+ // HAL so far and has already sent out.
+ CameraMetadata collectedPartialResult;
+
// Buffers are added by process_capture_result when output buffers
// return from HAL but framework has not yet received the shutter
// event. They will be returned to the streams when framework receives
@@ -658,19 +662,6 @@
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
AeTriggerCancelOverride_t aeTriggerCancelOverride;
-
- // Fields used by the partial result only
- struct PartialResultInFlight {
- // Set by process_capture_result once 3A has been sent to clients
- bool haveSent3A;
- // Result metadata collected so far, when partial results are in use
- CameraMetadata collectedResult;
-
- PartialResultInFlight():
- haveSent3A(false) {
- }
- } partialResult;
-
// Default constructor needed by KeyedVector
InFlightRequest() :
shutterTimestamp(0),
@@ -706,23 +697,6 @@
const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
/**
- * For the partial result, check if all 3A state fields are available
- * and if so, queue up 3A-only result to the client. Returns true if 3A
- * is sent.
- */
- bool processPartial3AResult(uint32_t frameNumber,
- const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
-
- // Helpers for reading and writing 3A metadata into to/from partial results
- template<typename T>
- bool get3AResult(const CameraMetadata& result, int32_t tag,
- T* value, uint32_t frameNumber);
-
- template<typename T>
- bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
- uint32_t frameNumber);
-
- /**
* Override result metadata for cancelling AE precapture trigger applied in
* handleAePrecaptureCancelRequest().
*/
@@ -820,13 +794,24 @@
void returnOutputBuffers(const camera3_stream_buffer_t *outputBuffers,
size_t numBuffers, nsecs_t timestamp);
- // Insert the capture result given the pending metadata, result extras,
+ // Send a partial capture result.
+ void sendPartialCaptureResult(const camera_metadata_t * partialResult,
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
+ // Send a total capture result given the pending metadata and result extras,
// partial results, and the frame number to the result queue.
void sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult, uint32_t frameNumber,
bool reprocess, const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ // Insert the result into the result queue after updating the frame number and
+ // overriding the AE trigger cancellation.
+ // mOutputLock must be held when calling this function.
+ void insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
/**** Scope for mInFlightLock ****/
// Remove the in-flight request of the given index from mInFlightMap
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 98a71bb..f85aa13 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -26,6 +26,8 @@
namespace android {
+static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+
void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
{
if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
@@ -54,6 +56,19 @@
}
}
+bool MediaLogService::dumpTryLock(Mutex& mutex)
+{
+ bool locked = false;
+ for (int i = 0; i < kDumpLockRetries; ++i) {
+ if (mutex.tryLock() == NO_ERROR) {
+ locked = true;
+ break;
+ }
+ usleep(kDumpLockSleepUs);
+ }
+ return locked;
+}
+
status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
{
// FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
@@ -68,9 +83,22 @@
Vector<NamedReader> namedReaders;
{
- Mutex::Autolock _l(mLock);
+ bool locked = dumpTryLock(mLock);
+
+ // failed to lock - MediaLogService is probably deadlocked
+ if (!locked) {
+ String8 result(kDeadlockedString);
+ if (fd >= 0) {
+ write(fd, result.string(), result.size());
+ } else {
+ ALOGW("%s:", result.string());
+ }
+ return NO_ERROR;
+ }
namedReaders = mNamedReaders;
+ mLock.unlock();
}
+
for (size_t i = 0; i < namedReaders.size(); i++) {
const NamedReader& namedReader = namedReaders[i];
if (fd >= 0) {
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index 2d89a41..c9bf2eb 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -43,6 +43,12 @@
uint32_t flags);
private:
+
+ // Internal dump
+ static const int kDumpLockRetries = 50;
+ static const int kDumpLockSleepUs = 20000;
+ static bool dumpTryLock(Mutex& mutex);
+
Mutex mLock;
class NamedReader {
public:
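dumpTryLock() bounds the dump path's wait to kDumpLockRetries * kDumpLockSleepUs (50 x 20 ms = 1 s with the constants above), so dumpsys prints the deadlock banner rather than hanging on a wedged service. A standalone sketch of the same retry loop over std::mutex::try_lock:

    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static const int kDumpLockRetries = 50;
    static const int kDumpLockSleepUs = 20000;

    // Poll try_lock with a fixed budget instead of blocking indefinitely.
    static bool dumpTryLock(std::mutex& mutex) {
        for (int i = 0; i < kDumpLockRetries; ++i) {
            if (mutex.try_lock()) return true;
            std::this_thread::sleep_for(std::chrono::microseconds(kDumpLockSleepUs));
        }
        return false;
    }

    int main() {
        std::mutex lock;
        if (!dumpTryLock(lock)) {
            std::puts("service may be deadlocked");  // the kDeadlockedString path
            return 0;
        }
        std::puts("dumping state");  // copy what's needed, then release promptly
        lock.unlock();
        return 0;
    }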
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 3d4e0b5..e1235b8 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -43,7 +43,7 @@
return itemsStr;
}
-static bool hasResourceType(String8 type, Vector<MediaResource> resources) {
+static bool hasResourceType(MediaResource::Type type, Vector<MediaResource> resources) {
for (size_t i = 0; i < resources.size(); ++i) {
if (resources[i].mType == type) {
return true;
@@ -52,7 +52,7 @@
return false;
}
-static bool hasResourceType(String8 type, ResourceInfos infos) {
+static bool hasResourceType(MediaResource::Type type, ResourceInfos infos) {
for (size_t i = 0; i < infos.size(); ++i) {
if (hasResourceType(type, infos[i].resources)) {
return true;
@@ -96,8 +96,8 @@
if (binder != NULL) {
sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder);
for (size_t i = 0; i < resources.size(); ++i) {
- service->notifyResourceGranted(pid, String16(resources[i].mType),
- String16(resources[i].mSubType), resources[i].mValue);
+ service->notifyResourceGranted(pid, String16(asString(resources[i].mType)),
+ String16(asString(resources[i].mSubType)), resources[i].mValue);
}
}
}
@@ -275,12 +275,12 @@
const MediaResource *nonSecureCodec = NULL;
const MediaResource *graphicMemory = NULL;
for (size_t i = 0; i < resources.size(); ++i) {
- String8 type = resources[i].mType;
- if (resources[i].mType == kResourceSecureCodec) {
+ MediaResource::Type type = resources[i].mType;
+ if (resources[i].mType == MediaResource::kSecureCodec) {
secureCodec = &resources[i];
- } else if (type == kResourceNonSecureCodec) {
+ } else if (type == MediaResource::kNonSecureCodec) {
nonSecureCodec = &resources[i];
- } else if (type == kResourceGraphicMemory) {
+ } else if (type == MediaResource::kGraphicMemory) {
graphicMemory = &resources[i];
}
}
@@ -288,19 +288,19 @@
// first pass to handle secure/non-secure codec conflict
if (secureCodec != NULL) {
if (!mSupportsMultipleSecureCodecs) {
- if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
return false;
}
}
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, String8(kResourceNonSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kNonSecureCodec, &clients)) {
return false;
}
}
}
if (nonSecureCodec != NULL) {
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
return false;
}
}
@@ -320,11 +320,11 @@
if (clients.size() == 0) {
// if we are here, run the fourth pass to free one codec with the different type.
if (secureCodec != NULL) {
- MediaResource temp(String8(kResourceNonSecureCodec), 1);
+ MediaResource temp(MediaResource::kNonSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
if (nonSecureCodec != NULL) {
- MediaResource temp(String8(kResourceSecureCodec), 1);
+ MediaResource temp(MediaResource::kSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
}
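The reclaimResource() hunks above keep the same pass ordering under the new enum: first collect every client whose secure/non-secure codec conflicts with the request under the configured coexistence policy, and only if that yields nothing reclaim a single codec of the opposite type. A condensed sketch of the pass ordering; the plumbing is simplified and the helpers are stand-ins for getAllClients_l()/getClientForResource_l():

    #include <cstdio>
    #include <vector>

    enum class Type { SecureCodec, NonSecureCodec };

    // Policy switches read from the HAL configuration in the real service.
    static bool sSupportsMultipleSecureCodecs = false;
    static bool sSupportsSecureWithNonSecureCodec = false;

    // Stand-ins for getAllClients_l() / getClientForResource_l().
    static void collectAll(Type t, std::vector<Type>* clients) { clients->push_back(t); }
    static void collectOne(Type t, std::vector<Type>* clients) { clients->push_back(t); }

    static void reclaim(bool wantSecure, bool wantNonSecure) {
        std::vector<Type> clients;
        if (wantSecure) {  // first pass: conflicts caused by requesting a secure codec
            if (!sSupportsMultipleSecureCodecs) collectAll(Type::SecureCodec, &clients);
            if (!sSupportsSecureWithNonSecureCodec) collectAll(Type::NonSecureCodec, &clients);
        }
        if (wantNonSecure && !sSupportsSecureWithNonSecureCodec) {
            collectAll(Type::SecureCodec, &clients);  // non-secure vs. existing secure codecs
        }
        if (clients.empty()) {  // last resort: free one codec of the *other* type
            if (wantSecure) collectOne(Type::NonSecureCodec, &clients);
            if (wantNonSecure) collectOne(Type::SecureCodec, &clients);
        }
        std::printf("would reclaim %zu client(s)\n", clients.size());
    }

    int main() { reclaim(/*wantSecure=*/true, /*wantNonSecure=*/false); return 0; }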
@@ -374,7 +374,7 @@
}
bool ResourceManagerService::getAllClients_l(
- int callingPid, const String8 &type, Vector<sp<IResourceManagerClient>> *clients) {
+ int callingPid, MediaResource::Type type, Vector<sp<IResourceManagerClient>> *clients) {
Vector<sp<IResourceManagerClient>> temp;
for (size_t i = 0; i < mMap.size(); ++i) {
ResourceInfos &infos = mMap.editValueAt(i);
@@ -384,7 +384,7 @@
// some higher/equal priority process owns the resource,
// this request can't be fulfilled.
ALOGE("getAllClients_l: can't reclaim resource %s from pid %d",
- type.string(), mMap.keyAt(i));
+ asString(type), mMap.keyAt(i));
return false;
}
temp.push_back(infos[j].client);
@@ -392,7 +392,7 @@
}
}
if (temp.size() == 0) {
- ALOGV("getAllClients_l: didn't find any resource %s", type.string());
+ ALOGV("getAllClients_l: didn't find any resource %s", asString(type));
return true;
}
clients->appendVector(temp);
@@ -400,7 +400,7 @@
}
bool ResourceManagerService::getLowestPriorityBiggestClient_l(
- int callingPid, const String8 &type, sp<IResourceManagerClient> *client) {
+ int callingPid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
int lowestPriorityPid;
int lowestPriority;
int callingPriority;
@@ -425,7 +425,7 @@
}
bool ResourceManagerService::getLowestPriorityPid_l(
- const String8 &type, int *lowestPriorityPid, int *lowestPriority) {
+ MediaResource::Type type, int *lowestPriorityPid, int *lowestPriority) {
int pid = -1;
int priority = -1;
for (size_t i = 0; i < mMap.size(); ++i) {
@@ -472,7 +472,7 @@
}
bool ResourceManagerService::getBiggestClient_l(
- int pid, const String8 &type, sp<IResourceManagerClient> *client) {
+ int pid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid);
@@ -495,7 +495,7 @@
}
if (clientTemp == NULL) {
- ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", type.string(), pid);
+ ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", asString(type), pid);
return false;
}
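Replacing the String8 resource keys with the MediaResource::Type enum turns every comparison in the service into a plain enum test and confines strings to the logging/IPC boundary via asString(). A sketch of that pattern; the asString() body here is a hypothetical mapping, not the real one:

    #include <cstdint>
    #include <cstdio>

    struct MediaResource {
        enum Type {
            kUnspecified = 0,
            kSecureCodec,
            kNonSecureCodec,
            kGraphicMemory,
        };
        Type type;
        uint64_t value;
    };

    // Only the log/IPC boundary still needs a string form.
    static const char* asString(MediaResource::Type t) {
        switch (t) {
            case MediaResource::kSecureCodec:    return "secure-codec";
            case MediaResource::kNonSecureCodec: return "non-secure-codec";
            case MediaResource::kGraphicMemory:  return "graphic-memory";
            default:                             return "unspecified";
        }
    }

    int main() {
        MediaResource r{MediaResource::kSecureCodec, 1};
        if (r.type == MediaResource::kSecureCodec) {  // enum compare, no String8
            std::printf("granted %s x%llu\n", asString(r.type),
                        (unsigned long long)r.value);
        }
        return 0;
    }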
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 4769373..8f6fe9a 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -79,22 +79,22 @@
// Gets the list of all the clients who own the specified resource type.
// Returns false if any client belongs to a process with higher priority than the
// calling process. The clients will remain unchanged if returns false.
- bool getAllClients_l(int callingPid, const String8 &type,
+ bool getAllClients_l(int callingPid, MediaResource::Type type,
Vector<sp<IResourceManagerClient>> *clients);
// Gets the client who owns specified resource type from lowest possible priority process.
// Returns false if the calling process priority is not higher than the lowest process
// priority. The client will remain unchanged if returns false.
- bool getLowestPriorityBiggestClient_l(int callingPid, const String8 &type,
+ bool getLowestPriorityBiggestClient_l(int callingPid, MediaResource::Type type,
sp<IResourceManagerClient> *client);
// Gets lowest priority process that has the specified resource type.
// Returns false if failed. The output parameters will remain unchanged if failed.
- bool getLowestPriorityPid_l(const String8 &type, int *pid, int *priority);
+ bool getLowestPriorityPid_l(MediaResource::Type type, int *pid, int *priority);
// Gets the client who owns biggest piece of specified resource type from pid.
// Returns false if failed. The client will remain unchanged if failed.
- bool getBiggestClient_l(int pid, const String8 &type, sp<IResourceManagerClient> *client);
+ bool getBiggestClient_l(int pid, MediaResource::Type type, sp<IResourceManagerClient> *client);
bool isCallingPriorityHigher_l(int callingPid, int pid);
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index cffedc6..62b7711 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -152,24 +152,24 @@
void addResource() {
// kTestPid1 mTestClient1
Vector<MediaResource> resources1;
- resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+ resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
- resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+ resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
Vector<MediaResource> resources11;
- resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+ resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
// kTestPid2 mTestClient2
Vector<MediaResource> resources2;
- resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
- resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
+ resources2.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+ resources2.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
// kTestPid2 mTestClient3
Vector<MediaResource> resources3;
mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
- resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
- resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
+ resources3.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ resources3.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
const PidResourceInfosMap &map = mService->mMap;
@@ -237,14 +237,12 @@
void testGetAllClients() {
addResource();
- String8 type = String8(kResourceSecureCodec);
- String8 unknowType = String8("unknowType");
+ MediaResource::Type type = MediaResource::kSecureCodec;
Vector<sp<IResourceManagerClient> > clients;
EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, &clients));
// some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
// will fail.
EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, &clients));
- EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, unknowType, &clients));
EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, &clients));
EXPECT_EQ(2u, clients.size());
@@ -254,8 +252,8 @@
void testReclaimResourceSecure() {
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+ resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
// ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
{
@@ -356,7 +354,7 @@
mService->mSupportsSecureWithNonSecureCodec = true;
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+ resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
// secure codec from lowest process got reclaimed
@@ -374,8 +372,8 @@
void testReclaimResourceNonSecure() {
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+ resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
// ### secure codec can't coexist with non-secure codec ###
{
@@ -429,7 +427,7 @@
mService->mSupportsSecureWithNonSecureCodec = true;
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+ resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
// one non secure codec from lowest process got reclaimed
@@ -445,7 +443,7 @@
}
void testGetLowestPriorityBiggestClient() {
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
@@ -454,8 +452,8 @@
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, &client));
EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
- // kTestPid1 is the lowest priority process with kResourceGraphicMemory.
- // mTestClient1 has the largest kResourceGraphicMemory within kTestPid1.
+ // kTestPid1 is the lowest priority process with MediaResource::kGraphicMemory.
+ // mTestClient1 has the largest MediaResource::kGraphicMemory within kTestPid1.
EXPECT_EQ(mTestClient1, client);
}
@@ -464,7 +462,7 @@
int priority;
TestProcessInfo processInfo;
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
addResource();
@@ -475,7 +473,7 @@
processInfo.getPriority(kTestPid1, &priority1);
EXPECT_EQ(priority1, priority);
- type = String8(kResourceNonSecureCodec);
+ type = MediaResource::kNonSecureCodec;
EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
EXPECT_EQ(kTestPid2, pid);
int priority2;
@@ -484,7 +482,7 @@
}
void testGetBiggestClient() {
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));