Merge "Disable UpdateService temporarily."
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 44ed034..bb517aa 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -622,7 +622,7 @@
fprintf(stderr, " -o playback audio\n");
fprintf(stderr, " -w(rite) filename (write to .mp4 file)\n");
fprintf(stderr, " -k seek test\n");
- fprintf(stderr, " -O(verride) name of the component\n");
+ fprintf(stderr, " -N(ame) of the component\n");
fprintf(stderr, " -x display a histogram of decoding times/fps "
"(video only)\n");
fprintf(stderr, " -q don't show progress indicator\n");
@@ -708,7 +708,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kO:xSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -737,7 +737,7 @@
break;
}
- case 'O':
+ case 'N':
{
gComponentNameOverride.setTo(optarg);
break;
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index b9b3685..2114d40 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -332,10 +332,13 @@
return status;
}
secure = false;
- } else {
+ } else if (destination.mType == kDestinationTypeNativeHandle) {
hDestination.type = BufferType::NATIVE_HANDLE;
hDestination.secureMemory = hidl_handle(destination.mHandle);
secure = true;
+ } else {
+ android_errorWriteLog(0x534e4554, "70526702");
+ return UNKNOWN_ERROR;
}
::SharedBuffer hSource;
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 8506d95..1d70a4e 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -16,14 +16,14 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ICrypto"
-#include <utils/Log.h>
-
#include <binder/Parcel.h>
#include <binder/IMemory.h>
+#include <cutils/log.h>
#include <media/ICrypto.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
+#include <utils/Log.h>
namespace android {
@@ -362,6 +362,17 @@
reply->writeInt32(BAD_VALUE);
return OK;
}
+ sp<IMemory> dest = destination.mSharedMemory;
+ if (totalSize > dest->size() ||
+ (size_t)dest->offset() > dest->size() - totalSize) {
+ reply->writeInt32(BAD_VALUE);
+ android_errorWriteLog(0x534e4554, "71389378");
+ return OK;
+ }
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ android_errorWriteLog(0x534e4554, "70526702");
+ return OK;
}
AString errorDetailMsg;
diff --git a/drm/libmediadrm/PluginMetricsReporting.cpp b/drm/libmediadrm/PluginMetricsReporting.cpp
index 57ff5b8..cc7fb72 100644
--- a/drm/libmediadrm/PluginMetricsReporting.cpp
+++ b/drm/libmediadrm/PluginMetricsReporting.cpp
@@ -17,6 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "PluginMetricsReporting"
#include <utils/Log.h>
+#include <inttypes.h>
#include <media/PluginMetricsReporting.h>
@@ -46,7 +47,7 @@
// Report the package name.
if (metricsGroup.has_app_package_name()) {
- AString app_package_name(metricsGroup.app_package_name().c_str(),
+ std::string app_package_name(metricsGroup.app_package_name().c_str(),
metricsGroup.app_package_name().size());
analyticsItem.setPkgName(app_package_name);
}
@@ -81,10 +82,7 @@
analyticsItem.setFinalized(true);
if (!analyticsItem.selfrecord()) {
- // Note the cast to int is because we build on 32 and 64 bit.
- // The cast prevents a peculiar printf problem where one format cannot
- // satisfy both.
- ALOGE("selfrecord() returned false. sessioId %d", (int) sessionId);
+ ALOGE("selfrecord() returned false. sessioId %" PRId64, sessionId);
}
for (int i = 0; i < metricsGroup.metric_sub_group_size(); ++i) {
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
deleted file mode 120000
index 37f6822..0000000
--- a/include/media/IMediaCodecService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaCodecService.h
\ No newline at end of file
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
deleted file mode 120000
index 9850603..0000000
--- a/include/media/MediaDefs.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaDefs.h
\ No newline at end of file
diff --git a/media/OWNERS b/media/OWNERS
index 1605efd..d49eb8d 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -2,6 +2,7 @@
dwkang@google.com
elaurent@google.com
essick@google.com
+hkuang@google.com
hunga@google.com
jmtrivi@google.com
krocard@google.com
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index 716fe31..dfb54e2 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -288,6 +288,10 @@
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
if (mFrameDurationUs > 0) {
int64_t seekFrame = seekTimeUs / mFrameDurationUs;
+ if (seekFrame < 0 || seekFrame >= (int64_t)mOffsetVector.size()) {
+ android_errorWriteLog(0x534e4554, "70239507");
+ return ERROR_MALFORMED;
+ }
mCurrentTimeUs = seekFrame * mFrameDurationUs;
mOffset = mOffsetVector.itemAt(seekFrame);
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 3c23736..00c43dc 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -137,6 +137,149 @@
};
typedef int32_t aaudio_performance_mode_t;
+/**
+ * The USAGE attribute expresses "why" you are playing a sound, what is this sound used for.
+ * This information is used by certain platforms or routing policies
+ * to make more refined volume or routing decisions.
+ *
+ * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ */
+enum {
+ /**
+ * Use this for streaming media, music performance, video, podcasts, etcetera.
+ */
+ AAUDIO_USAGE_MEDIA = 1,
+
+ /**
+ * Use this for voice over IP, telephony, etcetera.
+ */
+ AAUDIO_USAGE_VOICE_COMMUNICATION = 2,
+
+ /**
+ * Use this for sounds associated with telephony such as busy tones, DTMF, etcetera.
+ */
+ AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING = 3,
+
+ /**
+ * Use this to demand the user's attention.
+ */
+ AAUDIO_USAGE_ALARM = 4,
+
+ /**
+ * Use this for notifying the user when a message has arrived or some
+ * other background event has occurred.
+ */
+ AAUDIO_USAGE_NOTIFICATION = 5,
+
+ /**
+ * Use this when the phone rings.
+ */
+ AAUDIO_USAGE_NOTIFICATION_RINGTONE = 6,
+
+ /**
+ * Use this to attract the user's attention when, for example, the battery is low.
+ */
+ AAUDIO_USAGE_NOTIFICATION_EVENT = 10,
+
+ /**
+ * Use this for screen readers, etcetera.
+ */
+ AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY = 11,
+
+ /**
+ * Use this for driving or navigation directions.
+ */
+ AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE = 12,
+
+ /**
+ * Use this for user interface sounds, beeps, etcetera.
+ */
+ AAUDIO_USAGE_ASSISTANCE_SONIFICATION = 13,
+
+ /**
+ * Use this for game audio and sound effects.
+ */
+ AAUDIO_USAGE_GAME = 14,
+
+ /**
+ * Use this for audio responses to user queries, audio instructions or help utterances.
+ */
+ AAUDIO_USAGE_ASSISTANT = 16
+};
+typedef int32_t aaudio_usage_t;
+
+/**
+ * The CONTENT_TYPE attribute describes "what" you are playing.
+ * It expresses the general category of the content. This information is optional.
+ * But in case it is known (for instance {@link #AAUDIO_CONTENT_TYPE_MOVIE} for a
+ * movie streaming service or {@link #AAUDIO_CONTENT_TYPE_SPEECH} for
+ * an audio book application) this information might be used by the audio framework to
+ * enforce audio focus.
+ *
+ * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ */
+enum {
+
+ /**
+ * Use this for spoken voice, audio books, etcetera.
+ */
+ AAUDIO_CONTENT_TYPE_SPEECH = 1,
+
+ /**
+ * Use this for pre-recorded or live music.
+ */
+ AAUDIO_CONTENT_TYPE_MUSIC = 2,
+
+ /**
+ * Use this for a movie or video soundtrack.
+ */
+ AAUDIO_CONTENT_TYPE_MOVIE = 3,
+
+ /**
+ * Use this for a sound designed to accompany a user action,
+ * such as a click or beep sound made when the user presses a button.
+ */
+ AAUDIO_CONTENT_TYPE_SONIFICATION = 4
+};
+typedef int32_t aaudio_content_type_t;
+
+/**
+ * Defines the audio source.
+ * An audio source defines both a default physical source of audio signal, and a recording
+ * configuration.
+ *
+ * Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
+ */
+enum {
+ /**
+ * Use this preset when other presets do not apply.
+ */
+ AAUDIO_INPUT_PRESET_GENERIC = 1,
+
+ /**
+ * Use this preset when recording video.
+ */
+ AAUDIO_INPUT_PRESET_CAMCORDER = 5,
+
+ /**
+ * Use this preset when doing speech recognition.
+ */
+ AAUDIO_INPUT_PRESET_VOICE_RECOGNITION = 6,
+
+ /**
+ * Use this preset when doing telephony or voice messaging.
+ */
+ AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION = 7,
+
+ /**
+ * Use this preset to obtain an input with no effects.
+ * Note that this input will not have automatic gain control
+ * so the recorded volume may be very low.
+ */
+ AAUDIO_INPUT_PRESET_UNPROCESSED = 9,
+};
+typedef int32_t aaudio_input_preset_t;
+
typedef struct AAudioStreamStruct AAudioStream;
typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
@@ -308,6 +451,52 @@
aaudio_performance_mode_t mode);
/**
+ * Set the intended use case for the stream.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect how volume and focus is handled for the stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param usage the desired usage, eg. AAUDIO_USAGE_GAME
+ */
+AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
+ aaudio_usage_t usage);
+
+/**
+ * Set the type of audio data that the stream will carry.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect whether a stream is paused when a notification occurs.
+ *
+ * The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
+ */
+AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
+ aaudio_content_type_t contentType);
+
+/**
+ * Set the input (capture) preset for the stream.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect which microphones are used and how the
+ * recorded data is processed.
+ *
+ * The default, if you do not call this function, is AAUDIO_INPUT_PRESET_GENERIC.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param inputPreset the desired configuration for recording
+ */
+AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
+ aaudio_input_preset_t inputPreset);
+
+/**
* Return one of these values from the data callback function.
*/
enum {
@@ -820,6 +1009,30 @@
int64_t *framePosition,
int64_t *timeNanoseconds);
+/**
+ * Return the use case for the stream.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return usage, for example AAUDIO_USAGE_GAME
+ */
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream);
+
+/**
+ * Return the content type for the stream.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
+ */
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream);
+
+/**
+ * Return the input preset for the stream.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
+ */
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream);
+
#ifdef __cplusplus
}
#endif
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index 2ba5250..98fbb6f 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -17,6 +17,9 @@
AAudioStreamBuilder_setSharingMode;
AAudioStreamBuilder_setDirection;
AAudioStreamBuilder_setBufferCapacityInFrames;
+ AAudioStreamBuilder_setUsage; # introduced=28
+ AAudioStreamBuilder_setContentType; # introduced=28
+ AAudioStreamBuilder_setInputPreset; # introduced=28
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
AAudioStream_close;
@@ -42,6 +45,9 @@
AAudioStream_getFormat;
AAudioStream_getSharingMode;
AAudioStream_getDirection;
+ AAudioStream_getUsage; # introduced=28
+ AAudioStream_getContentType; # introduced=28
+ AAudioStream_getInputPreset; # introduced=28
AAudioStream_getFramesWritten;
AAudioStream_getFramesRead;
AAudioStream_getTimestamp;
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 153fce3..97672a0 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -50,6 +50,12 @@
if (status != NO_ERROR) goto error;
status = parcel->writeInt32(getBufferCapacity());
if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getUsage());
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getContentType());
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getInputPreset());
+ if (status != NO_ERROR) goto error;
return NO_ERROR;
error:
ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
@@ -69,16 +75,25 @@
setSamplesPerFrame(value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
- setSharingMode(value);
+ setSharingMode((aaudio_sharing_mode_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
- setFormat(value);
+ setFormat((aaudio_format_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setDirection((aaudio_direction_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setBufferCapacity(value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setUsage((aaudio_usage_t) value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setContentType((aaudio_content_type_t) value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setInputPreset((aaudio_input_preset_t) value);
return NO_ERROR;
error:
ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index bb007ac..9e5ca8e 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -177,6 +177,24 @@
streamBuilder->setSharingMode(sharingMode);
}
+AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
+ aaudio_usage_t usage) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setUsage(usage);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
+ aaudio_content_type_t contentType) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setContentType(contentType);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
+ aaudio_input_preset_t inputPreset) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setInputPreset(inputPreset);
+}
+
AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
int32_t frames)
{
@@ -447,6 +465,24 @@
return audioStream->getSharingMode();
}
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getUsage();
+}
+
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getContentType();
+}
+
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getInputPreset();
+}
+
AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 6400eb4..23c4eb8 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -42,6 +42,9 @@
mAudioFormat = other.mAudioFormat;
mDirection = other.mDirection;
mBufferCapacity = other.mBufferCapacity;
+ mUsage = other.mUsage;
+ mContentType = other.mContentType;
+ mInputPreset = other.mInputPreset;
}
aaudio_result_t AAudioStreamParameters::validate() const {
@@ -98,6 +101,54 @@
// break;
}
+ switch (mUsage) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_USAGE_MEDIA:
+ case AAUDIO_USAGE_VOICE_COMMUNICATION:
+ case AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+ case AAUDIO_USAGE_ALARM:
+ case AAUDIO_USAGE_NOTIFICATION:
+ case AAUDIO_USAGE_NOTIFICATION_RINGTONE:
+ case AAUDIO_USAGE_NOTIFICATION_EVENT:
+ case AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ case AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+ case AAUDIO_USAGE_ASSISTANCE_SONIFICATION:
+ case AAUDIO_USAGE_GAME:
+ case AAUDIO_USAGE_ASSISTANT:
+ break; // valid
+ default:
+ ALOGE("usage not valid = %d", mUsage);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ switch (mContentType) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_CONTENT_TYPE_MUSIC:
+ case AAUDIO_CONTENT_TYPE_MOVIE:
+ case AAUDIO_CONTENT_TYPE_SONIFICATION:
+ case AAUDIO_CONTENT_TYPE_SPEECH:
+ break; // valid
+ default:
+ ALOGE("content type not valid = %d", mContentType);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ switch (mInputPreset) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_INPUT_PRESET_GENERIC:
+ case AAUDIO_INPUT_PRESET_CAMCORDER:
+ case AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION:
+ case AAUDIO_INPUT_PRESET_VOICE_RECOGNITION:
+ case AAUDIO_INPUT_PRESET_UNPROCESSED:
+ break; // valid
+ default:
+ ALOGE("input preset not valid = %d", mInputPreset);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
return AAUDIO_OK;
}
@@ -109,5 +160,8 @@
ALOGD("mAudioFormat = %6d", (int)mAudioFormat);
ALOGD("mDirection = %6d", mDirection);
ALOGD("mBufferCapacity = %6d", mBufferCapacity);
+ ALOGD("mUsage = %6d", mUsage);
+ ALOGD("mContentType = %6d", mContentType);
+ ALOGD("mInputPreset = %6d", mInputPreset);
}
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5e67c93..0c173f5 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -88,6 +88,30 @@
mDirection = direction;
}
+ aaudio_usage_t getUsage() const {
+ return mUsage;
+ }
+
+ void setUsage(aaudio_usage_t usage) {
+ mUsage = usage;
+ }
+
+ aaudio_content_type_t getContentType() const {
+ return mContentType;
+ }
+
+ void setContentType(aaudio_content_type_t contentType) {
+ mContentType = contentType;
+ }
+
+ aaudio_input_preset_t getInputPreset() const {
+ return mInputPreset;
+ }
+
+ void setInputPreset(aaudio_input_preset_t inputPreset) {
+ mInputPreset = inputPreset;
+ }
+
int32_t calculateBytesPerFrame() const {
return getSamplesPerFrame() * AAudioConvert_formatToSizeInBytes(getFormat());
}
@@ -109,6 +133,9 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
+ aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
+ aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
};
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 8f5f5d3..289e0db 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -74,15 +74,28 @@
}
// Copy parameters from the Builder because the Builder may be deleted after this call.
+ // TODO AudioStream should be a subclass of AudioStreamParameters
mSamplesPerFrame = builder.getSamplesPerFrame();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
mSharingMode = builder.getSharingMode();
mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
-
mPerformanceMode = builder.getPerformanceMode();
+ mUsage = builder.getUsage();
+ if (mUsage == AAUDIO_UNSPECIFIED) {
+ mUsage = AAUDIO_USAGE_MEDIA;
+ }
+ mContentType = builder.getContentType();
+ if (mContentType == AAUDIO_UNSPECIFIED) {
+ mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
+ }
+ mInputPreset = builder.getInputPreset();
+ if (mInputPreset == AAUDIO_UNSPECIFIED) {
+ mInputPreset = AAUDIO_INPUT_PRESET_GENERIC;
+ }
+
// callbacks
mFramesPerDataCallback = builder.getFramesPerDataCallback();
mDataCallbackProc = builder.getDataCallbackProc();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index b5d7fd5..82e7189 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -204,6 +204,18 @@
virtual aaudio_direction_t getDirection() const = 0;
+ aaudio_usage_t getUsage() const {
+ return mUsage;
+ }
+
+ aaudio_content_type_t getContentType() const {
+ return mContentType;
+ }
+
+ aaudio_input_preset_t getInputPreset() const {
+ return mInputPreset;
+ }
+
/**
* This is only valid after setSamplesPerFrame() and setFormat() have been called.
*/
@@ -471,6 +483,9 @@
aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_usage_t mUsage = AAUDIO_USAGE_MEDIA;
+ aaudio_content_type_t mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
+ aaudio_input_preset_t mInputPreset = AAUDIO_INPUT_PRESET_GENERIC;
// callback ----------------------------------
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 55eed92..5f4ab9b 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -104,11 +104,24 @@
? AUDIO_PORT_HANDLE_NONE
: getDeviceId();
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(builder.getContentType());
+ const audio_source_t source =
+ AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = AUDIO_USAGE_UNKNOWN, // only used for output
+ .source = source,
+ .flags = flags, // If attributes are set then the other flags parameter is ignored.
+ .tags = ""
+ };
+
mAudioRecord = new AudioRecord(
mOpPackageName // const String16& opPackageName TODO does not compile
);
mAudioRecord->set(
- AUDIO_SOURCE_VOICE_RECOGNITION,
+ AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
getSampleRate(),
format,
channelMask,
@@ -122,7 +135,7 @@
flags,
AUDIO_UID_INVALID, // DEFAULT uid
-1, // DEFAULT pid
- NULL, // DEFAULT audio_attributes_t
+ &attributes,
selectedDeviceId
);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 5113278..17a8d52 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -121,9 +121,22 @@
? AUDIO_PORT_HANDLE_NONE
: getDeviceId();
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(builder.getContentType());
+ const audio_usage_t usage =
+ AAudioConvert_usageToInternal(builder.getUsage());
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = usage,
+ .source = AUDIO_SOURCE_DEFAULT, // only used for recording
+ .flags = flags, // If attributes are set then the other flags parameter is ignored.
+ .tags = ""
+ };
+
mAudioTrack = new AudioTrack();
mAudioTrack->set(
- (audio_stream_type_t) AUDIO_STREAM_MUSIC,
+ AUDIO_STREAM_DEFAULT, // ignored because we pass attributes below
getSampleRate(),
format,
channelMask,
@@ -139,7 +152,7 @@
NULL, // DEFAULT audio_offload_info_t
AUDIO_UID_INVALID, // DEFAULT uid
-1, // DEFAULT pid
- NULL, // DEFAULT audio_attributes_t
+ &attributes,
// WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
// headphones a few times.
false, // DEFAULT doNotReconnect,
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index f709f41..c6adf33 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -26,6 +26,7 @@
#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
#include <math.h>
+#include <system/audio-base.h>
#include "utility/AAudioUtilities.h"
@@ -283,6 +284,61 @@
return aaudioFormat;
}
+// Make a message string from the condition.
+#define STATIC_ASSERT(condition) static_assert(condition, #condition)
+
+audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage) {
+ // The public aaudio_content_type_t constants are supposed to have the same
+ // values as the internal audio_content_type_t values.
+ STATIC_ASSERT(AAUDIO_USAGE_MEDIA == AUDIO_USAGE_MEDIA);
+ STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION == AUDIO_USAGE_VOICE_COMMUNICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING
+ == AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING);
+ STATIC_ASSERT(AAUDIO_USAGE_ALARM == AUDIO_USAGE_ALARM);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION == AUDIO_USAGE_NOTIFICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_RINGTONE
+ == AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_EVENT == AUDIO_USAGE_NOTIFICATION_EVENT);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE
+ == AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_SONIFICATION == AUDIO_USAGE_ASSISTANCE_SONIFICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANT == AUDIO_USAGE_ASSISTANT);
+ if (usage == AAUDIO_UNSPECIFIED) {
+ usage = AAUDIO_USAGE_MEDIA;
+ }
+ return (audio_usage_t) usage; // same value
+}
+
+audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType) {
+ // The public aaudio_content_type_t constants are supposed to have the same
+ // values as the internal audio_content_type_t values.
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MUSIC == AUDIO_CONTENT_TYPE_MUSIC);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SPEECH == AUDIO_CONTENT_TYPE_SPEECH);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SONIFICATION == AUDIO_CONTENT_TYPE_SONIFICATION);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MOVIE == AUDIO_CONTENT_TYPE_MOVIE);
+ if (contentType == AAUDIO_UNSPECIFIED) {
+ contentType = AAUDIO_CONTENT_TYPE_MUSIC;
+ }
+ return (audio_content_type_t) contentType; // same value
+}
+
+audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset) {
+ // The public aaudio_input_preset_t constants are supposed to have the same
+ // values as the internal audio_source_t values.
+ STATIC_ASSERT(AAUDIO_UNSPECIFIED == AUDIO_SOURCE_DEFAULT);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_GENERIC == AUDIO_SOURCE_MIC);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_CAMCORDER == AUDIO_SOURCE_CAMCORDER);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_RECOGNITION == AUDIO_SOURCE_VOICE_RECOGNITION);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION == AUDIO_SOURCE_VOICE_COMMUNICATION);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_UNPROCESSED == AUDIO_SOURCE_UNPROCESSED);
+ if (preset == AAUDIO_UNSPECIFIED) {
+ preset = AAUDIO_INPUT_PRESET_GENERIC;
+ }
+ return (audio_source_t) preset; // same value
+}
+
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
int32_t bytesPerFrame,
int32_t *sizeInBytes) {
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 3afa976..f2347f5 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -167,6 +167,29 @@
aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal value
+ */
+
+audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal value
+ */
+audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal audio source
+ */
+audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset);
+
/**
* @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
*/
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 9f80695..33718fc 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -124,3 +124,15 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_attributes",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_attributes.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
new file mode 100644
index 0000000..9cbf113
--- /dev/null
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test AAudio attributes such as Usage, ContentType and InputPreset.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+constexpr int32_t DONT_SET = -1000;
+
+static void checkAttributes(aaudio_performance_mode_t perfMode,
+ aaudio_usage_t usage,
+ aaudio_content_type_t contentType,
+ aaudio_input_preset_t preset = DONT_SET,
+ aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
+
+ float *buffer = new float[kNumFrames * kChannelCount];
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+ AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+
+ // Set the attribute in the builder.
+ if (usage != DONT_SET) {
+ AAudioStreamBuilder_setUsage(aaudioBuilder, usage);
+ }
+ if (contentType != DONT_SET) {
+ AAudioStreamBuilder_setContentType(aaudioBuilder, contentType);
+ }
+ if (preset != DONT_SET) {
+ AAudioStreamBuilder_setInputPreset(aaudioBuilder, preset);
+ }
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ // Make sure we get the same attributes back from the stream.
+ aaudio_usage_t expectedUsage =
+ (usage == DONT_SET || usage == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_USAGE_MEDIA // default
+ : usage;
+ EXPECT_EQ(expectedUsage, AAudioStream_getUsage(aaudioStream));
+
+ aaudio_content_type_t expectedContentType =
+ (contentType == DONT_SET || contentType == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_CONTENT_TYPE_MUSIC // default
+ : contentType;
+ EXPECT_EQ(expectedContentType, AAudioStream_getContentType(aaudioStream));
+
+ aaudio_input_preset_t expectedPreset =
+ (preset == DONT_SET || preset == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_INPUT_PRESET_GENERIC // default
+ : preset;
+ EXPECT_EQ(expectedPreset, AAudioStream_getInputPreset(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ EXPECT_EQ(kNumFrames,
+ AAudioStream_read(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ } else {
+ EXPECT_EQ(kNumFrames,
+ AAudioStream_write(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+ delete[] buffer;
+}
+
+static const aaudio_usage_t sUsages[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_USAGE_MEDIA,
+ AAUDIO_USAGE_VOICE_COMMUNICATION,
+ AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AAUDIO_USAGE_ALARM,
+ AAUDIO_USAGE_NOTIFICATION,
+ AAUDIO_USAGE_NOTIFICATION_RINGTONE,
+ AAUDIO_USAGE_NOTIFICATION_EVENT,
+ AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AAUDIO_USAGE_GAME,
+ AAUDIO_USAGE_ASSISTANT
+};
+
+static const aaudio_content_type_t sContentypes[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_CONTENT_TYPE_SPEECH,
+ AAUDIO_CONTENT_TYPE_MUSIC,
+ AAUDIO_CONTENT_TYPE_MOVIE,
+ AAUDIO_CONTENT_TYPE_SONIFICATION
+};
+
+static const aaudio_input_preset_t sInputPresets[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_INPUT_PRESET_GENERIC,
+ AAUDIO_INPUT_PRESET_CAMCORDER,
+ AAUDIO_INPUT_PRESET_VOICE_RECOGNITION,
+ AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION,
+ AAUDIO_INPUT_PRESET_UNPROCESSED,
+};
+
+static void checkAttributesUsage(aaudio_performance_mode_t perfMode) {
+ for (aaudio_usage_t usage : sUsages) {
+ checkAttributes(perfMode, usage, DONT_SET);
+ }
+}
+
+// Iterate over all content types, opening a stream for each with the given
+// performance mode and verifying the content type round-trips.
+// Note: parameter is a performance mode (callers pass AAUDIO_PERFORMANCE_MODE_*),
+// so it must be typed aaudio_performance_mode_t, matching the sibling helpers.
+static void checkAttributesContentType(aaudio_performance_mode_t perfMode) {
+    for (aaudio_content_type_t contentType : sContentypes) {
+        checkAttributes(perfMode, DONT_SET, contentType);
+    }
+}
+
+static void checkAttributesInputPreset(aaudio_performance_mode_t perfMode) {
+ for (aaudio_input_preset_t inputPreset : sInputPresets) {
+ checkAttributes(perfMode,
+ DONT_SET,
+ DONT_SET,
+ inputPreset,
+ AAUDIO_DIRECTION_INPUT);
+ }
+}
+
+TEST(test_attributes, aaudio_usage_perfnone) {
+ checkAttributesUsage(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_content_type_perfnone) {
+ checkAttributesContentType(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_input_preset_perfnone) {
+ checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_usage_lowlat) {
+ checkAttributesUsage(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+TEST(test_attributes, aaudio_content_type_lowlat) {
+ checkAttributesContentType(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+TEST(test_attributes, aaudio_input_preset_lowlat) {
+ checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index bedde43..2df37a8 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -47,6 +47,8 @@
"libdl",
"libaudioutils",
"libaudiomanager",
+ "libmedia_helper",
+ "libmediametrics",
],
export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 0d4b462..bc294c5 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -26,6 +26,8 @@
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/TypeConverter.h>
#define WAIT_PERIOD_MS 10
@@ -65,6 +67,34 @@
// ---------------------------------------------------------------------------
+static std::string audioFormatTypeString(audio_format_t value) {
+ std::string formatType;
+ if (FormatConverter::toString(value, formatType)) {
+ return formatType;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+void AudioRecord::MediaMetrics::gather(const AudioRecord *record)
+{
+ // key for media statistics is defined in the header
+ // attrs for media statistics
+ static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
+ static constexpr char kAudioRecordFormat[] = "android.media.audiorecord.format";
+ static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
+ static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
+
+ // constructor guarantees mAnalyticsItem is valid
+
+ mAnalyticsItem->setInt32(kAudioRecordLatency, record->mLatency);
+ mAnalyticsItem->setInt32(kAudioRecordSampleRate, record->mSampleRate);
+ mAnalyticsItem->setInt32(kAudioRecordChannelCount, record->mChannelCount);
+ mAnalyticsItem->setCString(kAudioRecordFormat,
+ audioFormatTypeString(record->mFormat).c_str());
+}
+
AudioRecord::AudioRecord(const String16 &opPackageName)
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
@@ -105,6 +135,8 @@
AudioRecord::~AudioRecord()
{
+ mMediaMetrics.gather(this);
+
if (mStatus == NO_ERROR) {
// Make sure that callback function exits in the case where
// it is looping on buffer empty condition in obtainBuffer().
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6d829a0..a3c66fe 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -31,6 +31,8 @@
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/TypeConverter.h>
#define WAIT_PERIOD_MS 10
#define WAIT_STREAM_END_TIMEOUT_SEC 120
@@ -157,6 +159,71 @@
// ---------------------------------------------------------------------------
+static std::string audioContentTypeString(audio_content_type_t value) {
+ std::string contentType;
+ if (AudioContentTypeConverter::toString(value, contentType)) {
+ return contentType;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+static std::string audioUsageString(audio_usage_t value) {
+ std::string usage;
+ if (UsageTypeConverter::toString(value, usage)) {
+ return usage;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
+{
+
+ // key for media statistics is defined in the header
+ // attrs for media statistics
+ static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+ static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
+ static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
+ static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
+ static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
+#if 0
+ // XXX: disabled temporarily for b/72027185
+ static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
+#endif
+ static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
+
+ // constructor guarantees mAnalyticsItem is valid
+
+#if 0
+ // XXX: disabled temporarily for b/72027185
+ // must gather underrun info before cleaning mProxy information.
+ const int32_t underrunFrames = track->getUnderrunFrames();
+ if (underrunFrames != 0) {
+ mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
+ }
+#endif
+
+ if (track->mTimestampStartupGlitchReported) {
+ mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
+ }
+
+ if (track->mStreamType != -1) {
+ // deprecated, but this will tell us who still uses it.
+ mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
+ }
+ // XXX: consider including from mAttributes: source type
+ mAnalyticsItem->setCString(kAudioTrackContentType,
+ audioContentTypeString(track->mAttributes.content_type).c_str());
+ mAnalyticsItem->setCString(kAudioTrackUsage,
+ audioUsageString(track->mAttributes.usage).c_str());
+ mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
+ mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
+}
+
+
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
mState(STATE_STOPPED),
@@ -236,6 +303,9 @@
AudioTrack::~AudioTrack()
{
+ // pull together the numbers, before we clean up our structures
+ mMediaMetrics.gather(this);
+
if (mStatus == NO_ERROR) {
// Make sure that callback function exits in the case where
// it is looping on buffer full condition in obtainBuffer().
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 074e547..fea973a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -21,6 +21,7 @@
#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/Modulo.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
@@ -688,6 +689,24 @@
// activity and connected devices
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+private:
+ class MediaMetrics {
+ public:
+ MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")) {
+ }
+ ~MediaMetrics() {
+ // mAnalyticsItem alloc failure will be flagged in the constructor
+ // don't log empty records
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ void gather(const AudioRecord *record);
+ private:
+ std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+ };
+ MediaMetrics mMediaMetrics;
};
}; // namespace android
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 9fbd04b..c146db9 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -22,6 +22,7 @@
#include <media/AudioTimestamp.h>
#include <media/IAudioTrack.h>
#include <media/AudioResamplerPublic.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/Modulo.h>
#include <utils/threads.h>
@@ -1182,6 +1183,25 @@
pid_t mClientPid;
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+
+private:
+ class MediaMetrics {
+ public:
+ MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiotrack")) {
+ }
+ ~MediaMetrics() {
+ // mAnalyticsItem alloc failure will be flagged in the constructor
+ // don't log empty records
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ void gather(const AudioTrack *track);
+ private:
+ std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+ };
+ MediaMetrics mMediaMetrics;
};
}; // namespace android
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3c9bfdd..e7dc0fe 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -319,6 +319,7 @@
srcs: [
"AudioParameter.cpp",
+ "JAudioTrack.cpp",
"MediaPlayer2Factory.cpp",
"MediaPlayer2Manager.cpp",
"TestPlayerStub.cpp",
@@ -327,6 +328,7 @@
],
shared_libs: [
+ "libandroid_runtime",
"libaudioclient",
"libbinder",
"libcutils",
diff --git a/media/libmedia/JAudioTrack.cpp b/media/libmedia/JAudioTrack.cpp
new file mode 100644
index 0000000..b228d8b
--- /dev/null
+++ b/media/libmedia/JAudioTrack.cpp
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JAudioTrack"
+
+#include "media/JAudioAttributes.h"
+#include "media/JAudioFormat.h"
+#include "media/JAudioTrack.h"
+
+#include <android_media_AudioErrors.h>
+#include <android_runtime/AndroidRuntime.h>
+
+namespace android {
+
+// TODO: Store Java class/methodID as a member variable in the class.
+// TODO: Add NULL && Exception checks after every JNI call.
+JAudioTrack::JAudioTrack( // < Usages of the arguments are below >
+ audio_stream_type_t streamType, // AudioAudioAttributes
+ uint32_t sampleRate, // AudioFormat && bufferSizeInBytes
+ audio_format_t format, // AudioFormat && bufferSizeInBytes
+ audio_channel_mask_t channelMask, // AudioFormat && bufferSizeInBytes
+ size_t frameCount, // bufferSizeInBytes
+ audio_session_t sessionId, // AudioTrack
+ const audio_attributes_t* pAttributes, // AudioAttributes
+ float maxRequiredSpeed) { // bufferSizeInBytes
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
+ mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
+
+ maxRequiredSpeed = std::min(std::max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
+
+ int bufferSizeInBytes = 0;
+ if (sampleRate == 0 || frameCount > 0) {
+ // Manually calculate buffer size.
+ bufferSizeInBytes = audio_channel_count_from_out_mask(channelMask)
+ * audio_bytes_per_sample(format) * (frameCount > 0 ? frameCount : 1);
+ } else if (sampleRate > 0) {
+ // Call Java AudioTrack::getMinBufferSize().
+ jmethodID jGetMinBufferSize =
+ env->GetStaticMethodID(mAudioTrackCls, "getMinBufferSize", "(III)I");
+ bufferSizeInBytes = env->CallStaticIntMethod(mAudioTrackCls, jGetMinBufferSize,
+ sampleRate, outChannelMaskFromNative(channelMask), audioFormatFromNative(format));
+ }
+ bufferSizeInBytes = (int) (bufferSizeInBytes * maxRequiredSpeed);
+
+ // Create a Java AudioTrack object through its Builder.
+ jclass jBuilderCls = env->FindClass("android/media/AudioTrack$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ jmethodID jSetAudioAttributes = env->GetMethodID(jBuilderCls, "setAudioAttributes",
+ "(Landroid/media/AudioAttributes;)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioAttributes,
+ JAudioAttributes::createAudioAttributesObj(env, pAttributes, streamType));
+
+ jmethodID jSetAudioFormat = env->GetMethodID(jBuilderCls, "setAudioFormat",
+ "(Landroid/media/AudioFormat;)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioFormat,
+ JAudioFormat::createAudioFormatObj(env, sampleRate, format, channelMask));
+
+ jmethodID jSetBufferSizeInBytes = env->GetMethodID(jBuilderCls, "setBufferSizeInBytes",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetBufferSizeInBytes, bufferSizeInBytes);
+
+ // We only use streaming mode of Java AudioTrack.
+ jfieldID jModeStream = env->GetStaticFieldID(mAudioTrackCls, "MODE_STREAM", "I");
+ jint transferMode = env->GetStaticIntField(mAudioTrackCls, jModeStream);
+ jmethodID jSetTransferMode = env->GetMethodID(jBuilderCls, "setTransferMode",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetTransferMode,
+ transferMode /* Java AudioTrack::MODE_STREAM */);
+
+ if (sessionId != 0) {
+ jmethodID jSetSessionId = env->GetMethodID(jBuilderCls, "setSessionId",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetSessionId, sessionId);
+ }
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioTrack;");
+ mAudioTrackObj = env->CallObjectMethod(jBuilderObj, jBuild);
+}
+
+JAudioTrack::~JAudioTrack() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ env->DeleteGlobalRef(mAudioTrackCls);
+}
+
+size_t JAudioTrack::frameCount() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetBufferSizeInFrames = env->GetMethodID(
+ mAudioTrackCls, "getBufferSizeInFrames", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
+}
+
+size_t JAudioTrack::channelCount() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
+}
+
+status_t JAudioTrack::getPosition(uint32_t *position) {
+ if (position == NULL) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
+ mAudioTrackCls, "getPlaybackHeadPosition", "()I");
+ *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
+
+ return NO_ERROR;
+}
+
+// Queries the Java AudioTrack for a presentation timestamp.
+// Returns true and fills |timestamp| on success, false if the Java side
+// reported no timestamp available.
+bool JAudioTrack::getTimeStamp(AudioTimestamp& timestamp) {
+    JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+    jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
+    jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
+
+    // JNI signature for a Java "long" field is "J". ("L" alone is invalid:
+    // it introduces an object type and must be followed by a class name and ';'.)
+    jfieldID jFramePosition = env->GetFieldID(jAudioTimeStampCls, "framePosition", "J");
+    jfieldID jNanoTime = env->GetFieldID(jAudioTimeStampCls, "nanoTime", "J");
+
+    // boolean getTimestamp(AudioTimestamp): object parameter needs a trailing ';'
+    // and a Java boolean return is encoded "Z" ("B" would be byte).
+    jmethodID jGetTimestamp = env->GetMethodID(mAudioTrackCls,
+            "getTimestamp", "(Landroid/media/AudioTimestamp;)Z");
+    bool success = env->CallBooleanMethod(mAudioTrackObj, jGetTimestamp, jAudioTimeStampObj);
+
+    if (!success) {
+        return false;
+    }
+
+    long long framePosition = env->GetLongField(jAudioTimeStampObj, jFramePosition);
+    long long nanoTime = env->GetLongField(jAudioTimeStampObj, jNanoTime);
+
+    struct timespec ts;
+    const long long secondToNano = 1000000000LL; // 1E9
+    ts.tv_sec = nanoTime / secondToNano;
+    ts.tv_nsec = nanoTime % secondToNano;
+    timestamp.mTime = ts;
+    timestamp.mPosition = (uint32_t) framePosition;
+
+    return true;
+}
+
+status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
+ // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
+ // Should we do the same thing?
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
+ jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
+ jobject jPlaybackParamsObj = env->NewObject(jPlaybackParamsCls, jPlaybackParamsCtor);
+
+ jmethodID jSetAudioFallbackMode = env->GetMethodID(
+ jPlaybackParamsCls, "setAudioFallbackMode", "(I)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(
+ jPlaybackParamsObj, jSetAudioFallbackMode, playbackRate.mFallbackMode);
+
+ jmethodID jSetAudioStretchMode = env->GetMethodID(
+ jPlaybackParamsCls, "setAudioStretchMode", "(I)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(
+ jPlaybackParamsObj, jSetAudioStretchMode, playbackRate.mStretchMode);
+
+ jmethodID jSetPitch = env->GetMethodID(
+ jPlaybackParamsCls, "setPitch", "(F)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetPitch, playbackRate.mPitch);
+
+ jmethodID jSetSpeed = env->GetMethodID(
+ jPlaybackParamsCls, "setSpeed", "(F)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetSpeed, playbackRate.mSpeed);
+
+
+ // Set this Java PlaybackParams object into Java AudioTrack.
+ jmethodID jSetPlaybackParams = env->GetMethodID(
+ mAudioTrackCls, "setPlaybackParams", "(Landroid/media/PlaybackParams;)V");
+ env->CallVoidMethod(mAudioTrackObj, jSetPlaybackParams, jPlaybackParamsObj);
+ // TODO: Should we catch the Java IllegalArgumentException?
+
+ return NO_ERROR;
+}
+
+const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jmethodID jGetPlaybackParams = env->GetMethodID(
+ mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
+ jobject jPlaybackParamsObj = env->CallObjectMethod(mAudioTrackObj, jGetPlaybackParams);
+
+ AudioPlaybackRate playbackRate;
+ jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
+
+ jmethodID jGetAudioFallbackMode = env->GetMethodID(
+ jPlaybackParamsCls, "getAudioFallbackMode", "()I");
+ // TODO: Should we enable passing AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT?
+ // The enum is internal only, so it is not defined in PlaybackParmas.java.
+ // TODO: Is this right way to convert an int to an enum?
+ playbackRate.mFallbackMode = static_cast<AudioTimestretchFallbackMode>(
+ env->CallIntMethod(jPlaybackParamsObj, jGetAudioFallbackMode));
+
+ jmethodID jGetAudioStretchMode = env->GetMethodID(
+ jPlaybackParamsCls, "getAudioStretchMode", "()I");
+ playbackRate.mStretchMode = static_cast<AudioTimestretchStretchMode>(
+ env->CallIntMethod(jPlaybackParamsObj, jGetAudioStretchMode));
+
+ jmethodID jGetPitch = env->GetMethodID(jPlaybackParamsCls, "getPitch", "()F");
+ playbackRate.mPitch = env->CallFloatMethod(jPlaybackParamsObj, jGetPitch);
+
+ jmethodID jGetSpeed = env->GetMethodID(jPlaybackParamsCls, "getSpeed", "()F");
+ playbackRate.mSpeed = env->CallFloatMethod(jPlaybackParamsObj, jGetSpeed);
+
+ return playbackRate;
+}
+
+media::VolumeShaper::Status JAudioTrack::applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) {
+
+ jobject jConfigurationObj = createVolumeShaperConfigurationObj(configuration);
+ jobject jOperationObj = createVolumeShaperOperationObj(operation);
+
+ if (jConfigurationObj == NULL || jOperationObj == NULL) {
+ return media::VolumeShaper::Status(BAD_VALUE);
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
+ "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
+ jobject jVolumeShaperObj = env->CallObjectMethod(
+ mAudioTrackObj, jCreateVolumeShaper, jConfigurationObj);
+
+ jclass jVolumeShaperCls = env->FindClass("android/media/VolumeShaper");
+ jmethodID jApply = env->GetMethodID(jVolumeShaperCls, "apply",
+ "(Landroid/media/VolumeShaper$Operation;)V");
+ env->CallVoidMethod(jVolumeShaperObj, jApply, jOperationObj);
+
+ return media::VolumeShaper::Status(NO_ERROR);
+}
+
+status_t JAudioTrack::setAuxEffectSendLevel(float level) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
+ mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::attachAuxEffect(int effectId) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::setVolume(float left, float right) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ // TODO: Java setStereoVolume is deprecated. Do we really need this method?
+ jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::setVolume(float volume) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::start() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
+ // TODO: Should we catch the Java IllegalStateException from play()?
+ env->CallVoidMethod(mAudioTrackObj, jPlay);
+ return NO_ERROR;
+}
+
+ssize_t JAudioTrack::write(const void* buffer, size_t size, bool blocking) {
+ if (buffer == NULL) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jbyteArray jAudioData = env->NewByteArray(size);
+ env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
+
+ jclass jByteBufferCls = env->FindClass("java/nio/ByteBuffer");
+ jmethodID jWrap = env->GetStaticMethodID(jByteBufferCls, "wrap", "([B)Ljava/nio/ByteBuffer;");
+ jobject jByteBufferObj = env->CallStaticObjectMethod(jByteBufferCls, jWrap, jAudioData);
+
+ int writeMode = 0;
+ if (blocking) {
+ jfieldID jWriteBlocking = env->GetStaticFieldID(mAudioTrackCls, "WRITE_BLOCKING", "I");
+ writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteBlocking);
+ } else {
+ jfieldID jWriteNonBlocking = env->GetStaticFieldID(
+ mAudioTrackCls, "WRITE_NON_BLOCKING", "I");
+ writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteNonBlocking);
+ }
+
+ jmethodID jWrite = env->GetMethodID(mAudioTrackCls, "write", "(Ljava/nio/ByteBuffer;II)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jWrite, jByteBufferObj, size, writeMode);
+
+ if (result >= 0) {
+ return result;
+ } else {
+ return javaToNativeStatus(result);
+ }
+}
+
+void JAudioTrack::stop() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jStop);
+ // TODO: Should we catch IllegalStateException?
+}
+
+// TODO: Is the right implementation?
+bool JAudioTrack::stopped() const {
+ return !isPlaying();
+}
+
+void JAudioTrack::flush() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jFlush);
+}
+
+void JAudioTrack::pause() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jPause);
+ // TODO: Should we catch IllegalStateException?
+}
+
+bool JAudioTrack::isPlaying() const {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
+ int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
+
+ // TODO: In Java AudioTrack, there is no STOPPING state.
+ // This means while stopping, isPlaying() will return different value in two class.
+ // - in existing native AudioTrack: true
+ // - in JAudioTrack: false
+ // If not okay, also modify the implementation of stopped().
+ jfieldID jPlayStatePlaying = env->GetStaticFieldID(mAudioTrackCls, "PLAYSTATE_PLAYING", "I");
+ int statePlaying = env->GetStaticIntField(mAudioTrackCls, jPlayStatePlaying);
+ return currentPlayState == statePlaying;
+}
+
+uint32_t JAudioTrack::getSampleRate() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
+}
+
+status_t JAudioTrack::getBufferDurationInUs(int64_t *duration) {
+ if (duration == nullptr) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetBufferSizeInFrames = env->GetMethodID(
+ mAudioTrackCls, "getBufferSizeInFrames", "()I");
+ int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
+
+ const double secondToMicro = 1000000LL; // 1E6
+ int sampleRate = JAudioTrack::getSampleRate();
+ float speed = JAudioTrack::getPlaybackRate().mSpeed;
+
+ *duration = (int64_t) (bufferSizeInFrames * secondToMicro / (sampleRate * speed));
+ return NO_ERROR;
+}
+
+audio_format_t JAudioTrack::format() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
+ int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
+ return audioFormatToNative(javaFormat);
+}
+
+// Builds a Java VolumeShaper.Configuration from the native configuration.
+// Returns NULL for null/TYPE_ID configs (Java setId()/setOptionFlags() are hidden).
+jobject JAudioTrack::createVolumeShaperConfigurationObj(
+        const sp<media::VolumeShaper::Configuration>& config) {
+
+    // TODO: Java VolumeShaper's setId() / setOptionFlags() are hidden.
+    if (config == NULL || config->getType() == media::VolumeShaper::Configuration::TYPE_ID) {
+        return NULL;
+    }
+
+    JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+    // Referenced "android_media_VolumeShaper.h".
+    jfloatArray xarray = nullptr;
+    jfloatArray yarray = nullptr;
+    if (config->getType() == media::VolumeShaper::Configuration::TYPE_SCALE) {
+        // convert curve arrays
+        xarray = env->NewFloatArray(config->size());
+        yarray = env->NewFloatArray(config->size());
+        float * const x = env->GetFloatArrayElements(xarray, nullptr /* isCopy */);
+        float * const y = env->GetFloatArrayElements(yarray, nullptr /* isCopy */);
+        float *xptr = x, *yptr = y;
+        for (const auto &pt : *config.get()) {
+            *xptr++ = pt.first;
+            *yptr++ = pt.second;
+        }
+        env->ReleaseFloatArrayElements(xarray, x, 0 /* mode */);
+        env->ReleaseFloatArrayElements(yarray, y, 0 /* mode */);
+    }
+
+    jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Configuration$Builder");
+    jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+    jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+    // setDuration takes a Java long: JNI signature "(J)...", not "(L)".
+    jmethodID jSetDuration = env->GetMethodID(jBuilderCls, "setDuration",
+            "(J)Landroid/media/VolumeShaper$Configuration$Builder;");
+    // Instance methods must be invoked on the builder *object*, not the class.
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetDuration, (jlong) config->getDurationMs());
+
+    jmethodID jSetInterpolatorType = env->GetMethodID(jBuilderCls, "setInterpolatorType",
+            "(I)Landroid/media/VolumeShaper$Configuration$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetInterpolatorType,
+            config->getInterpolatorType());
+
+    jmethodID jSetCurve = env->GetMethodID(jBuilderCls, "setCurve",
+            "([F[F)Landroid/media/VolumeShaper$Configuration$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetCurve, xarray, yarray);
+
+    jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+            "()Landroid/media/VolumeShaper$Configuration;");
+    return env->CallObjectMethod(jBuilderObj, jBuild);
+}
+
+// Builds a Java VolumeShaper.Operation from the native operation's flags.
+jobject JAudioTrack::createVolumeShaperOperationObj(
+        const sp<media::VolumeShaper::Operation>& operation) {
+
+    JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+    jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
+    jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+    jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+    // Set XOffset
+    jmethodID jSetXOffset = env->GetMethodID(jBuilderCls, "setXOffset",
+            "(F)Landroid/media/VolumeShaper$Operation$Builder;");
+    // Instance methods must be invoked on the builder *object*, not the class.
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetXOffset, operation->getXOffset());
+
+    int32_t flags = operation->getFlags();
+
+    if (operation->getReplaceId() >= 0) {
+        // replace(int, boolean): a Java boolean is "Z" in JNI signatures, not "B".
+        jmethodID jReplace = env->GetMethodID(jBuilderCls, "replace",
+                "(IZ)Landroid/media/VolumeShaper$Operation$Builder;");
+        // Test the bit with AND; OR-ing the flag in would always be non-zero.
+        bool join = (flags & media::VolumeShaper::Operation::FLAG_JOIN) != 0;
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jReplace, operation->getReplaceId(), join);
+    }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_REVERSE) {
+        jmethodID jReverse = env->GetMethodID(jBuilderCls, "reverse",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jReverse);
+    }
+
+    // TODO: VolumeShaper Javadoc says "Do not call terminate() directly". Can we call this?
+    if (flags & media::VolumeShaper::Operation::FLAG_TERMINATE) {
+        jmethodID jTerminate = env->GetMethodID(jBuilderCls, "terminate",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jTerminate);
+    }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_DELAY) {
+        jmethodID jDefer = env->GetMethodID(jBuilderCls, "defer",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jDefer);
+    }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_CREATE_IF_NECESSARY) {
+        jmethodID jCreateIfNeeded = env->GetMethodID(jBuilderCls, "createIfNeeded",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jCreateIfNeeded);
+    }
+
+    // TODO: Handle error case (can it be NULL?)
+    jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+            "()Landroid/media/VolumeShaper$Operation;");
+    return env->CallObjectMethod(jBuilderObj, jBuild);
+}
+
+status_t JAudioTrack::javaToNativeStatus(int javaStatus) {
+ switch (javaStatus) {
+ case AUDIO_JAVA_SUCCESS:
+ return NO_ERROR;
+ case AUDIO_JAVA_BAD_VALUE:
+ return BAD_VALUE;
+ case AUDIO_JAVA_INVALID_OPERATION:
+ return INVALID_OPERATION;
+ case AUDIO_JAVA_PERMISSION_DENIED:
+ return PERMISSION_DENIED;
+ case AUDIO_JAVA_NO_INIT:
+ return NO_INIT;
+ case AUDIO_JAVA_WOULD_BLOCK:
+ return WOULD_BLOCK;
+ case AUDIO_JAVA_DEAD_OBJECT:
+ return DEAD_OBJECT;
+ default:
+ return UNKNOWN_ERROR;
+ }
+}
+
+} // namespace android
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index e6c8f9c..9b06047 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -277,6 +277,16 @@
TERMINATOR
};
+template<>
+const AudioContentTypeConverter::Table AudioContentTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_UNKNOWN),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_SPEECH),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_MUSIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_MOVIE),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_SONIFICATION),
+ TERMINATOR
+};
+
template <>
const UsageTypeConverter::Table UsageTypeConverter::mTable[] = {
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_UNKNOWN),
diff --git a/media/libmedia/include/media/JAudioAttributes.h b/media/libmedia/include/media/JAudioAttributes.h
new file mode 100644
index 0000000..fb11435
--- /dev/null
+++ b/media/libmedia/include/media/JAudioAttributes.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOATTRIBUTES_H
+#define ANDROID_JAUDIOATTRIBUTES_H
+
+#include <jni.h>
+#include <system/audio.h>
+
+namespace android {
+
+class JAudioAttributes {
+public:
+ /* Creates a Java AudioAttributes object. */
+ static jobject createAudioAttributesObj(JNIEnv *env,
+ const audio_attributes_t* pAttributes,
+ audio_stream_type_t streamType) {
+
+ jclass jBuilderCls = env->FindClass("android/media/AudioAttributes$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ if (pAttributes != NULL) {
+ // If pAttributes is not null, streamType is ignored.
+ jmethodID jSetUsage = env->GetMethodID(
+ jBuilderCls, "setUsage", "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetUsage, pAttributes->usage);
+
+ jmethodID jSetContentType = env->GetMethodID(jBuilderCls, "setContentType",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetContentType,
+ pAttributes->content_type);
+
+ // TODO: Java AudioAttributes.Builder.setCapturePreset() is systemApi and hidden.
+ // Can we use this method?
+// jmethodID jSetCapturePreset = env->GetMethodID(jBuilderCls, "setCapturePreset",
+// "(I)Landroid/media/AudioAttributes$Builder;");
+// jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetCapturePreset,
+// pAttributes->source);
+
+ jmethodID jSetFlags = env->GetMethodID(jBuilderCls, "setFlags",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetFlags, pAttributes->flags);
+
+ // TODO: Handle the 'tags' (char[] to HashSet<String>).
+ // How to parse the char[]? Is there any example of it?
+ // Also, the addTags() method is hidden.
+ } else {
+ // Call AudioAttributes.Builder.setLegacyStreamType().build()
+ jmethodID jSetLegacyStreamType = env->GetMethodID(jBuilderCls, "setLegacyStreamType",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetLegacyStreamType, streamType);
+ }
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+ "()Landroid/media/AudioAttributes;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+ }
+
+};
+
+} // namespace android
+
+#endif // ANDROID_JAUDIOATTRIBUTES_H
diff --git a/media/libmedia/include/media/JAudioFormat.h b/media/libmedia/include/media/JAudioFormat.h
new file mode 100644
index 0000000..00abdff
--- /dev/null
+++ b/media/libmedia/include/media/JAudioFormat.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOFORMAT_H
+#define ANDROID_JAUDIOFORMAT_H
+
+#include <android_media_AudioFormat.h>
+#include <jni.h>
+
+namespace android {
+
+class JAudioFormat {
+public:
+ /* Creates a Java AudioFormat object. */
+ static jobject createAudioFormatObj(JNIEnv *env,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask) {
+
+ jclass jBuilderCls = env->FindClass("android/media/AudioFormat$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ if (sampleRate == 0) {
+ jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
+ jfieldID jSampleRateUnspecified =
+ env->GetStaticFieldID(jAudioFormatCls, "SAMPLE_RATE_UNSPECIFIED", "I");
+ sampleRate = env->GetStaticIntField(jAudioFormatCls, jSampleRateUnspecified);
+ }
+
+ jmethodID jSetEncoding = env->GetMethodID(jBuilderCls, "setEncoding",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetEncoding,
+ audioFormatFromNative(format));
+
+ jmethodID jSetSampleRate = env->GetMethodID(jBuilderCls, "setSampleRate",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetSampleRate, sampleRate);
+
+ jmethodID jSetChannelMask = env->GetMethodID(jBuilderCls, "setChannelMask",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetChannelMask,
+ outChannelMaskFromNative(channelMask));
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioFormat;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+ }
+
+};
+
+} // namespace android
+
+#endif // ANDROID_JAUDIOFORMAT_H
diff --git a/media/libmedia/include/media/JAudioTrack.h b/media/libmedia/include/media/JAudioTrack.h
new file mode 100644
index 0000000..8af30b7
--- /dev/null
+++ b/media/libmedia/include/media/JAudioTrack.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOTRACK_H
+#define ANDROID_JAUDIOTRACK_H
+
+#include <jni.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/VolumeShaper.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+
+#include <media/AudioTimestamp.h> // It has dependency on audio.h/Errors.h, but doesn't
+ // include them in it. Therefore it is included here at last.
+
+namespace android {
+
+class JAudioTrack {
+public:
+
+ /* Creates an JAudioTrack object for non-offload mode.
+ * Once created, the track needs to be started before it can be used.
+ * Unspecified values are set to appropriate default values.
+ *
+ * Parameters:
+ *
+ * streamType: Select the type of audio stream this track is attached to
+ * (e.g. AUDIO_STREAM_MUSIC).
+ * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
+ * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
+ * 0 will not work with current policy implementation for direct output
+ * selection where an exact match is needed for sampling rate.
+ * (TODO: Check direct output after flags can be used in Java AudioTrack.)
+ * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
+ * For direct and offloaded tracks, the possible format(s) depends on the
+ * output sink.
+ * (TODO: How can we check whether a format is supported?)
+ * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
+ * frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the latency of the track.
+ * The actual size selected by the JAudioTrack could be larger if the
+ * requested size is not compatible with current audio HAL configuration.
+ * Zero means to use a default value.
+ * sessionId: Specific session ID, or zero to use default.
+ * pAttributes: If not NULL, supersedes streamType for use case selection.
+ * maxRequiredSpeed: For PCM tracks, this creates an appropriate buffer size that will allow
+ * maxRequiredSpeed playback. Values less than 1.0f and greater than
+ * AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
+ * and direct or offloaded tracks, this parameter is ignored.
+ * (TODO: Handle this after offload / direct track is supported.)
+ *
+ * TODO: Revive removed arguments after offload mode is supported.
+ */
+ JAudioTrack(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ const audio_attributes_t* pAttributes = NULL,
+ float maxRequiredSpeed = 1.0f);
+
+ /*
+ Temporarily removed constructor arguments:
+
+ // Q. Values are in audio-base.h, but where can we find explanation for them?
+ audio_output_flags_t flags,
+
+ // Q. May be used in AudioTrack.setPreferredDevice(AudioDeviceInfo)?
+ audio_port_handle_t selectedDeviceId,
+
+ // Should be deleted, since we don't use Binder anymore.
+ bool doNotReconnect,
+
+ // Do we need UID and PID?
+ uid_t uid,
+ pid_t pid,
+
+ // TODO: Uses these values when Java AudioTrack supports the offload mode.
+ callback_t cbf,
+ void* user,
+ int32_t notificationFrames,
+ const audio_offload_info_t *offloadInfo,
+
+ // Fixed to false, but what is this?
+ threadCanCallJava
+ */
+
+ virtual ~JAudioTrack();
+
+ size_t frameCount();
+ size_t channelCount();
+
+ /* Return the total number of frames played since playback start.
+ * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+ * It is reset to zero by flush(), reload(), and stop().
+ *
+ * Parameters:
+ *
+ * position: Address where to return play head position.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: position is NULL
+ */
+ status_t getPosition(uint32_t *position);
+
+ // TODO: Does this comment apply same to Java AudioTrack::getTimestamp?
+ // Changed the return type from status_t to bool, since Java AudioTrack::getTimestamp returns
+ // boolean. Will Java getTimestampWithStatus() be public?
+ /* Poll for a timestamp on demand.
+ * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
+ * or if you need to get the most recent timestamp outside of the event callback handler.
+ * Caution: calling this method too often may be inefficient;
+ * if you need a high resolution mapping between frame position and presentation time,
+ * consider implementing that at application level, based on the low resolution timestamps.
+ * Returns true if timestamp is valid.
+ * The timestamp parameter is undefined on return, if false is returned.
+ */
+ bool getTimeStamp(AudioTimestamp& timestamp);
+
+ /* Set source playback rate for timestretch
+ * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+ * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+ *
+ * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+ * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+ *
+ * Speed increases the playback rate of media, but does not alter pitch.
+ * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
+ */
+ status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+ /* Return current playback rate */
+ const AudioPlaybackRate getPlaybackRate();
+
+ /* Sets the volume shaper object */
+ media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
+
+ /* Set the send level for this track. An auxiliary effect should be attached
+ * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
+ */
+ status_t setAuxEffectSendLevel(float level);
+
+ /* Attach track auxiliary output to specified effect. Use effectId = 0
+ * to detach track from effect.
+ *
+ * Parameters:
+ *
+ * effectId: effectId obtained from AudioEffect::id().
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: The effect is not an auxiliary effect.
+ * - BAD_VALUE: The specified effect ID is invalid.
+ */
+ status_t attachAuxEffect(int effectId);
+
+ /* Set volume for this track, mostly used for games' sound effects
+ * left and right volumes. Levels must be >= 0.0 and <= 1.0.
+ * This is the older API. New applications should use setVolume(float) when possible.
+ */
+ status_t setVolume(float left, float right);
+
+ /* Set volume for all channels. This is the preferred API for new applications,
+ * especially for multi-channel content.
+ */
+ status_t setVolume(float volume);
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::play()?
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ * If the track was previously paused, volume is ramped up over the first mix buffer.
+ */
+ status_t start();
+
+ // TODO: Does this comment still apply? It seems not. (obtainBuffer, AudioFlinger, ...)
+ /* As a convenience we provide a write() interface to the audio buffer.
+ * Input parameter 'size' is in byte units.
+ * This is implemented on top of obtainBuffer/releaseBuffer. For best
+ * performance use callbacks. Returns actual number of bytes written >= 0,
+ * or one of the following negative status codes:
+ * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
+ * BAD_VALUE size is invalid
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioTrack was stopped during the write
+ * DEAD_OBJECT when AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offload tracks
+ * or if mDoNotReconnect is true.
+ * or any other error code returned by IAudioTrack::start() or restoreTrack_l().
+ * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+ * false for the method to return immediately without waiting to try multiple times to write
+ * the full content of the buffer.
+ */
+ ssize_t write(const void* buffer, size_t size, bool blocking = true);
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::stop()?
+ /* Stop a track.
+ * In static buffer mode, the track is stopped immediately.
+ * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
+ * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+ * In streaming mode the stop does not occur immediately: any data remaining in the buffer
+ * is first drained, mixed, and output, and only then is the track marked as stopped.
+ */
+ void stop();
+ bool stopped() const;
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::flush()?
+ /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
+ * This has the effect of draining the buffers without mixing or output.
+ * Flush is intended for streaming mode, for example before switching to non-contiguous content.
+ * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
+ */
+ void flush();
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::pause()?
+ // At least we are not using obtainBuffer.
+ /* Pause a track. After pause, the callback will cease being called and
+ * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
+ * and will fill up buffers until the pool is exhausted.
+ * Volume is ramped down over the next mix buffer following the pause request,
+ * and then the track is marked as paused. It can be resumed with ramp up by start().
+ */
+ void pause();
+
+ bool isPlaying() const;
+
+ /* Return current source sample rate in Hz.
+ * If specified as zero in constructor, this will be the sink sample rate.
+ */
+ uint32_t getSampleRate();
+
+ /* Returns the buffer duration in microseconds at current playback rate. */
+ status_t getBufferDurationInUs(int64_t *duration);
+
+ audio_format_t format();
+
+private:
+ jclass mAudioTrackCls;
+ jobject mAudioTrackObj;
+
+ /* Creates a Java VolumeShaper.Configuration object from VolumeShaper::Configuration */
+ jobject createVolumeShaperConfigurationObj(
+ const sp<media::VolumeShaper::Configuration>& config);
+
+ /* Creates a Java VolumeShaper.Operation object from VolumeShaper::Operation */
+ jobject createVolumeShaperOperationObj(
+ const sp<media::VolumeShaper::Operation>& operation);
+
+ status_t javaToNativeStatus(int javaStatus);
+};
+
+}; // namespace android
+
+#endif // ANDROID_JAUDIOTRACK_H
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 84e22b1..86f0d4c 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -80,6 +80,11 @@
typedef audio_mode_t Type;
typedef Vector<Type> Collection;
};
+struct AudioContentTraits
+{
+ typedef audio_content_type_t Type;
+ typedef Vector<Type> Collection;
+};
struct UsageTraits
{
typedef audio_usage_t Type;
@@ -226,6 +231,7 @@
typedef TypeConverter<GainModeTraits> GainModeConverter;
typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+typedef TypeConverter<AudioContentTraits> AudioContentTypeConverter;
typedef TypeConverter<UsageTraits> UsageTypeConverter;
typedef TypeConverter<SourceTraits> SourceTypeConverter;
@@ -240,6 +246,7 @@
template<> const GainModeConverter::Table GainModeConverter::mTable[];
template<> const StreamTypeConverter::Table StreamTypeConverter::mTable[];
template<> const AudioModeConverter::Table AudioModeConverter::mTable[];
+template<> const AudioContentTypeConverter::Table AudioContentTypeConverter::mTable[];
template<> const UsageTypeConverter::Table UsageTypeConverter::mTable[];
template<> const SourceTypeConverter::Table SourceTypeConverter::mTable[];
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index 6b063e8..423dfb8 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -29,8 +29,6 @@
#include <utils/SortedVector.h>
#include <utils/threads.h>
-#include <media/stagefright/foundation/AString.h>
-
#include <binder/IServiceManager.h>
#include <media/IMediaAnalyticsService.h>
#include <media/MediaAnalyticsItem.h>
@@ -205,15 +203,11 @@
return mUid;
}
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgName(AString pkgName) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgName(const std::string &pkgName) {
mPkgName = pkgName;
return *this;
}
-AString MediaAnalyticsItem::getPkgName() const {
- return mPkgName;
-}
-
MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int64_t pkgVersionCode) {
mPkgVersionCode = pkgVersionCode;
return *this;
@@ -727,11 +721,11 @@
}
-AString MediaAnalyticsItem::toString() {
+std::string MediaAnalyticsItem::toString() {
return toString(-1);
}
-AString MediaAnalyticsItem::toString(int version) {
+std::string MediaAnalyticsItem::toString(int version) {
// v0 : released with 'o'
// v1 : bug fix (missing pid/finalized separator),
@@ -744,7 +738,7 @@
version = PROTO_LAST;
}
- AString result;
+ std::string result;
char buffer[512];
if (version == PROTO_V0) {
@@ -841,7 +835,7 @@
bool MediaAnalyticsItem::selfrecord(bool forcenew) {
if (DEBUG_API) {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGD("selfrecord of: %s [forcenew=%d]", p.c_str(), forcenew);
}
@@ -850,13 +844,13 @@
if (svc != NULL) {
MediaAnalyticsItem::SessionID_t newid = svc->submit(this, forcenew);
if (newid == SessionIDInvalid) {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGW("Failed to record: %s [forcenew=%d]", p.c_str(), forcenew);
return false;
}
return true;
} else {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGW("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
return false;
}
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index ec9b660..79ff093 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -18,6 +18,7 @@
#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
#include <cutils/properties.h>
+#include <string>
#include <sys/types.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
@@ -25,13 +26,10 @@
#include <utils/StrongPointer.h>
#include <utils/Timers.h>
-#include <media/stagefright/foundation/AString.h>
-
namespace android {
-
-
class IMediaAnalyticsService;
+class Parcel;
// the class interface
//
@@ -66,7 +64,7 @@
// values can be "component/component"
// basic values: "video", "audio", "drm"
// XXX: need to better define the format
- typedef AString Key;
+ typedef std::string Key;
static const Key kKeyNone; // ""
static const Key kKeyAny; // "*"
@@ -170,8 +168,8 @@
MediaAnalyticsItem &setUid(uid_t);
uid_t getUid() const;
- MediaAnalyticsItem &setPkgName(AString);
- AString getPkgName() const;
+ MediaAnalyticsItem &setPkgName(const std::string &pkgName);
+ std::string getPkgName() const { return mPkgName; }
MediaAnalyticsItem &setPkgVersionCode(int64_t);
int64_t getPkgVersionCode() const;
@@ -180,8 +178,8 @@
int32_t writeToParcel(Parcel *);
int32_t readFromParcel(const Parcel&);
- AString toString();
- AString toString(int version);
+ std::string toString();
+ std::string toString(int version);
// are we collecting analytics data
static bool isEnabled();
@@ -204,7 +202,7 @@
// to help validate that A doesn't mess with B's records
pid_t mPid;
uid_t mUid;
- AString mPkgName;
+ std::string mPkgName;
int64_t mPkgVersionCode;
// let's reuse a binder connection
diff --git a/media/libnblog/NBLog.cpp b/media/libnblog/NBLog.cpp
index c8c7195..d6fa3e3 100644
--- a/media/libnblog/NBLog.cpp
+++ b/media/libnblog/NBLog.cpp
@@ -259,7 +259,8 @@
*(int*) (buffer + sizeof(entry) + sizeof(HistTsEntry)) = author;
// Update lengths
buffer[offsetof(entry, length)] = sizeof(HistTsEntryWithAuthor);
- buffer[sizeof(buffer) + Entry::kPreviousLengthOffset] = sizeof(HistTsEntryWithAuthor);
+ buffer[offsetof(entry, data) + sizeof(HistTsEntryWithAuthor) + offsetof(ending, length)]
+ = sizeof(HistTsEntryWithAuthor);
// Write new buffer into FIFO
dst->write(buffer, sizeof(buffer));
return EntryIterator(mEntry).next();
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 95e3721..e16db00 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -118,9 +118,6 @@
"android.hardware.media.omx@1.0",
"android.hardware.graphics.allocator@2.0",
"android.hardware.graphics.mapper@2.0",
-
- // XXX: hack
- "libstagefright_soft_c2avcdec",
],
static_libs: [
@@ -136,7 +133,6 @@
"libstagefright_id3",
"libFLAC",
- // XXX: hack
"libstagefright_codec2_vndk",
],
diff --git a/media/libstagefright/CCodec.cpp b/media/libstagefright/CCodec.cpp
index 068ca5f..0103abd 100644
--- a/media/libstagefright/CCodec.cpp
+++ b/media/libstagefright/CCodec.cpp
@@ -18,11 +18,10 @@
#define LOG_TAG "CCodec"
#include <utils/Log.h>
-// XXX: HACK
-#include "codecs/avcdec/C2SoftAvcDec.h"
-
#include <thread>
+#include <C2PlatformSupport.h>
+
#include <gui/Surface.h>
#include <media/stagefright/CCodec.h>
@@ -181,8 +180,18 @@
// TODO: use C2ComponentStore to create component
mListener.reset(new CCodecListener(mChannel));
- std::shared_ptr<C2Component> comp(new C2SoftAvcDec(componentName.c_str(), 0));
- comp->setListener_vb(mListener, C2_DONT_BLOCK);
+ std::shared_ptr<C2Component> comp;
+ c2_status_t err = GetCodec2PlatformComponentStore()->createComponent(
+ componentName.c_str(), &comp);
+ if (err != C2_OK) {
+ Mutexed<State>::Locked state(mState);
+ state->mState = RELEASED;
+ state.unlock();
+ mCallback->onError(err, ACTION_CODE_FATAL);
+ state.lock();
+ return;
+ }
+ comp->setListener_vb(mListener, C2_MAY_BLOCK);
{
Mutexed<State>::Locked state(mState);
if (state->mState != ALLOCATING) {
@@ -233,6 +242,26 @@
setSurface(surface);
}
+ // XXX: hack
+ bool audio = mime.startsWithIgnoreCase("audio/");
+ if (encoder) {
+ outputFormat->setString("mime", mime);
+ inputFormat->setString("mime", AStringPrintf("%s/raw", audio ? "audio" : "video"));
+ if (audio) {
+ inputFormat->setInt32("channel-count", 1);
+ inputFormat->setInt32("sample-rate", 44100);
+ outputFormat->setInt32("channel-count", 1);
+ outputFormat->setInt32("sample-rate", 44100);
+ }
+ } else {
+ inputFormat->setString("mime", mime);
+ outputFormat->setString("mime", AStringPrintf("%s/raw", audio ? "audio" : "video"));
+ if (audio) {
+ outputFormat->setInt32("channel-count", 2);
+ outputFormat->setInt32("sample-rate", 44100);
+ }
+ }
+
// TODO
return OK;
diff --git a/media/libstagefright/CCodecBufferChannel.cpp b/media/libstagefright/CCodecBufferChannel.cpp
index 61f3f3c..eea9c78 100644
--- a/media/libstagefright/CCodecBufferChannel.cpp
+++ b/media/libstagefright/CCodecBufferChannel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2016, The Android Open Source Project
+ * Copyright 2017, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
#include <numeric>
#include <thread>
+#include <C2AllocatorGralloc.h>
#include <C2PlatformSupport.h>
#include <android/hardware/cas/native/1.0/IDescrambler.h>
@@ -47,18 +48,11 @@
using namespace hardware::cas::V1_0;
using namespace hardware::cas::native::V1_0;
+namespace {
+
// TODO: get this info from component
const static size_t kMinBufferArraySize = 16;
-void CCodecBufferChannel::OutputBuffers::flush(
- const std::list<std::unique_ptr<C2Work>> &flushedWork) {
- (void) flushedWork;
- // This is no-op by default unless we're in array mode where we need to keep
- // track of the flushed work.
-}
-
-namespace {
-
template <class T>
ssize_t findBufferSlot(
std::vector<T> *buffers,
@@ -76,16 +70,103 @@
return std::distance(buffers->begin(), it);
}
+sp<Codec2Buffer> allocateLinearBuffer(
+ const std::shared_ptr<C2BlockPool> &pool,
+ const sp<AMessage> &format,
+ size_t size,
+ const C2MemoryUsage &usage) {
+ std::shared_ptr<C2LinearBlock> block;
+
+ status_t err = pool->fetchLinearBlock(
+ size,
+ usage,
+ &block);
+ if (err != OK) {
+ return nullptr;
+ }
+
+ return Codec2Buffer::allocate(format, block);
+}
+
class LinearBuffer : public C2Buffer {
public:
explicit LinearBuffer(C2ConstLinearBlock block) : C2Buffer({ block }) {}
};
+class InputBuffersArray : public CCodecBufferChannel::InputBuffers {
+public:
+ InputBuffersArray() = default;
+
+ void add(
+ size_t index,
+ const sp<MediaCodecBuffer> &clientBuffer,
+ const std::shared_ptr<C2Buffer> &compBuffer,
+ bool available) {
+ if (mBufferArray.size() <= index) {
+ mBufferArray.resize(index + 1);
+ }
+ mBufferArray[index].clientBuffer = clientBuffer;
+ mBufferArray[index].compBuffer = compBuffer;
+ mBufferArray[index].available = available;
+ }
+
+ bool isArrayMode() final { return true; }
+
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode() final {
+ return nullptr;
+ }
+
+ void getArray(Vector<sp<MediaCodecBuffer>> *array) final {
+ array->clear();
+ for (const auto &entry : mBufferArray) {
+ array->push(entry.clientBuffer);
+ }
+ }
+
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ if (mBufferArray[i].available) {
+ mBufferArray[i].available = false;
+ *index = i;
+ *buffer = mBufferArray[i].clientBuffer;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ if (!mBufferArray[i].available && mBufferArray[i].clientBuffer == buffer) {
+ mBufferArray[i].available = true;
+ return std::move(mBufferArray[i].compBuffer);
+ }
+ }
+ return nullptr;
+ }
+
+ void flush() override {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ mBufferArray[i].available = true;
+ mBufferArray[i].compBuffer.reset();
+ }
+ }
+
+private:
+ struct Entry {
+ sp<MediaCodecBuffer> clientBuffer;
+ std::shared_ptr<C2Buffer> compBuffer;
+ bool available;
+ };
+
+ std::vector<Entry> mBufferArray;
+};
+
class LinearInputBuffers : public CCodecBufferChannel::InputBuffers {
public:
using CCodecBufferChannel::InputBuffers::InputBuffers;
- virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
*buffer = nullptr;
ssize_t ret = findBufferSlot<wp<Codec2Buffer>>(
&mBuffers, kMinBufferArraySize,
@@ -93,25 +174,20 @@
if (ret < 0) {
return false;
}
- std::shared_ptr<C2LinearBlock> block;
-
- status_t err = mAlloc->fetchLinearBlock(
- // TODO: proper max input size
- 65536,
- { 0, C2MemoryUsage::CPU_WRITE },
- &block);
- if (err != OK) {
+ // TODO: proper max input size and usage
+ // TODO: read usage from intf
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ sp<Codec2Buffer> newBuffer = allocateLinearBuffer(mPool, mFormat, 65536, usage);
+ if (newBuffer == nullptr) {
return false;
}
-
- sp<Codec2Buffer> newBuffer = Codec2Buffer::allocate(mFormat, block);
mBuffers[ret] = newBuffer;
*index = ret;
*buffer = newBuffer;
return true;
}
- virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
auto it = std::find(mBuffers.begin(), mBuffers.end(), buffer);
if (it == mBuffers.end()) {
return nullptr;
@@ -122,80 +198,358 @@
return std::make_shared<LinearBuffer>(codecBuffer->share());
}
- virtual void flush() override {
+ void flush() override {
+ }
+
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode() final {
+ std::unique_ptr<InputBuffersArray> array(new InputBuffersArray);
+ // TODO
+ const size_t size = std::max(kMinBufferArraySize, mBuffers.size());
+ for (size_t i = 0; i < size; ++i) {
+ sp<Codec2Buffer> clientBuffer = mBuffers[i].promote();
+ bool available = false;
+ if (clientBuffer == nullptr) {
+ // TODO: proper max input size
+ // TODO: read usage from intf
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ clientBuffer = allocateLinearBuffer(mPool, mFormat, 65536, usage);
+ available = true;
+ }
+ array->add(
+ i,
+ clientBuffer,
+ std::make_shared<LinearBuffer>(clientBuffer->share()),
+ available);
+ }
+ return std::move(array);
}
private:
// Buffers we passed to the client. The index of a buffer matches what
// was passed in BufferCallback::onInputBufferAvailable().
std::vector<wp<Codec2Buffer>> mBuffers;
-
- // Buffer array we passed to the client. This only gets initialized at
- // getInput/OutputBufferArray() and when this is set we can't add more
- // buffers.
- std::vector<sp<Codec2Buffer>> mBufferArray;
};
-class GraphicOutputBuffers : public CCodecBufferChannel::OutputBuffers {
+// TODO: stub
+class GraphicInputBuffers : public CCodecBufferChannel::InputBuffers {
+public:
+ using CCodecBufferChannel::InputBuffers::InputBuffers;
+
+ bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
+ (void)index;
+ (void)buffer;
+ return false;
+ }
+
+ std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ (void)buffer;
+ return nullptr;
+ }
+
+ void flush() override {
+ }
+
+ std::unique_ptr<CCodecBufferChannel::InputBuffers> toArrayMode() final {
+ return nullptr;
+ }
+};
+
+class OutputBuffersArray : public CCodecBufferChannel::OutputBuffers {
public:
using CCodecBufferChannel::OutputBuffers::OutputBuffers;
- virtual bool registerBuffer(
+ void add(
+ size_t index,
+ const sp<MediaCodecBuffer> &clientBuffer,
+ const std::shared_ptr<C2Buffer> &compBuffer,
+ bool available) {
+ if (mBufferArray.size() <= index) {
+ mBufferArray.resize(index + 1);
+ }
+ mBufferArray[index].clientBuffer = clientBuffer;
+ mBufferArray[index].compBuffer = compBuffer;
+ mBufferArray[index].available = available;
+ }
+
+ bool isArrayMode() final { return true; }
+
+ std::unique_ptr<CCodecBufferChannel::OutputBuffers> toArrayMode() final {
+ return nullptr;
+ }
+
+ bool registerBuffer(
+ const std::shared_ptr<C2Buffer> &buffer,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) final {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ if (mBufferArray[i].available && copy(buffer, mBufferArray[i].clientBuffer)) {
+ *index = i;
+ *codecBuffer = mBufferArray[i].clientBuffer;
+ mBufferArray[i].compBuffer = buffer;
+ mBufferArray[i].available = false;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool registerCsd(
+ const C2StreamCsdInfo::output *csd,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) final {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ if (mBufferArray[i].available
+ && mBufferArray[i].clientBuffer->capacity() >= csd->flexCount()) {
+ memcpy(mBufferArray[i].clientBuffer->base(), csd->m.value, csd->flexCount());
+ *index = i;
+ *codecBuffer = mBufferArray[i].clientBuffer;
+ mBufferArray[i].available = false;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) final {
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ if (!mBufferArray[i].available && mBufferArray[i].clientBuffer == buffer) {
+ mBufferArray[i].available = true;
+ return std::move(mBufferArray[i].compBuffer);
+ }
+ }
+ return nullptr;
+ }
+
+ void flush(
+ const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
+ (void) flushedWork;
+ for (size_t i = 0; i < mBufferArray.size(); ++i) {
+ mBufferArray[i].available = true;
+ mBufferArray[i].compBuffer.reset();
+ }
+ }
+
+ virtual bool copy(
+ const std::shared_ptr<C2Buffer> &buffer,
+ const sp<MediaCodecBuffer> &clientBuffer) = 0;
+
+ void getArray(Vector<sp<MediaCodecBuffer>> *array) final {
+ array->clear();
+ for (const auto &entry : mBufferArray) {
+ array->push(entry.clientBuffer);
+ }
+ }
+
+private:
+ struct Entry {
+ sp<MediaCodecBuffer> clientBuffer;
+ std::shared_ptr<C2Buffer> compBuffer;
+ bool available;
+ };
+
+ std::vector<Entry> mBufferArray;
+};
+
+class LinearOutputBuffersArray : public OutputBuffersArray {
+public:
+ using OutputBuffersArray::OutputBuffersArray;
+
+ bool copy(
+ const std::shared_ptr<C2Buffer> &buffer,
+ const sp<MediaCodecBuffer> &clientBuffer) final {
+ if (!buffer) {
+ clientBuffer->setRange(0u, 0u);
+ return true;
+ }
+ C2ReadView view = buffer->data().linearBlocks().front().map().get();
+ if (clientBuffer->capacity() < view.capacity()) {
+ return false;
+ }
+ clientBuffer->setRange(0u, view.capacity());
+ memcpy(clientBuffer->data(), view.data(), view.capacity());
+ return true;
+ }
+};
+
+class GraphicOutputBuffersArray : public OutputBuffersArray {
+public:
+ using OutputBuffersArray::OutputBuffersArray;
+
+ bool copy(
+ const std::shared_ptr<C2Buffer> &buffer,
+ const sp<MediaCodecBuffer> &clientBuffer) final {
+ if (!buffer) {
+ clientBuffer->setRange(0u, 0u);
+ return true;
+ }
+ clientBuffer->setRange(0u, 1u);
+ return true;
+ }
+};
+
+// Flexible in a sense that it does not have fixed array size.
+class FlexOutputBuffers : public CCodecBufferChannel::OutputBuffers {
+public:
+ using CCodecBufferChannel::OutputBuffers::OutputBuffers;
+
+ bool registerBuffer(
const std::shared_ptr<C2Buffer> &buffer,
size_t *index,
sp<MediaCodecBuffer> *codecBuffer) override {
*codecBuffer = nullptr;
ssize_t ret = findBufferSlot<BufferInfo>(
&mBuffers,
- kMinBufferArraySize,
- [] (const auto &elem) { return elem.mClientBuffer.promote() == nullptr; });
+ std::numeric_limits<size_t>::max(),
+ [] (const auto &elem) { return elem.clientBuffer.promote() == nullptr; });
if (ret < 0) {
return false;
}
sp<MediaCodecBuffer> newBuffer = new MediaCodecBuffer(
mFormat,
- buffer == nullptr ? kEmptyBuffer : kDummyBuffer);
+ convert(buffer));
mBuffers[ret] = { newBuffer, buffer };
*index = ret;
*codecBuffer = newBuffer;
return true;
}
- virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) override {
+ bool registerCsd(
+ const C2StreamCsdInfo::output *csd,
+ size_t *index,
+ sp<MediaCodecBuffer> *codecBuffer) final {
+ *codecBuffer = nullptr;
+ ssize_t ret = findBufferSlot<BufferInfo>(
+ &mBuffers,
+ std::numeric_limits<size_t>::max(),
+ [] (const auto &elem) { return elem.clientBuffer.promote() == nullptr; });
+ if (ret < 0) {
+ return false;
+ }
+ sp<MediaCodecBuffer> newBuffer = new MediaCodecBuffer(
+ mFormat,
+ ABuffer::CreateAsCopy(csd->m.value, csd->flexCount()));
+ mBuffers[ret] = { newBuffer, nullptr };
+ *index = ret;
+ *codecBuffer = newBuffer;
+ return true;
+ }
+
+ std::shared_ptr<C2Buffer> releaseBuffer(
+ const sp<MediaCodecBuffer> &buffer) override {
auto it = std::find_if(
mBuffers.begin(), mBuffers.end(),
[buffer] (const auto &elem) {
- return elem.mClientBuffer.promote() == buffer;
+ return elem.clientBuffer.promote() == buffer;
});
if (it == mBuffers.end()) {
return nullptr;
}
- return it->mBufferRef;
+ return std::move(it->bufferRef);
}
-private:
- static const sp<ABuffer> kEmptyBuffer;
- static const sp<ABuffer> kDummyBuffer;
+ void flush(
+ const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
+ (void) flushedWork;
+ // This is no-op by default unless we're in array mode where we need to keep
+ // track of the flushed work.
+ }
+ virtual sp<ABuffer> convert(const std::shared_ptr<C2Buffer> &buffer) = 0;
+
+protected:
struct BufferInfo {
// wp<> of MediaCodecBuffer for MediaCodec.
- wp<MediaCodecBuffer> mClientBuffer;
- // Buffer reference to hold until mClientBuffer is valid.
- std::shared_ptr<C2Buffer> mBufferRef;
+ wp<MediaCodecBuffer> clientBuffer;
+ // Buffer reference to hold until clientBuffer is valid.
+ std::shared_ptr<C2Buffer> bufferRef;
};
// Buffers we passed to the client. The index of a buffer matches what
// was passed in BufferCallback::onInputBufferAvailable().
std::vector<BufferInfo> mBuffers;
};
-const sp<ABuffer> GraphicOutputBuffers::kEmptyBuffer = new ABuffer(nullptr, 0);
-const sp<ABuffer> GraphicOutputBuffers::kDummyBuffer = new ABuffer(nullptr, 1);
+class LinearOutputBuffers : public FlexOutputBuffers {
+public:
+ using FlexOutputBuffers::FlexOutputBuffers;
+
+ virtual sp<ABuffer> convert(const std::shared_ptr<C2Buffer> &buffer) override {
+ if (buffer == nullptr) {
+ return new ABuffer(nullptr, 0);
+ }
+ if (buffer->data().type() != C2BufferData::LINEAR) {
+ // We expect linear output buffers from the component.
+ return nullptr;
+ }
+ if (buffer->data().linearBlocks().size() != 1u) {
+ // We expect one and only one linear block from the component.
+ return nullptr;
+ }
+ C2ReadView view = buffer->data().linearBlocks().front().map().get();
+ if (view.error() != C2_OK) {
+ // Mapping the linear block failed
+ return nullptr;
+ }
+ return new ABuffer(
+ // XXX: the data is supposed to be read-only. We don't have
+ // const equivalent of ABuffer however...
+ const_cast<uint8_t *>(view.data()),
+ view.capacity());
+ }
+
+ std::unique_ptr<CCodecBufferChannel::OutputBuffers> toArrayMode() override {
+ std::unique_ptr<OutputBuffersArray> array(new LinearOutputBuffersArray);
+
+ const size_t size = std::max(kMinBufferArraySize, mBuffers.size());
+ for (size_t i = 0; i < size; ++i) {
+ sp<MediaCodecBuffer> clientBuffer = mBuffers[i].clientBuffer.promote();
+ std::shared_ptr<C2Buffer> compBuffer = mBuffers[i].bufferRef;
+ bool available = false;
+ if (clientBuffer == nullptr) {
+ // TODO: proper max input size
+ clientBuffer = new MediaCodecBuffer(mFormat, new ABuffer(65536));
+ available = true;
+ compBuffer.reset();
+ }
+ array->add(i, clientBuffer, compBuffer, available);
+ }
+ return std::move(array);
+ }
+};
+
+class GraphicOutputBuffers : public FlexOutputBuffers {
+public:
+ using FlexOutputBuffers::FlexOutputBuffers;
+
+ sp<ABuffer> convert(const std::shared_ptr<C2Buffer> &buffer) override {
+ return buffer ? new ABuffer(nullptr, 1) : new ABuffer(nullptr, 0);
+ }
+
+ std::unique_ptr<CCodecBufferChannel::OutputBuffers> toArrayMode() override {
+ std::unique_ptr<OutputBuffersArray> array(new GraphicOutputBuffersArray);
+
+ const size_t size = std::max(kMinBufferArraySize, mBuffers.size());
+ for (size_t i = 0; i < size; ++i) {
+ sp<MediaCodecBuffer> clientBuffer = mBuffers[i].clientBuffer.promote();
+ std::shared_ptr<C2Buffer> compBuffer = mBuffers[i].bufferRef;
+ bool available = false;
+ if (clientBuffer == nullptr) {
+ clientBuffer = new MediaCodecBuffer(mFormat, new ABuffer(nullptr, 1));
+ available = true;
+ compBuffer.reset();
+ }
+ array->add(i, clientBuffer, compBuffer, available);
+ }
+ return std::move(array);
+ }
+};
} // namespace
CCodecBufferChannel::QueueGuard::QueueGuard(
CCodecBufferChannel::QueueSync &sync) : mSync(sync) {
std::unique_lock<std::mutex> l(mSync.mMutex);
+ // At this point it's guaranteed that mSync is not under state transition,
+ // as we are holding its mutex.
if (mSync.mCount == -1) {
mRunning = false;
} else {
@@ -206,6 +560,8 @@
CCodecBufferChannel::QueueGuard::~QueueGuard() {
if (mRunning) {
+ // We are not holding mutex at this point so that QueueSync::stop() can
+ // keep holding the lock until mCount reaches zero.
--mSync.mCount;
}
}
@@ -214,7 +570,7 @@
std::unique_lock<std::mutex> l(mMutex);
// If stopped, it goes to running state; otherwise no-op.
int32_t expected = -1;
- mCount.compare_exchange_strong(expected, 0);
+ (void)mCount.compare_exchange_strong(expected, 0);
}
void CCodecBufferChannel::QueueSync::stop() {
@@ -223,6 +579,11 @@
// no-op
return;
}
+ // Holding mutex here blocks creation of additional QueueGuard objects, so
+ // mCount can only decrement. In other words, threads that acquired the lock
+ // are allowed to finish execution but additional threads trying to acquire
+ // the lock at this point will block, and then get QueueGuard at STOPPED
+ // state.
int32_t expected = 0;
while (!mCount.compare_exchange_weak(expected, -1)) {
std::this_thread::yield();
@@ -232,8 +593,6 @@
CCodecBufferChannel::CCodecBufferChannel(
const std::function<void(status_t, enum ActionCode)> &onError)
: mOnError(onError),
- mInputBuffers(new LinearInputBuffers),
- mOutputBuffers(new GraphicOutputBuffers),
mFrameIndex(0u),
mFirstValidFrameIndex(0u) {
}
@@ -246,12 +605,50 @@
void CCodecBufferChannel::setComponent(const std::shared_ptr<C2Component> &component) {
mComponent = component;
- // TODO: get pool ID from params
- std::shared_ptr<C2BlockPool> pool;
- c2_status_t err = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, component, &pool);
- if (err == C2_OK) {
+ C2StreamFormatConfig::input inputFormat(0u);
+ C2StreamFormatConfig::output outputFormat(0u);
+ c2_status_t err = mComponent->intf()->query_vb(
+ { &inputFormat, &outputFormat },
+ {},
+ C2_DONT_BLOCK,
+ nullptr);
+ if (err != C2_OK) {
+ // TODO: error
+ return;
+ }
+
+ {
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
- (*buffers)->setAlloc(pool);
+
+ bool graphic = (inputFormat.value == C2FormatVideo);
+ if (graphic) {
+ buffers->reset(new GraphicInputBuffers);
+ } else {
+ buffers->reset(new LinearInputBuffers);
+ }
+
+ ALOGV("graphic = %s", graphic ? "true" : "false");
+ std::shared_ptr<C2BlockPool> pool;
+ err = GetCodec2BlockPool(
+ graphic ? C2BlockPool::BASIC_GRAPHIC : C2BlockPool::BASIC_LINEAR,
+ component,
+ &pool);
+ if (err == C2_OK) {
+ (*buffers)->setPool(pool);
+ } else {
+ // TODO: error
+ }
+ }
+
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+
+ bool graphic = (outputFormat.value == C2FormatVideo);
+ if (graphic) {
+ buffers->reset(new GraphicOutputBuffers);
+ } else {
+ buffers->reset(new LinearOutputBuffers);
+ }
}
}
@@ -314,17 +711,6 @@
status_t CCodecBufferChannel::renderOutputBuffer(
const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
ALOGV("renderOutputBuffer");
- sp<MediaCodecBuffer> inBuffer;
- size_t index;
- {
- Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
- if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
- inBuffer = nullptr;
- }
- }
- if (inBuffer != nullptr) {
- mCallback->onInputBufferAvailable(index, inBuffer);
- }
std::shared_ptr<C2Buffer> c2Buffer;
{
@@ -344,8 +730,9 @@
return UNKNOWN_ERROR;
}
+ native_handle_t *grallocHandle = UnwrapNativeCodec2GrallocHandle(blocks.front().handle());
sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(
- blocks.front().handle(),
+ grallocHandle,
GraphicBuffer::CLONE_HANDLE,
blocks.front().width(),
blocks.front().height(),
@@ -355,6 +742,7 @@
(uint64_t)GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
// TODO
blocks.front().width()));
+ native_handle_delete(grallocHandle);
status_t result = (*surface)->attachBuffer(graphicBuffer.get());
if (result != OK) {
@@ -385,85 +773,38 @@
}
status_t CCodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
- ALOGV("discardBuffer");
+ ALOGV("discardBuffer: %p", buffer.get());
{
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
- (void) (*buffers)->releaseBuffer(buffer);
+ (void)(*buffers)->releaseBuffer(buffer);
}
{
Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
- (void) (*buffers)->releaseBuffer(buffer);
+ (void)(*buffers)->releaseBuffer(buffer);
}
return OK;
}
-#if 0
-void fillBufferArray_l(Mutexed<Buffers>::Locked &buffers) {
- for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
- sp<Codec2Buffer> buffer(buffers->mClientBuffer.get(i).promote());
- if (buffer == nullptr) {
- buffer = allocateBuffer_l(buffers->mAlloc);
- }
- buffers->mBufferArray.push_back(buffer);
- }
- while (buffers->mBufferArray.size() < kMinBufferArraySize) {
- sp<Codec2Buffer> buffer = allocateBuffer_l(buffers->mAlloc);
- // allocate buffer
- buffers->mBufferArray.push_back(buffer);
- }
-}
-#endif
-
void CCodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
- (void) array;
- // TODO
-#if 0
array->clear();
- Mutexed<Buffers>::Locked buffers(mInputBuffers);
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
- if (!buffers->isArrayMode()) {
- // mBufferArray is empty.
- fillBufferArray_l(buffers);
+ if (!(*buffers)->isArrayMode()) {
+ *buffers = (*buffers)->toArrayMode();
}
- for (const auto &buffer : buffers->mBufferArray) {
- array->push_back(buffer);
- }
-#endif
+ (*buffers)->getArray(array);
}
void CCodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
- (void) array;
- // TODO
-#if 0
array->clear();
- Mutexed<Buffers>::Locked buffers(mOutputBuffers);
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
- if (!buffers->isArrayMode()) {
- if (linear) {
- // mBufferArray is empty.
- fillBufferArray_l(buffers);
-
- // We need to replace the allocator so that the component only returns
- // buffer from the array.
- ArrayModeAllocator::Builder builder(buffers->mBufferArray);
- for (size_t i = 0; i < buffers->mClientBuffer.size(); ++i) {
- if (buffers->mClientBuffer.get(i).promote() != nullptr) {
- builder.markUsing(i);
- }
- }
- buffers->mAlloc.reset(builder.build());
- } else {
- for (int i = 0; i < X; ++i) {
- buffers->mBufferArray.push_back(dummy buffer);
- }
- }
+ if (!(*buffers)->isArrayMode()) {
+ *buffers = (*buffers)->toArrayMode();
}
- for (const auto &buffer : buffers->mBufferArray) {
- array->push_back(buffer);
- }
-#endif
+ (*buffers)->getArray(array);
}
void CCodecBufferChannel::start(const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
@@ -513,6 +854,19 @@
void CCodecBufferChannel::onWorkDone(std::vector<std::unique_ptr<C2Work>> workItems) {
for (const auto &work : workItems) {
+ sp<MediaCodecBuffer> inBuffer;
+ size_t index;
+ {
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
+ ALOGW("no new buffer available");
+ inBuffer = nullptr;
+ }
+ }
+ if (inBuffer != nullptr) {
+ mCallback->onInputBufferAvailable(index, inBuffer);
+ }
+
if (work->result != OK) {
ALOGE("work failed to complete: %d", work->result);
mOnError(work->result, ACTION_CODE_FATAL);
@@ -539,7 +893,16 @@
}
const std::shared_ptr<C2Buffer> &buffer = worklet->output.buffers[0];
- // TODO: transfer infos() into buffer metadata
+ const C2StreamCsdInfo::output *csdInfo = nullptr;
+ if (buffer) {
+ // TODO: transfer infos() into buffer metadata
+ }
+ for (const auto &info : worklet->output.infos) {
+ if (info->coreIndex() == C2StreamCsdInfo::output::CORE_INDEX) {
+ ALOGV("csd found");
+ csdInfo = static_cast<const C2StreamCsdInfo::output *>(info.get());
+ }
+ }
int32_t flags = 0;
if (worklet->output.flags & C2BufferPack::FLAG_END_OF_STREAM) {
@@ -547,15 +910,43 @@
ALOGV("output EOS");
}
- size_t index;
sp<MediaCodecBuffer> outBuffer;
- Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
- if (!(*buffers)->registerBuffer(buffer, &index, &outBuffer)) {
- ALOGE("unable to register output buffer");
- mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ if (csdInfo != nullptr) {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ if ((*buffers)->registerCsd(csdInfo, &index, &outBuffer)) {
+ outBuffer->meta()->setInt64("timeUs", worklet->output.ordinal.timestamp);
+ outBuffer->meta()->setInt32("flags", flags | MediaCodec::BUFFER_FLAG_CODECCONFIG);
+ ALOGV("csd index = %zu", index);
+
+ buffers.unlock();
+ mCallback->onOutputBufferAvailable(index, outBuffer);
+ buffers.lock();
+ } else {
+ ALOGE("unable to register output buffer");
+ buffers.unlock();
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ buffers.lock();
+ continue;
+ }
+ }
+
+ if (!buffer && !flags) {
+ ALOGV("Not reporting output buffer");
continue;
}
+ {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ if (!(*buffers)->registerBuffer(buffer, &index, &outBuffer)) {
+ ALOGE("unable to register output buffer");
+
+ buffers.unlock();
+ mOnError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ buffers.lock();
+ continue;
+ }
+ }
+
outBuffer->meta()->setInt64("timeUs", worklet->output.ordinal.timestamp);
outBuffer->meta()->setInt32("flags", flags);
ALOGV("index = %zu", index);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 6ad11f4..7cfa4ce 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -552,7 +552,7 @@
//static
sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, bool nameIsType) {
static bool ccodecEnabled = property_get_bool("debug.stagefright.ccodec", false);
- if (ccodecEnabled && !nameIsType && name.startsWithIgnoreCase("codec2.")) {
+ if (ccodecEnabled && !nameIsType && name.startsWithIgnoreCase("c2.")) {
return new CCodec;
} else if (nameIsType || name.startsWithIgnoreCase("omx.")) {
// at this time only ACodec specifies a mime type.
diff --git a/media/libstagefright/codec2/Android.bp b/media/libstagefright/codec2/Android.bp
index 74609e8..ee5c3eb 100644
--- a/media/libstagefright/codec2/Android.bp
+++ b/media/libstagefright/codec2/Android.bp
@@ -42,27 +42,22 @@
"optional",
],
- srcs: ["SimpleC2Component.cpp"],
+ srcs: [
+ "SimpleC2Component.cpp",
+ "SimpleC2Interface.cpp",
+ ],
include_dirs: [
- "frameworks/av/media/libstagefright/codec2/include",
],
shared_libs: [
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.mapper@2.0",
- "libhidlbase",
- "libion",
"liblog",
"libstagefright_codec2",
+ "libstagefright_codec2_vndk",
"libstagefright_foundation",
"libutils",
],
- static_libs: [
- "libstagefright_codec2_vndk",
- ],
-
sanitize: {
misc_undefined: [
"unsigned-integer-overflow",
diff --git a/media/libstagefright/codec2/SimpleC2Component.cpp b/media/libstagefright/codec2/SimpleC2Component.cpp
index 0e4a354..4d75a31 100644
--- a/media/libstagefright/codec2/SimpleC2Component.cpp
+++ b/media/libstagefright/codec2/SimpleC2Component.cpp
@@ -18,12 +18,39 @@
#define LOG_TAG "SimpleC2Component"
#include <media/stagefright/foundation/ADebug.h>
-#include <C2PlatformSupport.h>
+#include <inttypes.h>
+#include <C2PlatformSupport.h>
#include <SimpleC2Component.h>
namespace android {
+std::unique_ptr<C2Work> SimpleC2Component::WorkQueue::pop_front() {
+ std::unique_ptr<C2Work> work = std::move(mQueue.front().work);
+ mQueue.pop_front();
+ return work;
+}
+
+void SimpleC2Component::WorkQueue::push_back(std::unique_ptr<C2Work> work) {
+ mQueue.push_back({ std::move(work), NO_DRAIN });
+}
+
+bool SimpleC2Component::WorkQueue::empty() const {
+ return mQueue.empty();
+}
+
+void SimpleC2Component::WorkQueue::clear() {
+ mQueue.clear();
+}
+
+uint32_t SimpleC2Component::WorkQueue::drainMode() const {
+ return mQueue.front().drainMode;
+}
+
+void SimpleC2Component::WorkQueue::markDrain(uint32_t drainMode) {
+ mQueue.push_back({ nullptr, drainMode });
+}
+
SimpleC2Component::SimpleC2Component(
const std::shared_ptr<C2ComponentInterface> &intf)
: mIntf(intf) {
@@ -55,7 +82,7 @@
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
while (!items->empty()) {
- queue->mQueue.push_back(std::move(items->front()));
+ queue->push_back(std::move(items->front()));
items->pop_front();
}
queue->mCondition.broadcast();
@@ -79,10 +106,12 @@
}
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
- ++queue->mGeneration;
- while (!queue->mQueue.empty()) {
- flushedWork->push_back(std::move(queue->mQueue.front()));
- queue->mQueue.pop_front();
+ queue->incGeneration();
+ while (!queue->empty()) {
+ std::unique_ptr<C2Work> work = queue->pop_front();
+ if (work) {
+ flushedWork->push_back(std::move(work));
+ }
}
}
{
@@ -96,8 +125,10 @@
return onFlush_sm();
}
-c2_status_t SimpleC2Component::drain_nb(drain_mode_t drainThrough) {
- (void) drainThrough;
+c2_status_t SimpleC2Component::drain_nb(drain_mode_t drainMode) {
+ if (drainMode == DRAIN_CHAIN) {
+ return C2_OMITTED;
+ }
{
Mutexed<ExecState>::Locked state(mExecState);
if (state->mState != RUNNING) {
@@ -106,14 +137,11 @@
}
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
- if (!queue->mQueue.empty()) {
- const std::unique_ptr<C2Work> &work = queue->mQueue.back();
- work->input.flags = (C2BufferPack::flags_t)(work->input.flags | C2BufferPack::FLAG_END_OF_STREAM);
- return C2_OK;
- }
+ queue->markDrain(drainMode);
+ queue->mCondition.broadcast();
}
- return onDrain_nb();
+ return C2_OK;
}
c2_status_t SimpleC2Component::start() {
@@ -161,7 +189,7 @@
}
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
- queue->mQueue.clear();
+ queue->clear();
}
{
Mutexed<PendingWork>::Locked pending(mPendingWork);
@@ -181,7 +209,7 @@
}
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
- queue->mQueue.clear();
+ queue->clear();
}
{
Mutexed<PendingWork>::Locked pending(mPendingWork);
@@ -192,11 +220,13 @@
}
c2_status_t SimpleC2Component::release() {
+ std::thread releasing;
{
Mutexed<ExecState>::Locked state(mExecState);
- mExitRequested = true;
- state->mThread.join();
+ releasing = std::move(state->mThread);
}
+ mExitRequested = true;
+ releasing.join();
onRelease();
return C2_OK;
}
@@ -221,6 +251,7 @@
{
Mutexed<PendingWork>::Locked pending(mPendingWork);
if (pending->count(frameIndex) == 0) {
+ ALOGW("unknown frame index: %" PRIu64, frameIndex);
return;
}
work = std::move(pending->at(frameIndex));
@@ -230,34 +261,56 @@
fillWork(work);
Mutexed<ExecState>::Locked state(mExecState);
state->mListener->onWorkDone_nb(shared_from_this(), vec(work));
+ ALOGV("returning pending work");
}
}
void SimpleC2Component::processQueue() {
std::unique_ptr<C2Work> work;
uint64_t generation;
+ int32_t drainMode;
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
nsecs_t deadline = systemTime() + ms2ns(250);
- while (queue->mQueue.empty()) {
- status_t err = queue.waitForConditionRelative(
- queue->mCondition, std::max(deadline - systemTime(), (nsecs_t)0));
+ while (queue->empty()) {
+ nsecs_t now = systemTime();
+ if (now >= deadline) {
+ return;
+ }
+ status_t err = queue.waitForConditionRelative(queue->mCondition, deadline - now);
if (err == TIMED_OUT) {
return;
}
}
- generation = queue->mGeneration;
- work = std::move(queue->mQueue.front());
- queue->mQueue.pop_front();
- }
- if (!work) {
- return;
+ generation = queue->generation();
+ drainMode = queue->drainMode();
+ work = queue->pop_front();
}
- // TODO: grab pool ID from intf
if (!mOutputBlockPool) {
- c2_status_t err = GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, shared_from_this(), &mOutputBlockPool);
+ c2_status_t err = [this] {
+ // TODO: don't use query_vb
+ C2StreamFormatConfig::output outputFormat(0u);
+ c2_status_t err = intf()->query_vb(
+ { &outputFormat },
+ {},
+ C2_DONT_BLOCK,
+ nullptr);
+ if (err != C2_OK) {
+ return err;
+ }
+ err = GetCodec2BlockPool(
+ (outputFormat.value == C2FormatVideo)
+ ? C2BlockPool::BASIC_GRAPHIC
+ : C2BlockPool::BASIC_LINEAR,
+ shared_from_this(),
+ &mOutputBlockPool);
+ if (err != C2_OK) {
+ return err;
+ }
+ return C2_OK;
+ }();
if (err != C2_OK) {
Mutexed<ExecState>::Locked state(mExecState);
state->mListener->onError_nb(shared_from_this(), err);
@@ -265,10 +318,20 @@
}
}
- bool done = process(work, mOutputBlockPool);
+ if (!work) {
+ c2_status_t err = drain(drainMode, mOutputBlockPool);
+ if (err != C2_OK) {
+ Mutexed<ExecState>::Locked state(mExecState);
+ state->mListener->onError_nb(shared_from_this(), err);
+ }
+ return;
+ }
+
+ process(work, mOutputBlockPool);
{
Mutexed<WorkQueue>::Locked queue(mWorkQueue);
- if (queue->mGeneration != generation) {
+ if (queue->generation() != generation) {
+ ALOGW("work from old generation: was %" PRIu64 " now %" PRIu64, generation, queue->generation());
work->result = C2_NOT_FOUND;
queue.unlock();
{
@@ -279,10 +342,12 @@
return;
}
}
- if (done) {
+ if (work->worklets_processed != 0u) {
Mutexed<ExecState>::Locked state(mExecState);
+ ALOGV("returning this work");
state->mListener->onWorkDone_nb(shared_from_this(), vec(work));
} else {
+ ALOGV("queue pending work");
std::unique_ptr<C2Work> unexpected;
{
Mutexed<PendingWork>::Locked pending(mPendingWork);
@@ -301,4 +366,45 @@
}
}
+namespace {
+
+class GraphicBuffer : public C2Buffer {
+public:
+ GraphicBuffer(
+ const std::shared_ptr<C2GraphicBlock> &block,
+ const C2Rect &crop)
+ : C2Buffer({ block->share(crop, ::android::C2Fence()) }) {}
+};
+
+
+class LinearBuffer : public C2Buffer {
+public:
+ LinearBuffer(
+ const std::shared_ptr<C2LinearBlock> &block, size_t offset, size_t size)
+ : C2Buffer({ block->share(offset, size, ::android::C2Fence()) }) {}
+};
+
+} // namespace
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createLinearBuffer(
+ const std::shared_ptr<C2LinearBlock> &block) {
+ return createLinearBuffer(block, block->offset(), block->size());
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createLinearBuffer(
+ const std::shared_ptr<C2LinearBlock> &block, size_t offset, size_t size) {
+ return std::make_shared<LinearBuffer>(block, offset, size);
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createGraphicBuffer(
+ const std::shared_ptr<C2GraphicBlock> &block) {
+ return createGraphicBuffer(block, C2Rect(0, 0, block->width(), block->height()));
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createGraphicBuffer(
+ const std::shared_ptr<C2GraphicBlock> &block,
+ const C2Rect &crop) {
+ return std::make_shared<GraphicBuffer>(block, crop);
+}
+
} // namespace android
diff --git a/media/libstagefright/codec2/SimpleC2Interface.cpp b/media/libstagefright/codec2/SimpleC2Interface.cpp
new file mode 100644
index 0000000..f9cab26
--- /dev/null
+++ b/media/libstagefright/codec2/SimpleC2Interface.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleC2Interface"
+#include <utils/Log.h>
+
+#include <SimpleC2Interface.h>
+
+namespace android {
+
+c2_status_t SimpleC2Interface::query_vb(
+ const std::vector<C2Param* const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>>* const heapParams) const {
+ (void)mayBlock;
+
+ for (C2Param* const param : stackParams) {
+ if (param->coreIndex() != C2StreamFormatConfig::CORE_INDEX
+ || !param->forStream()
+ || param->stream() != 0u) {
+ param->invalidate();
+ continue;
+ }
+ if (param->forInput()) {
+ param->updateFrom(mInputFormat);
+ } else {
+ param->updateFrom(mOutputFormat);
+ }
+ }
+ if (heapParams) {
+ heapParams->clear();
+ for (const auto &index : heapParamIndices) {
+ if (index.coreIndex() != C2StreamFormatConfig::CORE_INDEX
+ || !index.forStream()
+ || index.stream() != 0u) {
+ heapParams->push_back(nullptr);
+ }
+ if (index.forInput()) {
+ heapParams->push_back(C2Param::Copy(mInputFormat));
+ } else {
+ heapParams->push_back(C2Param::Copy(mOutputFormat));
+ }
+ }
+ }
+
+ return C2_OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/codec2/include/C2Config.h b/media/libstagefright/codec2/include/C2Config.h
index bbbf338..83cb72c 100644
--- a/media/libstagefright/codec2/include/C2Config.h
+++ b/media/libstagefright/codec2/include/C2Config.h
@@ -66,6 +66,8 @@
kParamIndexMaxVideoSizeHint,
kParamIndexVideoSizeTuning,
+ kParamIndexCsd,
+
// video info
kParamIndexStructStart = 0x1,
@@ -129,6 +131,8 @@
typedef C2PortParam<C2Tuning, C2Uint64Array, kParamIndexBlockPools> C2PortBlockPoolsTuning;
+typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexCsd> C2StreamCsdInfo;
+
/*
Component description fields:
diff --git a/media/libstagefright/codec2/include/SimpleC2Component.h b/media/libstagefright/codec2/include/SimpleC2Component.h
index 48b8382..a4b6ee1 100644
--- a/media/libstagefright/codec2/include/SimpleC2Component.h
+++ b/media/libstagefright/codec2/include/SimpleC2Component.h
@@ -84,23 +84,28 @@
virtual c2_status_t onFlush_sm() = 0;
/**
- * Drain the component.
- */
- virtual c2_status_t onDrain_nb() = 0;
-
- /**
* Process the given work and finish pending work using finish().
*
* \param[in,out] work the work to process
* \param[in] pool the pool to use for allocating output blocks.
- *
- * \retval true |work| is done and ready for return to client
- * \retval false more data is needed for the |work| to be done;
- * mark |work| as pending.
*/
- virtual bool process(
+ virtual void process(
const std::unique_ptr<C2Work> &work,
- std::shared_ptr<C2BlockPool> pool) = 0;
+ const std::shared_ptr<C2BlockPool> &pool) = 0;
+
+ /**
+ * Drain the component and finish pending work using finish().
+ *
+ * \param[in] drainMode mode of drain.
+ * \param[in] pool the pool to use for allocating output blocks.
+ *
+ * \retval C2_OK The component has drained all pending output
+ * work.
+ * \retval C2_OMITTED Unsupported mode (e.g. DRAIN_CHAIN)
+ */
+ virtual c2_status_t drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) = 0;
// for derived classes
/**
@@ -116,6 +121,21 @@
*/
void finish(uint64_t frameIndex, std::function<void(const std::unique_ptr<C2Work> &)> fillWork);
+ std::shared_ptr<C2Buffer> createLinearBuffer(
+ const std::shared_ptr<C2LinearBlock> &block);
+
+ std::shared_ptr<C2Buffer> createLinearBuffer(
+ const std::shared_ptr<C2LinearBlock> &block, size_t offset, size_t size);
+
+ std::shared_ptr<C2Buffer> createGraphicBuffer(
+ const std::shared_ptr<C2GraphicBlock> &block);
+
+ std::shared_ptr<C2Buffer> createGraphicBuffer(
+ const std::shared_ptr<C2GraphicBlock> &block,
+ const C2Rect &crop);
+
+ static constexpr uint32_t NO_DRAIN = ~0u;
+
private:
const std::shared_ptr<C2ComponentInterface> mIntf;
std::atomic_bool mExitRequested;
@@ -135,10 +155,30 @@
};
Mutexed<ExecState> mExecState;
- struct WorkQueue {
+ class WorkQueue {
+ public:
+ inline WorkQueue() : mGeneration(0ul) {}
+
+ inline uint64_t generation() const { return mGeneration; }
+ inline void incGeneration() { ++mGeneration; }
+
+ std::unique_ptr<C2Work> pop_front();
+ void push_back(std::unique_ptr<C2Work> work);
+ bool empty() const;
+ uint32_t drainMode() const;
+ void markDrain(uint32_t drainMode);
+ void clear();
+
Condition mCondition;
- std::list<std::unique_ptr<C2Work>> mQueue;
+
+ private:
+ struct Entry {
+ std::unique_ptr<C2Work> work;
+ uint32_t drainMode;
+ };
+
uint64_t mGeneration;
+ std::list<Entry> mQueue;
};
Mutexed<WorkQueue> mWorkQueue;
diff --git a/media/libstagefright/codec2/include/SimpleC2Interface.h b/media/libstagefright/codec2/include/SimpleC2Interface.h
new file mode 100644
index 0000000..3796b0b
--- /dev/null
+++ b/media/libstagefright/codec2/include/SimpleC2Interface.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SIMPLE_C2_INTERFACE_H_
+#define SIMPLE_C2_INTERFACE_H_
+
+#include <C2Component.h>
+
+namespace android {
+
+class SimpleC2Interface : public C2ComponentInterface {
+public:
+ class Builder {
+ public:
+ inline Builder(
+ const char *name,
+ c2_node_id_t id)
+ : mIntf(new SimpleC2Interface(name, id)) {}
+
+ inline Builder(
+ const char *name,
+ c2_node_id_t id,
+ std::function<void(::android::SimpleC2Interface*)> deleter)
+ : mIntf(new SimpleC2Interface(name, id), deleter) {}
+
+ inline Builder &inputFormat(C2FormatKind input) {
+ mIntf->mInputFormat.value = input;
+ return *this;
+ }
+
+ inline Builder &outputFormat(C2FormatKind output) {
+ mIntf->mOutputFormat.value = output;
+ return *this;
+ }
+
+ inline std::shared_ptr<SimpleC2Interface> build() {
+ return mIntf;
+ }
+ private:
+ std::shared_ptr<SimpleC2Interface> mIntf;
+ };
+
+ virtual ~SimpleC2Interface() = default;
+
+ // From C2ComponentInterface
+ inline C2String getName() const override { return mName; }
+ inline c2_node_id_t getId() const override { return mId; }
+ c2_status_t query_vb(
+ const std::vector<C2Param* const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>>* const heapParams) const override;
+ inline c2_status_t config_vb(
+ const std::vector<C2Param* const> &,
+ c2_blocking_t,
+ std::vector<std::unique_ptr<C2SettingResult>>* const) override {
+ return C2_OMITTED;
+ }
+ inline c2_status_t createTunnel_sm(c2_node_id_t) override { return C2_OMITTED; }
+ inline c2_status_t releaseTunnel_sm(c2_node_id_t) override { return C2_OMITTED; }
+ inline c2_status_t querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> * const) const override {
+ return C2_OMITTED;
+ }
+ c2_status_t querySupportedValues_vb(
+ std::vector<C2FieldSupportedValuesQuery> &,
+ c2_blocking_t) const override {
+ return C2_OMITTED;
+ }
+
+private:
+ inline SimpleC2Interface(const char *name, c2_node_id_t id)
+ : mName(name), mId(id), mInputFormat(0u), mOutputFormat(0u) {}
+
+ const C2String mName;
+ const c2_node_id_t mId;
+ C2StreamFormatConfig::input mInputFormat;
+ C2StreamFormatConfig::output mOutputFormat;
+
+ SimpleC2Interface() = delete;
+};
+
+} // namespace android
+
+#endif // SIMPLE_C2_INTERFACE_H_
diff --git a/media/libstagefright/codec2/tests/Android.bp b/media/libstagefright/codec2/tests/Android.bp
index cf75061..f26fbd0 100644
--- a/media/libstagefright/codec2/tests/Android.bp
+++ b/media/libstagefright/codec2/tests/Android.bp
@@ -42,23 +42,14 @@
],
include_dirs: [
- "frameworks/av/media/libstagefright/codec2/include",
- "frameworks/av/media/libstagefright/codec2/vndk/include",
],
shared_libs: [
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.mapper@2.0",
"libcutils",
- "libhidlbase",
- "libion",
"liblog",
"libstagefright_codec2",
- "libutils",
- ],
-
- static_libs: [
"libstagefright_codec2_vndk",
+ "libutils",
],
cflags: [
@@ -80,22 +71,15 @@
],
include_dirs: [
- "frameworks/av/media/libstagefright/codec2/include",
- "frameworks/av/media/libstagefright/codec2/vndk/include",
"frameworks/native/include/media/openmax",
],
shared_libs: [
"libcutils",
- "libhidlbase",
- "libion",
"liblog",
"libstagefright_codec2",
- "libutils",
- ],
-
- static_libs: [
"libstagefright_codec2_vndk",
+ "libutils",
],
cflags: [
diff --git a/media/libstagefright/codec2/vndk/Android.bp b/media/libstagefright/codec2/vndk/Android.bp
index fb469d7..cc79dc0 100644
--- a/media/libstagefright/codec2/vndk/Android.bp
+++ b/media/libstagefright/codec2/vndk/Android.bp
@@ -1,4 +1,4 @@
-cc_library_static {
+cc_library {
name: "libstagefright_codec2_vndk",
srcs: [
diff --git a/media/libstagefright/codec2/vndk/C2AllocatorGralloc.cpp b/media/libstagefright/codec2/vndk/C2AllocatorGralloc.cpp
index b5ca90d..18db3e9 100644
--- a/media/libstagefright/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/libstagefright/codec2/vndk/C2AllocatorGralloc.cpp
@@ -20,6 +20,7 @@
#include <android/hardware/graphics/allocator/2.0/IAllocator.h>
#include <android/hardware/graphics/mapper/2.0/IMapper.h>
+#include <cutils/native_handle.h>
#include <hardware/gralloc.h>
#include <C2AllocatorGralloc.h>
@@ -50,6 +51,111 @@
return C2_CORRUPTED;
}
+static
+bool native_handle_is_invalid(const native_handle_t *const handle) {
+ // perform basic validation of a native handle
+ if (handle == nullptr) {
+ // null handle is considered valid
+ return false;
+ }
+ return ((size_t)handle->version != sizeof(native_handle_t) ||
+ handle->numFds < 0 ||
+ handle->numInts < 0 ||
+ // for sanity assume handles must occupy less memory than INT_MAX bytes
+ handle->numFds > int((INT_MAX - handle->version) / sizeof(int)) - handle->numInts);
+}
+
+class C2HandleGralloc : public C2Handle {
+private:
+ struct ExtraData {
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ uint32_t usage_lo;
+ uint32_t usage_hi;
+ uint32_t magic;
+ };
+
+ enum {
+ NUM_INTS = sizeof(ExtraData) / sizeof(int),
+ };
+ const static uint32_t MAGIC = '\xc2gr\x00';
+
+ static
+ const ExtraData* getExtraData(const C2Handle *const handle) {
+ if (handle == nullptr
+ || native_handle_is_invalid(handle)
+ || handle->numInts < NUM_INTS) {
+ return nullptr;
+ }
+ return reinterpret_cast<const ExtraData*>(
+ &handle->data[handle->numFds + handle->numInts - NUM_INTS]);
+ }
+
+ static
+ ExtraData *getExtraData(C2Handle *const handle) {
+ return const_cast<ExtraData *>(getExtraData(const_cast<const C2Handle *const>(handle)));
+ }
+
+public:
+ static bool isValid(const C2Handle *const o) {
+ if (o == nullptr) { // null handle is always valid
+ return true;
+ }
+ const ExtraData *xd = getExtraData(o);
+ // we cannot validate width/height/format/usage without accessing gralloc driver
+ return xd != nullptr && xd->magic == MAGIC;
+ }
+
+ static C2HandleGralloc* WrapNativeHandle(
+ const native_handle_t *const handle,
+ uint32_t width, uint32_t height, uint32_t format, uint64_t usage) {
+ //CHECK(handle != nullptr);
+ if (native_handle_is_invalid(handle) ||
+ handle->numInts > int((INT_MAX - handle->version) / sizeof(int)) - NUM_INTS - handle->numFds) {
+ return nullptr;
+ }
+ ExtraData xd = { width, height, format, uint32_t(usage & 0xFFFFFFFF), uint32_t(usage >> 32), MAGIC };
+ native_handle_t *res = native_handle_create(handle->numFds, handle->numInts + NUM_INTS);
+ if (res != nullptr) {
+ memcpy(&res->data, &handle->data, sizeof(int) * (handle->numFds + handle->numInts));
+ *getExtraData(res) = xd;
+ }
+ return reinterpret_cast<C2HandleGralloc *>(res);
+ }
+
+ static native_handle_t* UnwrapNativeHandle(const C2Handle *const handle) {
+ const ExtraData *xd = getExtraData(handle);
+ if (xd == nullptr || xd->magic != MAGIC) {
+ return nullptr;
+ }
+ native_handle_t *res = native_handle_create(handle->numFds, handle->numInts - NUM_INTS);
+ if (res != nullptr) {
+ memcpy(&res->data, &handle->data, sizeof(int) * (res->numFds + res->numInts));
+ }
+ return res;
+ }
+
+ static const C2HandleGralloc* Import(
+ const C2Handle *const handle,
+ uint32_t *width, uint32_t *height, uint32_t *format, uint64_t *usage) {
+ const ExtraData *xd = getExtraData(handle);
+ if (xd == nullptr) {
+ return nullptr;
+ }
+ *width = xd->width;
+ *height = xd->height;
+ *format = xd->format;
+ *usage = xd->usage_lo | (uint64_t(xd->usage_hi) << 32);
+
+ return reinterpret_cast<const C2HandleGralloc *>(handle);
+ }
+};
+
+native_handle_t* UnwrapNativeCodec2GrallocHandle(const C2Handle *const handle) {
+ return C2HandleGralloc::UnwrapNativeHandle(handle);
+}
+
class C2AllocationGralloc : public C2GraphicAllocation {
public:
virtual ~C2AllocationGralloc() override;
@@ -59,7 +165,7 @@
C2PlanarLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) override;
virtual c2_status_t unmap(C2Fence *fenceFd /* nullable */) override;
virtual bool isValid() const override { return true; }
- virtual const C2Handle *handle() const override { return mHandle; }
+ virtual const C2Handle *handle() const override { return mLockedHandle ? : mHandle; }
virtual bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) const override;
// internal methods
@@ -67,26 +173,31 @@
C2AllocationGralloc(
const IMapper::BufferDescriptorInfo &info,
const sp<IMapper> &mapper,
- hidl_handle &handle);
+ hidl_handle &hidlHandle,
+ const C2HandleGralloc *const handle);
int dup() const;
c2_status_t status() const;
private:
const IMapper::BufferDescriptorInfo mInfo;
const sp<IMapper> mMapper;
- const hidl_handle mHandle;
+ const hidl_handle mHidlHandle;
+ const C2HandleGralloc *mHandle;
buffer_handle_t mBuffer;
+ const C2HandleGralloc *mLockedHandle;
bool mLocked;
};
C2AllocationGralloc::C2AllocationGralloc(
const IMapper::BufferDescriptorInfo &info,
const sp<IMapper> &mapper,
- hidl_handle &handle)
+ hidl_handle &hidlHandle,
+ const C2HandleGralloc *const handle)
: C2GraphicAllocation(info.width, info.height),
mInfo(info),
mMapper(mapper),
- mHandle(std::move(handle)),
+ mHidlHandle(std::move(hidlHandle)),
+ mHandle(handle),
mBuffer(nullptr),
mLocked(false) {}
@@ -117,7 +228,7 @@
c2_status_t err = C2_OK;
if (!mBuffer) {
mMapper->importBuffer(
- mHandle, [&err, this](const auto &maperr, const auto &buffer) {
+ mHidlHandle, [&err, this](const auto &maperr, const auto &buffer) {
err = maperr2error(maperr);
if (err == C2_OK) {
mBuffer = static_cast<buffer_handle_t>(buffer);
@@ -126,6 +237,11 @@
if (err != C2_OK) {
return err;
}
+ if (mBuffer == nullptr) {
+ return C2_CORRUPTED;
+ }
+ mLockedHandle = C2HandleGralloc::WrapNativeHandle(
+ mBuffer, mInfo.width, mInfo.height, (uint32_t)mInfo.format, mInfo.usage);
}
if (mInfo.format == PixelFormat::YCBCR_420_888 || mInfo.format == PixelFormat::YV12) {
@@ -321,17 +437,30 @@
return err;
}
- allocation->reset(new C2AllocationGralloc(info, mMapper, buffer));
+
+ allocation->reset(new C2AllocationGralloc(
+ info, mMapper, buffer,
+ C2HandleGralloc::WrapNativeHandle(
+ buffer.getNativeHandle(),
+ info.width, info.height, (uint32_t)info.format, info.usage)));
return C2_OK;
}
c2_status_t C2AllocatorGralloc::Impl::priorGraphicAllocation(
const C2Handle *handle,
std::shared_ptr<C2GraphicAllocation> *allocation) {
- (void) handle;
+ IMapper::BufferDescriptorInfo info;
+ info.layerCount = 1u;
+ const C2HandleGralloc *grallocHandle = C2HandleGralloc::Import(
+ handle,
+ &info.width, &info.height, (uint32_t *)&info.format, (uint64_t *)&info.usage);
+ if (grallocHandle == nullptr) {
+ return C2_BAD_VALUE;
+ }
- // TODO: need to figure out BufferDescriptorInfo from the handle.
- allocation->reset();
+ hidl_handle hidlHandle = C2HandleGralloc::UnwrapNativeHandle(grallocHandle);
+
+ allocation->reset(new C2AllocationGralloc(info, mMapper, hidlHandle, grallocHandle));
return C2_OMITTED;
}
diff --git a/media/libstagefright/codec2/vndk/C2AllocatorIon.cpp b/media/libstagefright/codec2/vndk/C2AllocatorIon.cpp
index 3a95118..34c68bb 100644
--- a/media/libstagefright/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/libstagefright/codec2/vndk/C2AllocatorIon.cpp
@@ -27,35 +27,67 @@
namespace android {
+/* size_t <=> int(lo), int(hi) conversions */
+constexpr inline int size2intLo(size_t s) {
+ return int(s & 0xFFFFFFFF);
+}
+
+constexpr inline int size2intHi(size_t s) {
+ // cast to uint64_t as size_t may be 32 bits wide
+ return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
+}
+
+constexpr inline size_t ints2size(int intLo, int intHi) {
+ // convert in 2 stages to 64 bits as intHi may be negative
+ return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
+}
+
/* ========================================= ION HANDLE ======================================== */
+/**
+ * ION handle
+ *
+ * There can be only a sole ion client per process, this is captured in the ion fd that is passed
+ * to the constructor, but this should be managed by the ion buffer allocator/mapper.
+ *
+ * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
+ * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
+ * a refcount.
+ *
+ * This handle will not capture mapped fd-s as updating that would require a global mutex.
+ */
+
struct C2HandleIon : public C2Handle {
- C2HandleIon(int ionFd, ion_user_handle_t buffer) : C2Handle(cHeader),
- mFds{ ionFd, buffer },
- mInts{ kMagic } { }
+ // ion handle owns ionFd(!) and bufferFd
+ C2HandleIon(int bufferFd, size_t size)
+ : C2Handle(cHeader),
+ mFds{ bufferFd },
+ mInts{ int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic } { }
static bool isValid(const C2Handle * const o);
- int ionFd() const { return mFds.mIon; }
- ion_user_handle_t buffer() const { return mFds.mBuffer; }
-
- void setBuffer(ion_user_handle_t bufferFd) { mFds.mBuffer = bufferFd; }
+ int bufferFd() const { return mFds.mBuffer; }
+ size_t size() const {
+ return size_t(unsigned(mInts.mSizeLo))
+ | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
+ }
protected:
struct {
- int mIon;
- int mBuffer; // ion_user_handle_t
+ int mBuffer; // shared ion buffer
} mFds;
struct {
+ int mSizeLo; // low 32-bits of size
+ int mSizeHi; // high 32-bits of size
int mMagic;
} mInts;
private:
typedef C2HandleIon _type;
enum {
- kMagic = 'ion1',
+ kMagic = '\xc2io\x00',
numFds = sizeof(mFds) / sizeof(int),
numInts = sizeof(mInts) / sizeof(int),
- version = sizeof(C2Handle) + sizeof(mFds) + sizeof(mInts)
+ version = sizeof(C2Handle)
};
//constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
const static C2Handle cHeader;
@@ -82,6 +114,7 @@
/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
+ /* Interface methods */
virtual c2_status_t map(
size_t offset, size_t size, C2MemoryUsage usage, int *fence,
void **addr /* nonnull */) override;
@@ -94,57 +127,108 @@
// internal methods
C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags);
C2AllocationIon(int ionFd, size_t size, int shareFd);
- int dup() const;
+
c2_status_t status() const;
protected:
class Impl;
Impl *mImpl;
+
+ // TODO: we could make this encapsulate shared_ptr and copiable
+ C2_DO_NOT_COPY(C2AllocationIon);
};
class C2AllocationIon::Impl {
-public:
- // NOTE: using constructor here instead of a factory method as we will need the
- // error value and this simplifies the error handling by the wrapper.
- Impl(int ionFd, size_t capacity, size_t align, unsigned heapMask, unsigned flags)
- : mInit(C2_OK),
- mHandle(ionFd, -1),
+private:
+ /**
+ * Constructs an ion allocation.
+ *
+ * \note We always create an ion allocation, even if the allocation or import fails
+ * so that we can capture the error.
+ *
+ * \param ionFd ion client (ownership transferred to created object)
+ * \param capacity size of allocation
+ * \param bufferFd buffer handle (ownership transferred to created object). Must be
+ * invalid if err is not 0.
+ * \param buffer ion buffer user handle (ownership transferred to created object). Must be
+ * invalid if err is not 0.
+ * \param err errno during buffer allocation or import
+ */
+ Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, int err)
+ : mIonFd(ionFd),
+ mHandle(bufferFd, capacity),
+ mBuffer(buffer),
+ mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
mMapFd(-1),
- mCapacity(capacity) {
- ion_user_handle_t buffer = -1;
- int ret = ion_alloc(mHandle.ionFd(), mCapacity, align, heapMask, flags, &buffer);
- if (ret == 0) {
- mHandle.setBuffer(buffer);
- } else {
- mInit = c2_map_errno<ENOMEM, EACCES, EINVAL>(-ret);
+ mMapSize(0) {
+ if (mInit != C2_OK) {
+ // close ionFd now on error
+ if (mIonFd >= 0) {
+ close(mIonFd);
+ mIonFd = -1;
+ }
+ // C2_CHECK(bufferFd < 0);
+ // C2_CHECK(buffer < 0);
}
}
- Impl(int ionFd, size_t capacity, int shareFd)
- : mInit(C2_OK),
- mHandle(ionFd, -1),
- mMapFd(-1),
- mCapacity(capacity) {
- ion_user_handle_t buffer;
- int ret = ion_import(mHandle.ionFd(), shareFd, &buffer);
- switch (-ret) {
- case 0:
- mHandle.setBuffer(buffer);
- break;
- case EBADF: // bad ion handle - should not happen
- case ENOTTY: // bad ion driver
- mInit = C2_CORRUPTED;
- break;
- default:
- mInit = c2_map_errno<ENOMEM, EACCES, EINVAL>(-ret);
- break;
+public:
+ /**
+ * Constructs an ion allocation by importing a shared buffer fd.
+ *
+ * \param ionFd ion client (ownership transferred to created object)
+ * \param capacity size of allocation
+ * \param bufferFd buffer handle (ownership transferred to created object)
+ *
+ * \return created ion allocation (implementation) which may be invalid if the
+ * import failed.
+ */
+ static Impl *Import(int ionFd, size_t capacity, int bufferFd) {
+ ion_user_handle_t buffer = -1;
+ int ret = ion_import(ionFd, bufferFd, &buffer);
+ return new Impl(ionFd, capacity, bufferFd, buffer, ret);
+ }
+
+ /**
+ * Constructs an ion allocation by allocating an ion buffer.
+ *
+ * \param ionFd ion client (ownership transferred to created object)
+ * \param size size of allocation
+ * \param align desired alignment of allocation
+ * \param heapMask mask of heaps considered
+ * \param flags ion allocation flags
+ *
+ * \return created ion allocation (implementation) which may be invalid if the
+ * allocation failed.
+ */
+ static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags) {
+ int bufferFd = -1;
+ ion_user_handle_t buffer = -1;
+ int ret = ion_alloc(ionFd, size, align, heapMask, flags, &buffer);
+ if (ret == 0) {
+ // get buffer fd for native handle constructor
+ ret = ion_share(ionFd, buffer, &bufferFd);
+ if (ret != 0) {
+ ion_free(ionFd, buffer);
+ buffer = -1;
+ }
}
- (void)mCapacity; // TODO
+ return new Impl(ionFd, size, bufferFd, buffer, ret);
}
c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, int *fenceFd, void **addr) {
(void)fenceFd; // TODO: wait for fence
*addr = nullptr;
+ if (mMapSize > 0) {
+ // TODO: technically we should return DUPLICATE here, but our block views don't
+ // actually unmap, so we end up remapping an ion buffer multiple times.
+ //
+ // return C2_DUPLICATE;
+ }
+ if (size == 0) {
+ return C2_BAD_VALUE;
+ }
+
int prot = PROT_NONE;
int flags = MAP_PRIVATE;
if (usage.consumer & C2MemoryUsage::CPU_READ) {
@@ -161,7 +245,7 @@
c2_status_t err = C2_OK;
if (mMapFd == -1) {
- int ret = ion_map(mHandle.ionFd(), mHandle.buffer(), mapSize, prot,
+ int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
flags, mapOffset, (unsigned char**)&mMapAddr, &mMapFd);
if (ret) {
mMapFd = -1;
@@ -187,6 +271,9 @@
}
c2_status_t unmap(void *addr, size_t size, int *fenceFd) {
+ if (mMapFd < 0 || mMapSize == 0) {
+ return C2_NOT_FOUND;
+ }
if (addr != (uint8_t *)mMapAddr + mMapAlignmentBytes ||
size + mMapAlignmentBytes != mMapSize) {
return C2_BAD_VALUE;
@@ -196,44 +283,43 @@
return c2_map_errno<EINVAL>(errno);
}
if (fenceFd) {
- *fenceFd = -1;
+ *fenceFd = -1; // not using fences
}
+ mMapSize = 0;
return C2_OK;
}
~Impl() {
- if (mMapFd != -1) {
+ if (mMapFd >= 0) {
close(mMapFd);
mMapFd = -1;
}
-
- (void)ion_free(mHandle.ionFd(), mHandle.buffer());
+ if (mInit == C2_OK) {
+ (void)ion_free(mIonFd, mBuffer);
+ }
+ if (mIonFd >= 0) {
+ close(mIonFd);
+ }
+ native_handle_close(&mHandle);
}
c2_status_t status() const {
return mInit;
}
- const C2Handle * handle() const {
+ const C2Handle *handle() const {
return &mHandle;
}
- int dup() const {
- int fd = -1;
- if (mInit != 0 || ion_share(mHandle.ionFd(), mHandle.buffer(), &fd) != 0) {
- fd = -1;
- }
- return fd;
- }
-
private:
- c2_status_t mInit;
+ int mIonFd;
C2HandleIon mHandle;
+ ion_user_handle_t mBuffer;
+ c2_status_t mInit;
int mMapFd; // only one for now
void *mMapAddr;
size_t mMapAlignmentBytes;
size_t mMapSize;
- size_t mCapacity;
};
c2_status_t C2AllocationIon::map(
@@ -268,15 +354,11 @@
C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags)
: C2LinearAllocation(size),
- mImpl(new Impl(ionFd, size, align, heapMask, flags)) { }
+ mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags)) { }
C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd)
: C2LinearAllocation(size),
- mImpl(new Impl(ionFd, size, shareFd)) { }
-
-int C2AllocationIon::dup() const {
- return mImpl->dup();
-}
+ mImpl(Impl::Import(ionFd, size, shareFd)) { }
/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon() : mInit(C2_OK), mIonFd(ion_open()) {
@@ -328,7 +410,7 @@
#endif
std::shared_ptr<C2AllocationIon> alloc
- = std::make_shared<C2AllocationIon>(mIonFd, capacity, align, heapMask, flags);
+ = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags);
c2_status_t ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
@@ -350,7 +432,7 @@
// TODO: get capacity and validate it
const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
std::shared_ptr<C2AllocationIon> alloc
- = std::make_shared<C2AllocationIon>(mIonFd, 0 /* capacity */, h->buffer());
+ = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd());
c2_status_t ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
diff --git a/media/libstagefright/codec2/vndk/C2Store.cpp b/media/libstagefright/codec2/vndk/C2Store.cpp
index 204f895..eb72d17 100644
--- a/media/libstagefright/codec2/vndk/C2Store.cpp
+++ b/media/libstagefright/codec2/vndk/C2Store.cpp
@@ -404,6 +404,7 @@
C2PlatformComponentStore::C2PlatformComponentStore() {
// TODO: move this also into a .so so it can be updated
mComponents.emplace("c2.google.avc.decoder", "libstagefright_soft_c2avcdec.so");
+ mComponents.emplace("c2.google.aac.decoder", "libstagefright_soft_c2aacdec.so");
}
c2_status_t C2PlatformComponentStore::copyBuffer(
diff --git a/media/libstagefright/codec2/vndk/include/C2AllocatorGralloc.h b/media/libstagefright/codec2/vndk/include/C2AllocatorGralloc.h
index 374b0ed..5311747 100644
--- a/media/libstagefright/codec2/vndk/include/C2AllocatorGralloc.h
+++ b/media/libstagefright/codec2/vndk/include/C2AllocatorGralloc.h
@@ -24,12 +24,18 @@
namespace android {
+/**
+ * Unwrap the native handle from a Codec2 handle allocated by C2AllocatorGralloc.
+ *
+ * @param handle a handle allocated by C2AllocatorGralloc. This includes handles returned for a
+ * graphic block allocation handle returned.
+ *
+ * @return a new NON-OWNING native handle that must be deleted using native_handle_delete.
+ */
+native_handle_t*UnwrapNativeCodec2GrallocHandle(const C2Handle *const handle);
+
class C2AllocatorGralloc : public C2Allocator {
public:
- // (usage, capacity) => (align, heapMask, flags)
- typedef std::function<int (C2MemoryUsage, size_t,
- /* => */ size_t*, unsigned*, unsigned*)> usage_mapper_fn;
-
virtual id_t getId() const override;
virtual C2String getName() const override;
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 21c00a1..abf3b1c 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -1,4 +1,45 @@
cc_library_shared {
+ name: "libstagefright_soft_c2aacdec",
+// vendor_available: true,
+// vndk: {
+// enabled: true,
+// },
+
+ srcs: [
+ "C2SoftAac.cpp",
+ "DrcPresModeWrap.cpp",
+ ],
+
+ cflags: ["-Werror"],
+
+ sanitize: {
+ misc_undefined: [
+ "signed-integer-overflow",
+ "unsigned-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+ static_libs: [
+ "libFraunhoferAAC",
+ "libstagefright_codec2_vndk"
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "libion",
+ "liblog",
+ "libstagefright_codec2",
+ "libstagefright_foundation",
+ "libstagefright_simple_c2component",
+ "libutils",
+ ],
+}
+
+cc_library_shared {
name: "libstagefright_soft_aacdec",
vendor_available: true,
vndk: {
diff --git a/media/libstagefright/codecs/aacdec/C2SoftAac.cpp b/media/libstagefright/codecs/aacdec/C2SoftAac.cpp
new file mode 100644
index 0000000..390f36c
--- /dev/null
+++ b/media/libstagefright/codecs/aacdec/C2SoftAac.cpp
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftAac"
+#include <utils/Log.h>
+
+#include "C2SoftAac.h"
+
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/misc.h>
+
+#include <inttypes.h>
+#include <math.h>
+#include <numeric>
+
+#define FILEREAD_MAX_LAYERS 2
+
+#define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
+#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
+// names of properties that can be used to override the default DRC settings
+#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
+#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
+#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
+#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
+#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+
+namespace android {
+
+C2SoftAac::C2SoftAac(const char *name, c2_node_id_t id)
+ : SimpleC2Component(
+ SimpleC2Interface::Builder(name, id)
+ .inputFormat(C2FormatCompressed)
+ .outputFormat(C2FormatAudio)
+ .build()),
+ mAACDecoder(NULL),
+ mStreamInfo(NULL),
+ mIsADTS(false),
+ mSignalledError(false),
+ mOutputDelayRingBuffer(NULL) {
+}
+
+C2SoftAac::~C2SoftAac() {
+ onRelease();
+}
+
+c2_status_t C2SoftAac::onInit() {
+ status_t err = initDecoder();
+ return err == OK ? C2_OK : C2_CORRUPTED;
+}
+
+c2_status_t C2SoftAac::onStop() {
+ drainDecoder();
+ // reset the "configured" state
+ mOutputDelayCompensated = 0;
+ mOutputDelayRingBufferWritePos = 0;
+ mOutputDelayRingBufferReadPos = 0;
+ mOutputDelayRingBufferFilled = 0;
+ mBuffersInfo.clear();
+
+ // To make the codec behave the same before and after a reset, we need to invalidate the
+ // streaminfo struct. This does that:
+ mStreamInfo->sampleRate = 0; // TODO: mStreamInfo is read only
+
+ mSignalledError = false;
+
+ return C2_OK;
+}
+
+void C2SoftAac::onReset() {
+ (void)onStop();
+}
+
+void C2SoftAac::onRelease() {
+ if (mAACDecoder) {
+ aacDecoder_Close(mAACDecoder);
+ mAACDecoder = NULL;
+ }
+ if (mOutputDelayRingBuffer) {
+ delete[] mOutputDelayRingBuffer;
+ mOutputDelayRingBuffer = NULL;
+ }
+}
+
+status_t C2SoftAac::initDecoder() {
+ ALOGV("initDecoder()");
+ status_t status = UNKNOWN_ERROR;
+ mAACDecoder = aacDecoder_Open(TT_MP4_ADIF, /* num layers */ 1);
+ if (mAACDecoder != NULL) {
+ mStreamInfo = aacDecoder_GetStreamInfo(mAACDecoder);
+ if (mStreamInfo != NULL) {
+ status = OK;
+ }
+ }
+
+ mOutputDelayCompensated = 0;
+ mOutputDelayRingBufferSize = 2048 * MAX_CHANNEL_COUNT * kNumDelayBlocksMax;
+ mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize];
+ mOutputDelayRingBufferWritePos = 0;
+ mOutputDelayRingBufferReadPos = 0;
+ mOutputDelayRingBufferFilled = 0;
+
+ if (mAACDecoder == NULL) {
+ ALOGE("AAC decoder is null. TODO: Can not call aacDecoder_SetParam in the following code");
+ }
+
+ //aacDecoder_SetParam(mAACDecoder, AAC_PCM_LIMITER_ENABLE, 0);
+
+ //init DRC wrapper
+ mDrcWrap.setDecoderHandle(mAACDecoder);
+ mDrcWrap.submitStreamData(mStreamInfo);
+
+ // for streams that contain metadata, use the mobile profile DRC settings unless overridden by platform properties
+ // TODO: change the DRC settings depending on audio output device type (HDMI, loudspeaker, headphone)
+ char value[PROPERTY_VALUE_MAX];
+ // DRC_PRES_MODE_WRAP_DESIRED_TARGET
+ if (property_get(PROP_DRC_OVERRIDE_REF_LEVEL, value, NULL)) {
+ unsigned refLevel = atoi(value);
+ ALOGV("AAC decoder using desired DRC target reference level of %d instead of %d", refLevel,
+ DRC_DEFAULT_MOBILE_REF_LEVEL);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET, refLevel);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET, DRC_DEFAULT_MOBILE_REF_LEVEL);
+ }
+ // DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR
+ if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL)) {
+ unsigned cut = atoi(value);
+ ALOGV("AAC decoder using desired DRC attenuation factor of %d instead of %d", cut,
+ DRC_DEFAULT_MOBILE_DRC_CUT);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, cut);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, DRC_DEFAULT_MOBILE_DRC_CUT);
+ }
+ // DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR
+ if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL)) {
+ unsigned boost = atoi(value);
+ ALOGV("AAC decoder using desired DRC boost factor of %d instead of %d", boost,
+ DRC_DEFAULT_MOBILE_DRC_BOOST);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR, boost);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR, DRC_DEFAULT_MOBILE_DRC_BOOST);
+ }
+ // DRC_PRES_MODE_WRAP_DESIRED_HEAVY
+ if (property_get(PROP_DRC_OVERRIDE_HEAVY, value, NULL)) {
+ unsigned heavy = atoi(value);
+ ALOGV("AAC decoder using desired DRC heavy compression switch of %d instead of %d", heavy,
+ DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY, heavy);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY, DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ }
+ // DRC_PRES_MODE_WRAP_ENCODER_TARGET
+ if (property_get(PROP_DRC_OVERRIDE_ENC_LEVEL, value, NULL)) {
+ unsigned encoderRefLevel = atoi(value);
+ ALOGV("AAC decoder using encoder-side DRC reference level of %d instead of %d",
+ encoderRefLevel, DRC_DEFAULT_MOBILE_ENC_LEVEL);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, encoderRefLevel);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
+ }
+
+ // By default, the decoder creates a 5.1 channel downmix signal.
+ // For seven and eight channel input streams, enable 6.1 and 7.1 channel output
+ aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1);
+
+ return status;
+}
+
+bool C2SoftAac::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamples) {
+ if (numSamples == 0) {
+ return true;
+ }
+ if (outputDelayRingBufferSpaceLeft() < numSamples) {
+ ALOGE("RING BUFFER WOULD OVERFLOW");
+ return false;
+ }
+ if (mOutputDelayRingBufferWritePos + numSamples <= mOutputDelayRingBufferSize
+ && (mOutputDelayRingBufferReadPos <= mOutputDelayRingBufferWritePos
+ || mOutputDelayRingBufferReadPos > mOutputDelayRingBufferWritePos + numSamples)) {
+ // faster memcopy loop without checks, if the preconditions allow this
+ for (int32_t i = 0; i < numSamples; i++) {
+ mOutputDelayRingBuffer[mOutputDelayRingBufferWritePos++] = samples[i];
+ }
+
+ if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
+ }
+ } else {
+ ALOGV("slow C2SoftAac::outputDelayRingBufferPutSamples()");
+
+ for (int32_t i = 0; i < numSamples; i++) {
+ mOutputDelayRingBuffer[mOutputDelayRingBufferWritePos] = samples[i];
+ mOutputDelayRingBufferWritePos++;
+ if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
+ }
+ }
+ }
+ mOutputDelayRingBufferFilled += numSamples;
+ return true;
+}
+
+int32_t C2SoftAac::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numSamples) {
+
+ if (numSamples > mOutputDelayRingBufferFilled) {
+ ALOGE("RING BUFFER WOULD UNDERRUN");
+ return -1;
+ }
+
+ if (mOutputDelayRingBufferReadPos + numSamples <= mOutputDelayRingBufferSize
+ && (mOutputDelayRingBufferWritePos < mOutputDelayRingBufferReadPos
+ || mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferReadPos + numSamples)) {
+ // faster memcopy loop without checks, if the preconditions allow this
+ if (samples != 0) {
+ for (int32_t i = 0; i < numSamples; i++) {
+ samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos++];
+ }
+ } else {
+ mOutputDelayRingBufferReadPos += numSamples;
+ }
+ if (mOutputDelayRingBufferReadPos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferReadPos -= mOutputDelayRingBufferSize;
+ }
+ } else {
+ ALOGV("slow C2SoftAac::outputDelayRingBufferGetSamples()");
+
+ for (int32_t i = 0; i < numSamples; i++) {
+ if (samples != 0) {
+ samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos];
+ }
+ mOutputDelayRingBufferReadPos++;
+ if (mOutputDelayRingBufferReadPos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferReadPos -= mOutputDelayRingBufferSize;
+ }
+ }
+ }
+ mOutputDelayRingBufferFilled -= numSamples;
+ return numSamples;
+}
+
+int32_t C2SoftAac::outputDelayRingBufferSamplesAvailable() {
+ return mOutputDelayRingBufferFilled;
+}
+
+int32_t C2SoftAac::outputDelayRingBufferSpaceLeft() {
+ return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable();
+}
+
+void C2SoftAac::drainRingBuffer(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool,
+ bool eos) {
+ while (!mBuffersInfo.empty() && outputDelayRingBufferSamplesAvailable()
+ >= mStreamInfo->frameSize * mStreamInfo->numChannels) {
+ Info &outInfo = mBuffersInfo.front();
+ ALOGV("outInfo.frameIndex = %" PRIu64, outInfo.frameIndex);
+ int samplesize = mStreamInfo->numChannels * sizeof(int16_t);
+
+ int available = outputDelayRingBufferSamplesAvailable();
+ int numFrames = outInfo.decodedSizes.size();
+ int numSamples = numFrames * (mStreamInfo->frameSize * mStreamInfo->numChannels);
+ if (available < numSamples) {
+ if (eos) {
+ numSamples = available;
+ } else {
+ break;
+ }
+ }
+ ALOGV("%d samples available (%d), or %d frames",
+ numSamples, available, numFrames);
+ ALOGV("getting %d from ringbuffer", numSamples);
+
+ std::shared_ptr<C2LinearBlock> block;
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ // TODO: error handling, proper usage, etc.
+ c2_status_t err = pool->fetchLinearBlock(numSamples * sizeof(int16_t), usage, &block);
+ if (err != C2_OK) {
+ ALOGE("err = %d", err);
+ }
+
+ C2WriteView wView = block->map().get();
+ // TODO
+ INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(wView.data());
+ int32_t ns = outputDelayRingBufferGetSamples(outBuffer, numSamples);
+ if (ns != numSamples) {
+ ALOGE("not a complete frame of samples available");
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ auto fillWork = [buffer = createLinearBuffer(block)](const std::unique_ptr<C2Work> &work) {
+ work->worklets.front()->output.flags = work->input.flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.buffers.push_back(buffer);
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->worklets_processed = 1u;
+ };
+ if (work && work->input.ordinal.frame_index == outInfo.frameIndex) {
+ fillWork(work);
+ } else {
+ finish(outInfo.frameIndex, fillWork);
+ }
+
+ ALOGV("out timestamp %" PRIu64 " / %u", outInfo.timestamp, block->capacity());
+ mBuffersInfo.pop_front();
+ }
+}
+
+void C2SoftAac::process(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ work->worklets_processed = 0u;
+ if (mSignalledError) {
+ return;
+ }
+
+ UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
+ UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
+ UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};
+
+ INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
+ C2ReadView view = work->input.buffers[0]->data().linearBlocks().front().map().get();
+ size_t offset = 0u;
+ size_t size = view.capacity();
+
+ bool eos = (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) != 0;
+ bool codecConfig = (work->input.flags & C2BufferPack::FLAG_CODEC_CONFIG) != 0;
+
+ //TODO
+#if 0
+ if (mInputBufferCount == 0 && !codecConfig) {
+ ALOGW("first buffer should have FLAG_CODEC_CONFIG set");
+ codecConfig = true;
+ }
+#endif
+ if (codecConfig) {
+ // const_cast because of libAACdec method signature.
+ inBuffer[0] = const_cast<UCHAR *>(view.data() + offset);
+ inBufferLength[0] = size;
+
+ AAC_DECODER_ERROR decoderErr =
+ aacDecoder_ConfigRaw(mAACDecoder,
+ inBuffer,
+ inBufferLength);
+
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGE("aacDecoder_ConfigRaw decoderErr = 0x%4.4x", decoderErr);
+ mSignalledError = true;
+ // TODO: error
+ return;
+ }
+
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.buffers.push_back(nullptr);
+
+ return;
+ }
+
+ Info inInfo;
+ inInfo.frameIndex = work->input.ordinal.frame_index;
+ inInfo.timestamp = work->input.ordinal.timestamp;
+ inInfo.bufferSize = size;
+ inInfo.decodedSizes.clear();
+ while (size > 0u) {
+ ALOGV("size = %zu", size);
+ if (mIsADTS) {
+ size_t adtsHeaderSize = 0;
+ // skip 30 bits, aac_frame_length follows.
+ // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
+
+ const uint8_t *adtsHeader = view.data() + offset;
+
+ bool signalError = false;
+ if (size < 7) {
+ ALOGE("Audio data too short to contain even the ADTS header. "
+ "Got %zu bytes.", size);
+ hexdump(adtsHeader, size);
+ signalError = true;
+ } else {
+ bool protectionAbsent = (adtsHeader[1] & 1);
+
+ unsigned aac_frame_length =
+ ((adtsHeader[3] & 3) << 11)
+ | (adtsHeader[4] << 3)
+ | (adtsHeader[5] >> 5);
+
+ if (size < aac_frame_length) {
+ ALOGE("Not enough audio data for the complete frame. "
+ "Got %zu bytes, frame size according to the ADTS "
+ "header is %u bytes.",
+ size, aac_frame_length);
+ hexdump(adtsHeader, size);
+ signalError = true;
+ } else {
+ adtsHeaderSize = (protectionAbsent ? 7 : 9);
+ if (aac_frame_length < adtsHeaderSize) {
+ signalError = true;
+ } else {
+ // const_cast because of libAACdec method signature.
+ inBuffer[0] = const_cast<UCHAR *>(adtsHeader + adtsHeaderSize);
+ inBufferLength[0] = aac_frame_length - adtsHeaderSize;
+
+ offset += adtsHeaderSize;
+ size -= adtsHeaderSize;
+ }
+ }
+ }
+
+ if (signalError) {
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
+ return;
+ }
+ } else {
+ // const_cast because of libAACdec method signature.
+ inBuffer[0] = const_cast<UCHAR *>(view.data() + offset);
+ inBufferLength[0] = size;
+ }
+
+ // Fill and decode
+ bytesValid[0] = inBufferLength[0];
+
+ INT prevSampleRate = mStreamInfo->sampleRate;
+ INT prevNumChannels = mStreamInfo->numChannels;
+
+ aacDecoder_Fill(mAACDecoder,
+ inBuffer,
+ inBufferLength,
+ bytesValid);
+
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
+
+ UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
+ size -= inBufferUsedLength;
+ offset += inBufferUsedLength;
+
+ AAC_DECODER_ERROR decoderErr;
+ do {
+ if (outputDelayRingBufferSpaceLeft() <
+ (mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ ALOGV("skipping decode: not enough space left in ringbuffer");
+ break;
+ }
+
+ int numConsumed = mStreamInfo->numTotalBytes;
+ decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ 0 /* flags */);
+
+ numConsumed = mStreamInfo->numTotalBytes - numConsumed;
+
+ if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+ break;
+ }
+ inInfo.decodedSizes.push_back(numConsumed);
+
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
+
+ if (bytesValid[0] != 0) {
+ ALOGE("bytesValid[0] != 0 should never happen");
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ size_t numOutBytes =
+ mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
+
+ if (decoderErr == AAC_DEC_OK) {
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+ } else {
+ ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
+
+ memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
+
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+
+ // Discard input buffer.
+ size = 0;
+
+ aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+
+ // After an error, replace bufferSize with the sum of the
+ // decodedSizes to resynchronize the in/out lists.
+ inInfo.decodedSizes.pop_back();
+ inInfo.bufferSize = std::accumulate(
+ inInfo.decodedSizes.begin(), inInfo.decodedSizes.end(), 0);
+
+ // fall through
+ }
+
+ /*
+ * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
+ * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
+ * rate system and the sampling rate in the final output is actually
+ * doubled compared with the core AAC decoder sampling rate.
+ *
+ * Explicit signalling is done by explicitly defining SBR audio object
+ * type in the bitstream. Implicit signalling is done by embedding
+ * SBR content in AAC extension payload specific to SBR, and hence
+ * requires an AAC decoder to perform pre-checks on actual audio frames.
+ *
+ * Thus, we could not say for sure whether a stream is
+ * AAC+/eAAC+ until the first data frame is decoded.
+ */
+ if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
+ // TODO:
+#if 0
+ if ((mInputBufferCount > 2) && (mOutputBufferCount <= 1)) {
+ ALOGW("Invalid AAC stream");
+ mSignalledError = true;
+ // TODO: notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return false;
+ }
+#endif
+ }
+ ALOGV("size = %zu", size);
+ } while (decoderErr == AAC_DEC_OK);
+ }
+
+ int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels;
+
+ mBuffersInfo.push_back(std::move(inInfo));
+
+ if (!eos && mOutputDelayCompensated < outputDelay) {
+ // discard outputDelay at the beginning
+ int32_t toCompensate = outputDelay - mOutputDelayCompensated;
+ int32_t discard = outputDelayRingBufferSamplesAvailable();
+ if (discard > toCompensate) {
+ discard = toCompensate;
+ }
+ int32_t discarded = outputDelayRingBufferGetSamples(0, discard);
+ mOutputDelayCompensated += discarded;
+ return;
+ }
+
+ if (eos) {
+ drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
+ } else {
+ drainRingBuffer(work, pool, false /* not EOS */);
+ }
+}
+
+c2_status_t C2SoftAac::drainInternal(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work) {
+ if (drainMode == NO_DRAIN) {
+ ALOGW("drain with NO_DRAIN: no-op");
+ return C2_OK;
+ }
+ if (drainMode == DRAIN_CHAIN) {
+ ALOGW("DRAIN_CHAIN not supported");
+ return C2_OMITTED;
+ }
+
+ bool eos = (drainMode == DRAIN_COMPONENT_WITH_EOS);
+
+ drainDecoder();
+ drainRingBuffer(work, pool, eos);
+
+ if (eos) {
+ auto fillEmptyWork = [](const std::unique_ptr<C2Work> &work) {
+ work->worklets.front()->output.flags = work->input.flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.buffers.emplace_back(nullptr);
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->worklets_processed = 1u;
+ };
+ while (mBuffersInfo.size() > 1u) {
+ finish(mBuffersInfo.front().frameIndex, fillEmptyWork);
+ mBuffersInfo.pop_front();
+ }
+ if (work->worklets_processed == 0u) {
+ fillEmptyWork(work);
+ }
+ mBuffersInfo.clear();
+ }
+
+ return C2_OK;
+}
+
+c2_status_t C2SoftAac::drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ return drainInternal(drainMode, pool, nullptr);
+}
+
+c2_status_t C2SoftAac::onFlush_sm() {
+ drainDecoder();
+ mBuffersInfo.clear();
+
+ int avail;
+ while ((avail = outputDelayRingBufferSamplesAvailable()) > 0) {
+ if (avail > mStreamInfo->frameSize * mStreamInfo->numChannels) {
+ avail = mStreamInfo->frameSize * mStreamInfo->numChannels;
+ }
+ int32_t ns = outputDelayRingBufferGetSamples(0, avail);
+ if (ns != avail) {
+ ALOGW("not a complete frame of samples available");
+ break;
+ }
+ }
+ mOutputDelayRingBufferReadPos = mOutputDelayRingBufferWritePos;
+
+ return C2_OK;
+}
+
+void C2SoftAac::drainDecoder() {
+ // flush decoder until outputDelay is compensated
+ while (mOutputDelayCompensated > 0) {
+ // a buffer big enough for MAX_CHANNEL_COUNT channels of decoded HE-AAC
+ INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
+
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
+
+ AAC_DECODER_ERROR decoderErr =
+ aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ AACDEC_FLUSH);
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
+
+ int32_t tmpOutBufferSamples = mStreamInfo->frameSize * mStreamInfo->numChannels;
+ if (tmpOutBufferSamples > mOutputDelayCompensated) {
+ tmpOutBufferSamples = mOutputDelayCompensated;
+ }
+ outputDelayRingBufferPutSamples(tmpOutBuffer, tmpOutBufferSamples);
+
+ mOutputDelayCompensated -= tmpOutBufferSamples;
+ }
+}
+
+class C2SoftAacDecFactory : public C2ComponentFactory {
+public:
+    virtual c2_status_t createComponent(
+            c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+            std::function<void(::android::C2Component*)> deleter) override {
+        *component = std::shared_ptr<C2Component>(new C2SoftAac("aac", id), deleter);
+        return C2_OK;
+    }
+
+    virtual c2_status_t createInterface(
+            c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+            std::function<void(::android::C2ComponentInterface*)> deleter) override {
+        *interface =
+                SimpleC2Interface::Builder("aac", id, deleter)
+                .inputFormat(C2FormatCompressed)
+                .outputFormat(C2FormatAudio) // audio decoder; must match component ctor's output format
+                .build();
+        return C2_OK;
+    }
+
+    virtual ~C2SoftAacDecFactory() override = default;
+};
+
+} // namespace android
+
+extern "C" ::android::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
+ return new ::android::C2SoftAacDecFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::android::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
+ delete factory;
+}
diff --git a/media/libstagefright/codecs/aacdec/C2SoftAac.h b/media/libstagefright/codecs/aacdec/C2SoftAac.h
new file mode 100644
index 0000000..b877635
--- /dev/null
+++ b/media/libstagefright/codecs/aacdec/C2SoftAac.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2_SOFT_AAC_H_
+#define C2_SOFT_AAC_H_
+
+#include <SimpleC2Component.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+#include "aacdecoder_lib.h"
+#include "DrcPresModeWrap.h"
+
+namespace android {
+
+struct C2SoftAac : public SimpleC2Component {
+ C2SoftAac(const char *name, c2_node_id_t id);
+ virtual ~C2SoftAac();
+
+ // From SimpleC2Component
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool) override;
+ c2_status_t drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) override;
+
+private:
+ enum {
+ kNumDelayBlocksMax = 8,
+ };
+
+ HANDLE_AACDECODER mAACDecoder;
+ CStreamInfo *mStreamInfo;
+ bool mIsADTS;
+ bool mIsFirst;
+ size_t mInputBufferCount;
+ size_t mOutputBufferCount;
+ bool mSignalledError;
+ struct Info {
+ uint64_t frameIndex;
+ size_t bufferSize;
+ uint64_t timestamp;
+ std::vector<int32_t> decodedSizes;
+ };
+ std::list<Info> mBuffersInfo;
+
+ CDrcPresModeWrapper mDrcWrap;
+
+ enum {
+ NONE,
+ AWAITING_DISABLED,
+ AWAITING_ENABLED
+ } mOutputPortSettingsChange;
+
+ void initPorts();
+ status_t initDecoder();
+ bool isConfigured() const;
+ void drainDecoder();
+
+ void drainRingBuffer(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool,
+ bool eos);
+ c2_status_t drainInternal(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work);
+
+// delay compensation
+ bool mEndOfInput;
+ bool mEndOfOutput;
+ int32_t mOutputDelayCompensated;
+ int32_t mOutputDelayRingBufferSize;
+ short *mOutputDelayRingBuffer;
+ int32_t mOutputDelayRingBufferWritePos;
+ int32_t mOutputDelayRingBufferReadPos;
+ int32_t mOutputDelayRingBufferFilled;
+ bool outputDelayRingBufferPutSamples(INT_PCM *samples, int numSamples);
+ int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples);
+ int32_t outputDelayRingBufferSamplesAvailable();
+ int32_t outputDelayRingBufferSpaceLeft();
+
+ DISALLOW_EVIL_CONSTRUCTORS(C2SoftAac);
+};
+
+} // namespace android
+
+#endif // C2_SOFT_AAC_H_
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index 259fb25..04e5dc1 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -46,7 +46,6 @@
static_libs: [
"libavcdec",
- "libstagefright_codec2_vndk",
],
srcs: ["C2SoftAvcDec.cpp"],
@@ -58,21 +57,16 @@
include_dirs: [
"external/libavc/decoder",
"external/libavc/common",
- "frameworks/av/media/libstagefright/codec2/include",
- "frameworks/av/media/libstagefright/codec2/vndk/include",
],
shared_libs: [
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.mapper@2.0",
- "libhidlbase",
- "libion",
"liblog",
+ "libutils",
"libmedia",
"libstagefright_codec2",
+ "libstagefright_codec2_vndk",
"libstagefright_foundation",
"libstagefright_simple_c2component",
- "libutils",
],
sanitize: {
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
index 01d120e..ffe6332 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-//#define LOG_NDEBUG 0
+#define LOG_NDEBUG 0
#define LOG_TAG "C2SoftAvcDec"
#include <utils/Log.h>
@@ -29,6 +29,7 @@
#include "C2SoftAvcDec.h"
#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
@@ -68,9 +69,9 @@
#define IVDEXT_CMD_CTL_SET_NUM_CORES \
(IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_SET_NUM_CORES
-
namespace {
+#if 0
using SupportedValuesWithFields = C2SoftAvcDecIntf::SupportedValuesWithFields;
struct ValidateParam {
@@ -201,15 +202,23 @@
private:
const char *mExpected;
};
+#endif
-class GraphicBuffer : public C2Buffer {
-public:
- explicit GraphicBuffer(const std::shared_ptr<C2GraphicBlock> &block)
- : C2Buffer({ block->share(C2Rect(block->width(), block->height()), ::android::C2Fence()) }) {}
-};
+void fillEmptyWork(const std::unique_ptr<C2Work> &work) {
+ uint32_t flags = 0;
+ if ((work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
+ flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ }
+ work->worklets.front()->output.flags = (C2BufferPack::flags_t)flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.buffers.emplace_back(nullptr);
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->worklets_processed = 1u;
+}
} // namespace
+#if 0
#define CASE(member) \
case decltype(component->member)::CORE_INDEX: \
return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor( \
@@ -599,13 +608,18 @@
frameRate.restrictingFields.clear();
frameRate.restrictingFields.insert(fields.begin(), fields.end());
}
+#endif
///////////////////////////////////////////////////////////////////////////////
C2SoftAvcDec::C2SoftAvcDec(
const char *name,
c2_node_id_t id)
- : SimpleC2Component(std::make_shared<C2SoftAvcDecIntf>(name, id)),
+ : SimpleC2Component(
+ SimpleC2Interface::Builder(name, id)
+ .inputFormat(C2FormatCompressed)
+ .outputFormat(C2FormatVideo)
+ .build()),
mCodecCtx(NULL),
mFlushOutBuffer(NULL),
mIvColorFormat(IV_YUV_420P),
@@ -684,11 +698,6 @@
return C2_OK;
}
-c2_status_t C2SoftAvcDec::onDrain_nb() {
- // TODO
- return C2_OK;
-}
-
static void *ivd_aligned_malloc(void *ctxt, WORD32 alignment, WORD32 size) {
(void) ctxt;
return memalign(alignment, size);
@@ -875,7 +884,6 @@
ALOGE("Error in create: 0x%x",
s_create_op.s_ivd_create_op_t.u4_error_code);
deInitDecoder();
- mCodecCtx = NULL;
return UNKNOWN_ERROR;
}
}
@@ -914,6 +922,7 @@
s_delete_op.s_ivd_delete_op_t.u4_error_code);
return UNKNOWN_ERROR;
}
+ mCodecCtx = NULL;
}
mChangingResolution = false;
@@ -1018,25 +1027,67 @@
return true;
}
-bool C2SoftAvcDec::process(const std::unique_ptr<C2Work> &work, std::shared_ptr<C2BlockPool> pool) {
- bool isInFlush = false;
- bool eos = false;
+c2_status_t C2SoftAvcDec::ensureDecoderState(const std::shared_ptr<C2BlockPool> &pool) {
+ if (NULL == mCodecCtx) {
+ if (OK != initDecoder()) {
+ ALOGE("Failed to initialize decoder");
+ // TODO: notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return C2_CORRUPTED;
+ }
+ }
+ if (mWidth != mStride) {
+ /* Set the run-time (dynamic) parameters */
+ mStride = mWidth;
+ setParams(mStride);
+ }
- bool done = false;
- work->result = C2_OK;
+ if (!mAllocatedBlock) {
+ // TODO: error handling
+ // TODO: format & usage
+ uint32_t format = HAL_PIXEL_FORMAT_YV12;
+ C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
+ ALOGV("using allocator %u", pool->getAllocatorId());
- const C2ConstLinearBlock &buffer =
- work->input.buffers[0]->data().linearBlocks().front();
- auto fillEmptyWork = [](const std::unique_ptr<C2Work> &work) {
+ (void)pool->fetchGraphicBlock(
+ mWidth, mHeight, format, usage, &mAllocatedBlock);
+ ALOGV("provided (%dx%d) required (%dx%d)",
+ mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
+ }
+ return C2_OK;
+}
+
+void C2SoftAvcDec::finishWork(uint64_t index, const std::unique_ptr<C2Work> &work) {
+ std::shared_ptr<C2Buffer> buffer = createGraphicBuffer(std::move(mAllocatedBlock));
+ auto fillWork = [buffer](const std::unique_ptr<C2Work> &work) {
uint32_t flags = 0;
- if ((work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
+ if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
flags |= C2BufferPack::FLAG_END_OF_STREAM;
+ ALOGV("EOS");
}
work->worklets.front()->output.flags = (C2BufferPack::flags_t)flags;
work->worklets.front()->output.buffers.clear();
- work->worklets.front()->output.buffers.emplace_back(nullptr);
+ work->worklets.front()->output.buffers.push_back(buffer);
work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->worklets_processed = 1u;
};
+ if (work && index == work->input.ordinal.frame_index) {
+ fillWork(work);
+ } else {
+ finish(index, fillWork);
+ }
+}
+
+void C2SoftAvcDec::process(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ bool eos = false;
+
+ work->result = C2_OK;
+ work->worklets_processed = 0u;
+
+ const C2ConstLinearBlock &buffer =
+ work->input.buffers[0]->data().linearBlocks().front();
if (buffer.capacity() == 0) {
ALOGV("empty input: %llu", (long long)work->input.ordinal.frame_index);
// TODO: result?
@@ -1044,51 +1095,21 @@
if ((work->input.flags & C2BufferPack::FLAG_END_OF_STREAM)) {
eos = true;
}
- done = true;
+ return;
} else if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
ALOGV("input EOS: %llu", (long long)work->input.ordinal.frame_index);
eos = true;
}
- std::unique_ptr<C2ReadView> deferred;
- std::unique_ptr<C2ReadView> input(new C2ReadView(
- work->input.buffers[0]->data().linearBlocks().front().map().get()));
+ C2ReadView input = work->input.buffers[0]->data().linearBlocks().front().map().get();
uint32_t workIndex = work->input.ordinal.frame_index & 0xFFFFFFFF;
size_t inOffset = 0u;
- while (input || isInFlush) {
+ while (inOffset < input.capacity()) {
if (mSignalledError) {
- return done;
+ break;
}
- if (NULL == mCodecCtx) {
- if (OK != initDecoder()) {
- ALOGE("Failed to initialize decoder");
- // TODO: notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
- mSignalledError = true;
- return done;
- }
- }
- if (mWidth != mStride) {
- /* Set the run-time (dynamic) parameters */
- mStride = mWidth;
- setParams(mStride);
- }
-
- if (isInFlush) {
- ALOGV("flushing");
- }
-
- if (!mAllocatedBlock) {
- // TODO: error handling
- // TODO: format & usage
- uint32_t format = HAL_PIXEL_FORMAT_YV12;
- C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
- ALOGV("using allocator %u", pool->getAllocatorId());
-
- (void)pool->fetchGraphicBlock(
- mWidth, mHeight, format, usage, &mAllocatedBlock);
- ALOGV("provided (%dx%d) required (%dx%d)", mAllocatedBlock->width(), mAllocatedBlock->height(), mWidth, mHeight);
- }
+ (void)ensureDecoderState(pool);
C2GraphicView output = mAllocatedBlock->map().get();
if (output.error() != OK) {
ALOGE("mapped err = %d", output.error());
@@ -1099,11 +1120,11 @@
WORD32 timeDelay, timeTaken;
//size_t sizeY, sizeUV;
- if (!setDecodeArgs(&s_dec_ip, &s_dec_op, input.get(), &output, workIndex, inOffset)) {
+ if (!setDecodeArgs(&s_dec_ip, &s_dec_op, &input, &output, workIndex, inOffset)) {
ALOGE("Decoder arg setup failed");
// TODO: notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
- return done;
+ break;
}
ALOGV("Decoder arg setup succeeded");
// If input dump is enabled, then write to file
@@ -1116,6 +1137,7 @@
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ ALOGV("status = %d, error_code = %d", status, (s_dec_op.u4_error_code & 0xFF));
bool unsupportedResolution =
(IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_dec_op.u4_error_code & 0xFF));
@@ -1125,7 +1147,7 @@
ALOGE("Unsupported resolution : %dx%d", mWidth, mHeight);
// TODO: notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
mSignalledError = true;
- return done;
+ break;
}
bool allocationFailed = (IVD_MEM_ALLOC_FAILED == (s_dec_op.u4_error_code & 0xFF));
@@ -1133,7 +1155,7 @@
ALOGE("Allocation failure in decoder");
// TODO: notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
mSignalledError = true;
- return done;
+ break;
}
bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
@@ -1146,26 +1168,23 @@
PRINT_TIME("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
- if (input) {
- ALOGV("bytes total=%u", input->capacity());
- }
+ ALOGV("bytes total=%u", input.capacity());
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
- if (1 != s_dec_op.u4_frame_decoded_flag && input) {
+ if (1 != s_dec_op.u4_frame_decoded_flag) {
/* If the input did not contain picture data, return work without
* buffer */
ALOGV("no picture data: %u", workIndex);
fillEmptyWork(work);
- done = true;
}
- // If the decoder is in the changing resolution mode and there is no output present,
- // that means the switching is done and it's ready to reset the decoder and the plugin.
- if (mChangingResolution && !s_dec_op.u4_output_present) {
- ALOGV("changing resolution");
- mChangingResolution = false;
+ if (resChanged) {
+ ALOGV("res changed");
+ if (mFlushNeeded) {
+ drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
+ }
resetDecoder();
resetPlugin();
mStride = mWidth;
@@ -1173,19 +1192,6 @@
continue;
}
- if (resChanged) {
- ALOGV("res changed");
- mChangingResolution = true;
- if (mFlushNeeded) {
- setFlushMode();
- isInFlush = true;
- deferred = std::move(input);
- }
- continue;
- }
-
- // Combine the resolution change and coloraspects change in one PortSettingChange event
- // if necessary.
if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
uint32_t width = s_dec_op.u4_pic_wd;
uint32_t height = s_dec_op.u4_pic_ht;
@@ -1195,73 +1201,76 @@
mWidth = width;
mHeight = height;
}
- } else if (mUpdateColorAspects) {
+ // TODO: continue?
+ }
+
+ if (mUpdateColorAspects) {
//notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
// kDescribeColorAspectsIndex, NULL);
ALOGV("update color aspect");
mUpdateColorAspects = false;
- continue;
}
if (s_dec_op.u4_output_present) {
ALOGV("output_present: %d", s_dec_op.u4_ts);
- auto fillWork = [this](const std::unique_ptr<C2Work> &work) {
- uint32_t flags = 0;
- if (work->input.flags & C2BufferPack::FLAG_END_OF_STREAM) {
- flags |= C2BufferPack::FLAG_END_OF_STREAM;
- ALOGV("EOS");
- }
- work->worklets.front()->output.flags = (C2BufferPack::flags_t)flags;
- work->worklets.front()->output.buffers.clear();
- work->worklets.front()->output.buffers.emplace_back(
- std::make_shared<GraphicBuffer>(std::move(mAllocatedBlock)));
- work->worklets.front()->output.ordinal = work->input.ordinal;
- };
- if (s_dec_op.u4_ts != workIndex) {
- finish(s_dec_op.u4_ts, fillWork);
- } else {
- fillWork(work);
- done = true;
- }
- } else if (isInFlush) {
- ALOGV("flush complete");
- /* If in flush mode and no output is returned by the codec,
- * then come out of flush mode */
- isInFlush = false;
-
- /* If EOS was recieved on input port and there is no output
- * from the codec, then signal EOS on output port */
- if (eos) {
- // TODO: It's an error if not done.
-
- resetPlugin();
- return done;
- }
-
- input = std::move(deferred);
+ finishWork(s_dec_op.u4_ts, work);
}
- if (input) {
- inOffset += s_dec_op.u4_num_bytes_consumed;
- if (inOffset >= input->capacity()) {
- /* If input EOS is seen and decoder is not in flush mode,
- * set the decoder in flush mode.
- * There can be a case where EOS is sent along with last picture data
- * In that case, only after decoding that input data, decoder has to be
- * put in flush. This case is handled here */
- if (eos && !isInFlush) {
- setFlushMode();
- isInFlush = true;
- }
- if (isInFlush) {
- input.reset();
- } else {
- break;
- }
- }
+ inOffset += s_dec_op.u4_num_bytes_consumed;
+ }
+ if (inOffset >= input.capacity()) {
+ /* If input EOS is seen, drain the decoder.
+ * There can be a case where EOS is sent along with last picture data
+ * In that case, only after decoding that input data, decoder has to be
+ * put in flush. This case is handled here */
+ if (eos) {
+ drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
}
}
- return done;
+}
+
+c2_status_t C2SoftAvcDec::drainInternal(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work) {
+ if (drainMode == NO_DRAIN) {
+ ALOGW("drain with NO_DRAIN: no-op");
+ return C2_OK;
+ }
+ if (drainMode == DRAIN_CHAIN) {
+ ALOGW("DRAIN_CHAIN not supported");
+ return C2_OMITTED;
+ }
+ setFlushMode();
+
+ while (true) {
+ (void)ensureDecoderState(pool);
+ C2GraphicView output = mAllocatedBlock->map().get();
+ if (output.error() != OK) {
+ ALOGE("mapped err = %d", output.error());
+ }
+
+ ivd_video_decode_ip_t s_dec_ip;
+ ivd_video_decode_op_t s_dec_op;
+
+ setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, &output, 0, 0u);
+
+ (void)ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+
+ if (s_dec_op.u4_output_present) {
+ ALOGV("output_present: %d", s_dec_op.u4_ts);
+ finishWork(s_dec_op.u4_ts, work);
+ } else {
+ break;
+ }
+ }
+
+ if (drainMode == DRAIN_COMPONENT_WITH_EOS
+ && work && work->worklets_processed == 0u) {
+ fillEmptyWork(work);
+ }
+
+ return C2_OK;
}
bool C2SoftAvcDec::colorAspectsDiffer(
@@ -1275,6 +1284,12 @@
return false;
}
+c2_status_t C2SoftAvcDec::drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) {
+ return drainInternal(drainMode, pool, nullptr);
+}
+
void C2SoftAvcDec::updateFinalColorAspects(
const ColorAspects &otherAspects, const ColorAspects &preferredAspects) {
Mutex::Autolock autoLock(mColorAspectsLock);
@@ -1323,7 +1338,11 @@
c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
std::function<void(::android::C2ComponentInterface*)> deleter) override {
*interface =
- std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
+ SimpleC2Interface::Builder("avc", id, deleter)
+ .inputFormat(C2FormatCompressed)
+ .outputFormat(C2FormatVideo)
+ .build();
+// std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
return C2_OK;
}
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
index aa22e23..0e8cf77 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
@@ -67,7 +67,7 @@
diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
((end).tv_usec - (start).tv_usec);
-
+#if 0
class C2SoftAvcDecIntf : public C2ComponentInterface {
public:
struct SupportedValuesWithFields {
@@ -139,6 +139,7 @@
void updateSupportedValues();
friend class C2SoftAvcDec;
};
+#endif
class C2SoftAvcDec : public SimpleC2Component {
public:
@@ -146,15 +147,17 @@
virtual ~C2SoftAvcDec();
// From SimpleC2Component
- virtual c2_status_t onInit() override;
- virtual c2_status_t onStop() override;
- virtual void onReset() override;
- virtual void onRelease() override;
- virtual c2_status_t onFlush_sm() override;
- virtual c2_status_t onDrain_nb() override;
- virtual bool process(
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(
const std::unique_ptr<C2Work> &work,
- std::shared_ptr<C2BlockPool> pool) override;
+ const std::shared_ptr<C2BlockPool> &pool) override;
+ c2_status_t drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) override;
private:
Mutex mColorAspectsLock;
@@ -216,6 +219,13 @@
status_t resetDecoder();
status_t resetPlugin();
+ c2_status_t ensureDecoderState(const std::shared_ptr<C2BlockPool> &pool);
+ void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work);
+ c2_status_t drainInternal(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool,
+ const std::unique_ptr<C2Work> &work);
+
bool setDecodeArgs(
ivd_video_decode_ip_t *ps_dec_ip,
ivd_video_decode_op_t *ps_dec_op,
diff --git a/media/libstagefright/codecs/cmds/Android.bp b/media/libstagefright/codecs/cmds/Android.bp
index ad0bd2d..40f1a3d 100644
--- a/media/libstagefright/codecs/cmds/Android.bp
+++ b/media/libstagefright/codecs/cmds/Android.bp
@@ -6,30 +6,21 @@
],
include_dirs: [
- "frameworks/av/media/libstagefright/codec2/include",
- "frameworks/av/media/libstagefright/codec2/vndk/include",
],
shared_libs: [
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.mapper@2.0",
"libbinder",
"libcutils",
"libgui",
- "libhidlbase",
- "libion",
"liblog",
"libstagefright",
"libstagefright_codec2",
+ "libstagefright_codec2_vndk",
"libstagefright_foundation",
"libui",
"libutils",
],
- static_libs: [
- "libstagefright_codec2_vndk",
- ],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/codecs/cmds/codec2.cpp b/media/libstagefright/codecs/cmds/codec2.cpp
index 5a225f1..78fb527 100644
--- a/media/libstagefright/codecs/cmds/codec2.cpp
+++ b/media/libstagefright/codecs/cmds/codec2.cpp
@@ -52,6 +52,7 @@
#include <gui/SurfaceComposerClient.h>
#include <util/C2ParamUtils.h>
+#include <C2AllocatorGralloc.h>
#include <C2Buffer.h>
#include <C2BufferPriv.h>
#include <C2Component.h>
@@ -174,6 +175,7 @@
void SimplePlayer::onWorkDone(
std::weak_ptr<C2Component> component, std::vector<std::unique_ptr<C2Work>> workItems) {
+ ALOGV("SimplePlayer::onWorkDone");
(void) component;
ULock l(mProcessedLock);
for (auto & item : workItems) {
@@ -245,31 +247,36 @@
}
int slot;
sp<Fence> fence;
+ ALOGV("Render: Frame #%" PRId64, work->worklets.front()->output.ordinal.frame_index);
const std::shared_ptr<C2Buffer> &output = work->worklets.front()->output.buffers[0];
- const C2ConstGraphicBlock &block = output->data().graphicBlocks().front();
- sp<GraphicBuffer> buffer(new GraphicBuffer(
- block.handle(),
- GraphicBuffer::CLONE_HANDLE,
- block.width(),
- block.height(),
- HAL_PIXEL_FORMAT_YV12,
- 1,
- (uint64_t)GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
- block.width()));
+ if (output) {
+ const C2ConstGraphicBlock &block = output->data().graphicBlocks().front();
+ native_handle_t *grallocHandle = UnwrapNativeCodec2GrallocHandle(block.handle());
+ sp<GraphicBuffer> buffer(new GraphicBuffer(
+ grallocHandle,
+ GraphicBuffer::CLONE_HANDLE,
+ block.width(),
+ block.height(),
+ HAL_PIXEL_FORMAT_YV12,
+ 1,
+ (uint64_t)GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ block.width()));
+ native_handle_delete(grallocHandle);
- status_t err = igbp->attachBuffer(&slot, buffer);
+ status_t err = igbp->attachBuffer(&slot, buffer);
- IGraphicBufferProducer::QueueBufferInput qbi(
- work->worklets.front()->output.ordinal.timestamp * 1000ll,
- false,
- HAL_DATASPACE_UNKNOWN,
- Rect(block.width(), block.height()),
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW,
- 0,
- Fence::NO_FENCE,
- 0);
- IGraphicBufferProducer::QueueBufferOutput qbo;
- err = igbp->queueBuffer(slot, qbi, &qbo);
+ IGraphicBufferProducer::QueueBufferInput qbi(
+ work->worklets.front()->output.ordinal.timestamp * 1000ll,
+ false,
+ HAL_DATASPACE_UNKNOWN,
+ Rect(block.width(), block.height()),
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW,
+ 0,
+ Fence::NO_FENCE,
+ 0);
+ IGraphicBufferProducer::QueueBufferOutput qbo;
+ err = igbp->queueBuffer(slot, qbi, &qbo);
+ }
work->input.buffers.clear();
work->worklets.clear();
@@ -278,6 +285,7 @@
mWorkQueue.push_back(std::move(work));
mQueueCondition.notify_all();
}
+ ALOGV("render loop finished");
});
long numFrames = 0;
@@ -365,11 +373,12 @@
++numFrames;
}
+ ALOGV("main loop finished");
source->stop();
- component->release();
-
running.store(false);
surfaceThread.join();
+
+ component->release();
printf("\n");
}
diff --git a/media/libstagefright/codecs/tests/Android.mk b/media/libstagefright/codecs/tests/Android.mk
deleted file mode 100644
index ea188ea..0000000
--- a/media/libstagefright/codecs/tests/Android.mk
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- C2SoftAvcDec_test.cpp \
-
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE := c2_google_component_test
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libstagefright_codec2 \
- libstagefright_foundation \
- libstagefright_soft_c2avcdec \
- liblog \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright/codec2/include \
- frameworks/av/media/libstagefright/codec2/vndk/include \
- frameworks/av/media/libstagefright/codecs/avcdec \
-
-LOCAL_CFLAGS += -Werror -Wall -std=c++14
-LOCAL_CLANG := true
-
-include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/codecs/tests/C2SoftAvcDec_test.cpp b/media/libstagefright/codecs/tests/C2SoftAvcDec_test.cpp
deleted file mode 100644
index ca26a1d..0000000
--- a/media/libstagefright/codecs/tests/C2SoftAvcDec_test.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2SoftAvcDec_test"
-#include <utils/Log.h>
-
-#include <gtest/gtest.h>
-
-#include <media/stagefright/foundation/MediaDefs.h>
-
-#include "C2SoftAvcDec.h"
-
-namespace android {
-
-namespace {
-
-template <class T>
-std::unique_ptr<T> alloc_unique_cstr(const char *cstr) {
- std::unique_ptr<T> ptr = T::alloc_unique(strlen(cstr) + 1);
- strcpy(ptr->m.value, cstr);
- return ptr;
-}
-
-} // namespace
-
-
-class C2SoftAvcDecTest : public ::testing::Test {
-public:
- C2SoftAvcDecTest() : mIntf(new C2SoftAvcDecIntf("dummy", 0u)) {}
- ~C2SoftAvcDecTest() = default;
-
- template <typename T>
- void testReadOnlyParam(const T *expected, const T *invalid);
-
- template <typename T>
- void testReadOnlyParamOnStack(const T *expected, const T *invalid);
-
- template <typename T>
- void testReadOnlyParamOnHeap(const T *expected, const T *invalid);
-
- template <typename T>
- void testReadOnlyFlexParam(
- const std::unique_ptr<T> &expected, const std::unique_ptr<T> &invalid);
-
-protected:
- std::shared_ptr<C2SoftAvcDecIntf> mIntf;
-};
-
-template <typename T>
-void C2SoftAvcDecTest::testReadOnlyParam(const T *expected, const T *invalid) {
- testReadOnlyParamOnStack(expected, invalid);
- testReadOnlyParamOnHeap(expected, invalid);
-}
-
-template <typename T>
-void C2SoftAvcDecTest::testReadOnlyParamOnStack(const T *expected, const T *invalid) {
- T param;
- ASSERT_EQ(C2_OK, mIntf->query_vb({¶m}, {}, C2_DONT_BLOCK, nullptr));
- ASSERT_EQ(*expected, param);
-
- std::vector<C2Param * const> params{ (C2Param * const)invalid };
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
-
- // The param must not change after failed config.
- ASSERT_EQ(C2_OK, mIntf->query_vb({¶m}, {}, C2_DONT_BLOCK, nullptr));
- ASSERT_EQ(*expected, param);
-}
-
-template <typename T>
-void C2SoftAvcDecTest::testReadOnlyParamOnHeap(const T *expected, const T *invalid) {
- std::vector<std::unique_ptr<C2Param>> heapParams;
-
- uint32_t index = expected->index();
-
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- ASSERT_EQ(*expected, *heapParams[0]);
-
- std::vector<C2Param * const> params{ (C2Param * const)invalid };
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
-
- // The param must not change after failed config.
- heapParams.clear();
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- ASSERT_EQ(*expected, *heapParams[0]);
-}
-
-template <typename T>
-void C2SoftAvcDecTest::testReadOnlyFlexParam(
- const std::unique_ptr<T> &expected, const std::unique_ptr<T> &invalid) {
- std::vector<std::unique_ptr<C2Param>> heapParams;
-
- uint32_t index = expected->index();
-
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- ASSERT_EQ(*expected, *heapParams[0]);
-
- std::vector<C2Param * const> params{ invalid.get() };
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- ASSERT_EQ(C2_BAD_VALUE, mIntf->config_vb(params, C2_DONT_BLOCK, &failures));
-
- // The param must not change after failed config.
- heapParams.clear();
- ASSERT_EQ(C2_OK, mIntf->query_vb({}, {index}, C2_DONT_BLOCK, &heapParams));
- ASSERT_EQ(1u, heapParams.size());
- ASSERT_EQ(*expected, *heapParams[0]);
-}
-
-
-TEST_F(C2SoftAvcDecTest, TestNameAndId) {
- EXPECT_STREQ("dummy", mIntf->getName().c_str());
- EXPECT_EQ(0u, mIntf->getId());
-}
-
-TEST_F(C2SoftAvcDecTest, TestDomainInfo) {
- C2ComponentDomainInfo expected(C2DomainVideo);
- C2ComponentDomainInfo invalid(C2DomainAudio);
- testReadOnlyParam(&expected, &invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestInputStreamCount) {
- C2PortStreamCountConfig::input expected(1);
- C2PortStreamCountConfig::input invalid(100);
- testReadOnlyParam(&expected, &invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestOutputStreamCount) {
- C2PortStreamCountConfig::output expected(1);
- C2PortStreamCountConfig::output invalid(100);
- testReadOnlyParam(&expected, &invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestInputPortMime) {
- std::unique_ptr<C2PortMimeConfig::input> expected(
- alloc_unique_cstr<C2PortMimeConfig::input>(MEDIA_MIMETYPE_VIDEO_AVC));
- std::unique_ptr<C2PortMimeConfig::input> invalid(
- alloc_unique_cstr<C2PortMimeConfig::input>(MEDIA_MIMETYPE_VIDEO_RAW));
- testReadOnlyFlexParam(expected, invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestOutputPortMime) {
- std::unique_ptr<C2PortMimeConfig::output> expected(
- alloc_unique_cstr<C2PortMimeConfig::output>(MEDIA_MIMETYPE_VIDEO_RAW));
- std::unique_ptr<C2PortMimeConfig::output> invalid(
- alloc_unique_cstr<C2PortMimeConfig::output>(MEDIA_MIMETYPE_VIDEO_AVC));
- testReadOnlyFlexParam(expected, invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestInputStreamFormat) {
- C2StreamFormatConfig::input expected(0u, C2FormatCompressed);
- C2StreamFormatConfig::input invalid(0u, C2FormatVideo);
- testReadOnlyParam(&expected, &invalid);
-}
-
-TEST_F(C2SoftAvcDecTest, TestOutputStreamFormat) {
- C2StreamFormatConfig::output expected(0u, C2FormatVideo);
- C2StreamFormatConfig::output invalid(0u, C2FormatCompressed);
- testReadOnlyParam(&expected, &invalid);
-}
-
-} // namespace android
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index 88a8351..c4eaa27 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -398,6 +398,7 @@
}
// TODO: move this into a Video HAL
+const static
ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandardFallbacks {
{
{ CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT470_6M } },
@@ -420,6 +421,7 @@
}
};
+const static
ALookup<CU::ColorStandard, CA::Primaries> sStandardPrimariesFallbacks {
{
{ CU::kColorStandardFilm, CA::PrimariesGenericFilm },
@@ -430,7 +432,8 @@
}
};
-static ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
+const static
+ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
{
{ HAL_DATASPACE_SRGB, HAL_DATASPACE_V0_SRGB },
{ HAL_DATASPACE_BT709, HAL_DATASPACE_V0_BT709 },
@@ -441,6 +444,73 @@
}
};
+#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
+#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
+
+const static
+ALookup<CU::ColorStandard, uint32_t> sGfxStandards {
+ {
+ { CU::kColorStandardUnspecified, GET_HAL_BITFIELD(STANDARD, UNSPECIFIED) },
+ { CU::kColorStandardBT709, GET_HAL_BITFIELD(STANDARD, BT709) },
+ { CU::kColorStandardBT601_625, GET_HAL_BITFIELD(STANDARD, BT601_625) },
+ { CU::kColorStandardBT601_625_Unadjusted, GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED) },
+ { CU::kColorStandardBT601_525, GET_HAL_BITFIELD(STANDARD, BT601_525) },
+ { CU::kColorStandardBT601_525_Unadjusted, GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED) },
+ { CU::kColorStandardBT2020, GET_HAL_BITFIELD(STANDARD, BT2020) },
+ { CU::kColorStandardBT2020Constant, GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE) },
+ { CU::kColorStandardBT470M, GET_HAL_BITFIELD(STANDARD, BT470M) },
+ { CU::kColorStandardFilm, GET_HAL_BITFIELD(STANDARD, FILM) },
+ { CU::kColorStandardDCI_P3, GET_HAL_BITFIELD(STANDARD, DCI_P3) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorStandardUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT709 == 1, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT601_625 == 2, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT601_525 == 4, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT2020 == 6, "SDK mismatch"); // N
+
+const static
+ALookup<CU::ColorTransfer, uint32_t> sGfxTransfers {
+ {
+ { CU::kColorTransferUnspecified, GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED) },
+ { CU::kColorTransferLinear, GET_HAL_BITFIELD(TRANSFER, LINEAR) },
+ { CU::kColorTransferSRGB, GET_HAL_BITFIELD(TRANSFER, SRGB) },
+ { CU::kColorTransferSMPTE_170M, GET_HAL_BITFIELD(TRANSFER, SMPTE_170M) },
+ { CU::kColorTransferGamma22, GET_HAL_BITFIELD(TRANSFER, GAMMA2_2) },
+ { CU::kColorTransferGamma28, GET_HAL_BITFIELD(TRANSFER, GAMMA2_8) },
+ { CU::kColorTransferST2084, GET_HAL_BITFIELD(TRANSFER, ST2084) },
+ { CU::kColorTransferHLG, GET_HAL_BITFIELD(TRANSFER, HLG) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorTransferUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorTransferLinear == 1, "SDK mismatch"); // N
+static_assert(CU::kColorTransferSRGB == 2, "SDK mismatch"); // N
+static_assert(CU::kColorTransferSMPTE_170M == 3, "SDK mismatch"); // N
+static_assert(CU::kColorTransferST2084 == 6, "SDK mismatch"); // N
+static_assert(CU::kColorTransferHLG == 7, "SDK mismatch"); // N
+
+const static
+ALookup<CU::ColorRange, uint32_t> sGfxRanges {
+ {
+ { CU::kColorRangeUnspecified, GET_HAL_BITFIELD(RANGE, UNSPECIFIED) },
+ { CU::kColorRangeFull, GET_HAL_BITFIELD(RANGE, FULL) },
+ { CU::kColorRangeLimited, GET_HAL_BITFIELD(RANGE, LIMITED) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorRangeUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorRangeFull == 1, "SDK mismatch"); // N
+static_assert(CU::kColorRangeLimited == 2, "SDK mismatch"); // N
+
+#undef GET_HAL_BITFIELD
+#undef GET_HAL_ENUM
+
+
bool ColorUtils::convertDataSpaceToV0(android_dataspace &dataSpace) {
(void)sLegacyDataSpaceToV0.lookup(dataSpace, &dataSpace);
return (dataSpace & 0xC000FFFF) == 0;
@@ -507,9 +577,23 @@
}
}
+ // assume 1-to-1 mapping to HAL values (to deal with potential vendor extensions)
+ uint32_t gfxRange = range;
+ uint32_t gfxStandard = standard;
+ uint32_t gfxTransfer = transfer;
+ // TRICKY: use & to ensure all three mappings are completed
+ if (!(sGfxRanges.map(range, &gfxRange) & sGfxStandards.map(standard, &gfxStandard)
+ & sGfxTransfers.map(transfer, &gfxTransfer))) {
+ ALOGW("could not safely map platform color aspects (R:%u(%s) S:%u(%s) T:%u(%s) to "
+ "graphics dataspace (R:%u S:%u T:%u)",
+ range, asString(range), standard, asString(standard), transfer, asString(transfer),
+ gfxRange, gfxStandard, gfxTransfer);
+ }
+
android_dataspace dataSpace = (android_dataspace)(
- (range << HAL_DATASPACE_RANGE_SHIFT) | (standard << HAL_DATASPACE_STANDARD_SHIFT) |
- (transfer << HAL_DATASPACE_TRANSFER_SHIFT));
+ (gfxRange << HAL_DATASPACE_RANGE_SHIFT) |
+ (gfxStandard << HAL_DATASPACE_STANDARD_SHIFT) |
+ (gfxTransfer << HAL_DATASPACE_TRANSFER_SHIFT));
(void)sLegacyDataSpaceToV0.rlookup(dataSpace, &dataSpace);
if (!mayExpand) {
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
index b889a02..d6c768d 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
@@ -39,26 +39,28 @@
* vendor-extension section so they won't collide with future platform values.
*/
-#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
-#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
-
+ /**
+ * graphic.h constants changed in Android 8.0 after ColorStandard values were already public
+ * in Android 7.0. We will not deal with the break in graphic.h here, but list the public
+ * Android SDK MediaFormat values here.
+ */
enum ColorStandard : uint32_t {
- kColorStandardUnspecified = GET_HAL_BITFIELD(STANDARD, UNSPECIFIED),
- kColorStandardBT709 = GET_HAL_BITFIELD(STANDARD, BT709),
- kColorStandardBT601_625 = GET_HAL_BITFIELD(STANDARD, BT601_625),
- kColorStandardBT601_625_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED),
- kColorStandardBT601_525 = GET_HAL_BITFIELD(STANDARD, BT601_525),
- kColorStandardBT601_525_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED),
- kColorStandardBT2020 = GET_HAL_BITFIELD(STANDARD, BT2020),
- kColorStandardBT2020Constant = GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE),
- kColorStandardBT470M = GET_HAL_BITFIELD(STANDARD, BT470M),
- kColorStandardFilm = GET_HAL_BITFIELD(STANDARD, FILM),
- kColorStandardMax = GET_HAL_BITFIELD(STANDARD, MASK),
+ kColorStandardUnspecified = 0,
+ kColorStandardBT709 = 1,
+ kColorStandardBT601_625 = 2,
+ kColorStandardBT601_625_Unadjusted = 3, // not in SDK
+ kColorStandardBT601_525 = 4,
+ kColorStandardBT601_525_Unadjusted = 5, // not in SDK
+ kColorStandardBT2020 = 6,
+ kColorStandardBT2020Constant = 7, // not in SDK
+ kColorStandardBT470M = 8, // not in SDK
+ kColorStandardFilm = 9, // not in SDK
+ kColorStandardDCI_P3 = 10, // not in SDK, new in Android 8.0
/* This marks a section of color-standard values that are not supported by graphics HAL,
but track defined color primaries-matrix coefficient combinations in media.
These are stable for a given release. */
- kColorStandardExtendedStart = kColorStandardMax + 1,
+ kColorStandardExtendedStart = 64,
/* This marks a section of color-standard values that are not supported by graphics HAL
nor using media defined color primaries or matrix coefficients. These may differ per
@@ -67,19 +69,19 @@
};
enum ColorTransfer : uint32_t {
- kColorTransferUnspecified = GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED),
- kColorTransferLinear = GET_HAL_BITFIELD(TRANSFER, LINEAR),
- kColorTransferSRGB = GET_HAL_BITFIELD(TRANSFER, SRGB),
- kColorTransferSMPTE_170M = GET_HAL_BITFIELD(TRANSFER, SMPTE_170M),
- kColorTransferGamma22 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_2),
- kColorTransferGamma28 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_8),
- kColorTransferST2084 = GET_HAL_BITFIELD(TRANSFER, ST2084),
- kColorTransferHLG = GET_HAL_BITFIELD(TRANSFER, HLG),
- kColorTransferMax = GET_HAL_BITFIELD(TRANSFER, MASK),
+ kColorTransferUnspecified = 0,
+ kColorTransferLinear = 1,
+ kColorTransferSRGB = 2,
+ kColorTransferSMPTE_170M = 3, // not in SDK
+ kColorTransferGamma22 = 4, // not in SDK
+ kColorTransferGamma28 = 5, // not in SDK
+ kColorTransferST2084 = 6,
+ kColorTransferHLG = 7,
+ kColorTransferGamma26 = 8, // not in SDK, new in Android 8.0
/* This marks a section of color-transfer values that are not supported by graphics HAL,
but track media-defined color-transfer. These are stable for a given release. */
- kColorTransferExtendedStart = kColorTransferMax + 1,
+ kColorTransferExtendedStart = 32,
/* This marks a section of color-transfer values that are not supported by graphics HAL
nor defined by media. These may differ per device. */
@@ -87,23 +89,19 @@
};
enum ColorRange : uint32_t {
- kColorRangeUnspecified = GET_HAL_BITFIELD(RANGE, UNSPECIFIED),
- kColorRangeFull = GET_HAL_BITFIELD(RANGE, FULL),
- kColorRangeLimited = GET_HAL_BITFIELD(RANGE, LIMITED),
- kColorRangeMax = GET_HAL_BITFIELD(RANGE, MASK),
+ kColorRangeUnspecified = 0,
+ kColorRangeFull = 1,
+ kColorRangeLimited = 2,
/* This marks a section of color-transfer values that are not supported by graphics HAL,
but track media-defined color-transfer. These are stable for a given release. */
- kColorRangeExtendedStart = kColorRangeMax + 1,
+ kColorRangeExtendedStart = 8,
/* This marks a section of color-transfer values that are not supported by graphics HAL
nor defined by media. These may differ per device. */
kColorRangeVendorStart = 0x10000,
};
-#undef GET_HAL_BITFIELD
-#undef GET_HAL_ENUM
-
/*
* Static utilities for codec support
*/
@@ -197,7 +195,8 @@
case ColorUtils::kColorStandardBT2020Constant: return "BT2020Constant";
case ColorUtils::kColorStandardBT470M: return "BT470M";
case ColorUtils::kColorStandardFilm: return "Film";
- default: return def;
+ case ColorUtils::kColorStandardDCI_P3: return "DCI_P3";
+ default: return def;
}
}
@@ -212,7 +211,8 @@
case ColorUtils::kColorTransferGamma28: return "Gamma28";
case ColorUtils::kColorTransferST2084: return "ST2084";
case ColorUtils::kColorTransferHLG: return "HLG";
- default: return def;
+ case ColorUtils::kColorTransferGamma26: return "Gamma26";
+ default: return def;
}
}
@@ -222,7 +222,7 @@
case ColorUtils::kColorRangeUnspecified: return "Unspecified";
case ColorUtils::kColorRangeFull: return "Full";
case ColorUtils::kColorRangeLimited: return "Limited";
- default: return def;
+ default: return def;
}
}
diff --git a/media/libstagefright/include/CCodecBufferChannel.h b/media/libstagefright/include/CCodecBufferChannel.h
index 354cee2..c5062d6 100644
--- a/media/libstagefright/include/CCodecBufferChannel.h
+++ b/media/libstagefright/include/CCodecBufferChannel.h
@@ -33,24 +33,36 @@
namespace android {
/**
- * BufferChannelBase implementation for ACodec.
+ * BufferChannelBase implementation for CCodec.
*/
class CCodecBufferChannel : public BufferChannelBase {
public:
+ /**
+ * Base class for representation of buffers at one port.
+ */
class Buffers {
public:
Buffers() = default;
virtual ~Buffers() = default;
- inline void setAlloc(const std::shared_ptr<C2BlockPool> &alloc) { mAlloc = alloc; }
+ /**
+ * Set format for MediaCodec-facing buffers.
+ */
inline void setFormat(const sp<AMessage> &format) { mFormat = format; }
- inline const std::shared_ptr<C2BlockPool> &getAlloc() { return mAlloc; }
+
+ /**
+ * Returns true if the buffers are operating under array mode.
+ */
+ virtual bool isArrayMode() { return false; }
+
+ /**
+ * Fills the vector with MediaCodecBuffer's if in array mode; otherwise,
+ * no-op.
+ */
+ virtual void getArray(Vector<sp<MediaCodecBuffer>> *) {}
protected:
- // Input: this object uses it to allocate input buffers with which the
- // client fills.
- // Output: this object passes it to the component.
- std::shared_ptr<C2BlockPool> mAlloc;
+ // Format to be used for creating MediaCodec-facing buffers.
sp<AMessage> mFormat;
private:
@@ -62,10 +74,41 @@
using Buffers::Buffers;
virtual ~InputBuffers() = default;
+ /**
+ * Set a block pool to obtain input memory blocks.
+ */
+ inline void setPool(const std::shared_ptr<C2BlockPool> &pool) { mPool = pool; }
+
+ /**
+ * Get a new MediaCodecBuffer for input and its corresponding index.
+ * Returns false if no new buffer can be obtained at the moment.
+ */
virtual bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) = 0;
+
+ /**
+ * Release the buffer obtained from requestNewBuffer() and get the
+ * associated C2Buffer object back. Returns empty shared_ptr if the
+ * buffer is not on file.
+ */
virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+
+ /**
+ * Flush internal state. After this call, no index or buffer previously
+ * returned from requestNewBuffer() is valid.
+ */
virtual void flush() = 0;
+ /**
+ * Return array-backed version of input buffers. The returned object
+ * shall retain the internal state so that it will honor index and
+ * buffer from previous calls of requestNewBuffer().
+ */
+ virtual std::unique_ptr<InputBuffers> toArrayMode() = 0;
+
+ protected:
+ // Pool to obtain blocks for input buffers.
+ std::shared_ptr<C2BlockPool> mPool;
+
private:
DISALLOW_EVIL_CONSTRUCTORS(InputBuffers);
};
@@ -75,12 +118,46 @@
using Buffers::Buffers;
virtual ~OutputBuffers() = default;
+ /**
+ * Register output C2Buffer from the component and obtain corresponding
+ * index and MediaCodecBuffer object. Returns false if registration
+ * fails.
+ */
virtual bool registerBuffer(
const std::shared_ptr<C2Buffer> &buffer,
size_t *index,
sp<MediaCodecBuffer> *codecBuffer) = 0;
+
+ /**
+ * Register codec specific data as a buffer to be consistent with
+ * MediaCodec behavior.
+ */
+ virtual bool registerCsd(
+ const C2StreamCsdInfo::output * /* csd */,
+ size_t * /* index */,
+ sp<MediaCodecBuffer> * /* codecBuffer */) {
+ return false;
+ }
+
+ /**
+ * Release the buffer obtained from registerBuffer() and get the
+ * associated C2Buffer object back. Returns empty shared_ptr if the
+ * buffer is not on file.
+ */
virtual std::shared_ptr<C2Buffer> releaseBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
- virtual void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork);
+
+ /**
+ * Flush internal state. After this call, no index or buffer previously
+ * returned from registerBuffer() is valid.
+ */
+ virtual void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) = 0;
+
+ /**
+ * Return array-backed version of output buffers. The returned object
+ * shall retain the internal state so that it will honor index and
+ * buffer from previous calls of registerBuffer().
+ */
+ virtual std::unique_ptr<OutputBuffers> toArrayMode() = 0;
private:
DISALLOW_EVIL_CONSTRUCTORS(OutputBuffers);
@@ -151,12 +228,34 @@
private:
class QueueGuard;
+ /**
+ * Special mutex-like object with the following properties:
+ *
+ * - At STOPPED state (initial, or after stop())
+ * - QueueGuard object gets created at STOPPED state, and the client is
+ * supposed to return immediately.
+ * - At RUNNING state (after start())
+ * - Each QueueGuard object
+ */
class QueueSync {
public:
+ /**
+ * At construction the sync object is in STOPPED state.
+ */
inline QueueSync() : mCount(-1) {}
~QueueSync() = default;
+ /**
+ * Transition to RUNNING state when stopped. No-op if already in RUNNING
+ * state.
+ */
void start();
+
+ /**
+ * At RUNNING state, wait until all QueueGuard object created during
+ * RUNNING state are destroyed, and then transition to STOPPED state.
+ * No-op if already in STOPPED state.
+ */
void stop();
private:
@@ -186,6 +285,7 @@
std::function<void(status_t, enum ActionCode)> mOnError;
std::shared_ptr<C2BlockPool> mInputAllocator;
QueueSync mQueueSync;
+
Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
diff --git a/packages/MediaUpdate/src/com/android/media/update/ApiFactory.java b/packages/MediaUpdate/src/com/android/media/update/ApiFactory.java
index 1cdd177..abff13e 100644
--- a/packages/MediaUpdate/src/com/android/media/update/ApiFactory.java
+++ b/packages/MediaUpdate/src/com/android/media/update/ApiFactory.java
@@ -18,21 +18,21 @@
import android.content.Context;
import android.media.update.MediaController2Provider;
+import android.media.update.VideoView2Provider;
import android.media.update.StaticProvider;
import android.media.update.ViewProvider;
import android.widget.MediaController2;
+import android.widget.VideoView2;
import com.android.widget.MediaController2Impl;
+import com.android.widget.VideoView2Impl;
public class ApiFactory implements StaticProvider {
- private final Context mContext;
- public ApiFactory(Context context) {
- mContext = context;
- }
-
- public static Object initialize(Context context) throws ReflectiveOperationException {
- return new ApiFactory(context);
+ public static Object initialize(Context appContext, Context libContext)
+ throws ReflectiveOperationException {
+ ApiHelper.initialize(appContext, libContext);
+ return new ApiFactory();
}
@Override
@@ -40,4 +40,9 @@
MediaController2 instance, ViewProvider superProvider) {
return new MediaController2Impl(instance, superProvider);
}
+
+ @Override
+ public VideoView2Provider createVideoView2(VideoView2 instance, ViewProvider superProvider) {
+ return new VideoView2Impl(instance, superProvider);
+ }
}
diff --git a/packages/MediaUpdate/src/com/android/media/update/ApiHelper.java b/packages/MediaUpdate/src/com/android/media/update/ApiHelper.java
new file mode 100644
index 0000000..550da86
--- /dev/null
+++ b/packages/MediaUpdate/src/com/android/media/update/ApiHelper.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.update;
+
+import android.content.Context;
+import android.content.res.Resources;
+
+public class ApiHelper {
+ private static ApiHelper sInstance;
+ private final Context mAppContext;
+ private final Resources mLibResources;
+ private final Resources.Theme mLibTheme;
+
+ public static ApiHelper getInstance() {
+ return sInstance;
+ }
+
+ static void initialize(Context appContext, Context libContext) {
+ if (sInstance == null) {
+ sInstance = new ApiHelper(appContext, libContext);
+ }
+ }
+
+ private ApiHelper(Context appContext, Context libContext) {
+ mAppContext = appContext;
+ mLibResources = libContext.getResources();
+ mLibTheme = libContext.getTheme();
+ }
+
+ public Resources getLibResources() {
+ return mLibResources;
+ }
+
+ public Resources.Theme getLibTheme() {
+ return mLibTheme;
+ }
+}
diff --git a/packages/MediaUpdate/src/com/android/widget/VideoView2Impl.java b/packages/MediaUpdate/src/com/android/widget/VideoView2Impl.java
new file mode 100644
index 0000000..66b5ed5
--- /dev/null
+++ b/packages/MediaUpdate/src/com/android/widget/VideoView2Impl.java
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.graphics.Canvas;
+import android.media.AudioAttributes;
+import android.media.AudioFocusRequest;
+import android.media.MediaPlayer;
+import android.media.update.VideoView2Provider;
+import android.media.update.ViewProvider;
+import android.net.Uri;
+import android.view.KeyEvent;
+import android.view.MotionEvent;
+import android.view.View;
+import android.widget.MediaController2;
+import android.widget.VideoView2;
+
+import java.util.Map;
+
+public class VideoView2Impl implements VideoView2Provider, VideoViewInterface.SurfaceListener {
+
+ private final VideoView2 mInstance;
+ private final ViewProvider mSuperProvider;
+
+ public VideoView2Impl(VideoView2 instance, ViewProvider superProvider) {
+ mInstance = instance;
+ mSuperProvider = superProvider;
+
+ // TODO: Implement
+ }
+
+ @Override
+ public void start_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public void pause_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public int getDuration_impl() {
+ // TODO: Implement
+ return -1;
+ }
+
+ @Override
+ public int getCurrentPosition_impl() {
+ // TODO: Implement
+ return 0;
+ }
+
+ @Override
+ public void seekTo_impl(int msec) {
+ // TODO: Implement
+ }
+
+ @Override
+ public boolean isPlaying_impl() {
+ // TODO: Implement
+ return false;
+ }
+
+ @Override
+ public int getBufferPercentage_impl() {
+ return -1;
+ }
+
+ @Override
+ public int getAudioSessionId_impl() {
+ // TODO: Implement
+ return 0;
+ }
+
+ @Override
+ public void showSubtitle_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public void hideSubtitle_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setAudioFocusRequest_impl(int focusGain) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setAudioAttributes_impl(AudioAttributes attributes) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setVideoPath_impl(String path) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setVideoURI_impl(Uri uri) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setVideoURI_impl(Uri uri, Map<String, String> headers) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setMediaController2_impl(MediaController2 controllerView) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setViewType_impl(int viewType) {
+ // TODO: Implement
+ }
+
+ @Override
+ public int getViewType_impl() {
+ // TODO: Implement
+ return -1;
+ }
+
+ @Override
+ public void stopPlayback_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setOnPreparedListener_impl(MediaPlayer.OnPreparedListener l) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setOnCompletionListener_impl(MediaPlayer.OnCompletionListener l) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setOnErrorListener_impl(MediaPlayer.OnErrorListener l) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setOnInfoListener_impl(MediaPlayer.OnInfoListener l) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void setOnViewTypeChangedListener_impl(VideoView2.OnViewTypeChangedListener l) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void onAttachedToWindow_impl() {
+ mSuperProvider.onAttachedToWindow_impl();
+ // TODO: Implement
+ }
+
+ @Override
+ public void onDetachedFromWindow_impl() {
+ mSuperProvider.onDetachedFromWindow_impl();
+ // TODO: Implement
+ }
+
+ @Override
+ public void onLayout_impl(boolean changed, int left, int top, int right, int bottom) {
+ mSuperProvider.onLayout_impl(changed, left, top, right, bottom);
+ // TODO: Implement
+ }
+
+ @Override
+ public void draw_impl(Canvas canvas) {
+ mSuperProvider.draw_impl(canvas);
+ // TODO: Implement
+ }
+
+ @Override
+ public CharSequence getAccessibilityClassName_impl() {
+ // TODO: Implement
+ return null;
+ }
+
+ @Override
+ public boolean onTouchEvent_impl(MotionEvent ev) {
+ // TODO: Implement
+ return false;
+ }
+
+ @Override
+ public boolean onTrackballEvent_impl(MotionEvent ev) {
+ // TODO: Implement
+ return false;
+ }
+
+ @Override
+ public boolean onKeyDown_impl(int keyCode, KeyEvent event) {
+ // TODO: Implement
+ return false;
+ }
+
+ @Override
+ public void onFinishInflate_impl() {
+ // TODO: Implement
+ }
+
+ @Override
+ public boolean dispatchKeyEvent_impl(KeyEvent event) {
+ // TODO: Implement
+ return false;
+ }
+
+ @Override
+ public void setEnabled_impl(boolean enabled) {
+ // TODO: Implement
+ }
+
+ ///////////////////////////////////////////////////
+ // Implements VideoViewInterface.SurfaceListener
+ ///////////////////////////////////////////////////
+
+ @Override
+ public void onSurfaceCreated(View view, int width, int height) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void onSurfaceDestroyed(View view) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void onSurfaceChanged(View view, int width, int height) {
+ // TODO: Implement
+ }
+
+ @Override
+ public void onSurfaceTakeOverDone(VideoViewInterface view) {
+ // TODO: Implement
+ }
+}
diff --git a/packages/MediaUpdate/src/com/android/widget/VideoViewInterface.java b/packages/MediaUpdate/src/com/android/widget/VideoViewInterface.java
new file mode 100644
index 0000000..2a5eb94
--- /dev/null
+++ b/packages/MediaUpdate/src/com/android/widget/VideoViewInterface.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.annotation.NonNull;
+import android.media.MediaPlayer;
+import android.view.View;
+
+interface VideoViewInterface {
+ /**
+ * Assigns the view's surface to the given MediaPlayer instance.
+ *
+ * @param mp MediaPlayer
+ * @return true if the surface is successfully assigned, false if not. It will fail to assign
+ * if any of MediaPlayer or surface is unavailable.
+ */
+ boolean assignSurfaceToMediaPlayer(MediaPlayer mp);
+ void setSurfaceListener(SurfaceListener l);
+ int getViewType();
+ void setMediaPlayer(MediaPlayer mp);
+
+ /**
+ * Takes over oldView. It means that the MediaPlayer will start rendering on this view.
+ * The visibility of oldView will be set as {@link View#GONE}. If the view doesn't have a
+ * MediaPlayer instance or its surface is not available, the actual execution is deferred until
+ * a MediaPlayer instance is set by {@link #setMediaPlayer} or its surface becomes available.
+ * {@link SurfaceListener#onSurfaceTakeOverDone} will be called when the actual execution is
+ * done.
+ *
+ * @param oldView The view that MediaPlayer is currently rendering on.
+ */
+ void takeOver(@NonNull VideoViewInterface oldView);
+
+ /**
+ * Indicates if the view's surface is available.
+ *
+ * @return true if the surface is available.
+ */
+ boolean hasAvailableSurface();
+
+ /**
+ * An instance of VideoViewInterface calls these surface notification methods accordingly if
+ * a listener has been registered via {@link #setSurfaceListener(SurfaceListener)}.
+ */
+ interface SurfaceListener {
+ void onSurfaceCreated(View view, int width, int height);
+ void onSurfaceDestroyed(View view);
+ void onSurfaceChanged(View view, int width, int height);
+ void onSurfaceTakeOverDone(VideoViewInterface view);
+ }
+}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 3c975c3..4d5e094 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2597,6 +2597,7 @@
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
+ Mutex::Autolock _l(t->mLock);
for (size_t j = 0; j < t->mEffectChains.size(); j++) {
sp<EffectChain> ec = t->mEffectChains[j];
if (ec->sessionId() > AUDIO_SESSION_OUTPUT_MIX) {
@@ -2606,6 +2607,7 @@
}
for (size_t i = 0; i < mRecordThreads.size(); i++) {
sp<RecordThread> t = mRecordThreads.valueAt(i);
+ Mutex::Autolock _l(t->mLock);
for (size_t j = 0; j < t->mEffectChains.size(); j++) {
sp<EffectChain> ec = t->mEffectChains[j];
chains.push(ec);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3bb5803..d5def48 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -4301,10 +4301,16 @@
// because we're about to decrement the last sp<> on those tracks.
block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
} else {
- LOG_ALWAYS_FATAL("fast track %d should have been active; "
+ // ALOGW rather than LOG_ALWAYS_FATAL because it seems there are cases where an
+ // AudioTrack may start (which may not be with a start() but with a write()
+ // after underrun) and immediately paused or released. In that case the
+ // FastTrack state hasn't had time to update.
+ // TODO Remove the ALOGW when this theory is confirmed.
+ ALOGW("fast track %d should have been active; "
"mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
j, track->mState, state->mTrackMask, recentUnderruns,
track->sharedBuffer() != 0);
+ // Since the FastMixer state already has the track inactive, do nothing here.
}
tracksToRemove->add(track);
// Avoids a misleading display in dumpsys
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d520937..caf3c02 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -84,12 +84,7 @@
bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
// searches for an exact match
- status_t checkExactAudioProfile(uint32_t samplingRate,
- audio_channel_mask_t channelMask,
- audio_format_t format) const
- {
- return mProfiles.checkExactProfile(samplingRate, channelMask, format);
- }
+ virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
// searches for a compatible match, currently implemented for input
// parameters are input|output, returned value is the best match.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index d6ea698..094ff65 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -137,6 +137,26 @@
}
}
+status_t AudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
+{
+ status_t status = NO_ERROR;
+ auto config_mask = config->config_mask;
+ if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
+ config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+ status = checkGain(&config->gain, config->gain.index);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ if (config_mask != 0) {
+ // TODO should we check sample_rate / channel_mask / format separately?
+ status = mProfiles.checkExactProfile(config->sample_rate,
+ config->channel_mask,
+ config->format);
+ }
+ return status;
+}
+
void AudioPort::pickSamplingRate(uint32_t &pickedRate,const SampleRateVector &samplingRates) const
{
pickedRate = 0;
@@ -388,9 +408,7 @@
status = NO_INIT;
goto exit;
}
- status = audioport->checkExactAudioProfile(config->sample_rate,
- config->channel_mask,
- config->format);
+ status = audioport->checkExactAudioProfile(config);
if (status != NO_ERROR) {
goto exit;
}
@@ -404,10 +422,6 @@
mFormat = config->format;
}
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
- status = audioport->checkGain(&config->gain, config->gain.index);
- if (status != NO_ERROR) {
- goto exit;
- }
mGain = config->gain;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index e8980b5..69dd06b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -71,7 +71,13 @@
return false;
}
} else {
- if (checkExactAudioProfile(samplingRate, channelMask, format) != NO_ERROR) {
+ const struct audio_port_config config = {
+ .config_mask = AUDIO_PORT_CONFIG_ALL & ~AUDIO_PORT_CONFIG_GAIN,
+ .sample_rate = samplingRate,
+ .channel_mask = channelMask,
+ .format = format,
+ };
+ if (checkExactAudioProfile(&config) != NO_ERROR) {
return false;
}
}
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 7f42b1b..2954b3b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -389,7 +389,7 @@
nsecs_t ts_since = 0;
String16 helpOption("-help");
String16 onlyOption("-only");
- AString only;
+ std::string only;
int n = args.size();
for (int i = 0; i < n; i++) {
@@ -553,7 +553,7 @@
if (only != NULL && strcmp(only, (*it)->getKey()) != 0) {
ALOGV("Told to omit '%s'", (*it)->getKey());
}
- AString distilled = (*it)->dumpSummary(slot, only);
+ std::string distilled = (*it)->dumpSummary(slot, only);
result.append(distilled.c_str());
}
}
@@ -605,7 +605,7 @@
ALOGV("Omit '%s', it's not '%s'", (*it)->getKey().c_str(), only);
continue;
}
- AString entry = (*it)->toString(mDumpProto);
+ std::string entry = (*it)->toString(mDumpProto);
result.appendFormat("%5d: %s\n", slot, entry.c_str());
slot++;
}
@@ -746,7 +746,7 @@
}
}
-static AString allowedKeys[] =
+static std::string allowedKeys[] =
{
"codec",
"extractor"
@@ -760,7 +760,7 @@
// untrusted uids can only send us a limited set of keys
if (isTrusted == false) {
// restrict to a specific set of keys
- AString key = item->getKey();
+ std::string key = item->getKey();
size_t i;
for(i = 0; i < nAllowedKeys; i++) {
@@ -854,7 +854,7 @@
return setPkgInfo(item, uid, setName, setVersion);
}
} else {
- AString pkg;
+ std::string pkg;
std::string installer = "";
int64_t versionCode = 0;
@@ -896,7 +896,7 @@
}
// strip any leading "shared:" strings that came back
- if (pkg.startsWith("shared:")) {
+ if (pkg.compare(0, 7, "shared:") == 0) {
pkg.erase(0, 7);
}
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index fce7d08..1287835 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -136,8 +136,8 @@
// mapping uids to package names
struct UidToPkgMap {
uid_t uid;
- AString pkg;
- AString installer;
+ std::string pkg;
+ std::string installer;
int64_t versionCode;
nsecs_t expiration;
};
diff --git a/services/mediaanalytics/MetricsSummarizer.cpp b/services/mediaanalytics/MetricsSummarizer.cpp
index 93fe0ec..e7c26e3 100644
--- a/services/mediaanalytics/MetricsSummarizer.cpp
+++ b/services/mediaanalytics/MetricsSummarizer.cpp
@@ -19,6 +19,7 @@
#include <stdlib.h>
#include <stdint.h>
+#include <string>
#include <inttypes.h>
#include <utils/threads.h>
@@ -87,21 +88,21 @@
{
if (mKey == NULL)
return true;
- AString itemKey = item.getKey();
+ std::string itemKey = item.getKey();
if (strcmp(mKey, itemKey.c_str()) != 0) {
return false;
}
return true;
}
-AString MetricsSummarizer::dumpSummary(int &slot)
+std::string MetricsSummarizer::dumpSummary(int &slot)
{
return dumpSummary(slot, NULL);
}
-AString MetricsSummarizer::dumpSummary(int &slot, const char *only)
+std::string MetricsSummarizer::dumpSummary(int &slot, const char *only)
{
- AString value = "";
+ std::string value;
List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
if (it != mSummaries->end()) {
@@ -110,7 +111,7 @@
if (only != NULL && strcmp(only, (*it)->getKey().c_str()) != 0) {
continue;
}
- AString entry = (*it)->toString();
+ std::string entry = (*it)->toString();
snprintf(buf, sizeof(buf), "%5d: ", slot);
value.append(buf);
value.append(entry.c_str());
diff --git a/services/mediaanalytics/MetricsSummarizer.h b/services/mediaanalytics/MetricsSummarizer.h
index a9f0786..a16c7bc 100644
--- a/services/mediaanalytics/MetricsSummarizer.h
+++ b/services/mediaanalytics/MetricsSummarizer.h
@@ -18,10 +18,10 @@
#ifndef ANDROID_METRICSSUMMARIZER_H
#define ANDROID_METRICSSUMMARIZER_H
+#include <string>
#include <utils/threads.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
-#include <utils/String8.h>
#include <utils/List.h>
#include <media/IMediaAnalyticsService.h>
@@ -49,8 +49,8 @@
virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
// dump the summarized records (for dumpsys)
- AString dumpSummary(int &slot);
- AString dumpSummary(int &slot, const char *only);
+ std::string dumpSummary(int &slot);
+ std::string dumpSummary(int &slot, const char *only);
void setIgnorables(const char **);
const char **getIgnorables();
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index ca31691..8e5b260 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -2,7 +2,11 @@
# service executable
include $(CLEAR_VARS)
+# seccomp is not required for coverage build.
+ifneq ($(NATIVE_COVERAGE),true)
LOCAL_REQUIRED_MODULES_arm := mediacodec.policy
+LOCAL_REQUIRED_MODULES_x86 := mediacodec.policy
+endif
LOCAL_SRC_FILES := main_codecservice.cpp
LOCAL_SHARED_LIBRARIES := \
libmedia_omx \
@@ -28,7 +32,7 @@
include $(BUILD_EXECUTABLE)
# service seccomp policy
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64))
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
include $(CLEAR_VARS)
LOCAL_MODULE := mediacodec.policy
LOCAL_MODULE_CLASS := ETC
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
new file mode 100644
index 0000000..dc2c04f
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -0,0 +1,69 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+read: 1
+mprotect: 1
+prctl: 1
+openat: 1
+getuid32: 1
+writev: 1
+ioctl: 1
+close: 1
+mmap2: 1
+fstat64: 1
+madvise: 1
+fstatat64: 1
+futex: 1
+munmap: 1
+faccessat: 1
+_llseek: 1
+lseek: 1
+clone: 1
+sigaltstack: 1
+setpriority: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+ugetrlimit: 1
+readlinkat: 1
+_llseek: 1
+fstatfs64: 1
+pread64: 1
+mremap: 1
+dup: 1
+set_tid_address: 1
+write: 1
+nanosleep: 1
+
+# for attaching to debuggerd on process crash
+socketcall: 1
+sigaction: 1
+tgkill: 1
+rt_sigprocmask: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+getdents64: 1
+pipe2: 1
+ppoll: 1
+
+# Required by AddressSanitizer
+gettid: 1
+sched_yield: 1
+getpid: 1
+gettid: 1
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index a61994d..8db1761 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -72,13 +72,6 @@
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAUDIO_OK;
- const audio_attributes_t attributes = {
- .content_type = AUDIO_CONTENT_TYPE_MUSIC,
- .usage = AUDIO_USAGE_MEDIA,
- .source = AUDIO_SOURCE_VOICE_RECOGNITION,
- .flags = AUDIO_FLAG_LOW_LATENCY,
- .tags = ""
- };
audio_config_base_t config;
audio_port_handle_t deviceId;
@@ -87,6 +80,24 @@
copyFrom(request.getConstantConfiguration());
+ aaudio_direction_t direction = getDirection();
+
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(getContentType());
+ const audio_usage_t usage = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? AAudioConvert_usageToInternal(getUsage())
+ : AUDIO_USAGE_UNKNOWN;
+ const audio_source_t source = (direction == AAUDIO_DIRECTION_INPUT)
+ ? AAudioConvert_inputPresetToAudioSource(getInputPreset())
+ : AUDIO_SOURCE_DEFAULT;
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = usage,
+ .source = source,
+ .flags = AUDIO_FLAG_LOW_LATENCY,
+ .tags = ""
+ };
mMmapClient.clientUid = request.getUserId();
mMmapClient.clientPid = request.getProcessId();
mMmapClient.packageName.setTo(String16(""));
@@ -108,7 +119,6 @@
int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
- aaudio_direction_t direction = getDirection();
if (direction == AAUDIO_DIRECTION_OUTPUT) {
config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
? AUDIO_CHANNEL_OUT_STEREO