Merge "GenericSource: Allow multiple buffer reads for video" into nyc-mr1-dev
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index db69a00..25f7173 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -484,13 +484,19 @@
status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
+ // Configures temporal layering based on |msg|. |inConfigure| shall be true iff this is called
+ // during the configure() call. On success, the configured layering is set in |outputFormat|. If
+ // |outputFormat| is mOutputFormat, it is copied to trigger an output format changed event.
+ status_t configureTemporalLayers(
+ const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat);
+
status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
status_t setupH263EncoderParameters(const sp<AMessage> &msg);
status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
status_t setupHEVCEncoderParameters(const sp<AMessage> &msg);
- status_t setupVPXEncoderParameters(const sp<AMessage> &msg);
+ status_t setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 87c32a6..4b2b868 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -123,6 +123,9 @@
bool findBuffer(const char *name, sp<ABuffer> *buffer) const;
bool findMessage(const char *name, sp<AMessage> *obj) const;
+ // finds an item of any numeric type and returns its value cast to a float
+ bool findAsFloat(const char *name, float *value) const;
+
bool findRect(
const char *name,
int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const;
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
index 632acbc..aaa0db2 100644
--- a/media/common_time/Android.mk
+++ b/media/common_time/Android.mk
@@ -19,4 +19,6 @@
libutils \
liblog
+LOCAL_CFLAGS := -Wall -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
index 8d8556c..222b7ce 100644
--- a/media/common_time/cc_helper.cpp
+++ b/media/common_time/cc_helper.cpp
@@ -80,7 +80,7 @@
}
}
-void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) {
+void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID __unused) {
// do nothing; listener is only really used as a token so the server can
// find out when clients die.
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 32c4b8a..24ca582 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -22,6 +22,8 @@
#include "WebmWriter.h"
#include "StagefrightRecorder.h"
+#include <algorithm>
+
#include <android/hardware/ICamera.h>
#include <binder/IPCThreadState.h>
@@ -57,6 +59,11 @@
namespace android {
+static const float kTypicalDisplayRefreshingRate = 60.f;
+// display refresh rate drops on battery saver
+static const float kMinTypicalDisplayRefreshingRate = kTypicalDisplayRefreshingRate / 2;
+static const int kMaxNumVideoTemporalLayers = 8;
+
// To collect the encoder usage for the battery app
static void addBatteryData(uint32_t params) {
sp<IBinder> binder =
@@ -1565,9 +1572,44 @@
format->setInt32("level", mVideoEncoderLevel);
}
+ uint32_t tsLayers = 1;
+ bool preferBFrames = true; // we like B-frames as they produce better quality per bitrate
format->setInt32("priority", 0 /* realtime */);
+ float maxPlaybackFps = mFrameRate; // assume video is only played back at normal speed
+
if (mCaptureFpsEnable) {
format->setFloat("operating-rate", mCaptureFps);
+
+ // enable layering for all time lapse and high frame rate recordings
+ if (mFrameRate / mCaptureFps >= 1.9) { // time lapse
+ preferBFrames = false;
+ tsLayers = 2; // use at least two layers as the resulting video will likely be sped up
+ } else if (mCaptureFps > maxPlaybackFps) { // slow-mo
+ maxPlaybackFps = mCaptureFps; // assume video will be played back at full capture speed
+ preferBFrames = false;
+ }
+ }
+
+ for (uint32_t tryLayers = 1; tryLayers <= kMaxNumVideoTemporalLayers; ++tryLayers) {
+ if (tryLayers > tsLayers) {
+ tsLayers = tryLayers;
+ }
+ // keep going until the base layer fps falls below the typical display refresh rate
+ float baseLayerFps = maxPlaybackFps / (1 << (tryLayers - 1));
+ if (baseLayerFps < kMinTypicalDisplayRefreshingRate / 0.9) {
+ break;
+ }
+ }
+
+ if (tsLayers > 1) {
+ uint32_t bLayers = std::min(2u, tsLayers - 1); // use up to 2 B-layers
+ uint32_t pLayers = tsLayers - bLayers;
+ format->setString(
+ "ts-schema", AStringPrintf("android.generic.%u+%u", pLayers, bLayers));
+
+ // TODO: some encoders do not support B-frames with temporal layering, and we have a
+ // different preference based on use-case. We could move this into camera profiles.
+ format->setInt32("android._prefer-b-frames", preferBFrames);
}
if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
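The layer-count loop above deserves a worked check. A standalone sketch, with the constants copied from this hunk (`pickTemporalLayers` is a hypothetical name, not part of the patch):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical helper restating the layer-count loop above.
static uint32_t pickTemporalLayers(float maxPlaybackFps, uint32_t minLayers) {
    const float kMinTypicalDisplayRefreshingRate = 60.f / 2;
    const uint32_t kMaxNumVideoTemporalLayers = 8;
    uint32_t tsLayers = minLayers;
    for (uint32_t tryLayers = 1; tryLayers <= kMaxNumVideoTemporalLayers; ++tryLayers) {
        if (tryLayers > tsLayers) {
            tsLayers = tryLayers;
        }
        // each extra layer halves the base-layer fps; stop once it drops below
        // the battery-saver refresh rate (the / 0.9 adds ~10% headroom)
        float baseLayerFps = maxPlaybackFps / (1 << (tryLayers - 1));
        if (baseLayerFps < kMinTypicalDisplayRefreshingRate / 0.9) {
            break;
        }
    }
    return tsLayers;
}

int main() {
    printf("%u\n", pickTemporalLayers(240.f, 1)); // 240 fps slow-mo -> 4 layers
    printf("%u\n", pickTemporalLayers(30.f, 1));  // plain 30 fps    -> 1 layer
    return 0;
}
```

With 4 layers, the code above emits ts-schema "android.generic.2+2": 2 P-layers plus the 2-B-layer cap.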
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index fa19410..cf38efc 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -19,6 +19,8 @@
#include <utils/Log.h>
#include <inttypes.h>
+#include <algorithm>
+
#include "NuPlayerCCDecoder.h"
#include "NuPlayerDecoder.h"
#include "NuPlayerRenderer.h"
@@ -41,7 +43,7 @@
namespace android {
-static float kDisplayRefreshingRate = 60.f;
+static float kDisplayRefreshingRate = 60.f; // TODO: get this from the display
// The default total video frame rate of a stream when that info is not available from
// the source.
@@ -77,7 +79,7 @@
mTimeChangePending(false),
mFrameRateTotal(kDefaultVideoFrameRateTotal),
mPlaybackSpeed(1.0f),
- mNumVideoTemporalLayerTotal(1),
+ mNumVideoTemporalLayerTotal(1), // decode all layers
mNumVideoTemporalLayerAllowed(1),
mCurrentMaxVideoTemporalLayerId(0),
mResumePending(false),
@@ -351,14 +353,14 @@
int32_t numVideoTemporalLayerTotal;
if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
- && numVideoTemporalLayerTotal > 0
+ && numVideoTemporalLayerTotal >= 0
&& numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
&& mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
needAdjustLayers = true;
- mNumVideoTemporalLayerTotal = numVideoTemporalLayerTotal;
+ mNumVideoTemporalLayerTotal = std::max(numVideoTemporalLayerTotal, 1);
}
- if (needAdjustLayers) {
+ if (needAdjustLayers && mNumVideoTemporalLayerTotal > 1) {
// TODO: For now, layer fps is calculated for some specific architectures.
// But it really should be extracted from the stream.
mVideoTemporalLayerAggregateFps[0] =
@@ -378,25 +380,21 @@
}
if (needAdjustLayers) {
- int32_t layerId;
- for (layerId = 0; layerId < mNumVideoTemporalLayerTotal; ++layerId) {
- if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
- > kDisplayRefreshingRate) {
- --layerId;
- break;
+ float decodeFrameRate = mFrameRateTotal;
+ // enable temporal layering optimization only if we know the layering depth
+ if (mNumVideoTemporalLayerTotal > 1) {
+ int32_t layerId;
+ for (layerId = 0; layerId < mNumVideoTemporalLayerTotal - 1; ++layerId) {
+ if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
+ >= kDisplayRefreshingRate * 0.9) {
+ break;
+ }
}
+ mNumVideoTemporalLayerAllowed = layerId + 1;
+ decodeFrameRate = mVideoTemporalLayerAggregateFps[layerId];
}
- if (layerId < 0) {
- layerId = 0;
- } else if (layerId >= mNumVideoTemporalLayerTotal) {
- layerId = mNumVideoTemporalLayerTotal - 1;
- }
- mNumVideoTemporalLayerAllowed = layerId + 1;
- if (mCurrentMaxVideoTemporalLayerId > layerId) {
- mCurrentMaxVideoTemporalLayerId = layerId;
- }
- ALOGV("onSetParameters: allowed layers=%d, current max layerId=%d",
- mNumVideoTemporalLayerAllowed, mCurrentMaxVideoTemporalLayerId);
+ ALOGV("onSetParameters: allowed layers=%d, decodeFps=%g",
+ mNumVideoTemporalLayerAllowed, decodeFrameRate);
if (mCodec == NULL) {
ALOGW("onSetParameters called before codec is created.");
@@ -404,8 +402,7 @@
}
sp<AMessage> codecParams = new AMessage();
- codecParams->setFloat("operating-rate",
- mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed);
+ codecParams->setFloat("operating-rate", decodeFrameRate * mPlaybackSpeed);
mCodec->setParameters(codecParams);
}
}
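To make the new operating-rate path concrete, a worked example under the aggregate-fps layout this hunk computes (each layer doubles the cumulative rate; the stream numbers are hypothetical):

```cpp
// 60 fps stream, 3 temporal layers -> aggregate fps [15, 30, 60]
// 1x speed: 15 and 30 are both < 60 * 0.9 = 54, the loop runs off the end at
//           layerId 2, all 3 layers allowed, operating-rate = 60 * 1.0 = 60
// 2x speed: layer 1 already yields 30 * 2 = 60 >= 54, the loop breaks at
//           layerId 1, only 2 layers allowed, operating-rate = 30 * 2 = 60
//           (instead of an unsustainable 120)
```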
@@ -818,11 +815,12 @@
dropAccessUnit = false;
if (!mIsAudio && !mIsSecure) {
int32_t layerId = 0;
+ bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
if (mRenderer->getVideoLateByUs() > 100000ll
&& mIsVideoAVC
&& !IsAVCReferenceFrame(accessUnit)) {
dropAccessUnit = true;
- } else if (accessUnit->meta()->findInt32("temporal-layer-id", &layerId)) {
+ } else if (haveLayerId && mNumVideoTemporalLayerTotal > 1) {
// Add only one layer each time.
if (layerId > mCurrentMaxVideoTemporalLayerId + 1
|| layerId >= mNumVideoTemporalLayerAllowed) {
@@ -832,9 +830,14 @@
mCurrentMaxVideoTemporalLayerId);
} else if (layerId > mCurrentMaxVideoTemporalLayerId) {
mCurrentMaxVideoTemporalLayerId = layerId;
+ } else if (layerId == 0 && mNumVideoTemporalLayerTotal > 1 && IsIDR(accessUnit)) {
+ mCurrentMaxVideoTemporalLayerId = mNumVideoTemporalLayerTotal - 1;
}
}
if (dropAccessUnit) {
+ if (layerId <= mCurrentMaxVideoTemporalLayerId && layerId > 0) {
+ mCurrentMaxVideoTemporalLayerId = layerId - 1;
+ }
++mNumInputFramesDropped;
}
}
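The drop rules in this hunk reduce to a small predicate; a sketch with hypothetical names, plus the two adjustments the hunk makes around it:

```cpp
// Drop a video access unit based on its temporal layer id: layers may only be
// added one at a time, and never beyond the operating-rate budget.
static bool shouldDropForTemporalLayer(
        int32_t layerId, int32_t currentMaxLayerId, int32_t allowedLayers) {
    return layerId > currentMaxLayerId + 1  // would skip a layer on the way up
        || layerId >= allowedLayers;        // above what the display can use
}
// Around this, the hunk (a) re-opens all layers on a layer-0 IDR, and (b) backs
// mCurrentMaxVideoTemporalLayerId off below any frame that gets dropped, so
// frames that depended on it are dropped too.
```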
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 42281f6..3dea270 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2462,6 +2462,109 @@
return OK;
}
+status_t ACodec::configureTemporalLayers(
+ const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat) {
+ if (!mIsVideo || !mIsEncoder) {
+ return INVALID_OPERATION;
+ }
+
+ AString tsSchema;
+ if (!msg->findString("ts-schema", &tsSchema)) {
+ return OK;
+ }
+
+ unsigned int numLayers = 0;
+ unsigned int numBLayers = 0;
+ int tags;
+ char dummy;
+ OMX_VIDEO_ANDROID_TEMPORALLAYERINGPATTERNTYPE pattern =
+ OMX_VIDEO_AndroidTemporalLayeringPatternNone;
+ if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+ && numLayers > 0) {
+ pattern = OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC;
+ } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+ &numLayers, &dummy, &numBLayers, &dummy))
+ && (tags == 1 || (tags == 3 && dummy == '+'))
+ && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
+ numLayers += numBLayers;
+ pattern = OMX_VIDEO_AndroidTemporalLayeringPatternAndroid;
+ } else {
+ ALOGI("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
+ return BAD_VALUE;
+ }
+
+ OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layerParams;
+ InitOMXParams(&layerParams);
+ layerParams.nPortIndex = kPortIndexOutput;
+
+ status_t err = mOMX->getParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ &layerParams, sizeof(layerParams));
+
+ if (err != OK) {
+ return err;
+ } else if (!(layerParams.eSupportedPatterns & pattern)) {
+ return BAD_VALUE;
+ }
+
+ numLayers = min(numLayers, layerParams.nLayerCountMax);
+ numBLayers = min(numBLayers, layerParams.nBLayerCountMax);
+
+ if (!inConfigure) {
+ OMX_VIDEO_CONFIG_ANDROID_TEMPORALLAYERINGTYPE layerConfig;
+ InitOMXParams(&layerConfig);
+ layerConfig.nPortIndex = kPortIndexOutput;
+ layerConfig.ePattern = pattern;
+ layerConfig.nPLayerCountActual = numLayers - numBLayers;
+ layerConfig.nBLayerCountActual = numBLayers;
+ layerConfig.bBitrateRatiosSpecified = OMX_FALSE;
+
+ err = mOMX->setConfig(
+ mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
+ &layerConfig, sizeof(layerConfig));
+ } else {
+ layerParams.ePattern = pattern;
+ layerParams.nPLayerCountActual = numLayers - numBLayers;
+ layerParams.nBLayerCountActual = numBLayers;
+ layerParams.bBitrateRatiosSpecified = OMX_FALSE;
+
+ err = mOMX->setParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ &layerParams, sizeof(layerParams));
+ }
+
+ AString configSchema;
+ if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternAndroid) {
+ configSchema = AStringPrintf("android.generic.%u+%u", numLayers - numBLayers, numBLayers);
+ } else if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC) {
+ configSchema = AStringPrintf("webrtc.vp8.%u", numLayers);
+ }
+
+ if (err != OK) {
+ ALOGW("Failed to set temporal layers to %s (requested %s)",
+ configSchema.c_str(), tsSchema.c_str());
+ return err;
+ }
+
+ err = mOMX->getParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ &layerParams, sizeof(layerParams));
+
+ if (err == OK) {
+ ALOGD("Temporal layers requested:%s configured:%s got:%s(%u: P=%u, B=%u)",
+ tsSchema.c_str(), configSchema.c_str(),
+ asString(layerParams.ePattern), layerParams.ePattern,
+ layerParams.nPLayerCountActual, layerParams.nBLayerCountActual);
+
+ if (outputFormat.get() == mOutputFormat.get()) {
+ mOutputFormat = mOutputFormat->dup(); // trigger an output format change event
+ }
+ // assume we got what we configured
+ outputFormat->setString("ts-schema", configSchema);
+ }
+ return err;
+}
+
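The two sscanf patterns accept schema strings like the following; a quick standalone check using the same patterns (the sample strings are illustrative):

```cpp
#include <cstdio>

int main() {
    const char *samples[] = { "webrtc.vp8.2-layer", "android.generic.2",
                              "android.generic.2+1", "android.generic.2-1" };
    for (const char *s : samples) {
        unsigned int numLayers = 0, numBLayers = 0;
        char dummy = 0;
        if (sscanf(s, "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
                && numLayers > 0) {
            printf("%s -> WebRTC pattern, %u layers\n", s, numLayers);
            continue;
        }
        int tags = sscanf(s, "android.generic.%u%c%u%c",
                &numLayers, &dummy, &numBLayers, &dummy);
        if (tags && (tags == 1 || (tags == 3 && dummy == '+')) && numLayers > 0) {
            printf("%s -> Android pattern, %u P + %u B\n", s, numLayers, numBLayers);
        } else {
            printf("%s -> rejected\n", s);  // e.g. the '-' separator case
        }
    }
    return 0;
}
```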
status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
@@ -3769,13 +3872,17 @@
case OMX_VIDEO_CodingVP8:
case OMX_VIDEO_CodingVP9:
- err = setupVPXEncoderParameters(msg);
+ err = setupVPXEncoderParameters(msg, outputFormat);
break;
default:
break;
}
+ if (err != OK) {
+ return err;
+ }
+
// Set up color aspects on input, but propagate them to the output format, as they will
// not be read back from encoder.
err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
@@ -3794,6 +3901,29 @@
err = OK;
}
+ if (err != OK) {
+ return err;
+ }
+
+ switch (compressionFormat) {
+ case OMX_VIDEO_CodingAVC:
+ case OMX_VIDEO_CodingHEVC:
+ err = configureTemporalLayers(msg, true /* inConfigure */, outputFormat);
+ if (err != OK) {
+ err = OK; // ignore failure
+ }
+ break;
+
+ case OMX_VIDEO_CodingVP8:
+ case OMX_VIDEO_CodingVP9:
+ // TODO: do we need to support android.generic layering? webrtc layering is
+ // already set up in setupVPXEncoderParameters.
+ break;
+
+ default:
+ break;
+ }
+
if (err == OK) {
ALOGI("setupVideoEncoder succeeded");
}
@@ -3838,14 +3968,31 @@
return err;
}
-static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) {
- if (iFramesInterval < 0) {
- return 0xFFFFFFFF;
- } else if (iFramesInterval == 0) {
+static OMX_U32 setPFramesSpacing(
+ float iFramesInterval /* seconds */, int32_t frameRate, uint32_t BFramesSpacing = 0) {
+ // BFramesSpacing is the number of B frames between I/P frames
+ // PFramesSpacing (the value to be returned) is the number of P frames between I frames
+ //
+ // keyFrameInterval = ((PFramesSpacing + 1) * BFramesSpacing) /* B frames */
+ //                    + PFramesSpacing /* P frames */ + 1 /* I frame */
+ //                  = (PFramesSpacing + 1) * (BFramesSpacing + 1)
+ //
+ // E.g. nPFrames = 1 and nBFrames = 3 give the 8-frame GOP "I BBB P BBB" (I),
+ // i.e. an I-frame interval of 8.
+
+ if (iFramesInterval < 0) { // just 1 key frame
+ return 0xFFFFFFFE; // don't use maxint as key-frame-interval calculation will add 1
+ } else if (iFramesInterval == 0) { // just key frames
return 0;
}
- OMX_U32 ret = frameRate * iFramesInterval;
- return ret;
+
+ // round down as key-frame-interval is an upper limit
+ uint32_t keyFrameInterval = uint32_t(frameRate * iFramesInterval);
+ OMX_U32 ret = keyFrameInterval / (BFramesSpacing + 1);
+ return ret > 0 ? ret - 1 : 0;
}
static OMX_VIDEO_CONTROLRATETYPE getBitrateMode(const sp<AMessage> &msg) {
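A worked instance of the new spacing math, using the GOP identity from the comment above (numbers chosen for illustration):

```cpp
// 30 fps, i-frame-interval = 1 s:
//   with BFramesSpacing = 1: nPFrames = 30 / (1 + 1) - 1 = 14
//     check: (14 + 1) * (1 + 1) = 30 frames per GOP = 1 I + 14 P + 15 B
//   with BFramesSpacing = 0 (the HEVC/VP8 call sites): nPFrames = 30 - 1 = 29,
//     and the "+ 1" at those call sites restores nKeyFrameInterval = 30
```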
@@ -3858,9 +4005,10 @@
}
status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) {
- int32_t bitrate, iFrameInterval;
+ int32_t bitrate;
+ float iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
- || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+ || !msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
return INVALID_OPERATION;
}
@@ -3893,11 +4041,11 @@
mpeg4type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
- mpeg4type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+ mpeg4type.nBFrames = 0;
+ mpeg4type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, mpeg4type.nBFrames);
if (mpeg4type.nPFrames == 0) {
mpeg4type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
- mpeg4type.nBFrames = 0;
mpeg4type.nIDCVLCThreshold = 0;
mpeg4type.bACPred = OMX_TRUE;
mpeg4type.nMaxPacketSize = 256;
@@ -3939,9 +4087,10 @@
}
status_t ACodec::setupH263EncoderParameters(const sp<AMessage> &msg) {
- int32_t bitrate, iFrameInterval;
+ int32_t bitrate;
+ float iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
- || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+ || !msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
return INVALID_OPERATION;
}
@@ -3970,11 +4119,11 @@
h263type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
- h263type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+ h263type.nBFrames = 0;
+ h263type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h263type.nBFrames);
if (h263type.nPFrames == 0) {
h263type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
- h263type.nBFrames = 0;
int32_t profile;
if (msg->findInt32("profile", &profile)) {
@@ -4067,9 +4216,10 @@
}
status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
- int32_t bitrate, iFrameInterval;
+ int32_t bitrate;
+ float iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
- || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+ || !msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
return INVALID_OPERATION;
}
@@ -4125,8 +4275,15 @@
h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profile);
h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level);
} else {
- // Use baseline profile for AVC recording if profile is not specified.
+ // Use the largest supported profile for AVC recording if profile is not specified.
h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
+ for (OMX_VIDEO_AVCPROFILETYPE profile : {
+ OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCProfileMain }) {
+ if (verifySupportForProfileAndLevel(profile, 0) == OK) {
+ h264type.eProfile = profile;
+ break;
+ }
+ }
}
ALOGI("setupAVCEncoderParameters with [profile: %s] [level: %s]",
@@ -4137,7 +4294,7 @@
h264type.bUseHadamard = OMX_TRUE;
h264type.nRefFrames = 1;
h264type.nBFrames = 0;
- h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+ h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
if (h264type.nPFrames == 0) {
h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
}
@@ -4155,7 +4312,7 @@
h264type.bUseHadamard = OMX_TRUE;
h264type.nRefFrames = 2;
h264type.nBFrames = 1;
- h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+ h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
h264type.nRefIdx10ActiveMinus1 = 0;
@@ -4187,13 +4344,42 @@
return err;
}
+ // TRICKY: if we are enabling temporal layering as well, some codecs may not support layering
+ // when B-frames are enabled. Detect this now so we can disable B frames if temporal layering
+ // is preferred.
+ AString tsSchema;
+ int32_t preferBFrames = (int32_t)false;
+ if (msg->findString("ts-schema", &tsSchema)
+ && (!msg->findInt32("android._prefer-b-frames", &preferBFrames) || !preferBFrames)) {
+ OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layering;
+ InitOMXParams(&layering);
+ layering.nPortIndex = kPortIndexOutput;
+ if (mOMX->getParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ &layering, sizeof(layering)) == OK
+ && layering.eSupportedPatterns
+ && layering.nBLayerCountMax == 0) {
+ h264type.nBFrames = 0;
+ h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
+ h264type.nAllowedPictureTypes &= ~OMX_VIDEO_PictureTypeB;
+ ALOGI("disabling B-frames");
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+
+ if (err != OK) {
+ return err;
+ }
+ }
+ }
+
return configureBitrate(bitrate, bitrateMode);
}
status_t ACodec::setupHEVCEncoderParameters(const sp<AMessage> &msg) {
- int32_t bitrate, iFrameInterval;
+ int32_t bitrate;
+ float iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
- || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+ || !msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
return INVALID_OPERATION;
}
@@ -4235,7 +4421,7 @@
hevcType.eLevel = static_cast<OMX_VIDEO_HEVCLEVELTYPE>(level);
}
// TODO: finer control?
- hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate);
+ hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate) + 1;
err = mOMX->setParameter(
mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
@@ -4246,9 +4432,9 @@
return configureBitrate(bitrate, bitrateMode);
}
-status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
+status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
int32_t bitrate;
- int32_t iFrameInterval = 0;
+ float iFrameInterval = 0;
size_t tsLayers = 0;
OMX_VIDEO_ANDROID_VPXTEMPORALLAYERPATTERNTYPE pattern =
OMX_VIDEO_VPXTemporalLayerPatternNone;
@@ -4262,7 +4448,7 @@
if (!msg->findInt32("bitrate", &bitrate)) {
return INVALID_OPERATION;
}
- msg->findInt32("i-frame-interval", &iFrameInterval);
+ msg->findAsFloat("i-frame-interval", &iFrameInterval);
OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
@@ -4276,19 +4462,31 @@
}
AString tsSchema;
+ OMX_VIDEO_ANDROID_TEMPORALLAYERINGPATTERNTYPE tsType =
+ OMX_VIDEO_AndroidTemporalLayeringPatternNone;
+
if (msg->findString("ts-schema", &tsSchema)) {
- if (tsSchema == "webrtc.vp8.1-layer") {
+ unsigned int numLayers = 0;
+ unsigned int numBLayers = 0;
+ int tags;
+ char dummy;
+ if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+ && numLayers > 0) {
pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
- tsLayers = 1;
- } else if (tsSchema == "webrtc.vp8.2-layer") {
+ tsType = OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC;
+ tsLayers = numLayers;
+ } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+ &numLayers, &dummy, &numBLayers, &dummy))
+ && (tags == 1 || (tags == 3 && dummy == '+'))
+ && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
- tsLayers = 2;
- } else if (tsSchema == "webrtc.vp8.3-layer") {
- pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
- tsLayers = 3;
+ // VPX does not have a concept of B-frames, so just count all layers
+ tsType = OMX_VIDEO_AndroidTemporalLayeringPatternAndroid;
+ tsLayers = numLayers + numBLayers;
} else {
- ALOGW("Unsupported ts-schema [%s]", tsSchema.c_str());
+ ALOGW("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
}
+ tsLayers = min(tsLayers, (size_t)OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS);
}
OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
@@ -4300,7 +4498,7 @@
if (err == OK) {
if (iFrameInterval > 0) {
- vp8type.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate);
+ vp8type.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate) + 1;
}
vp8type.eTemporalPattern = pattern;
vp8type.nTemporalLayerCount = tsLayers;
@@ -4320,6 +4518,12 @@
&vp8type, sizeof(vp8type));
if (err != OK) {
ALOGW("Extended VP8 parameters set failed: %d", err);
+ } else if (tsType == OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC) {
+ // advertise even single-layer WebRTC layering, as that schema is defined
+ outputFormat->setString("ts-schema", AStringPrintf("webrtc.vp8.%u-layer", tsLayers));
+ } else if (tsLayers > 0) {
+ // tsType == OMX_VIDEO_AndroidTemporalLayeringPatternAndroid
+ outputFormat->setString("ts-schema", AStringPrintf("android.generic.%u", tsLayers));
}
}
@@ -4850,32 +5054,21 @@
sizeof(vp8type));
if (err == OK) {
- AString tsSchema = "none";
- if (vp8type.eTemporalPattern
- == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
- switch (vp8type.nTemporalLayerCount) {
- case 1:
- {
- tsSchema = "webrtc.vp8.1-layer";
- break;
- }
- case 2:
- {
- tsSchema = "webrtc.vp8.2-layer";
- break;
- }
- case 3:
- {
- tsSchema = "webrtc.vp8.3-layer";
- break;
- }
- default:
- {
- break;
- }
+ if (vp8type.eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC
+ && vp8type.nTemporalLayerCount > 0
+ && vp8type.nTemporalLayerCount
+ <= OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+ // advertise as android.generic if we configured for android.generic
+ AString origSchema;
+ if (notify->findString("ts-schema", &origSchema)
+ && origSchema.startsWith("android.generic")) {
+ notify->setString("ts-schema", AStringPrintf(
+ "android.generic.%u", vp8type.nTemporalLayerCount));
+ } else {
+ notify->setString("ts-schema", AStringPrintf(
+ "webrtc.vp8.%u-layer", vp8type.nTemporalLayerCount));
}
}
- notify->setString("ts-schema", tsSchema);
}
// Fall through to set up mime.
}
@@ -7342,7 +7535,12 @@
}
}
- return OK;
+ status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
+ if (err != OK) {
+ err = OK; // ignore failure
+ }
+
+ return err;
}
void ACodec::onSignalEndOfInputStream() {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index b5eb50b..ec534ef 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -17,6 +17,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG4Writer"
+#include <algorithm>
+
#include <arpa/inet.h>
#include <fcntl.h>
#include <inttypes.h>
@@ -30,6 +32,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/MediaBuffer.h>
@@ -256,6 +259,7 @@
int32_t mTrackId;
int64_t mTrackDurationUs;
int64_t mMaxChunkDurationUs;
+ int64_t mLastDecodingTimeUs;
int64_t mEstimatedTrackSizeBytes;
int64_t mMdatSizeBytes;
@@ -1179,7 +1183,7 @@
while (getNextNALUnit(&data, &searchSize, &nextNalStart,
&nextNalSize, true) == OK) {
- size_t currentNalSize = nextNalStart - currentNalStart - 3 /* strip start-code */;
+ size_t currentNalSize = nextNalStart - currentNalStart - 4 /* strip start-code */;
MediaBuffer *nalBuf = new MediaBuffer((void *)currentNalStart, currentNalSize);
addLengthPrefixedSample_l(nalBuf);
nalBuf->release();
@@ -1542,6 +1546,14 @@
mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
+ // store temporal layer count
+ if (!mIsAudio) {
+ int32_t count;
+ if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
+ mOwner->setTemporalLayerCount(count);
+ }
+ }
+
setTimeScale();
}
@@ -1920,6 +1932,7 @@
mEstimatedTrackSizeBytes = 0;
mMdatSizeBytes = 0;
mMaxChunkDurationUs = 0;
+ mLastDecodingTimeUs = -1;
pthread_create(&mThread, &attr, ThreadWrapper, this);
pthread_attr_destroy(&attr);
@@ -2504,6 +2517,17 @@
int64_t decodingTimeUs;
CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
+ } else {
+ // increase decoding time by at least 1 tick
+ decodingTimeUs = std::max(
+ mLastDecodingTimeUs + divUp(1000000, mTimeScale), decodingTimeUs);
+ }
+
+ mLastDecodingTimeUs = decodingTimeUs;
cttsOffsetTimeUs =
timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
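A sketch of the clamp being added here (divUp comes from the newly included AUtils.h; the helper name is illustrative):

```cpp
#include <algorithm>
#include <cstdint>

// Keep decoding timestamps non-negative and strictly increasing by at least
// one media-timescale tick, as in the hunk above.
static int64_t clampDecodingTimeUs(int64_t lastUs, int64_t curUs, int32_t timeScale) {
    if (lastUs < 0) {                                        // first sample
        return std::max<int64_t>(0, curUs);
    }
    int64_t tickUs = (1000000 + timeScale - 1) / timeScale;  // divUp(1000000, timeScale)
    return std::max(lastUs + tickUs, curUs);
}
// e.g. timeScale = 90000 gives a 12 us tick: two samples colliding at 1000 us
// come out as 1000 and 1012, so the decode timestamps written to the file
// stay monotonic.
```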
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 8a0009c..1c76ad7 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1317,6 +1317,20 @@
}
convertMessageToMetaDataColorAspects(msg, meta);
+
+ AString tsSchema;
+ if (msg->findString("ts-schema", &tsSchema)) {
+ unsigned int numLayers = 0;
+ unsigned int numBLayers = 0;
+ char dummy;
+ int tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+ &numLayers, &dummy, &numBLayers, &dummy);
+ if ((tags == 1 || (tags == 3 && dummy == '+'))
+ && numLayers > 0 && numLayers < UINT32_MAX - numBLayers
+ && numLayers + numBLayers <= INT32_MAX) {
+ meta->setInt32(kKeyTemporalLayerCount, numLayers + numBLayers);
+ }
+ }
} else if (mime.startsWith("audio/")) {
int32_t numChannels;
if (msg->findInt32("channel-count", &numChannels)) {
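Taken together with the MPEG4Writer change earlier in this patch, the layer count now flows from the encoder's output format into the writer, roughly:

```cpp
// encoder output format (set by ACodec above): "ts-schema" = "android.generic.2+1"
// convertMessageToMetaData() here stores:
//     meta->setInt32(kKeyTemporalLayerCount, 3);  // 2 P-layers + 1 B-layer
// and MPEG4Writer::Track forwards it on construction:
//     if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
//         mOwner->setTemporalLayerCount(count);
//     }
```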
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 4cde54e..0822c34 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -120,6 +120,17 @@
mIsFirst = true;
}
+void *SoftMP3::memsetSafe(OMX_BUFFERHEADERTYPE *outHeader, int c, size_t len) {
+ if (len > outHeader->nAllocLen) {
+ ALOGE("memset buffer too small: got %u, expected %zu", outHeader->nAllocLen, len);
+ android_errorWriteLog(0x534e4554, "29422022");
+ notify(OMX_EventError, OMX_ErrorUndefined, OUTPUT_BUFFER_TOO_SMALL, NULL);
+ mSignalledError = true;
+ return NULL;
+ }
+ return memset(outHeader->pBuffer, c, len);
+}
+
OMX_ERRORTYPE SoftMP3::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
@@ -300,7 +311,10 @@
outHeader->nOffset = 0;
outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
- memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
+ if (!memsetSafe(outHeader, 0, outHeader->nFilledLen)) {
+ return;
+ }
+
}
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
mSignalledOutputEos = true;
@@ -312,9 +326,9 @@
// if mIsFirst is true as we may not have a valid
// mConfig->samplingRate and mConfig->num_channels?
ALOGV_IF(mIsFirst, "insufficient data for first frame, sending silence");
- memset(outHeader->pBuffer,
- 0,
- mConfig->outputFrameSize * sizeof(int16_t));
+ if (!memsetSafe(outHeader, 0, mConfig->outputFrameSize * sizeof(int16_t))) {
+ return;
+ }
if (inHeader) {
mConfig->inputBufferUsedLength = inHeader->nFilledLen;
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index f9e7b53..3bfa6c7 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -72,6 +72,7 @@
void initPorts();
void initDecoder();
+ void *memsetSafe(OMX_BUFFERHEADERTYPE *outHeader, int c, size_t len);
DISALLOW_EVIL_CONSTRUCTORS(SoftMP3);
};
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 8022467..3490008 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -156,7 +156,7 @@
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
- if (outHeader->nAllocLen >= outHeader->nFilledLen) {
+ if (outputBufferSafe(outHeader)) {
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
@@ -166,8 +166,6 @@
size_t srcVStride = mImg->stride[VPX_PLANE_V];
copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
} else {
- ALOGE("b/27597103, buffer too small");
- android_errorWriteLog(0x534e4554, "27597103");
outHeader->nFilledLen = 0;
}
@@ -197,6 +195,25 @@
return true;
}
+bool SoftVPX::outputBufferSafe(OMX_BUFFERHEADERTYPE *outHeader) {
+ uint32_t width = outputBufferWidth();
+ uint32_t height = outputBufferHeight();
+ uint64_t nFilledLen = width;
+ nFilledLen *= height;
+ if (nFilledLen > UINT32_MAX / 3) {
+ ALOGE("b/29421675, nFilledLen overflow %llu w %u h %u",
+ (unsigned long long)nFilledLen, width, height);
+ android_errorWriteLog(0x534e4554, "29421675");
+ return false;
+ } else if (outHeader->nAllocLen < outHeader->nFilledLen) {
+ ALOGE("b/27597103, buffer too small");
+ android_errorWriteLog(0x534e4554, "27597103");
+ return false;
+ }
+
+ return true;
+}
+
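Why the UINT32_MAX / 3 guard: the caller computes nFilledLen as width * height * 3 / 2 in 32-bit arithmetic, so a large (hypothetical) geometry can wrap before the old size check runs:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t w = 65536, h = 65536;     // hypothetical extreme geometry
    uint32_t wrapped = w * h * 3 / 2;  // w * h == 2^32 wraps to 0, so the old
                                       // nAllocLen >= nFilledLen test passes
    uint64_t wide = (uint64_t)w * h;   // 4294967296 > UINT32_MAX / 3 -> reject
    printf("wrapped=%u wide=%llu\n", wrapped, (unsigned long long)wide);
    return 0;
}
```

Doing the multiply in uint64_t and rejecting anything above UINT32_MAX / 3 keeps the later * 3 exact.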
void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
if (mOutputPortSettingsChange != NONE || mEOSStatus == OUTPUT_FRAMES_FLUSHED) {
return;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 8ccbae2..84cf79c 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -66,6 +66,7 @@
status_t initDecoder();
status_t destroyDecoder();
bool outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset);
+ bool outputBufferSafe(OMX_BUFFERHEADERTYPE *outHeader);
DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
};
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 37fb33f..a4583d6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -212,6 +212,33 @@
return NULL;
}
+bool AMessage::findAsFloat(const char *name, float *value) const {
+ size_t i = findItemIndex(name, strlen(name));
+ if (i < mNumItems) {
+ const Item *item = &mItems[i];
+ switch (item->mType) {
+ case kTypeFloat:
+ *value = item->u.floatValue;
+ return true;
+ case kTypeDouble:
+ *value = (float)item->u.doubleValue;
+ return true;
+ case kTypeInt64:
+ *value = (float)item->u.int64Value;
+ return true;
+ case kTypeInt32:
+ *value = (float)item->u.int32Value;
+ return true;
+ case kTypeSize:
+ *value = (float)item->u.sizeValue;
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
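A usage sketch for the new accessor (hypothetical snippet): callers such as the i-frame-interval parsing in ACodec.cpp can now accept a key that was set as int32, int64, size, float or double:

```cpp
sp<AMessage> format = new AMessage;
format->setInt32("i-frame-interval", 2);   // caller happened to use an integer

float iFrameInterval;
if (format->findAsFloat("i-frame-interval", &iFrameInterval)) {
    // iFrameInterval == 2.0f; a setFloat(..., 2.5f) would be found the same way
}
```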
bool AMessage::contains(const char *name) const {
size_t i = findItemIndex(name, strlen(name));
return i < mNumItems;
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 98498e9..47573c3 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,6 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ASessionDescription"
#include <utils/Log.h>
+#include <cutils/log.h>
#include "ASessionDescription.h"
@@ -211,12 +212,12 @@
*PT = x;
- char key[20];
- sprintf(key, "a=rtpmap:%lu", x);
+ char key[32];
+ snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
CHECK(findAttribute(index, key, desc));
- sprintf(key, "a=fmtp:%lu", x);
+ snprintf(key, sizeof(key), "a=fmtp:%lu", x);
if (!findAttribute(index, key, params)) {
params->clear();
}
@@ -228,8 +229,11 @@
*width = 0;
*height = 0;
- char key[20];
- sprintf(key, "a=framesize:%lu", PT);
+ char key[33];
+ snprintf(key, sizeof(key), "a=framesize:%lu", PT);
+ if (PT > 9999999) {
+ android_errorWriteLog(0x534e4554, "25747670");
+ }
AString value;
if (!findAttribute(index, key, &value)) {
return false;
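The new buffer sizes are not arbitrary. With PT an unsigned long (up to 20 decimal digits on 64-bit):

```cpp
// "a=rtpmap:"     9 chars + up to 20 digits + NUL = 30 -> fits key[32]
// "a=framesize:" 12 chars + up to 20 digits + NUL = 33 -> fits key[33]
// The old key[20] held "a=framesize:" plus at most 7 digits, which is why the
// SafetyNet report (0x534e4554 spells "SNET" in ASCII) fires exactly when
// PT > 9999999, i.e. at 8 digits and beyond.
```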
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 7f6b66b..74729e4 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -40,7 +40,7 @@
LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
LOCAL_SHARED_LIBRARIES := \
libbinder \
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 4f826e5..8b831f0 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -14,6 +14,8 @@
liblog \
libbinder
+LOCAL_CFLAGS := -Wall -Werror
+
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1f1e36b..2547746 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6014,14 +6014,6 @@
for (;;) {
Vector< sp<EffectChain> > effectChains;
- // sleep with mutex unlocked
- if (sleepUs > 0) {
- ATRACE_BEGIN("sleep");
- usleep(sleepUs);
- ATRACE_END();
- sleepUs = 0;
- }
-
// activeTracks accumulates a copy of a subset of mActiveTracks
Vector< sp<RecordTrack> > activeTracks;
@@ -6042,6 +6034,15 @@
break;
}
+ // sleep with mutex unlocked
+ if (sleepUs > 0) {
+ ATRACE_BEGIN("sleep");
+ mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)sleepUs));
+ ATRACE_END();
+ sleepUs = 0;
+ continue;
+ }
+
// if no active track(s), then standby and release wakelock
size_t size = mActiveTracks.size();
if (size == 0) {
@@ -6065,6 +6066,7 @@
}
bool doBroadcast = false;
+ bool allStopped = true;
for (size_t i = 0; i < size; ) {
activeTrack = mActiveTracks[i];
@@ -6093,15 +6095,18 @@
case TrackBase::STARTING_1:
sleepUs = 10000;
i++;
+ allStopped = false;
continue;
case TrackBase::STARTING_2:
doBroadcast = true;
mStandby = false;
activeTrack->mState = TrackBase::ACTIVE;
+ allStopped = false;
break;
case TrackBase::ACTIVE:
+ allStopped = false;
break;
case TrackBase::IDLE:
@@ -6121,6 +6126,10 @@
fastTrack = activeTrack;
}
}
+
+ if (allStopped) {
+ standbyIfNotAlreadyInStandby();
+ }
if (doBroadcast) {
mStartStopCond.broadcast();
}
@@ -6703,6 +6712,8 @@
}
// note that threadLoop may still be processing the track at this point [without lock]
recordTrack->mState = TrackBase::PAUSING;
+ // signal thread to stop
+ mWaitWorkCV.broadcast();
// do not wait for mStartStopCond if exiting
if (exitPending()) {
return true;
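This broadcast pairs with the usleep-to-waitRelative change above: a plain usleep() cannot be interrupted, so stop() could stall for the whole nap, while a condition-variable wait releases mLock for the duration and can be cut short. In sketch form (names as in the hunks above):

```cpp
// thread loop: nap under mLock, but stay interruptible
if (sleepUs > 0) {
    mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)sleepUs));
    sleepUs = 0;
}
// stop(): wake the loop immediately rather than waiting out the sleep
mWaitWorkCV.broadcast();
```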
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 8b45adc..c8e5148 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -50,6 +50,7 @@
LOCAL_MODULE:= libaudiopolicyservice
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
include $(BUILD_SHARED_LIBRARY)
@@ -102,6 +103,8 @@
LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_CFLAGS += -Wall -Werror
+
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE:= libaudiopolicymanagerdefault
@@ -125,6 +128,8 @@
$(TOPDIR)frameworks/av/services/audiopolicy/common/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+LOCAL_CFLAGS := -Wall -Werror
+
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE:= libaudiopolicymanager
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 3b4ae6b..d7da0ad 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -60,6 +60,8 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+LOCAL_CFLAGS := -Wall -Werror
+
LOCAL_MODULE := libaudiopolicycomponents
include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index b828f81..1612714 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -131,6 +131,7 @@
typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+inline
static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
const char *del = "|")
{
@@ -139,6 +140,7 @@
return samplingRateCollection;
}
+inline
static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
{
FormatTraits::Collection formatCollection;
@@ -146,6 +148,7 @@
return formatCollection;
}
+inline
static audio_format_t formatFromString(const std::string &literalFormat)
{
audio_format_t format;
@@ -156,6 +159,7 @@
return format;
}
+inline
static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
{
audio_channel_mask_t channels;
@@ -166,6 +170,7 @@
return channels;
}
+inline
static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
const char *del = "|")
{
@@ -176,6 +181,7 @@
return channelMaskCollection;
}
+inline
static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
const char *del = "|")
{
@@ -185,6 +191,7 @@
return inputChannelMaskCollection;
}
+inline
static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
const char *del = "|")
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 35f078e..50453ad 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -104,7 +104,6 @@
ssize_t DeviceVector::remove(const sp<DeviceDescriptor>& item)
{
- size_t i;
ssize_t ret = indexOf(item);
if (ret < 0) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 0e64716..ff38df4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -775,7 +775,6 @@
const audio_offload_info_t *offloadInfo)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- uint32_t latency = 0;
status_t status;
#ifdef AUDIO_POLICY_TEST
@@ -1192,7 +1191,7 @@
}
}
}
- uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
+ (void) /*uint32_t muteWaitMs*/ setOutputDevice(outputDesc, device, force, 0, NULL, address);
// handle special case for sonification while in call
if (isInCall()) {
@@ -1288,7 +1287,6 @@
// force restoring the device selection on other active outputs if it differs from the
// one being selected for this output
for (size_t i = 0; i < mOutputs.size(); i++) {
- audio_io_handle_t curOutput = mOutputs.keyAt(i);
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc &&
desc->isActive() &&
@@ -1795,7 +1793,7 @@
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
- status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
@@ -2708,7 +2706,6 @@
true,
NULL);
} else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
- audio_patch_handle_t afPatchHandle = patchDesc->mAfPatchHandle;
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
status, patchDesc->mAfPatchHandle);
@@ -3187,6 +3184,7 @@
}
mEngine->setObserver(this);
status_t status = mEngine->initCheck();
+ (void) status;
ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
// mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
@@ -4066,7 +4064,7 @@
ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
- status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
mAudioPatches.removeItemsAt(index);
mpClientInterface->onAudioPatchListUpdate();
}
@@ -4095,7 +4093,7 @@
ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
- status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+ (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
mAudioPatches.removeItemsAt(index);
mpClientInterface->onAudioPatchListUpdate();
}
@@ -5400,7 +5398,6 @@
AudioProfileVector &profiles)
{
String8 reply;
- char *value;
// Format MUST be checked first to update the list of AudioProfile
if (profiles.hasDynamicFormat()) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index a6cd50e..f6e24e4 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -52,9 +52,11 @@
static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
+#ifdef USE_LEGACY_AUDIO_POLICY
namespace {
extern struct audio_policy_service_ops aps_ops;
};
+#endif
// ----------------------------------------------------------------------------
@@ -66,11 +68,6 @@
void AudioPolicyService::onFirstRef()
{
- char value[PROPERTY_VALUE_MAX];
- const struct hw_module_t *module;
- int forced_val;
- int rc;
-
{
Mutex::Autolock _l(mLock);
@@ -85,7 +82,8 @@
ALOGI("AudioPolicyService CSTOR in legacy mode");
/* instantiate the audio policy manager */
- rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
+ const struct hw_module_t *module;
+ int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
if (rc) {
return;
}
@@ -1198,6 +1196,7 @@
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
+#ifdef USE_LEGACY_AUDIO_POLICY
namespace {
struct audio_policy_service_ops aps_ops = {
.open_output = aps_open_output,
@@ -1220,5 +1219,6 @@
.open_input_on_module = aps_open_input_on_module,
};
}; // namespace <unnamed>
+#endif
}; // namespace android
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index e8e18b8..c55ac7f 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -40,6 +40,8 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+LOCAL_CFLAGS := -Wall -Werror
+
LOCAL_MODULE:= libsoundtriggerservice
include $(BUILD_SHARED_LIBRARY)