Merge "Camera: Add camera service errors to event logs in dumpsys output." into sc-dev
diff --git a/apex/manifest.json b/apex/manifest.json
index b7d8fc8..c7e56be 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media",
- "version": 309999900,
+ "version": 309999910,
"requireNativeLibs": [
"libandroid.so",
"libbinder_ndk.so",
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index e20d867..d36e914 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media.swcodec",
- "version": 309999900,
+ "version": 309999910,
"requireNativeLibs": [
":sphal"
]
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index dfd649d..c557de1 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -88,6 +88,16 @@
mTimestampUs = 0u;
mOutputSize = 0u;
mTimestampDevTest = false;
+ mWidth = ENC_DEFAULT_FRAME_WIDTH;
+ mHeight = ENC_DEFAULT_FRAME_HEIGHT;
+ mMaxWidth = 0;
+ mMaxHeight = 0;
+ mMinWidth = INT32_MAX;
+ mMinHeight = INT32_MAX;
+
+ ASSERT_EQ(getMaxMinResolutionSupported(mComponent), C2_OK);
+ mWidth = std::max(std::min(mWidth, mMaxWidth), mMinWidth);
+ mHeight = std::max(std::min(mHeight, mMaxHeight), mMinHeight);
C2SecureModeTuning secureModeTuning{};
mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
@@ -111,6 +121,8 @@
virtual void getParams() {}
bool setupConfigParam(int32_t nWidth, int32_t nHeight, int32_t nBFrame = 0);
+ c2_status_t getMaxMinResolutionSupported(
+ const std::shared_ptr<android::Codec2Client::Component>& component);
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
@@ -181,6 +193,12 @@
uint32_t mFailedWorkReceived;
uint64_t mTimestampUs;
uint64_t mOutputSize;
+ int32_t mWidth;
+ int32_t mHeight;
+ int32_t mMaxWidth;
+ int32_t mMaxHeight;
+ int32_t mMinWidth;
+ int32_t mMinHeight;
std::list<uint64_t> mTimestampUslist;
std::list<uint64_t> mFlushedIndices;
@@ -271,6 +289,37 @@
strcat(URL, "bbb_352x288_420p_30fps_32frames.yuv");
}
+void fillByteBuffer(char* inputBuffer, char* mInputData, uint32_t nWidth, int32_t nHeight) {
+ int width, height, tileWidth, tileHeight;
+ int offset = 0, frmOffset = 0;
+ int numOfPlanes = 3;
+ for (int plane = 0; plane < numOfPlanes; plane++) {
+ if (plane == 0) {
+ width = nWidth;
+ height = nHeight;
+ tileWidth = ENC_DEFAULT_FRAME_WIDTH;
+ tileHeight = ENC_DEFAULT_FRAME_HEIGHT;
+ } else {
+ width = nWidth / 2;
+ tileWidth = ENC_DEFAULT_FRAME_WIDTH / 2;
+ height = nHeight / 2;
+ tileHeight = ENC_DEFAULT_FRAME_HEIGHT / 2;
+ }
+ for (int k = 0; k < height; k += tileHeight) {
+ int rowsToCopy = std::min(height - k, tileHeight);
+ for (int j = 0; j < rowsToCopy; j++) {
+ for (int i = 0; i < width; i += tileWidth) {
+ int colsToCopy = std::min(width - i, tileWidth);
+ memcpy(inputBuffer + (offset + (k + j) * width + i),
+ mInputData + (frmOffset + j * tileWidth), colsToCopy);
+ }
+ }
+ }
+ offset += width * height;
+ frmOffset += tileWidth * tileHeight;
+ }
+}
+
void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
std::mutex& queueLock, std::condition_variable& queueCondition,
std::list<std::unique_ptr<C2Work>>& workQueue,
@@ -314,12 +363,22 @@
ULock l(queueLock);
flushedIndices.emplace_back(frameID);
}
- char* data = (char*)malloc(bytesCount);
- ASSERT_NE(data, nullptr);
- memset(data, 0, bytesCount);
- if (eleStream.is_open()) {
- eleStream.read(data, bytesCount);
- ASSERT_EQ(eleStream.gcount(), bytesCount);
+ std::vector<uint8_t> buffer(bytesCount);
+ char* data = (char*)buffer.data();
+ if (nWidth != ENC_DEFAULT_FRAME_WIDTH || nHeight != ENC_DEFAULT_FRAME_HEIGHT) {
+ int defaultBytesCount = ENC_DEFAULT_FRAME_HEIGHT * ENC_DEFAULT_FRAME_WIDTH * 3 >> 1;
+ std::vector<uint8_t> srcBuffer(defaultBytesCount);
+ char* srcData = (char*)srcBuffer.data();
+ if (eleStream.is_open()) {
+ eleStream.read(srcData, defaultBytesCount);
+ ASSERT_EQ(eleStream.gcount(), defaultBytesCount);
+ }
+ fillByteBuffer(data, srcData, nWidth, nHeight);
+ } else {
+ if (eleStream.is_open()) {
+ eleStream.read(data, bytesCount);
+ ASSERT_EQ(eleStream.gcount(), bytesCount);
+ }
}
std::shared_ptr<C2GraphicBlock> block;
err = graphicPool->fetchGraphicBlock(nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
@@ -352,7 +411,6 @@
work->input.buffers.emplace_back(new GraphicBuffer(block));
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
- free(data);
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
@@ -381,13 +439,59 @@
}
};
+c2_status_t Codec2VideoEncHidlTestBase::getMaxMinResolutionSupported(
+ const std::shared_ptr<android::Codec2Client::Component>& component) {
+ std::unique_ptr<C2StreamPictureSizeInfo::input> param =
+ std::make_unique<C2StreamPictureSizeInfo::input>();
+ std::vector<C2FieldSupportedValuesQuery> validValueInfos = {
+ C2FieldSupportedValuesQuery::Current(
+ C2ParamField(param.get(), &C2StreamPictureSizeInfo::width)),
+ C2FieldSupportedValuesQuery::Current(
+ C2ParamField(param.get(), &C2StreamPictureSizeInfo::height))};
+ c2_status_t c2err = component->querySupportedValues(validValueInfos, C2_MAY_BLOCK);
+ if (c2err != C2_OK || validValueInfos.size() != 2u) {
+ ALOGE("querySupportedValues_vb failed for pictureSize");
+ return c2err;
+ }
+
+ const auto& c2FSVWidth = validValueInfos[0].values;
+ const auto& c2FSVHeight = validValueInfos[1].values;
+ switch (c2FSVWidth.type) {
+ case C2FieldSupportedValues::type_t::RANGE: {
+ const auto& widthRange = c2FSVWidth.range;
+ const auto& heightRange = c2FSVHeight.range;
+ mMaxWidth = (uint32_t)(widthRange.max).ref<uint32_t>();
+ mMaxHeight = (uint32_t)(heightRange.max).ref<uint32_t>();
+ mMinWidth = (uint32_t)(widthRange.min).ref<uint32_t>();
+ mMinHeight = (uint32_t)(heightRange.min).ref<uint32_t>();
+ break;
+ }
+ case C2FieldSupportedValues::type_t::VALUES: {
+ int32_t curr = 0;
+ for (const C2Value::Primitive& prim : c2FSVWidth.values) {
+ curr = (uint32_t)prim.ref<uint32_t>();
+ mMaxWidth = std::max(curr, mMaxWidth);
+ mMinWidth = std::min(curr, mMinWidth);
+ }
+ for (const C2Value::Primitive& prim : c2FSVHeight.values) {
+ curr = (uint32_t)prim.ref<uint32_t>();
+ mMaxHeight = std::max(curr, mMaxHeight);
+ mMinHeight = std::min(curr, mMinHeight);
+ }
+ break;
+ }
+ default:
+ ALOGE("Non supported data");
+ return C2_BAD_VALUE;
+ }
+ return C2_OK;
+}
+
TEST_P(Codec2VideoEncEncodeTest, EncodeTest) {
description("Encodes input file");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
char mURL[512];
- int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
- int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
bool signalEOS = std::get<3>(GetParam());
// Send an empty frame to receive CSD data from encoder.
bool sendEmptyFirstFrame = std::get<3>(GetParam());
@@ -415,10 +519,6 @@
inputFrames--;
}
- if (!setupConfigParam(nWidth, nHeight, mConfigBPictures ? 1 : 0)) {
- std::cout << "[ WARN ] Test Skipped \n";
- return;
- }
std::vector<std::unique_ptr<C2Param>> inParams;
c2_status_t c2_status = mComponent->query({}, {C2StreamGopTuning::output::PARAM_TYPE},
C2_DONT_BLOCK, &inParams);
@@ -438,6 +538,9 @@
mConfigBPictures = false;
}
}
+ if (!setupConfigParam(mWidth, mHeight, mConfigBPictures ? 1 : 0)) {
+ ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
+ }
ASSERT_EQ(mComponent->start(), C2_OK);
@@ -447,7 +550,7 @@
}
ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
- inputFrames, ENC_NUM_FRAMES, nWidth, nHeight, false,
+ inputFrames, ENC_NUM_FRAMES, mWidth, mHeight, false,
signalEOS));
// mDisableTest will be set if buffer was not fetched properly.
// This may happen when resolution is not proper but config succeeded
@@ -538,14 +641,12 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
char mURL[512];
- int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
- int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
+
strcpy(mURL, sResourceDir.c_str());
GetURLForComponent(mURL);
- if (!setupConfigParam(nWidth, nHeight)) {
- std::cout << "[ WARN ] Test Skipped \n";
- return;
+ if (!setupConfigParam(mWidth, mHeight)) {
+ ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
}
ASSERT_EQ(mComponent->start(), C2_OK);
@@ -567,7 +668,7 @@
ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mGraphicPool, eleStream, mDisableTest, 0,
- numFramesFlushed, nWidth, nHeight, false, false));
+ numFramesFlushed, mWidth, mHeight, false, false));
// mDisableTest will be set if buffer was not fetched properly.
// This may happen when resolution is not proper but config succeeded
// In this cases, we skip encoding the input stream
@@ -587,8 +688,8 @@
ASSERT_EQ(mWorkQueue.size(), MAX_INPUT_BUFFERS);
ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
- numFramesFlushed, numFrames - numFramesFlushed, nWidth,
- nHeight, true));
+ numFramesFlushed, numFrames - numFramesFlushed, mWidth,
+ mHeight, true));
eleStream.close();
// mDisableTest will be set if buffer was not fetched properly.
// This may happen when resolution is not proper but config succeeded
@@ -731,11 +832,8 @@
mFlushedIndices.clear();
- int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
- int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
- if (!setupConfigParam(nWidth, nHeight)) {
- std::cout << "[ WARN ] Test Skipped \n";
- return;
+ if (!setupConfigParam(mWidth, mHeight)) {
+ ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
}
ASSERT_EQ(mComponent->start(), C2_OK);
@@ -756,8 +854,8 @@
ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mGraphicPool, eleStream,
- mDisableTest, inputFrameId, ENC_NUM_FRAMES, nWidth,
- nHeight, false, false));
+ mDisableTest, inputFrameId, ENC_NUM_FRAMES, mWidth,
+ mHeight, false, false));
// mDisableTest will be set if buffer was not fetched properly.
// This may happen when resolution is not proper but config succeeded
// In this cases, we skip encoding the input stream
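
The SetUp() change above clamps the default 352x288 test resolution into the range reported by getMaxMinResolutionSupported(). A minimal sketch of just that clamping step, with hypothetical limits standing in for what querySupportedValues() would report:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical supported range; the real values come from querying
        // C2StreamPictureSizeInfo on the component under test.
        const int32_t minW = 176, minH = 144, maxW = 1920, maxH = 1080;

        int32_t width = 352, height = 288;   // ENC_DEFAULT_FRAME_WIDTH/HEIGHT

        // Same expression as in SetUp(): cap at the max, then raise to the min.
        width  = std::max(std::min(width,  maxW), minW);
        height = std::max(std::min(height, maxH), minH);

        printf("configuring %dx%d\n", width, height);   // 352x288 here
        return 0;
    }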
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index d0997db..94af93c 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -523,7 +523,7 @@
}
void AmendOutputFormatWithCodecSpecificData(
- const uint8_t *data, size_t size, const std::string mediaType,
+ const uint8_t *data, size_t size, const std::string &mediaType,
const sp<AMessage> &outputFormat) {
if (mediaType == MIMETYPE_VIDEO_AVC) {
// Codec specific data should be SPS and PPS in a single buffer,
@@ -2285,7 +2285,12 @@
}
}
if (config->mInputSurface) {
- config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ if (work->worklets.empty()
+ || !work->worklets.back()
+ || (work->worklets.back()->output.flags
+ & C2FrameData::FLAG_INCOMPLETE) == 0) {
+ config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ }
}
if (initDataWatcher.hasChanged()) {
initData = initDataWatcher.update();
@@ -2441,6 +2446,11 @@
C2String compName;
{
Mutexed<State>::Locked state(mState);
+ if (!state->comp) {
+ ALOGD("previous call to %s exceeded timeout "
+ "and the component is already released", name.c_str());
+ return;
+ }
compName = state->comp->getName();
}
ALOGW("[%s] previous call to %s exceeded timeout", compName.c_str(), name.c_str());
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 7656307..389b73f 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -749,10 +749,20 @@
AudioFlingerServerAdapter::AudioFlingerServerAdapter(
const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {}
-status_t AudioFlingerServerAdapter::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+status_t AudioFlingerServerAdapter::onTransact(uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
uint32_t flags) {
- return mDelegate->onPreTransact(static_cast<Delegate::TransactionCode>(code), data, flags)
- ?: BnAudioFlingerService::onTransact(code, data, reply, flags);
+ return mDelegate->onTransactWrapper(static_cast<Delegate::TransactionCode>(code),
+ data,
+ flags,
+ [&] {
+ return BnAudioFlingerService::onTransact(
+ code,
+ data,
+ reply,
+ flags);
+ });
}
status_t AudioFlingerServerAdapter::dump(int fd, const Vector<String16>& args) {
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 3a5d164..3a04569 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -516,18 +516,22 @@
};
/**
- * And optional hook, called on every transaction, before unparceling the data and
- * dispatching to the respective method. Useful for bulk operations, such as logging or
- * permission checks.
- * If an error status is returned, the transaction will return immediately and will not be
- * processed.
+ * An optional hook, called on every transaction, allowing additional operations to be
+ * performed before/after the unparceling of the data and dispatching to the respective
+ * method. Useful for bulk operations, such as logging or permission checks.
+ * The implementer is responsible for invoking the provided delegate function, which is
+ * the actual onTransact(), unless an error occurs.
+ * By default, this is just a pass-through to the delegate.
*/
- virtual status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) {
+ virtual status_t onTransactWrapper(TransactionCode code,
+ const Parcel& data,
+ uint32_t flags,
+ const std::function<status_t()>& delegate) {
(void) code;
(void) data;
(void) flags;
- return OK;
- };
+ return delegate();
+ }
/**
* An optional hook for implementing diagnostics dumping.
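
A minimal sketch of how a Delegate subclass might use the new hook to time every transaction; the TimedAudioFlinger name is hypothetical and the remaining pure-virtual IAudioFlinger methods are omitted:

    // Sketch only: assumes the rest of the Delegate interface is implemented elsewhere.
    class TimedAudioFlinger : public AudioFlingerServerAdapter::Delegate {
        status_t onTransactWrapper(TransactionCode code,
                                   const Parcel& data,
                                   uint32_t flags,
                                   const std::function<status_t()>& delegate) override {
            (void) data;
            (void) flags;
            const int64_t startNs = systemTime();
            // The hook must invoke the delegate, which runs the real onTransact().
            const status_t status = delegate();
            ALOGV("transaction %d took %lld ns",
                  static_cast<int>(code), (long long)(systemTime() - startNs));
            return status;
        }
    };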
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index b40317f..e96c041 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -33,7 +33,6 @@
relative_install_path: "soundfx",
cflags: [
- "-DBUILD_FLOAT",
"-fvisibility=hidden",
"-Wall",
"-Werror",
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 99ac4f5..5ca5525 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -31,13 +31,8 @@
// Do not submit with DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER defined, strictly for testing
//#define DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER 0
-#ifdef BUILD_FLOAT
#define MINUS_3_DB_IN_FLOAT 0.70710678f // -3dB = 0.70710678f
const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_FLOAT;
-#else
-#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
-const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_16_BIT;
-#endif
// subset of possible audio_channel_mask_t values, and AUDIO_CHANNEL_OUT_* renamed to CHANNEL_MASK_*
typedef enum {
@@ -88,7 +83,7 @@
// number of effects in this library
const int kNbEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
-#ifdef BUILD_FLOAT
+
static LVM_FLOAT clamp_float(LVM_FLOAT a) {
if (a > 1.0f) {
return 1.0f;
@@ -100,7 +95,7 @@
return a;
}
}
-#endif
+
/*----------------------------------------------------------------------------
* Test code
*--------------------------------------------------------------------------*/
@@ -303,106 +298,6 @@
return -EINVAL;
}
-#ifndef BUILD_FLOAT
-/*--- Effect Control Interface Implementation ---*/
-
-static int Downmix_Process(effect_handle_t self,
- audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
-
- downmix_object_t *pDownmixer;
- int16_t *pSrc, *pDst;
- downmix_module_t *pDwmModule = (downmix_module_t *)self;
-
- if (pDwmModule == NULL) {
- return -EINVAL;
- }
-
- if (inBuffer == NULL || inBuffer->raw == NULL ||
- outBuffer == NULL || outBuffer->raw == NULL ||
- inBuffer->frameCount != outBuffer->frameCount) {
- return -EINVAL;
- }
-
- pDownmixer = (downmix_object_t*) &pDwmModule->context;
-
- if (pDownmixer->state == DOWNMIX_STATE_UNINITIALIZED) {
- ALOGE("Downmix_Process error: trying to use an uninitialized downmixer");
- return -EINVAL;
- } else if (pDownmixer->state == DOWNMIX_STATE_INITIALIZED) {
- ALOGE("Downmix_Process error: trying to use a non-configured downmixer");
- return -ENODATA;
- }
-
- pSrc = inBuffer->s16;
- pDst = outBuffer->s16;
- size_t numFrames = outBuffer->frameCount;
-
- const bool accumulate =
- (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
-
- switch(pDownmixer->type) {
-
- case DOWNMIX_TYPE_STRIP:
- if (accumulate) {
- while (numFrames) {
- pDst[0] = clamp16(pDst[0] + pSrc[0]);
- pDst[1] = clamp16(pDst[1] + pSrc[1]);
- pSrc += pDownmixer->input_channel_count;
- pDst += 2;
- numFrames--;
- }
- } else {
- while (numFrames) {
- pDst[0] = pSrc[0];
- pDst[1] = pSrc[1];
- pSrc += pDownmixer->input_channel_count;
- pDst += 2;
- numFrames--;
- }
- }
- break;
-
- case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
- // bypass the optimized downmix routines for the common formats
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
- return -EINVAL;
- }
- break;
-#endif
- // optimize for the common formats
- switch((downmix_input_channel_mask_t)downmixInputChannelMask) {
- case CHANNEL_MASK_QUAD_BACK:
- case CHANNEL_MASK_QUAD_SIDE:
- Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
- break;
- case CHANNEL_MASK_5POINT1_BACK:
- case CHANNEL_MASK_5POINT1_SIDE:
- Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
- break;
- case CHANNEL_MASK_7POINT1:
- Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
- break;
- default:
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
- return -EINVAL;
- }
- break;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-#else /*BUILD_FLOAT*/
/*--- Effect Control Interface Implementation ---*/
static int Downmix_Process(effect_handle_t self,
@@ -503,7 +398,6 @@
return 0;
}
-#endif
static int Downmix_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
void *pCmdData, uint32_t *replySize, void *pReplyData) {
@@ -940,35 +834,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is RL
- // sample at index 3 is RR
- if (accumulate) {
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp16(pDst[0] + ((pSrc[0] + pSrc[2]) >> 1));
- // FR + RR
- pDst[1] = clamp16(pDst[1] + ((pSrc[1] + pSrc[3]) >> 1));
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp16((pSrc[0] + pSrc[2]) >> 1);
- // FR + RR
- pDst[1] = clamp16((pSrc[1] + pSrc[3]) >> 1);
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
// sample at index 0 is FL
// sample at index 1 is FR
@@ -996,7 +861,6 @@
}
}
}
-#endif
/*----------------------------------------------------------------------------
* Downmix_foldFrom5Point1()
@@ -1015,52 +879,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
- // accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
// sample at index 0 is FL
@@ -1105,7 +923,6 @@
}
}
}
-#endif
/*----------------------------------------------------------------------------
* Downmix_foldFrom7Point1()
@@ -1124,54 +941,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // sample at index 6 is SL
- // sample at index 7 is SR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + SL + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + SR + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
- //accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + SL + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + SR + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
// sample at index 0 is FL
@@ -1218,7 +987,7 @@
}
}
}
-#endif
+
/*----------------------------------------------------------------------------
* Downmix_foldGeneric()
*----------------------------------------------------------------------------
@@ -1245,99 +1014,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-bool Downmix_foldGeneric(
- uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-
- if (!Downmix_validChannelMask(mask)) {
- return false;
- }
-
- const bool hasSides = (mask & kSides) != 0;
- const bool hasBacks = (mask & kBacks) != 0;
-
- const int numChan = audio_channel_count_from_out_mask(mask);
- const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
- const bool hasLFE =
- ((mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY);
- const bool hasBC = ((mask & AUDIO_CHANNEL_OUT_BACK_CENTER) == AUDIO_CHANNEL_OUT_BACK_CENTER);
- // compute at what index each channel is: samples will be in the following order:
- // FL FR FC LFE BL BR BC SL SR
- // when a channel is not present, its index is set to the same as the index of the preceding
- // channel
- const int indexFC = hasFC ? 2 : 1; // front center
- const int indexLFE = hasLFE ? indexFC + 1 : indexFC; // low frequency
- const int indexBL = hasBacks ? indexLFE + 1 : indexLFE; // back left
- const int indexBR = hasBacks ? indexBL + 1 : indexBL; // back right
- const int indexBC = hasBC ? indexBR + 1 : indexBR; // back center
- const int indexSL = hasSides ? indexBC + 1 : indexBC; // side left
- const int indexSR = hasSides ? indexSL + 1 : indexSL; // side right
-
- int32_t lt, rt, centersLfeContrib; // samples in Q19.12 format
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- centersLfeContrib = 0;
- if (hasFC) { centersLfeContrib += pSrc[indexFC]; }
- if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
- if (hasBC) { centersLfeContrib += pSrc[indexBC]; }
- centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
- // always has FL/FR
- lt = (pSrc[0] << 12);
- rt = (pSrc[1] << 12);
- // mix in sides and backs
- if (hasSides) {
- lt += pSrc[indexSL] << 12;
- rt += pSrc[indexSR] << 12;
- }
- if (hasBacks) {
- lt += pSrc[indexBL] << 12;
- rt += pSrc[indexBR] << 12;
- }
- lt += centersLfeContrib;
- rt += centersLfeContrib;
- // accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- } else {
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- centersLfeContrib = 0;
- if (hasFC) { centersLfeContrib += pSrc[indexFC]; }
- if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
- if (hasBC) { centersLfeContrib += pSrc[indexBC]; }
- centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
- // always has FL/FR
- lt = (pSrc[0] << 12);
- rt = (pSrc[1] << 12);
- // mix in sides and backs
- if (hasSides) {
- lt += pSrc[indexSL] << 12;
- rt += pSrc[indexSR] << 12;
- }
- if (hasBacks) {
- lt += pSrc[indexBL] << 12;
- rt += pSrc[indexBR] << 12;
- }
- lt += centersLfeContrib;
- rt += centersLfeContrib;
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- }
- return true;
-}
-#else
bool Downmix_foldGeneric(
uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
@@ -1429,4 +1105,3 @@
}
return true;
}
-#endif
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
index c1be0f2..679a855 100644
--- a/media/libeffects/downmix/EffectDownmix.h
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -27,9 +27,8 @@
*/
#define DOWNMIX_OUTPUT_CHANNELS AUDIO_CHANNEL_OUT_STEREO
-#ifdef BUILD_FLOAT
#define LVM_FLOAT float
-#endif
+
typedef enum {
DOWNMIX_STATE_UNINITIALIZED,
DOWNMIX_STATE_INITIALIZED,
@@ -97,18 +96,10 @@
int Downmix_Reset(downmix_object_t *pDownmixer, bool init);
int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-#ifdef BUILD_FLOAT
void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
bool Downmix_foldGeneric(
uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
-#else
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-bool Downmix_foldGeneric(
- uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-#endif
#endif /*ANDROID_EFFECTDOWNMIX_H_*/
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 5b60bbf..2c1f158 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -302,7 +302,7 @@
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
mCodec = MediaCodec::CreateByType(
- mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid);
+ mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid, format);
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index b43df38..d2d978a 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -124,8 +124,16 @@
// index(i) should be started from 1. 0 is reserved for [root]
mRTPConn->addStream(sockRtp, sockRtcp, desc, i + 1, notify, false);
mRTPConn->setSelfID(info->mSelfID);
- mRTPConn->setJbTime(
- (info->mJbTimeMs <= 3000 && info->mJbTimeMs >= 40) ? info->mJbTimeMs : 300);
+ mRTPConn->setStaticJitterTimeMs(info->mJbTimeMs);
+
+ unsigned long PT;
+ AString formatDesc, formatParams;
+ // index(i) should be started from 1. 0 is reserved for [root]
+ desc->getFormatType(i + 1, &PT, &formatDesc, &formatParams);
+
+ int32_t clockRate, numChannels;
+ ASessionDescription::ParseFormatDesc(formatDesc.c_str(), &clockRate, &numChannels);
+ info->mTimeScale = clockRate;
info->mRTPSocket = sockRtp;
info->mRTCPSocket = sockRtcp;
@@ -146,10 +154,8 @@
if (info->mIsAudio) {
mAudioTrack = source;
- info->mTimeScale = 16000;
} else {
mVideoTrack = source;
- info->mTimeScale = 90000;
}
info->mSource = source;
@@ -680,7 +686,7 @@
newTrackInfo.mIsAudio = isAudioKey;
mTracks.push(newTrackInfo);
info = &mTracks.editTop();
- info->mJbTimeMs = 300;
+ info->mJbTimeMs = kStaticJitterTimeMs;
}
if (key == "rtp-param-mime-type") {
@@ -724,7 +730,8 @@
int64_t networkHandle = atoll(value);
setSocketNetwork(networkHandle);
} else if (key == "rtp-param-jitter-buffer-time") {
- info->mJbTimeMs = atoi(value);
+ // clamping min at 40, max at 3000
+ info->mJbTimeMs = std::min(std::max(40, atoi(value)), 3000);
}
return OK;
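
The track time scale is now derived from the SDP rtpmap entry via ASessionDescription::ParseFormatDesc() instead of being hard-coded to 16000/90000. A worked sketch of what that parse yields, approximated with sscanf rather than the real helper:

    #include <cstdio>

    int main() {
        // Typical rtpmap format descriptions (assumed sample inputs).
        const char *video = "H264/90000";
        const char *audio = "MP4A-LATM/44100/2";

        char name[32];
        int clockRate = 0, numChannels = 1;

        sscanf(video, "%31[^/]/%d/%d", name, &clockRate, &numChannels);
        printf("%s -> timeScale %d\n", name, clockRate);              // H264 -> 90000

        numChannels = 1;
        sscanf(audio, "%31[^/]/%d/%d", name, &clockRate, &numChannels);
        printf("%s -> timeScale %d, %d ch\n", name, clockRate, numChannels);  // 44100, 2 ch
        return 0;
    }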
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index 2691201..2657889 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -107,7 +107,7 @@
// Maximum allowed back-to-back start count.
int32_t pacerBurstCountQuota = 10;
// Maximum allowed back-to-back running time.
- int32_t pacerBurstTimeQuotaSeconds = 180; // 3-min
+ int32_t pacerBurstTimeQuotaSeconds = 120; // 2-min
};
struct Session {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 3638526..57bdba0 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -638,12 +638,20 @@
sp<MediaCodec> MediaCodec::CreateByType(
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
uid_t uid) {
+ sp<AMessage> format;
+ return CreateByType(looper, mime, encoder, err, pid, uid, format);
+}
+
+sp<MediaCodec> MediaCodec::CreateByType(
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
+ uid_t uid, sp<AMessage> format) {
Vector<AString> matchingCodecs;
MediaCodecList::findMatchingCodecs(
mime.c_str(),
encoder,
0,
+ format,
&matchingCodecs);
if (err != NULL) {
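
A minimal usage sketch of the new CreateByType() overload; the looper, pid and uid values are assumed to exist, and the format only refines codec matching here and must still be passed to configure() as before:

    sp<AMessage> format = new AMessage;
    format->setInt32("width", 3840);     // refine matching to codecs that can
    format->setInt32("height", 2160);    // actually handle this resolution

    status_t err = OK;
    sp<MediaCodec> codec = MediaCodec::CreateByType(
            looper, "video/hevc", false /* encoder */, &err, pid, uid, format);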
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 799ca0d..6243828 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -44,6 +44,7 @@
#include <cutils/properties.h>
#include <algorithm>
+#include <regex>
namespace android {
@@ -348,6 +349,14 @@
void MediaCodecList::findMatchingCodecs(
const char *mime, bool encoder, uint32_t flags,
Vector<AString> *matches) {
+ sp<AMessage> format; // initializes as clear/null
+ findMatchingCodecs(mime, encoder, flags, format, matches);
+}
+
+//static
+void MediaCodecList::findMatchingCodecs(
+ const char *mime, bool encoder, uint32_t flags, sp<AMessage> format,
+ Vector<AString> *matches) {
matches->clear();
const sp<IMediaCodecList> list = getInstance();
@@ -368,14 +377,22 @@
const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
CHECK(info != nullptr);
+
AString componentName = info->getCodecName();
+ if (!codecHandlesFormat(mime, info, format)) {
+ ALOGV("skipping codec '%s' which doesn't satisfy format %s",
+ componentName.c_str(), format->debugString(2).c_str());
+ continue;
+ }
+
if ((flags & kHardwareCodecsOnly) && isSoftwareCodec(componentName)) {
ALOGV("skipping SW codec '%s'", componentName.c_str());
- } else {
- matches->push(componentName);
- ALOGV("matching '%s'", componentName.c_str());
+ continue;
}
+
+ matches->push(componentName);
+ ALOGV("matching '%s'", componentName.c_str());
}
if (flags & kPreferSoftwareCodecs ||
@@ -384,4 +401,118 @@
}
}
+/*static*/
+bool MediaCodecList::codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info,
+ sp<AMessage> format) {
+
+ if (format == nullptr) {
+ ALOGD("codecHandlesFormat: no format, so no extra checks");
+ return true;
+ }
+
+ sp<MediaCodecInfo::Capabilities> capabilities = info->getCapabilitiesFor(mime);
+
+ // ... no capabilities listed means 'handle it all'
+ if (capabilities == nullptr) {
+ ALOGD("codecHandlesFormat: no capabilities for refinement");
+ return true;
+ }
+
+ const sp<AMessage> &details = capabilities->getDetails();
+
+ // if parsing the capabilities fails, ignore this particular codec
+ // currently video-centric evaluation
+ //
+ // TODO: we would like this to handle the same set of properties as
+ // MediaCodecInfo::isFormatSupported().
+ // Not yet handled here:
+ // profile, level, bitrate, features
+
+ bool isVideo = false;
+ if (strncmp(mime, "video/", 6) == 0) {
+ isVideo = true;
+ }
+
+ if (isVideo) {
+ int width = -1;
+ int height = -1;
+
+ if (format->findInt32("height", &height) && format->findInt32("width", &width)) {
+
+ // is it within the supported size range of the codec?
+ AString sizeRange;
+ AString minSize, maxSize;
+ AString minWidth, minHeight;
+ AString maxWidth, maxHeight;
+ if (!details->findString("size-range", &sizeRange)
+ || !splitString(sizeRange, "-", &minSize, &maxSize)) {
+ ALOGW("Unable to parse size-range from codec info");
+ return false;
+ }
+ if (!splitString(minSize, "x", &minWidth, &minHeight)) {
+ if (!splitString(minSize, "*", &minWidth, &minHeight)) {
+ ALOGW("Unable to parse size-range/min-size from codec info");
+ return false;
+ }
+ }
+ if (!splitString(maxSize, "x", &maxWidth, &maxHeight)) {
+ if (!splitString(maxSize, "*", &maxWidth, &maxHeight)) {
+ ALOGW("Unable to fully parse size-range/max-size from codec info");
+ return false;
+ }
+ }
+
+ // strtol() returns 0 if unable to parse a number, which works for our later tests
+ int minW = strtol(minWidth.c_str(), NULL, 10);
+ int minH = strtol(minHeight.c_str(), NULL, 10);
+ int maxW = strtol(maxWidth.c_str(), NULL, 10);
+ int maxH = strtol(maxHeight.c_str(), NULL, 10);
+
+ if (minW == 0 || minH == 0 || maxW == 0 || maxH == 0) {
+ ALOGW("Unable to parse values from size-range from codec info");
+ return false;
+ }
+
+ // finally, comparison time
+ if (width < minW || width > maxW || height < minH || height > maxH) {
+ ALOGV("format %dx%d outside of allowed %dx%d-%dx%d",
+ width, height, minW, minH, maxW, maxH);
+ // at this point, it's a rejection, UNLESS
+ // the codec allows swapping width and height
+ int32_t swappable;
+ if (!details->findInt32("feature-can-swap-width-height", &swappable)
+ || swappable == 0) {
+ return false;
+ }
+ // NB: deliberate comparison of height vs width limits (and width vs height)
+ if (height < minW || height > maxW || width < minH || width > maxH) {
+ return false;
+ }
+ }
+
+ // @ 'alignment' [e.g. "2x2" which tells us that both dimensions must be even]
+ // no alignment == we're ok with anything
+ AString alignment, alignWidth, alignHeight;
+ if (details->findString("alignment", &alignment)) {
+ if (splitString(alignment, "x", &alignWidth, &alignHeight) ||
+ splitString(alignment, "*", &alignWidth, &alignHeight)) {
+ int wAlign = strtol(alignWidth.c_str(), NULL, 10);
+ int hAlign = strtol(alignHeight.c_str(), NULL, 10);
+ // strtol() returns 0 if failing to parse, treat as "no restriction"
+ if (wAlign > 0 && hAlign > 0) {
+ if ((width % wAlign) != 0 || (height % hAlign) != 0) {
+ ALOGV("format dimensions %dx%d not aligned to %dx%d",
+ width, height, wAlign, hAlign);
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // haven't found a reason to discard this one
+ return true;
+}
+
} // namespace android
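
A worked sketch of the size-range check above, using a hypothetical capability string and sscanf in place of the splitString()/strtol() pair:

    #include <cstdio>

    int main() {
        // Hypothetical "size-range" detail as exposed in the codec capabilities.
        const char *sizeRange = "176x144-1920x1080";

        int minW, minH, maxW, maxH;
        if (sscanf(sizeRange, "%dx%d-%dx%d", &minW, &minH, &maxW, &maxH) != 4) {
            return 1;   // would be treated as "unable to parse" -> codec skipped
        }

        const int width = 1280, height = 720;   // requested format
        const bool inRange = width >= minW && width <= maxW
                && height >= minH && height <= maxH;
        printf("1280x720 supported: %s\n", inRange ? "yes" : "no");   // yes

        // 4096x2160 would fail here (4096 > 1920) unless the codec also reports
        // "feature-can-swap-width-height" and the swapped dimensions fit.
        return 0;
    }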
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 0584054..3517bae 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -104,6 +104,10 @@
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
pid_t pid = kNoPid, uid_t uid = kNoUid);
+ static sp<MediaCodec> CreateByType(
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err,
+ pid_t pid, uid_t uid, sp<AMessage> format);
+
static sp<MediaCodec> CreateByComponentName(
const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
pid_t pid = kNoPid, uid_t uid = kNoUid);
@@ -400,6 +404,7 @@
std::string mLastReplyOrigin;
std::vector<sp<AMessage>> mDeferredMessages;
uint32_t mFlags;
+ int64_t mPresentationTimeUs = 0;
status_t mStickyError;
sp<Surface> mSurface;
SoftwareRenderer *mSoftRenderer;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index 78d1005..3cf455c 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -75,6 +75,16 @@
uint32_t flags,
Vector<AString> *matchingCodecs);
+ // add optional format, to further refine matching codecs
+ static void findMatchingCodecs(
+ const char *mime,
+ bool createEncoder,
+ uint32_t flags,
+ sp<AMessage> format,
+ Vector<AString> *matchingCodecs);
+
+ static bool codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info, sp<AMessage> format);
+
static bool isSoftwareCodec(const AString &componentName);
private:
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 2f93d5d..92b2b09 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -34,6 +34,8 @@
namespace android {
+const double JITTER_MULTIPLE = 1.5f;
+
// static
AAVCAssembler::AAVCAssembler(const sp<AMessage> ¬ify)
: mNotifyMsg(notify),
@@ -123,22 +125,48 @@
int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
- int64_t startTime = source->mFirstSysTime / 1000;
- int64_t nowTime = ALooper::GetNowUs() / 1000;
- int64_t playedTime = nowTime - startTime;
+ const int64_t startTimeMs = source->mFirstSysTime / 1000;
+ const int64_t nowTimeMs = ALooper::GetNowUs() / 1000;
+ const int64_t staticJbTimeMs = source->getStaticJitterTimeMs();
+ const int64_t dynamicJbTimeMs = source->getDynamicJitterTimeMs();
+ const int64_t clockRate = source->mClockRate;
- int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
- const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+ int64_t playedTimeMs = nowTimeMs - startTimeMs;
+ int64_t playedTimeRtp = source->mFirstRtpTime + MsToRtp(playedTimeMs, clockRate);
- int64_t expiredTimeInJb = rtpTime + jitterTime;
- bool isExpired = expiredTimeInJb <= (playedTimeRtp);
- bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
- bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+ /**
+ * Based on experience with real commercial network services,
+ * 300 ms is a heuristic upper bound for the jitter buffer time of a video RTP service.
+ */
+
+ /**
+ * The static (base) jitter is the expected propagation delay that we budget for.
+ * Packets that fall outside this budget may be dropped.
+ * A shorter value gives faster response but risks losing packets.
+ * Expected range: 50 ms ~ 1000 ms (300 ms is a practical upper bound)
+ */
+ const int64_t baseJbTimeRtp = MsToRtp(staticJbTimeMs, clockRate);
+ /**
+ * Dynamic jitter is the interarrival-time variance defined in section 6.4.1 of RFC 3550.
+ * We can regard it as a moment-to-moment tolerance.
+ * Expected range: 0 ms ~ 150 ms (practically not above 300 ms)
+ */
+ const int64_t dynamicJbTimeRtp = // Max 150
+ std::min(MsToRtp(dynamicJbTimeMs, clockRate), MsToRtp(150, clockRate));
+ const int64_t jitterTimeRtp = baseJbTimeRtp + dynamicJbTimeRtp; // Total jitter time
+
+ int64_t expiredTimeRtp = rtpTime + jitterTimeRtp; // When does this buffer expire? (T)
+ int64_t diffTimeRtp = playedTimeRtp - expiredTimeRtp;
+ bool isExpired = (diffTimeRtp >= 0); // It's expired if T has already passed
+ bool isFirstLineBroken = (diffTimeRtp > jitterTimeRtp); // (T + jitter) is a standard tolerance
+
+ int64_t finalMargin = dynamicJbTimeRtp * JITTER_MULTIPLE;
+ bool isSecondLineBroken = (diffTimeRtp > jitterTimeRtp + finalMargin); // The Maginot line
if (mShowQueue && mShowQueueCnt < 20) {
showCurrentQueue(queue);
- printNowTimeUs(startTime, nowTime, playedTime);
- printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+ printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
mShowQueueCnt++;
}
@@ -149,17 +177,23 @@
return NOT_ENOUGH_DATA;
}
- if (isTooLate200) {
- ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
- }
+ if (isFirstLineBroken) {
+ if (isSecondLineBroken) {
+ ALOGW("buffer too late ... \t Diff in Jb=%lld \t "
+ "Seq# %d \t ExpSeq# %d \t"
+ "JitterMs %lld + (%lld * %.3f)",
+ (long long)(diffTimeRtp),
+ buffer->int32Data(), mNextExpectedSeqNo,
+ (long long)staticJbTimeMs, (long long)dynamicJbTimeMs, JITTER_MULTIPLE + 1);
+ printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
- if (isTooLate300) {
- ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
- (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
- printNowTimeUs(startTime, nowTime, playedTime);
- printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
-
- mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
+ mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTimeRtp);
+ } else {
+ ALOGW("=== WARNING === buffer arrived after %lld + %lld = %lld ms === WARNING === ",
+ (long long)staticJbTimeMs, (long long)dynamicJbTimeMs,
+ (long long)RtpToMs(jitterTimeRtp, clockRate));
+ }
}
if (mNextExpectedSeqNoValid) {
@@ -170,6 +204,7 @@
source->noticeAbandonBuffer(cntRemove);
ALOGW("delete %d of %d buffers", cntRemove, size);
}
+
if (queue->empty()) {
return NOT_ENOUGH_DATA;
}
@@ -565,17 +600,6 @@
msg->post();
}
-inline int64_t AAVCAssembler::findRTPTime(
- const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
- /* If you want to +, -, * rtpTime, recommend to declare rtpTime as int64_t.
- Because rtpTime can be near UINT32_MAX. Beware the overflow. */
- int64_t rtpTime = 0;
- CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
- // If the first overs 2^31 and rtp unders 2^31, the rtp value is overflowed one.
- int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
- return rtpTime | overflowMask;
-}
-
int32_t AAVCAssembler::pickProperSeq(const Queue *queue,
uint32_t first, int64_t play, int64_t jit) {
sp<ABuffer> buffer = *(queue->begin());
@@ -620,16 +644,6 @@
return initSize - queue->size();
}
-inline void AAVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
- ALOGD("start=%lld, now=%lld, played=%lld",
- (long long)start, (long long)now, (long long)play);
-}
-
-inline void AAVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
- ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
- (long long)rtp, (long long)play, (long long)exp, isExp);
-}
-
ARTPAssembler::AssemblyStatus AAVCAssembler::assembleMore(
const sp<ARTPSource> &source) {
AssemblyStatus status = addNALUnit(source);
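
A worked example of the new expiry arithmetic, assuming a 90 kHz video clock, the default 300 ms static jitter time and a 50 ms dynamic jitter estimate:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t clockRate = 90000;                            // 90 kHz video RTP clock
        const int64_t staticJbTimeMs = 300, dynamicJbTimeMs = 50;   // assumed values

        auto MsToRtp = [&](int64_t ms) { return ms * clockRate / 1000; };

        const int64_t baseJbTimeRtp = MsToRtp(staticJbTimeMs);              // 27000
        const int64_t dynamicJbTimeRtp =
                std::min(MsToRtp(dynamicJbTimeMs), MsToRtp(150));           // 4500
        const int64_t jitterTimeRtp = baseJbTimeRtp + dynamicJbTimeRtp;     // 31500

        // A buffer whose expiry time trails playedTimeRtp by more than jitterTimeRtp
        // breaks the first line (warning); by more than jitterTimeRtp plus
        // 1.5 * dynamicJbTimeRtp it breaks the second line and pickProperSeq() runs.
        printf("first line at diff > %lld, second line at diff > %lld (RTP ticks)\n",
               (long long)jitterTimeRtp,
               (long long)(jitterTimeRtp + (int64_t)(dynamicJbTimeRtp * 1.5)));
        return 0;
    }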
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/AAVCAssembler.h
index 9d71e2f..954086c 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.h
+++ b/media/libstagefright/rtsp/AAVCAssembler.h
@@ -63,13 +63,10 @@
void submitAccessUnit();
- inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
size_t avail, float goodRatio);
int32_t deleteUnitUnderSeq(Queue *q, uint32_t seq);
- void printNowTimeUs(int64_t start, int64_t now, int64_t play);
- void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
DISALLOW_EVIL_CONSTRUCTORS(AAVCAssembler);
};
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index 553ea08..cd60203 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -41,6 +41,8 @@
namespace android {
+const double JITTER_MULTIPLE = 1.5f;
+
// static
AHEVCAssembler::AHEVCAssembler(const sp<AMessage> ¬ify)
: mNotifyMsg(notify),
@@ -130,23 +132,51 @@
sp<ABuffer> buffer = *queue->begin();
buffer->meta()->setObject("source", source);
+
int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
- int64_t startTime = source->mFirstSysTime / 1000;
- int64_t nowTime = ALooper::GetNowUs() / 1000;
- int64_t playedTime = nowTime - startTime;
- int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
- const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+ const int64_t startTimeMs = source->mFirstSysTime / 1000;
+ const int64_t nowTimeMs = ALooper::GetNowUs() / 1000;
+ const int64_t staticJbTimeMs = source->getStaticJitterTimeMs();
+ const int64_t dynamicJbTimeMs = source->getDynamicJitterTimeMs();
+ const int64_t clockRate = source->mClockRate;
- int64_t expiredTimeInJb = rtpTime + jitterTime;
- bool isExpired = expiredTimeInJb <= (playedTimeRtp);
- bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
- bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+ int64_t playedTimeMs = nowTimeMs - startTimeMs;
+ int64_t playedTimeRtp = source->mFirstRtpTime + MsToRtp(playedTimeMs, clockRate);
+
+ /**
+ * Based on experience with real commercial network services,
+ * 300 ms is a heuristic upper bound for the jitter buffer time of a video RTP service.
+ */
+
+ /**
+ * The static (base) jitter is the expected propagation delay that we budget for.
+ * Packets that fall outside this budget may be dropped.
+ * A shorter value gives faster response but risks losing packets.
+ * Expected range: 50 ms ~ 1000 ms (300 ms is a practical upper bound)
+ */
+ const int64_t baseJbTimeRtp = MsToRtp(staticJbTimeMs, clockRate);
+ /**
+ * Dynamic jitter is the interarrival-time variance defined in section 6.4.1 of RFC 3550.
+ * We can regard it as a moment-to-moment tolerance.
+ * Expected range: 0 ms ~ 150 ms (practically not above 300 ms)
+ */
+ const int64_t dynamicJbTimeRtp = // Max 150
+ std::min(MsToRtp(dynamicJbTimeMs, clockRate), MsToRtp(150, clockRate));
+ const int64_t jitterTimeRtp = baseJbTimeRtp + dynamicJbTimeRtp; // Total jitter time
+
+ int64_t expiredTimeRtp = rtpTime + jitterTimeRtp; // When does this buffer expire? (T)
+ int64_t diffTimeRtp = playedTimeRtp - expiredTimeRtp;
+ bool isExpired = (diffTimeRtp >= 0); // It's expired if T has already passed
+ bool isFirstLineBroken = (diffTimeRtp > jitterTimeRtp); // (T + jitter) is a standard tolerance
+
+ int64_t finalMargin = dynamicJbTimeRtp * JITTER_MULTIPLE;
+ bool isSecondLineBroken = (diffTimeRtp > jitterTimeRtp + finalMargin); // The Maginot line
if (mShowQueueCnt < 20) {
showCurrentQueue(queue);
- printNowTimeUs(startTime, nowTime, playedTime);
- printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+ printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
mShowQueueCnt++;
}
@@ -157,17 +187,23 @@
return NOT_ENOUGH_DATA;
}
- if (isTooLate200) {
- ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
- }
+ if (isFirstLineBroken) {
+ if (isSecondLineBroken) {
+ ALOGW("buffer too late ... \t Diff in Jb=%lld \t "
+ "Seq# %d \t ExpSeq# %d \t"
+ "JitterMs %lld + (%lld * %.3f)",
+ (long long)(diffTimeRtp),
+ buffer->int32Data(), mNextExpectedSeqNo,
+ (long long)staticJbTimeMs, (long long)dynamicJbTimeMs, JITTER_MULTIPLE + 1);
+ printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
- if (isTooLate300) {
- ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
- (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
- printNowTimeUs(startTime, nowTime, playedTime);
- printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
-
- mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
+ mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTimeRtp);
+ } else {
+ ALOGW("=== WARNING === buffer arrived after %lld + %lld = %lld ms === WARNING === ",
+ (long long)staticJbTimeMs, (long long)dynamicJbTimeMs,
+ (long long)RtpToMs(jitterTimeRtp, clockRate));
+ }
}
if (mNextExpectedSeqNoValid) {
@@ -578,17 +614,6 @@
msg->post();
}
-inline int64_t AHEVCAssembler::findRTPTime(
- const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
- /* If you want to +, -, * rtpTime, recommend to declare rtpTime as int64_t.
- Because rtpTime can be near UINT32_MAX. Beware the overflow. */
- int64_t rtpTime = 0;
- CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
- // If the first overs 2^31 and rtp unders 2^31, the rtp value is overflowed one.
- int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
- return rtpTime | overflowMask;
-}
-
int32_t AHEVCAssembler::pickProperSeq(const Queue *queue,
uint32_t first, int64_t play, int64_t jit) {
sp<ABuffer> buffer = *(queue->begin());
@@ -633,16 +658,6 @@
return initSize - queue->size();
}
-inline void AHEVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
- ALOGD("start=%lld, now=%lld, played=%lld",
- (long long)start, (long long)now, (long long)play);
-}
-
-inline void AHEVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
- ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
- (long long)rtp, (long long)play, (long long)exp, isExp);
-}
-
ARTPAssembler::AssemblyStatus AHEVCAssembler::assembleMore(
const sp<ARTPSource> &source) {
AssemblyStatus status = addNALUnit(source);
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/AHEVCAssembler.h
index bf1cded..e64b661 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.h
+++ b/media/libstagefright/rtsp/AHEVCAssembler.h
@@ -64,13 +64,10 @@
void submitAccessUnit();
- inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
size_t avail, float goodRatio);
int32_t deleteUnitUnderSeq(Queue *queue, uint32_t seq);
- void printNowTimeUs(int64_t start, int64_t now, int64_t play);
- void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
DISALLOW_EVIL_CONSTRUCTORS(AHEVCAssembler);
};
diff --git a/media/libstagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/ARTPAssembler.h
index 191f08e..f959c40 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.h
+++ b/media/libstagefright/rtsp/ARTPAssembler.h
@@ -19,6 +19,9 @@
#define A_RTP_ASSEMBLER_H_
#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <utils/List.h>
#include <utils/RefBase.h>
@@ -61,12 +64,47 @@
bool mShowQueue;
int32_t mShowQueueCnt;
+ // Utility functions
+ inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
+ inline int64_t MsToRtp(int64_t ms, int64_t clockRate);
+ inline int64_t RtpToMs(int64_t rtp, int64_t clockRate);
+ inline void printNowTimeMs(int64_t start, int64_t now, int64_t play);
+ inline void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
+
private:
int64_t mFirstFailureTimeUs;
DISALLOW_EVIL_CONSTRUCTORS(ARTPAssembler);
};
+inline int64_t ARTPAssembler::findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
+ /* If you want to add, subtract, or multiply rtpTime, declare it as int64_t,
+ because rtpTime can be near UINT32_MAX. Beware of overflow. */
+ int64_t rtpTime = 0;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ // If firstRTPTime is above 2^31 and rtpTime is below 2^31, rtpTime has wrapped around.
+ int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
+ return rtpTime | overflowMask;
+}
+
+inline int64_t ARTPAssembler::MsToRtp(int64_t ms, int64_t clockRate) {
+ return ms * clockRate / 1000;
+}
+
+inline int64_t ARTPAssembler::RtpToMs(int64_t rtp, int64_t clockRate) {
+ return rtp * 1000 / clockRate;
+}
+
+inline void ARTPAssembler::printNowTimeMs(int64_t start, int64_t now, int64_t play) {
+ ALOGD("start=%lld, now=%lld, played=%lld",
+ (long long)start, (long long)now, (long long)play);
+}
+
+inline void ARTPAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
+ ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
+ (long long)rtp, (long long)play, (long long)exp, isExp);
+}
+
} // namespace android
#endif // A_RTP_ASSEMBLER_H_
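
A worked example of the overflow handling in findRTPTime(), assuming the first RTP timestamp is already above 2^31 and a later packet's 32-bit timestamp has wrapped:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t firstRTPTime = 0xF0000000u;   // assumed first timestamp, > 2^31
        int64_t rtpTime = 0x00000010;                // wrapped 32-bit timestamp from a packet

        // Same expression as findRTPTime(): promote the wrapped value past 2^32
        // so int64_t comparisons against firstRTPTime keep their ordering.
        int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
        rtpTime |= overflowMask;

        printf("adjusted rtp-time = 0x%llx\n", (long long)rtpTime);   // 0x100000010
        return 0;
    }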
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 61c06d1..9509377 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -70,6 +70,8 @@
bool mIsInjected;
+ // Records the time of the most recent poll
+ int64_t mLastPollTimeUs;
// RTCP Extension for CVO
int mCVOExtMap; // will be set to 0 if cvo is not negotiated in sdp
};
@@ -80,7 +82,7 @@
mLastReceiverReportTimeUs(-1),
mLastBitrateReportTimeUs(-1),
mTargetBitrate(-1),
- mJbTimeMs(300) {
+ mStaticJitterTimeMs(kStaticJitterTimeMs) {
}
ARTPConnection::~ARTPConnection() {
@@ -416,6 +418,7 @@
return;
}
+ int64_t nowUs = ALooper::GetNowUs();
int res = select(maxSocket + 1, &rs, NULL, NULL, &tv);
if (res > 0) {
@@ -425,6 +428,7 @@
++it;
continue;
}
+ it->mLastPollTimeUs = nowUs;
status_t err = OK;
if (FD_ISSET(it->mRTPSocket, &rs)) {
@@ -486,7 +490,6 @@
}
}
- int64_t nowUs = ALooper::GetNowUs();
checkRxBitrate(nowUs);
if (mLastReceiverReportTimeUs <= 0
@@ -720,6 +723,7 @@
buffer->setInt32Data(u16at(&data[2]));
buffer->setRange(payloadOffset, size - payloadOffset);
+ source->putDynamicJitterData(rtpTime, s->mLastPollTimeUs);
source->processRTPPacket(buffer);
return OK;
@@ -1066,7 +1070,7 @@
}
source->setSelfID(mSelfID);
- source->setJbTime(mJbTimeMs > 0 ? mJbTimeMs : 300);
+ source->setStaticJitterTimeMs(mStaticJitterTimeMs);
info->mSources.add(srcId, source);
} else {
source = info->mSources.valueAt(index);
@@ -1086,8 +1090,8 @@
mSelfID = selfID;
}
-void ARTPConnection::setJbTime(const uint32_t jbTimeMs) {
- mJbTimeMs = jbTimeMs;
+void ARTPConnection::setStaticJitterTimeMs(const uint32_t jbTimeMs) {
+ mStaticJitterTimeMs = jbTimeMs;
}
void ARTPConnection::setTargetBitrate(int32_t targetBitrate) {
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index a37ac0e..ea0a374 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -46,7 +46,7 @@
void injectPacket(int index, const sp<ABuffer> &buffer);
void setSelfID(const uint32_t selfID);
- void setJbTime(const uint32_t jbTimeMs);
+ void setStaticJitterTimeMs(const uint32_t jbTimeMs);
void setTargetBitrate(int32_t targetBitrate);
// Creates a pair of UDP datagram sockets bound to adjacent ports
@@ -89,7 +89,7 @@
int32_t mSelfID;
int32_t mTargetBitrate;
- uint32_t mJbTimeMs;
+ uint32_t mStaticJitterTimeMs;
int32_t mCumulativeBytes;
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 3fdf8e4..402dc27 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -48,7 +48,6 @@
mFirstRtpTime(0),
mFirstSysTime(0),
mClockRate(0),
- mJbTimeMs(300), // default jitter buffer time is 300ms.
mFirstSsrc(0),
mHighestNackNumber(0),
mID(id),
@@ -59,6 +58,7 @@
mPrevNumBuffersReceived(0),
mPrevExpectedForRR(0),
mPrevNumBuffersReceivedForRR(0),
+ mStaticJbTimeMs(kStaticJitterTimeMs),
mLastNTPTime(0),
mLastNTPTimeUpdateUs(0),
mIssueFIRRequests(false),
@@ -102,6 +102,11 @@
if (mAssembler != NULL && !mAssembler->initCheck()) {
mAssembler.clear();
}
+
+ int32_t clockRate, numChannels;
+ ASessionDescription::ParseFormatDesc(desc.c_str(), &clockRate, &numChannels);
+ mClockRate = clockRate;
+ mJitterCalc = new JitterCalc(mClockRate);
}
static uint32_t AbsDiff(uint32_t seq1, uint32_t seq2) {
@@ -139,9 +144,8 @@
mBaseSeqNumber = seqNum;
mFirstRtpTime = firstRtpTime;
mFirstSsrc = ssrc;
- ALOGD("first-rtp arrived: first-rtp-time=%d, sys-time=%lld, seq-num=%u, ssrc=%d",
+ ALOGD("first-rtp arrived: first-rtp-time=%u, sys-time=%lld, seq-num=%u, ssrc=%d",
mFirstRtpTime, (long long)mFirstSysTime, mHighestSeqNumber, mFirstSsrc);
- mClockRate = 90000;
mQueue.push_back(buffer);
return true;
}
@@ -327,10 +331,11 @@
data[18] = (mHighestSeqNumber >> 8) & 0xff;
data[19] = mHighestSeqNumber & 0xff;
- data[20] = 0x00; // Interarrival jitter
- data[21] = 0x00;
- data[22] = 0x00;
- data[23] = 0x00;
+ uint32_t jitterTime = getDynamicJitterTimeMs() * mClockRate / 1000;
+ data[20] = jitterTime >> 24; // Interarrival jitter
+ data[21] = (jitterTime >> 16) & 0xff;
+ data[22] = (jitterTime >> 8) & 0xff;
+ data[23] = jitterTime & 0xff;
uint32_t LSR = 0;
uint32_t DLSR = 0;
@@ -508,15 +513,27 @@
kSourceID = selfID;
}
-void ARTPSource::setJbTime(const uint32_t jbTimeMs) {
- mJbTimeMs = jbTimeMs;
-}
-
void ARTPSource::setPeriodicFIR(bool enable) {
ALOGD("setPeriodicFIR %d", enable);
mIssueFIRRequests = enable;
}
+uint32_t ARTPSource::getStaticJitterTimeMs() {
+ return mStaticJbTimeMs;
+}
+
+uint32_t ARTPSource::getDynamicJitterTimeMs() {
+ return mJitterCalc->getJitterMs();
+}
+
+void ARTPSource::setStaticJitterTimeMs(const uint32_t jbTimeMs) {
+ mStaticJbTimeMs = jbTimeMs;
+}
+
+void ARTPSource::putDynamicJitterData(uint32_t timeStamp, int64_t arrivalTime) {
+ mJitterCalc->putData(timeStamp, arrivalTime);
+}
+
bool ARTPSource::isNeedToEarlyNotify() {
uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
int32_t intervalExpectedInNow = expected - mPrevExpected;
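
For reference, the receiver-report hunk above stops writing zeros into the interarrival-jitter field and instead converts the dynamic jitter estimate from milliseconds to RTP clock units before packing it big-endian. A self-contained sketch of that conversion follows; the helper name is illustrative only.

#include <cstdint>

// Convert a jitter estimate in milliseconds to RTP timestamp units and pack it
// big-endian into bytes 20..23 of an RTCP receiver report block, mirroring the
// ARTPSource change above.
static void packInterarrivalJitter(uint8_t* data, uint32_t jitterMs, uint32_t clockRate) {
    uint32_t jitterTime = jitterMs * clockRate / 1000;  // e.g. 20 ms at 90 kHz -> 1800
    data[20] = jitterTime >> 24;
    data[21] = (jitterTime >> 16) & 0xff;
    data[22] = (jitterTime >> 8) & 0xff;
    data[23] = jitterTime & 0xff;
}
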
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index c51fd8a..56011d3 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -27,8 +27,12 @@
#include <map>
+#include "JitterCalculator.h"
+
namespace android {
+const uint32_t kStaticJitterTimeMs = 50; // 50ms
+
struct ABuffer;
struct AMessage;
struct ARTPAssembler;
@@ -64,8 +68,13 @@
void setSeqNumToNACK(uint16_t seqNum, uint16_t mask, uint16_t nowJitterHeadSeqNum);
uint32_t getSelfID();
void setSelfID(const uint32_t selfID);
- void setJbTime(const uint32_t jbTimeMs);
void setPeriodicFIR(bool enable);
+
+ uint32_t getStaticJitterTimeMs();
+ uint32_t getDynamicJitterTimeMs();
+ void setStaticJitterTimeMs(const uint32_t jbTimeMs);
+ void putDynamicJitterData(uint32_t timeStamp, int64_t arrivalTime);
+
bool isNeedToEarlyNotify();
void notifyPktInfo(int32_t bitrate, bool isRegular);
// FIR needs to be sent by missing packet or broken video image.
@@ -78,7 +87,6 @@
int64_t mFirstSysTime;
int32_t mClockRate;
- uint32_t mJbTimeMs;
int32_t mFirstSsrc;
int32_t mHighestNackNumber;
@@ -96,6 +104,9 @@
List<sp<ABuffer> > mQueue;
sp<ARTPAssembler> mAssembler;
+ uint32_t mStaticJbTimeMs;
+ sp<JitterCalc> mJitterCalc;
+
typedef struct infoNACK {
uint16_t seqNum;
uint16_t mask;
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index dcadbaf..34d1788 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -36,6 +36,7 @@
"ARTPWriter.cpp",
"ARTSPConnection.cpp",
"ASessionDescription.cpp",
+ "JitterCalculator.cpp",
"SDPLoader.cpp",
],
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
new file mode 100644
index 0000000..466171c
--- /dev/null
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JitterCalc"
+#include <utils/Log.h>
+
+#include "JitterCalculator.h"
+
+#include <stdlib.h>
+
+namespace android {
+
+JitterCalc::JitterCalc(int32_t clockRate)
+ : mClockRate(clockRate) {
+ init();
+}
+
+void JitterCalc::init() {
+ mJitterValueUs = 0;
+ mLastTimeStamp = 0;
+ mLastArrivalTimeUs = 0;
+}
+
+void JitterCalc::putData(int64_t rtpTime, int64_t arrivalTimeUs) {
+ if (mLastTimeStamp == 0) {
+ mLastTimeStamp = rtpTime;
+ mLastArrivalTimeUs = arrivalTimeUs;
+ }
+
+ const int64_t UINT32_MSB = 0x80000000;
+ int64_t tempLastTimeStamp = mLastTimeStamp;
+ // An RTP timestamp wraps around after UINT32_MAX, so account for wraparound here.
+ int64_t overflowMask = (mLastTimeStamp ^ rtpTime) & UINT32_MSB;
+ rtpTime |= ((overflowMask & ~rtpTime) << 1);
+ tempLastTimeStamp |= ((overflowMask & ~mLastTimeStamp) << 1);
+ ALOGV("Raw stamp \t\t now %llx \t\t last %llx",
+ (long long)rtpTime, (long long)tempLastTimeStamp);
+
+ int64_t diffTimeStampUs = abs(rtpTime - tempLastTimeStamp) * 1000000ll / mClockRate;
+ int64_t diffArrivalUs = abs(arrivalTimeUs - mLastArrivalTimeUs);
+ ALOGV("diffTimeStampus %lld \t\t diffArrivalUs %lld",
+ (long long)diffTimeStampUs, (long long)diffArrivalUs);
+
+ // Section 6.4.1 of RFC 3550 defines this interarrival jitter estimator.
+ mJitterValueUs = (mJitterValueUs * 15 + abs(diffTimeStampUs - diffArrivalUs)) / 16;
+ ALOGV("JitterUs %lld", (long long)mJitterValueUs);
+
+ mLastTimeStamp = (uint32_t)rtpTime;
+ mLastArrivalTimeUs = arrivalTimeUs;
+}
+
+uint32_t JitterCalc::getJitterMs() {
+ return mJitterValueUs / 1000;
+}
+
+} // namespace android
+
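
The smoothing in JitterCalc::putData() is the RFC 3550 Section 6.4.1 interarrival-jitter estimator J += (|D| - J) / 16, written in the algebraically equivalent form (15 * J + |D|) / 16. A standalone sketch with made-up deviation values:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
    int64_t jitterUs = 0;
    // |D| is the difference between the arrival-time delta and the RTP
    // timestamp delta of consecutive packets, expressed here in microseconds.
    const int64_t deviationsUs[] = {4000, 2500, 8000, 1000, 3000};
    for (int64_t d : deviationsUs) {
        jitterUs = (jitterUs * 15 + llabs(d)) / 16;
        printf("jitter = %lld us (%u ms)\n",
               (long long)jitterUs, (unsigned)(jitterUs / 1000));
    }
    return 0;
}
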
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/JitterCalculator.h
new file mode 100644
index 0000000..03e43ff
--- /dev/null
+++ b/media/libstagefright/rtsp/JitterCalculator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_JITTER_CALCULATOR_H_
+
+#define A_JITTER_CALCULATOR_H_
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class JitterCalc : public RefBase {
+private:
+ // RTP timestamp units per second (the stream's clock rate).
+ const int32_t mClockRate;
+
+ uint32_t mJitterValueUs;
+ uint32_t mLastTimeStamp;
+ int64_t mLastArrivalTimeUs;
+
+ void init();
+public:
+ JitterCalc(int32_t clockRate);
+ void putData(int64_t rtpTime, int64_t arrivalTime);
+ uint32_t getJitterMs();
+};
+
+} // namespace android
+
+#endif // A_JITTER_CALCULATOR_H_
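
A hedged usage sketch of the new class as declared above, mirroring how ARTPSource wires it up; the 90 kHz clock rate and the caller function are illustrative only.

#include <utils/RefBase.h>
#include "JitterCalculator.h"

using android::JitterCalc;
using android::sp;

// Feed each packet's RTP timestamp and arrival time, then read back the
// smoothed jitter in milliseconds (e.g. to fill an RTCP receiver report).
void onPacketReceived(const sp<JitterCalc>& jitterCalc,
                      int64_t rtpTime, int64_t arrivalTimeUs) {
    jitterCalc->putData(rtpTime, arrivalTimeUs);
    uint32_t jitterMs = jitterCalc->getJitterMs();
    (void)jitterMs;
}

// Construction, as done in ARTPSource above:
//   sp<JitterCalc> jitterCalc = new JitterCalc(90000 /* video clock rate */);
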
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 20812bf..3562b00 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -4115,9 +4115,13 @@
// ----------------------------------------------------------------------------
-status_t AudioFlinger::onPreTransact(
- TransactionCode code, const Parcel& /* data */, uint32_t /* flags */)
-{
+status_t AudioFlinger::onTransactWrapper(TransactionCode code,
+ const Parcel& data,
+ uint32_t flags,
+ const std::function<status_t()>& delegate) {
+ (void) data;
+ (void) flags;
+
// make sure transactions reserved to AudioPolicyManager do not come from other processes
switch (code) {
case TransactionCode::SET_STREAM_VOLUME:
@@ -4150,6 +4154,7 @@
default:
return INVALID_OPERATION;
}
+ // Fail silently in these cases.
return OK;
default:
break;
@@ -4177,6 +4182,7 @@
default:
return INVALID_OPERATION;
}
+ // Fail silently in these cases.
return OK;
}
} break;
@@ -4218,7 +4224,7 @@
AudioSystem::get_audio_policy_service();
}
- return OK;
+ return delegate();
}
} // namespace android
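
The AudioFlinger hunks above replace the pre-check hook with a wrapper that receives the real transaction as a delegate and only invokes it once the permission checks pass. A minimal sketch of that pattern, with names and error values that are illustrative rather than AudioFlinger's:

#include <functional>

using status_t = int;
static constexpr status_t OK = 0;
static constexpr status_t INVALID_OPERATION = -1;  // placeholder error code

// Reject disallowed callers up front; otherwise run the wrapped transaction.
status_t onTransactWrapperSketch(bool callerAllowed, bool failSilently,
                                 const std::function<status_t()>& delegate) {
    if (!callerAllowed) {
        // Either report an error or fail silently, as the hunks above do for
        // transactions reserved to AudioPolicyManager / audioserver.
        return failSilently ? OK : INVALID_OPERATION;
    }
    return delegate();
}
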
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c66ecb0..4b03d10 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -272,7 +272,8 @@
virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
- status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
+ status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
+ const std::function<status_t()>& delegate) override;
// end of IAudioFlinger interface
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4fc60a4..d42a6ca 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -815,7 +815,7 @@
if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
@@ -835,7 +835,7 @@
if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
- if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low_frequency_2, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
@@ -855,7 +855,7 @@
if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
- if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index edcdf5a..f67ffc1 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -261,12 +261,8 @@
case STRATEGY_PHONE: {
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
if (!devices.isEmpty()) break;
- devices = availableOutputDevices.getFirstDevicesFromTypes({
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
- AUDIO_DEVICE_OUT_WIRED_HEADSET,
- AUDIO_DEVICE_OUT_LINE,
- AUDIO_DEVICE_OUT_USB_HEADSET,
- AUDIO_DEVICE_OUT_USB_DEVICE});
+ devices = availableOutputDevices.getFirstDevicesFromTypes(
+ getLastRemovableMediaDevices());
if (!devices.isEmpty()) break;
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_EARPIECE);
} break;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 7294a58..b4efd1a 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -80,18 +80,23 @@
audio_format_t audioFormat = getFormat();
- // FLOAT is not directly supported by the HAL so ask for a 24-bit.
- bool isHighResRequested = audioFormat == AUDIO_FORMAT_PCM_FLOAT
- || audioFormat == AUDIO_FORMAT_PCM_32_BIT;
- if (isHighResRequested) {
+ // FLOAT is not directly supported by the HAL, so ask for 32-bit PCM instead.
+ if (audioFormat == AUDIO_FORMAT_PCM_FLOAT) {
// TODO remove these logs when finished debugging.
- ALOGD("%s() change format from %d to 24_BIT_PACKED", __func__, audioFormat);
- audioFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ ALOGD("%s() change format from %d to 32_BIT", __func__, audioFormat);
+ audioFormat = AUDIO_FORMAT_PCM_32_BIT;
}
result = openWithFormat(audioFormat);
if (result == AAUDIO_OK) return result;
+ if (result == AAUDIO_ERROR_UNAVAILABLE && audioFormat == AUDIO_FORMAT_PCM_32_BIT) {
+ ALOGD("%s() 32_BIT failed, perhaps due to format. Try again with 24_BIT_PACKED", __func__);
+ audioFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ result = openWithFormat(audioFormat);
+ }
+ if (result == AAUDIO_OK) return result;
+
// TODO The HAL and AudioFlinger should be recommending a format if the open fails.
// But that recommendation is not propagating back from the HAL.
// So for now just try something very likely to work.