Merge "transcoding: change max back-to-back running time to 2min" into sc-dev
diff --git a/apex/manifest.json b/apex/manifest.json
index b7d8fc8..c7e56be 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media",
- "version": 309999900,
+ "version": 309999910,
"requireNativeLibs": [
"libandroid.so",
"libbinder_ndk.so",
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index e20d867..d36e914 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media.swcodec",
- "version": 309999900,
+ "version": 309999910,
"requireNativeLibs": [
":sphal"
]
diff --git a/media/codec2/components/flac/C2SoftFlacDec.cpp b/media/codec2/components/flac/C2SoftFlacDec.cpp
index e70c289..49892a4 100644
--- a/media/codec2/components/flac/C2SoftFlacDec.cpp
+++ b/media/codec2/components/flac/C2SoftFlacDec.cpp
@@ -221,6 +221,11 @@
uint8_t *input = const_cast<uint8_t *>(rView.data() + inOffset);
if (codecConfig) {
+ if (mHasStreamInfo) {
+ ALOGV("Ignore Codec Config");
+ fillEmptyWork(work);
+ return;
+ }
status_t decoderErr = mFLACDecoder->parseMetadata(input, inSize);
if (decoderErr != OK && decoderErr != WOULD_BLOCK) {
ALOGE("process: FLACDecoder parseMetaData returns error %d", decoderErr);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 2599ef6..5789532 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -38,6 +38,7 @@
#include <media/omx/1.0/WOmxNode.h>
#include <media/openmax/OMX_Core.h>
#include <media/openmax/OMX_IndexExt.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/omx/1.0/WGraphicBufferSource.h>
#include <media/stagefright/omx/OmxGraphicBufferSource.h>
#include <media/stagefright/CCodec.h>
@@ -521,6 +522,44 @@
}
}
+void AmendOutputFormatWithCodecSpecificData(
+ const uint8_t *data, size_t size, const std::string &mediaType,
+ const sp<AMessage> &outputFormat) {
+ if (mediaType == MIMETYPE_VIDEO_AVC) {
+ // Codec specific data should be SPS and PPS in a single buffer,
+ // each prefixed by a startcode (0x00 0x00 0x00 0x01).
+ // We separate the two and put them into the output format
+ // under the keys "csd-0" and "csd-1".
+
+ unsigned csdIndex = 0;
+
+ const uint8_t *nalStart;
+ size_t nalSize;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ sp<ABuffer> csd = new ABuffer(nalSize + 4);
+ memcpy(csd->data(), "\x00\x00\x00\x01", 4);
+ memcpy(csd->data() + 4, nalStart, nalSize);
+
+ outputFormat->setBuffer(
+ AStringPrintf("csd-%u", csdIndex).c_str(), csd);
+
+ ++csdIndex;
+ }
+
+ if (csdIndex != 2) {
+ ALOGW("Expected two NAL units from AVC codec config, but %u found",
+ csdIndex);
+ }
+ } else {
+ // For everything else we just stash the codec specific data into
+ // the output format as a single piece of csd under "csd-0".
+ sp<ABuffer> csd = new ABuffer(size);
+ memcpy(csd->data(), data, size);
+ csd->setRange(0, size);
+ outputFormat->setBuffer("csd-0", csd);
+ }
+}
+
} // namespace
// CCodec::ClientListener
@@ -2170,7 +2209,7 @@
}
// handle configuration changes in work done
- std::unique_ptr<C2Param> initData;
+ std::shared_ptr<const C2StreamInitDataInfo::output> initData;
sp<AMessage> outputFormat = nullptr;
{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
@@ -2249,13 +2288,15 @@
config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
}
if (initDataWatcher.hasChanged()) {
- initData = C2Param::Copy(*initDataWatcher.update().get());
+ initData = initDataWatcher.update();
+ AmendOutputFormatWithCodecSpecificData(
+ initData->m.value, initData->flexCount(), config->mCodingMediaType,
+ config->mOutputFormat);
}
outputFormat = config->mOutputFormat;
}
mChannel->onWorkDone(
- std::move(work), outputFormat,
- initData ? (C2StreamInitDataInfo::output *)initData.get() : nullptr);
+ std::move(work), outputFormat, initData ? initData.get() : nullptr);
break;
}
case kWhatWatch: {
@@ -2400,6 +2441,11 @@
C2String compName;
{
Mutexed<State>::Locked state(mState);
+ if (!state->comp) {
+ ALOGD("previous call to %s exceeded timeout "
+ "and the component is already released", name.c_str());
+ return;
+ }
compName = state->comp->getName();
}
ALOGW("[%s] previous call to %s exceeded timeout", compName.c_str(), name.c_str());
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 416884e..b4e4c5d 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2587,7 +2587,9 @@
case FOURCC("dvcC"):
case FOURCC("dvvC"): {
- CHECK_EQ(chunk_data_size, 24);
+ if (chunk_data_size != 24) {
+ return ERROR_MALFORMED;
+ }
auto buffer = heapbuffer<uint8_t>(chunk_data_size);
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 7656307..389b73f 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -749,10 +749,20 @@
AudioFlingerServerAdapter::AudioFlingerServerAdapter(
const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {}
-status_t AudioFlingerServerAdapter::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+status_t AudioFlingerServerAdapter::onTransact(uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
uint32_t flags) {
- return mDelegate->onPreTransact(static_cast<Delegate::TransactionCode>(code), data, flags)
- ?: BnAudioFlingerService::onTransact(code, data, reply, flags);
+ return mDelegate->onTransactWrapper(static_cast<Delegate::TransactionCode>(code),
+ data,
+ flags,
+ [&] {
+ return BnAudioFlingerService::onTransact(
+ code,
+ data,
+ reply,
+ flags);
+ });
}
status_t AudioFlingerServerAdapter::dump(int fd, const Vector<String16>& args) {
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 3a5d164..3a04569 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -516,18 +516,22 @@
};
/**
- * And optional hook, called on every transaction, before unparceling the data and
- * dispatching to the respective method. Useful for bulk operations, such as logging or
- * permission checks.
- * If an error status is returned, the transaction will return immediately and will not be
- * processed.
+ * An optional hook, called on every transaction, allowing additional operations to be
+ * performed before/after the unparceling of the data and dispatching to the respective
+ * method. Useful for bulk operations, such as logging or permission checks.
+ * The implementer is responsible for invoking the provided delegate function, which is the
+ * actual onTransact(), unless an error occurs.
+ * By default, this is just a pass-through to the delegate.
*/
- virtual status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) {
+ virtual status_t onTransactWrapper(TransactionCode code,
+ const Parcel& data,
+ uint32_t flags,
+ const std::function<status_t()>& delegate) {
(void) code;
(void) data;
(void) flags;
- return OK;
- };
+ return delegate();
+ }
/**
* An optional hook for implementing diagnostics dumping.
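The new hook replaces onPreTransact(): rather than only vetoing a transaction up front, the implementer now brackets the real dispatch and must call the supplied delegate itself. A minimal standalone sketch of that wrapper pattern, with plain C++ stand-ins (Status, Wrapper, a dummy transaction code) rather than the Binder/AudioFlinger types:

```cpp
#include <chrono>
#include <cstdio>
#include <functional>

using Status = int;
constexpr Status OK = 0;
constexpr Status INVALID_OPERATION = -38;

struct Wrapper {
    // Default behaviour: pure pass-through to the delegate, as in IAudioFlinger.h.
    virtual Status onTransactWrapper(int code, const std::function<Status()>& delegate) {
        (void)code;
        return delegate();
    }
    virtual ~Wrapper() = default;
};

struct TimingWrapper : Wrapper {
    // Example override: reject one code outright, time everything else.
    Status onTransactWrapper(int code, const std::function<Status()>& delegate) override {
        if (code == 42) return INVALID_OPERATION;        // veto before dispatch
        auto t0 = std::chrono::steady_clock::now();
        Status s = delegate();                           // the real onTransact()
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::steady_clock::now() - t0).count();
        std::printf("code %d -> status %d in %lld us\n", code, s, (long long)us);
        return s;
    }
};

int main() {
    TimingWrapper w;
    w.onTransactWrapper(7, [] { return OK; });           // dispatched and timed
    std::printf("vetoed: %d\n", w.onTransactWrapper(42, [] { return OK; }));
    return 0;
}
```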
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index b40317f..e96c041 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -33,7 +33,6 @@
relative_install_path: "soundfx",
cflags: [
- "-DBUILD_FLOAT",
"-fvisibility=hidden",
"-Wall",
"-Werror",
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 99ac4f5..5ca5525 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -31,13 +31,8 @@
// Do not submit with DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER defined, strictly for testing
//#define DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER 0
-#ifdef BUILD_FLOAT
#define MINUS_3_DB_IN_FLOAT 0.70710678f // -3dB = 0.70710678f
const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_FLOAT;
-#else
-#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
-const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_16_BIT;
-#endif
// subset of possible audio_channel_mask_t values, and AUDIO_CHANNEL_OUT_* renamed to CHANNEL_MASK_*
typedef enum {
@@ -88,7 +83,7 @@
// number of effects in this library
const int kNbEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
-#ifdef BUILD_FLOAT
+
static LVM_FLOAT clamp_float(LVM_FLOAT a) {
if (a > 1.0f) {
return 1.0f;
@@ -100,7 +95,7 @@
return a;
}
}
-#endif
+
/*----------------------------------------------------------------------------
* Test code
*--------------------------------------------------------------------------*/
@@ -303,106 +298,6 @@
return -EINVAL;
}
-#ifndef BUILD_FLOAT
-/*--- Effect Control Interface Implementation ---*/
-
-static int Downmix_Process(effect_handle_t self,
- audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
-
- downmix_object_t *pDownmixer;
- int16_t *pSrc, *pDst;
- downmix_module_t *pDwmModule = (downmix_module_t *)self;
-
- if (pDwmModule == NULL) {
- return -EINVAL;
- }
-
- if (inBuffer == NULL || inBuffer->raw == NULL ||
- outBuffer == NULL || outBuffer->raw == NULL ||
- inBuffer->frameCount != outBuffer->frameCount) {
- return -EINVAL;
- }
-
- pDownmixer = (downmix_object_t*) &pDwmModule->context;
-
- if (pDownmixer->state == DOWNMIX_STATE_UNINITIALIZED) {
- ALOGE("Downmix_Process error: trying to use an uninitialized downmixer");
- return -EINVAL;
- } else if (pDownmixer->state == DOWNMIX_STATE_INITIALIZED) {
- ALOGE("Downmix_Process error: trying to use a non-configured downmixer");
- return -ENODATA;
- }
-
- pSrc = inBuffer->s16;
- pDst = outBuffer->s16;
- size_t numFrames = outBuffer->frameCount;
-
- const bool accumulate =
- (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
-
- switch(pDownmixer->type) {
-
- case DOWNMIX_TYPE_STRIP:
- if (accumulate) {
- while (numFrames) {
- pDst[0] = clamp16(pDst[0] + pSrc[0]);
- pDst[1] = clamp16(pDst[1] + pSrc[1]);
- pSrc += pDownmixer->input_channel_count;
- pDst += 2;
- numFrames--;
- }
- } else {
- while (numFrames) {
- pDst[0] = pSrc[0];
- pDst[1] = pSrc[1];
- pSrc += pDownmixer->input_channel_count;
- pDst += 2;
- numFrames--;
- }
- }
- break;
-
- case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
- // bypass the optimized downmix routines for the common formats
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
- return -EINVAL;
- }
- break;
-#endif
- // optimize for the common formats
- switch((downmix_input_channel_mask_t)downmixInputChannelMask) {
- case CHANNEL_MASK_QUAD_BACK:
- case CHANNEL_MASK_QUAD_SIDE:
- Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
- break;
- case CHANNEL_MASK_5POINT1_BACK:
- case CHANNEL_MASK_5POINT1_SIDE:
- Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
- break;
- case CHANNEL_MASK_7POINT1:
- Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
- break;
- default:
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
- return -EINVAL;
- }
- break;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-#else /*BUILD_FLOAT*/
/*--- Effect Control Interface Implementation ---*/
static int Downmix_Process(effect_handle_t self,
@@ -503,7 +398,6 @@
return 0;
}
-#endif
static int Downmix_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
void *pCmdData, uint32_t *replySize, void *pReplyData) {
@@ -940,35 +834,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is RL
- // sample at index 3 is RR
- if (accumulate) {
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp16(pDst[0] + ((pSrc[0] + pSrc[2]) >> 1));
- // FR + RR
- pDst[1] = clamp16(pDst[1] + ((pSrc[1] + pSrc[3]) >> 1));
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp16((pSrc[0] + pSrc[2]) >> 1);
- // FR + RR
- pDst[1] = clamp16((pSrc[1] + pSrc[3]) >> 1);
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
// sample at index 0 is FL
// sample at index 1 is FR
@@ -996,7 +861,6 @@
}
}
}
-#endif
/*----------------------------------------------------------------------------
* Downmix_foldFrom5Point1()
@@ -1015,52 +879,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
- // accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
// sample at index 0 is FL
@@ -1105,7 +923,6 @@
}
}
}
-#endif
/*----------------------------------------------------------------------------
* Downmix_foldFrom7Point1()
@@ -1124,54 +941,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // sample at index 6 is SL
- // sample at index 7 is SR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + SL + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + SR + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
- //accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
- + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
- // FL + centerPlusLfeContrib + SL + RL
- lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
- // FR + centerPlusLfeContrib + SR + RR
- rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- }
-}
-#else
void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
// sample at index 0 is FL
@@ -1218,7 +987,7 @@
}
}
}
-#endif
+
/*----------------------------------------------------------------------------
* Downmix_foldGeneric()
*----------------------------------------------------------------------------
@@ -1245,99 +1014,6 @@
*
*----------------------------------------------------------------------------
*/
-#ifndef BUILD_FLOAT
-bool Downmix_foldGeneric(
- uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-
- if (!Downmix_validChannelMask(mask)) {
- return false;
- }
-
- const bool hasSides = (mask & kSides) != 0;
- const bool hasBacks = (mask & kBacks) != 0;
-
- const int numChan = audio_channel_count_from_out_mask(mask);
- const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
- const bool hasLFE =
- ((mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY);
- const bool hasBC = ((mask & AUDIO_CHANNEL_OUT_BACK_CENTER) == AUDIO_CHANNEL_OUT_BACK_CENTER);
- // compute at what index each channel is: samples will be in the following order:
- // FL FR FC LFE BL BR BC SL SR
- // when a channel is not present, its index is set to the same as the index of the preceding
- // channel
- const int indexFC = hasFC ? 2 : 1; // front center
- const int indexLFE = hasLFE ? indexFC + 1 : indexFC; // low frequency
- const int indexBL = hasBacks ? indexLFE + 1 : indexLFE; // back left
- const int indexBR = hasBacks ? indexBL + 1 : indexBL; // back right
- const int indexBC = hasBC ? indexBR + 1 : indexBR; // back center
- const int indexSL = hasSides ? indexBC + 1 : indexBC; // side left
- const int indexSR = hasSides ? indexSL + 1 : indexSL; // side right
-
- int32_t lt, rt, centersLfeContrib; // samples in Q19.12 format
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- centersLfeContrib = 0;
- if (hasFC) { centersLfeContrib += pSrc[indexFC]; }
- if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
- if (hasBC) { centersLfeContrib += pSrc[indexBC]; }
- centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
- // always has FL/FR
- lt = (pSrc[0] << 12);
- rt = (pSrc[1] << 12);
- // mix in sides and backs
- if (hasSides) {
- lt += pSrc[indexSL] << 12;
- rt += pSrc[indexSR] << 12;
- }
- if (hasBacks) {
- lt += pSrc[indexBL] << 12;
- rt += pSrc[indexBR] << 12;
- }
- lt += centersLfeContrib;
- rt += centersLfeContrib;
- // accumulate in destination
- pDst[0] = clamp16(pDst[0] + (lt >> 13));
- pDst[1] = clamp16(pDst[1] + (rt >> 13));
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- } else {
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- centersLfeContrib = 0;
- if (hasFC) { centersLfeContrib += pSrc[indexFC]; }
- if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
- if (hasBC) { centersLfeContrib += pSrc[indexBC]; }
- centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
- // always has FL/FR
- lt = (pSrc[0] << 12);
- rt = (pSrc[1] << 12);
- // mix in sides and backs
- if (hasSides) {
- lt += pSrc[indexSL] << 12;
- rt += pSrc[indexSR] << 12;
- }
- if (hasBacks) {
- lt += pSrc[indexBL] << 12;
- rt += pSrc[indexBR] << 12;
- }
- lt += centersLfeContrib;
- rt += centersLfeContrib;
- // store in destination
- pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
- pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- }
- return true;
-}
-#else
bool Downmix_foldGeneric(
uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
@@ -1429,4 +1105,3 @@
}
return true;
}
-#endif
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
index c1be0f2..679a855 100644
--- a/media/libeffects/downmix/EffectDownmix.h
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -27,9 +27,8 @@
*/
#define DOWNMIX_OUTPUT_CHANNELS AUDIO_CHANNEL_OUT_STEREO
-#ifdef BUILD_FLOAT
#define LVM_FLOAT float
-#endif
+
typedef enum {
DOWNMIX_STATE_UNINITIALIZED,
DOWNMIX_STATE_INITIALIZED,
@@ -97,18 +96,10 @@
int Downmix_Reset(downmix_object_t *pDownmixer, bool init);
int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-#ifdef BUILD_FLOAT
void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
bool Downmix_foldGeneric(
uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
-#else
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-bool Downmix_foldGeneric(
- uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-#endif
#endif /*ANDROID_EFFECTDOWNMIX_H_*/
diff --git a/media/libmediaformatshaper/CodecSeeding.cpp b/media/libmediaformatshaper/CodecSeeding.cpp
index 2f2e29d..cc241f4 100644
--- a/media/libmediaformatshaper/CodecSeeding.cpp
+++ b/media/libmediaformatshaper/CodecSeeding.cpp
@@ -44,8 +44,7 @@
} preloadTunings_t;
/*
- * 240 = 2.4 bits per pixel-per-second == 5mbps@1080, 2.3mbps@720p, which is about where
- * we want our initial floor for now.
+ * bpp == bits per pixel per second, for 30fps.
*/
static preloadTuning_t featuresAvc[] = {
@@ -69,11 +68,12 @@
{true, "vq-target-bpp-1080p", "1.50"},
{true, "vq-target-bpp-720p", "1.80"},
{true, "vq-target-bpp-540p", "2.10"},
+ {true, "vq-target-bpp-480p", "2.30"},
{true, "vq-target-qpmax", "-1"},
{true, "vq-target-qpmax-1080p", "45"},
- {true, "vq-target-qpmax-720p", "43"},
- {true, "vq-target-qpmax-540p", "42"},
- {true, "vq-target-qpmax-480p", "39"},
+ {true, "vq-target-qpmax-720p", "44"},
+ {true, "vq-target-qpmax-540p", "43"},
+ {true, "vq-target-qpmax-480p", "42"},
{true, "vq-bitrate-phaseout", "1.75"},
{true, "vq-boost-missing-qp", "0.20"},
{true, nullptr, 0}
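For context on the vq-target-bpp-* values above: per the amended comment, the targets are bits per pixel at the 30 fps reference, so the implied bitrate floor is roughly bpp x width x height. A small arithmetic sketch under that reading (the 854-wide 480p frame size is an assumption, and this is not media formatshaper code):

```cpp
#include <cstdio>

// Bits per second implied by a bits-per-pixel target at the 30 fps reference.
static double targetBitrateBps(double bpp, int width, int height) {
    return bpp * width * height;
}

int main() {
    std::printf("2.40 bpp @ 1080p ~= %.2f Mbps\n", targetBitrateBps(2.40, 1920, 1080) / 1e6);
    std::printf("1.80 bpp @ 720p  ~= %.2f Mbps\n", targetBitrateBps(1.80, 1280, 720) / 1e6);
    std::printf("2.30 bpp @ 480p  ~= %.2f Mbps\n", targetBitrateBps(2.30, 854, 480) / 1e6);
    return 0;
}
```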
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 5b60bbf..2c1f158 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -302,7 +302,7 @@
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
mCodec = MediaCodec::CreateByType(
- mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid);
+ mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid, format);
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8a34d1a..57bdba0 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -638,12 +638,20 @@
sp<MediaCodec> MediaCodec::CreateByType(
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
uid_t uid) {
+ sp<AMessage> format;
+ return CreateByType(looper, mime, encoder, err, pid, uid, format);
+}
+
+sp<MediaCodec> MediaCodec::CreateByType(
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
+ uid_t uid, sp<AMessage> format) {
Vector<AString> matchingCodecs;
MediaCodecList::findMatchingCodecs(
mime.c_str(),
encoder,
0,
+ format,
&matchingCodecs);
if (err != NULL) {
@@ -1579,16 +1587,12 @@
// the reclaimResource call doesn't consider the requester's buffer size for now.
resources.push_back(MediaResource::GraphicMemoryResource(1));
for (int i = 0; i <= kMaxRetry; ++i) {
- if (i > 0) {
- // Don't try to reclaim resource for the first time.
- if (!mResourceManagerProxy->reclaimResource(resources)) {
- break;
- }
- }
-
sp<AMessage> response;
err = PostAndAwaitResponse(msg, &response);
if (err != OK && err != INVALID_OPERATION) {
+ if (isResourceError(err) && !mResourceManagerProxy->reclaimResource(resources)) {
+ break;
+ }
// MediaCodec now set state to UNINITIALIZED upon any fatal error.
// To maintain backward-compatibility, do a reset() to put codec
// back into INITIALIZED state.
@@ -4329,7 +4333,8 @@
// format as necessary.
int32_t flags = 0;
(void) buffer->meta()->findInt32("flags", &flags);
- if ((flags & BUFFER_FLAG_CODECCONFIG) && !(mFlags & kFlagIsSecure)) {
+ if ((flags & BUFFER_FLAG_CODECCONFIG) && !(mFlags & kFlagIsSecure)
+ && !mOwnerName.startsWith("codec2::")) {
status_t err =
amendOutputFormatWithCodecSpecificData(buffer);
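The retry change above stops reclaiming resources speculatively before every attempt; reclaim now happens only after an attempt fails with a resource error. A simplified standalone sketch of that flow, with hypothetical tryInit/reclaim callables standing in for the MediaCodec init message and the resource manager proxy:

```cpp
#include <cstdio>
#include <functional>

constexpr int OK = 0;
constexpr int ERROR_INSUFFICIENT_RESOURCE = -1;
constexpr int kMaxRetry = 2;

static int initWithRetry(const std::function<int()>& tryInit,
                         const std::function<bool()>& reclaim) {
    int err = OK;
    for (int i = 0; i <= kMaxRetry; ++i) {
        err = tryInit();
        if (err == OK) break;
        // Only a resource failure is worth a reclaim + retry; any other error
        // (or a failed reclaim) ends the loop.
        if (err != ERROR_INSUFFICIENT_RESOURCE || !reclaim()) break;
    }
    return err;
}

int main() {
    int attempts = 0;
    int err = initWithRetry(
        [&] { return ++attempts < 2 ? ERROR_INSUFFICIENT_RESOURCE : OK; },
        [] { std::printf("reclaiming codec resources...\n"); return true; });
    std::printf("attempts=%d err=%d\n", attempts, err);
    return 0;
}
```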
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 799ca0d..6243828 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -44,6 +44,7 @@
#include <cutils/properties.h>
#include <algorithm>
+#include <regex>
namespace android {
@@ -348,6 +349,14 @@
void MediaCodecList::findMatchingCodecs(
const char *mime, bool encoder, uint32_t flags,
Vector<AString> *matches) {
+ sp<AMessage> format; // initializes as clear/null
+ findMatchingCodecs(mime, encoder, flags, format, matches);
+}
+
+//static
+void MediaCodecList::findMatchingCodecs(
+ const char *mime, bool encoder, uint32_t flags, sp<AMessage> format,
+ Vector<AString> *matches) {
matches->clear();
const sp<IMediaCodecList> list = getInstance();
@@ -368,14 +377,22 @@
const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
CHECK(info != nullptr);
+
AString componentName = info->getCodecName();
+ if (!codecHandlesFormat(mime, info, format)) {
+ ALOGV("skipping codec '%s' which doesn't satisfy format %s",
+ componentName.c_str(), format->debugString(2).c_str());
+ continue;
+ }
+
if ((flags & kHardwareCodecsOnly) && isSoftwareCodec(componentName)) {
ALOGV("skipping SW codec '%s'", componentName.c_str());
- } else {
- matches->push(componentName);
- ALOGV("matching '%s'", componentName.c_str());
+ continue;
}
+
+ matches->push(componentName);
+ ALOGV("matching '%s'", componentName.c_str());
}
if (flags & kPreferSoftwareCodecs ||
@@ -384,4 +401,118 @@
}
}
+/*static*/
+bool MediaCodecList::codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info,
+ sp<AMessage> format) {
+
+ if (format == nullptr) {
+ ALOGD("codecHandlesFormat: no format, so no extra checks");
+ return true;
+ }
+
+ sp<MediaCodecInfo::Capabilities> capabilities = info->getCapabilitiesFor(mime);
+
+ // ... no capabilities listed means 'handle it all'
+ if (capabilities == nullptr) {
+ ALOGD("codecHandlesFormat: no capabilities for refinement");
+ return true;
+ }
+
+ const sp<AMessage> &details = capabilities->getDetails();
+
+ // if parsing the capabilities fails, ignore this particular codec
+ // currently video-centric evaluation
+ //
+ // TODO: like to make it handle the same set of properties from
+ // MediaCodecInfo::isFormatSupported()
+ // not yet done here are:
+ // profile, level, bitrate, features,
+
+ bool isVideo = false;
+ if (strncmp(mime, "video/", 6) == 0) {
+ isVideo = true;
+ }
+
+ if (isVideo) {
+ int width = -1;
+ int height = -1;
+
+ if (format->findInt32("height", &height) && format->findInt32("width", &width)) {
+
+ // is it within the supported size range of the codec?
+ AString sizeRange;
+ AString minSize,maxSize;
+ AString minWidth, minHeight;
+ AString maxWidth, maxHeight;
+ if (!details->findString("size-range", &sizeRange)
+ || !splitString(sizeRange, "-", &minSize, &maxSize)) {
+ ALOGW("Unable to parse size-range from codec info");
+ return false;
+ }
+ if (!splitString(minSize, "x", &minWidth, &minHeight)) {
+ if (!splitString(minSize, "*", &minWidth, &minHeight)) {
+ ALOGW("Unable to parse size-range/min-size from codec info");
+ return false;
+ }
+ }
+ if (!splitString(maxSize, "x", &maxWidth, &maxHeight)) {
+ if (!splitString(maxSize, "*", &maxWidth, &maxHeight)) {
+ ALOGW("Unable to fully parse size-range/max-size from codec info");
+ return false;
+ }
+ }
+
+ // strtol() returns 0 if unable to parse a number, which works for our later tests
+ int minW = strtol(minWidth.c_str(), NULL, 10);
+ int minH = strtol(minHeight.c_str(), NULL, 10);
+ int maxW = strtol(maxWidth.c_str(), NULL, 10);
+ int maxH = strtol(maxHeight.c_str(), NULL, 10);
+
+ if (minW == 0 || minH == 0 || maxW == 0 || maxH == 0) {
+ ALOGW("Unable to parse values from size-range from codec info");
+ return false;
+ }
+
+ // finally, comparison time
+ if (width < minW || width > maxW || height < minH || height > maxH) {
+ ALOGV("format %dx%d outside of allowed %dx%d-%dx%d",
+ width, height, minW, minH, maxW, maxH);
+ // at this point, it's a rejection, UNLESS
+ // the codec allows swapping width and height
+ int32_t swappable;
+ if (!details->findInt32("feature-can-swap-width-height", &swappable)
+ || swappable == 0) {
+ return false;
+ }
+ // NB: deliberate comparison of height vs width limits (and width vs height)
+ if (height < minW || height > maxW || width < minH || width > maxH) {
+ return false;
+ }
+ }
+
+ // @ 'alignment' [e.g. "2x2" which tells us that both dimensions must be even]
+ // no alignment == we're ok with anything
+ AString alignment, alignWidth, alignHeight;
+ if (details->findString("alignment", &alignment)) {
+ if (splitString(alignment, "x", &alignWidth, &alignHeight) ||
+ splitString(alignment, "*", &alignWidth, &alignHeight)) {
+ int wAlign = strtol(alignWidth.c_str(), NULL, 10);
+ int hAlign = strtol(alignHeight.c_str(), NULL, 10);
+ // strtol() returns 0 if failing to parse, treat as "no restriction"
+ if (wAlign > 0 && hAlign > 0) {
+ if ((width % wAlign) != 0 || (height % hAlign) != 0) {
+ ALOGV("format dimensions %dx%d not aligned to %dx%d",
+ width, height, wAlign, hAlign);
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // haven't found a reason to discard this one
+ return true;
+}
+
} // namespace android
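The size-range handling in codecHandlesFormat() above parses a "min-max" capability string and rejects formats outside it unless the codec advertises feature-can-swap-width-height. A standalone sketch of that check, using sscanf in place of the AString splitting helpers (not the MediaCodecList code itself):

```cpp
#include <cstdio>

static bool sizeInRange(const char* sizeRange, int width, int height, bool canSwap) {
    int minW, minH, maxW, maxH;
    // Capability strings use either 'x' or '*' between width and height.
    if (std::sscanf(sizeRange, "%dx%d-%dx%d", &minW, &minH, &maxW, &maxH) != 4 &&
        std::sscanf(sizeRange, "%d*%d-%d*%d", &minW, &minH, &maxW, &maxH) != 4) {
        return false;  // unparseable capability -> reject, as the new code does
    }
    auto fits = [&](int w, int h) {
        return w >= minW && w <= maxW && h >= minH && h <= maxH;
    };
    // NB: the swapped test deliberately compares height against the width
    // limits and vice versa, matching the comment in codecHandlesFormat().
    return fits(width, height) || (canSwap && fits(height, width));
}

int main() {
    const char* range = "96x96-1920x1088";
    std::printf("1280x720            -> %d\n", sizeInRange(range, 1280, 720, false));   // 1
    std::printf("1088x1920 (no swap) -> %d\n", sizeInRange(range, 1088, 1920, false));  // 0
    std::printf("1088x1920 (swap ok) -> %d\n", sizeInRange(range, 1088, 1920, true));   // 1
    return 0;
}
```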
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 0584054..3517bae 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -104,6 +104,10 @@
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
pid_t pid = kNoPid, uid_t uid = kNoUid);
+ static sp<MediaCodec> CreateByType(
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err,
+ pid_t pid, uid_t uid, sp<AMessage> format);
+
static sp<MediaCodec> CreateByComponentName(
const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
pid_t pid = kNoPid, uid_t uid = kNoUid);
@@ -400,6 +404,7 @@
std::string mLastReplyOrigin;
std::vector<sp<AMessage>> mDeferredMessages;
uint32_t mFlags;
+ int64_t mPresentationTimeUs = 0;
status_t mStickyError;
sp<Surface> mSurface;
SoftwareRenderer *mSoftRenderer;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index 78d1005..3cf455c 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -75,6 +75,16 @@
uint32_t flags,
Vector<AString> *matchingCodecs);
+ // add optional format, to further refine matching codecs
+ static void findMatchingCodecs(
+ const char *mime,
+ bool createEncoder,
+ uint32_t flags,
+ sp<AMessage> format,
+ Vector<AString> *matchingCodecs);
+
+ static bool codecHandlesFormat(const char *mime, sp<MediaCodecInfo> info, sp<AMessage> format);
+
static bool isSoftwareCodec(const AString &componentName);
private:
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 20812bf..3562b00 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -4115,9 +4115,13 @@
// ----------------------------------------------------------------------------
-status_t AudioFlinger::onPreTransact(
- TransactionCode code, const Parcel& /* data */, uint32_t /* flags */)
-{
+status_t AudioFlinger::onTransactWrapper(TransactionCode code,
+ const Parcel& data,
+ uint32_t flags,
+ const std::function<status_t()>& delegate) {
+ (void) data;
+ (void) flags;
+
// make sure transactions reserved to AudioPolicyManager do not come from other processes
switch (code) {
case TransactionCode::SET_STREAM_VOLUME:
@@ -4150,6 +4154,7 @@
default:
return INVALID_OPERATION;
}
+ // Fail silently in these cases.
return OK;
default:
break;
@@ -4177,6 +4182,7 @@
default:
return INVALID_OPERATION;
}
+ // Fail silently in these cases.
return OK;
}
} break;
@@ -4218,7 +4224,7 @@
AudioSystem::get_audio_policy_service();
}
- return OK;
+ return delegate();
}
} // namespace android
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c66ecb0..4b03d10 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -272,7 +272,8 @@
virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
- status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
+ status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
+ const std::function<status_t()>& delegate) override;
// end of IAudioFlinger interface
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 51f3032..d42a6ca 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -815,7 +815,7 @@
if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
- if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
@@ -828,12 +828,16 @@
if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
- if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
- if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
- if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
- if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, " );
- if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, " );
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
+ if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
+ if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
+ if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
+ if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
+ if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
} else {
if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -851,9 +855,9 @@
if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
- if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
- if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, " );
- if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, " );
+ if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
+ if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
+ if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
@@ -7301,19 +7305,29 @@
// the only active track
// 2) invalidate this track: this will cause the client to reconnect and possibly
// be invalidated again until unsilenced
+ bool invalidate = false;
if (activeTrack->isSilenced()) {
if (size > 1) {
- activeTrack->invalidate();
- ALOG_ASSERT(fastTrackToRemove == 0);
- fastTrackToRemove = activeTrack;
- removeTrack_l(activeTrack);
- mActiveTracks.remove(activeTrack);
- size--;
- continue;
+ invalidate = true;
} else {
silenceFastCapture = true;
}
}
+ // Invalidate fast tracks if access to audio history is required as this is not
+ // possible with fast tracks. Once the fast track has been invalidated, no new
+ // fast track will be created until mMaxSharedAudioHistoryMs is cleared.
+ if (mMaxSharedAudioHistoryMs != 0) {
+ invalidate = true;
+ }
+ if (invalidate) {
+ activeTrack->invalidate();
+ ALOG_ASSERT(fastTrackToRemove == 0);
+ fastTrackToRemove = activeTrack;
+ removeTrack_l(activeTrack);
+ mActiveTracks.remove(activeTrack);
+ size--;
+ continue;
+ }
fastTrack = activeTrack;
}
@@ -7833,12 +7847,6 @@
lStatus = PERMISSION_DENIED;
goto Exit;
}
- //TODO: b/185972521 allow resampling buffer resizing on fast mixers by pausing
- // the fast mixer thread while resizing the buffer in the normal thread
- if (hasFastCapture()) {
- lStatus = BAD_VALUE;
- goto Exit;
- }
if (maxSharedAudioHistoryMs < 0
|| maxSharedAudioHistoryMs > AudioFlinger::kMaxSharedAudioHistoryMs) {
lStatus = BAD_VALUE;
@@ -7850,8 +7858,9 @@
}
sampleRate = *pSampleRate;
- // special case for FAST flag considered OK if fast capture is present
- if (hasFastCapture()) {
+ // special case for FAST flag considered OK if fast capture is present and access to
+ // audio history is not required
+ if (hasFastCapture() && mMaxSharedAudioHistoryMs == 0) {
inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
}
@@ -7863,8 +7872,9 @@
*flags = (audio_input_flags_t)(*flags & inputFlags);
}
- // client expresses a preference for FAST, but we get the final say
- if (*flags & AUDIO_INPUT_FLAG_FAST) {
+ // client expresses a preference for FAST and no access to audio history,
+ // but we get the final say
+ if (*flags & AUDIO_INPUT_FLAG_FAST && maxSharedAudioHistoryMs == 0) {
if (
// we formerly checked for a callback handler (non-0 tid),
// but that is no longer required for TRANSFER_OBTAIN mode
@@ -7984,7 +7994,6 @@
if (maxSharedAudioHistoryMs != 0) {
sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
}
-
}
lStatus = NO_ERROR;
@@ -8215,9 +8224,6 @@
status_t AudioFlinger::RecordThread::shareAudioHistory_l(
const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
int64_t sharedAudioStartMs) {
- if (hasFastCapture()) {
- return BAD_VALUE;
- }
if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
return BAD_VALUE;
}
@@ -8460,6 +8466,7 @@
// FIXME if client not keeping up, discard
LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames));
// 'filled' may be non-contiguous, so return only the first contiguous chunk
+
front &= recordThread->mRsmpInFramesP2 - 1;
size_t part1 = recordThread->mRsmpInFramesP2 - front;
if (part1 > (size_t) filled) {
@@ -8674,7 +8681,7 @@
// mRsmpInFrames must be 0 before calling resizeInputBuffer_l for the first time
mRsmpInFrames = 0;
- resizeInputBuffer_l();
+ resizeInputBuffer_l(0 /*maxSharedAudioHistoryMs*/);
// AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
// But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
@@ -8915,6 +8922,10 @@
int32_t previousRear = mRsmpInRear;
mRsmpInRear = 0;
+ ALOG_ASSERT(maxSharedAudioHistoryMs >= 0
+ && maxSharedAudioHistoryMs <= AudioFlinger::kMaxSharedAudioHistoryMs,
+ "resizeInputBuffer_l() called with invalid max shared history %d",
+ maxSharedAudioHistoryMs);
if (maxSharedAudioHistoryMs != 0) {
// resizeInputBuffer_l should never be called with a non zero shared history if the
// buffer was not already allocated
@@ -8927,6 +8938,7 @@
}
mRsmpInFrames = rsmpInFrames;
}
+ mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
// Note: mRsmpInFrames is 0 when called with maxSharedAudioHistoryMs equals to 0 so it is always
// initialized
if (mRsmpInFrames < minRsmpInFrames) {
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b6f7f24..65db986 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -338,7 +338,7 @@
virtual void updateOutDevices(const DeviceDescriptorBaseVector& outDevices);
virtual void toAudioPortConfig(struct audio_port_config *config) = 0;
- virtual void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs = 0);
+ virtual void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs);
@@ -1717,7 +1717,7 @@
audio_patch_handle_t *handle);
virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
void updateOutDevices(const DeviceDescriptorBaseVector& outDevices) override;
- void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs = 0) override;
+ void resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs) override;
void addPatchTrack(const sp<PatchRecord>& record);
void deletePatchTrack(const sp<PatchRecord>& record);
@@ -1862,6 +1862,7 @@
DeviceDescriptorBaseVector mOutDevices;
+ int32_t mMaxSharedAudioHistoryMs = 0;
std::string mSharedAudioPackageName = {};
int32_t mSharedAudioStartFrames = -1;
audio_session_t mSharedAudioSessionId = AUDIO_SESSION_NONE;
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index edcdf5a..f67ffc1 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -261,12 +261,8 @@
case STRATEGY_PHONE: {
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
if (!devices.isEmpty()) break;
- devices = availableOutputDevices.getFirstDevicesFromTypes({
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
- AUDIO_DEVICE_OUT_WIRED_HEADSET,
- AUDIO_DEVICE_OUT_LINE,
- AUDIO_DEVICE_OUT_USB_HEADSET,
- AUDIO_DEVICE_OUT_USB_DEVICE});
+ devices = availableOutputDevices.getFirstDevicesFromTypes(
+ getLastRemovableMediaDevices());
if (!devices.isEmpty()) break;
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_EARPIECE);
} break;
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 07c889b..c28c24b 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -82,6 +82,7 @@
"device3/RotateAndCropMapper.cpp",
"device3/Camera3OutputStreamInterface.cpp",
"device3/Camera3OutputUtils.cpp",
+ "device3/Camera3DeviceInjectionMethods.cpp",
"gui/RingBufferConsumer.cpp",
"hidl/AidlCameraDeviceCallbacks.cpp",
"hidl/AidlCameraServiceListener.cpp",
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d93b9e5..d05a2e1 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -49,6 +49,7 @@
#include <utils/Timers.h>
#include <cutils/properties.h>
+#include <android/hardware/camera/device/3.7/ICameraInjectionSession.h>
#include <android/hardware/camera2/ICameraDeviceUser.h>
#include "utils/CameraTraces.h"
@@ -358,6 +359,8 @@
}
}
+ mInjectionMethods = new Camera3DeviceInjectionMethods(this);
+
return OK;
}
@@ -431,6 +434,10 @@
mStatusTracker->join();
}
+ if (mInjectionMethods->isInjecting()) {
+ mInjectionMethods->stopInjection();
+ }
+
HalInterface* interface;
{
Mutex::Autolock l(mLock);
@@ -1829,7 +1836,6 @@
return res;
}
-
void Camera3Device::internalUpdateStatusLocked(Status status) {
mStatus = status;
mRecentStatusUpdates.add(mStatus);
@@ -2820,6 +2826,19 @@
mRequestBufferSM.onStreamsConfigured();
}
+ // Since the stream configuration of the injection camera is based on the internal camera, we
+ // must wait until the internal camera has configured its streams before calling injectCamera()
+ // to configure the injection streams.
+ if (mInjectionMethods->isInjecting()) {
+ ALOGV("%s: Injection camera %s: Start to configure streams.",
+ __FUNCTION__, mInjectionMethods->getInjectedCamId().string());
+ res = mInjectionMethods->injectCamera(config, bufferSizes);
+ if (res != OK) {
+ ALOGE("Can't finish inject camera process!");
+ return res;
+ }
+ }
+
return OK;
}
@@ -3524,6 +3543,146 @@
return res;
}
+status_t Camera3Device::HalInterface::configureInjectedStreams(
+ const camera_metadata_t* sessionParams, camera_stream_configuration* config,
+ const std::vector<uint32_t>& bufferSizes,
+ const CameraMetadata& cameraCharacteristics) {
+ ATRACE_NAME("InjectionCameraHal::configureStreams");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (config->input_is_multi_resolution) {
+ ALOGE("%s: Injection camera device doesn't support multi-resolution input "
+ "stream", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ // Convert stream config to HIDL
+ std::set<int> activeStreams;
+ device::V3_2::StreamConfiguration requestedConfiguration3_2;
+ device::V3_4::StreamConfiguration requestedConfiguration3_4;
+ device::V3_7::StreamConfiguration requestedConfiguration3_7;
+ requestedConfiguration3_2.streams.resize(config->num_streams);
+ requestedConfiguration3_4.streams.resize(config->num_streams);
+ requestedConfiguration3_7.streams.resize(config->num_streams);
+ for (size_t i = 0; i < config->num_streams; i++) {
+ device::V3_2::Stream& dst3_2 = requestedConfiguration3_2.streams[i];
+ device::V3_4::Stream& dst3_4 = requestedConfiguration3_4.streams[i];
+ device::V3_7::Stream& dst3_7 = requestedConfiguration3_7.streams[i];
+ camera3::camera_stream_t* src = config->streams[i];
+
+ Camera3Stream* cam3stream = Camera3Stream::cast(src);
+ cam3stream->setBufferFreedListener(this);
+ int streamId = cam3stream->getId();
+ StreamType streamType;
+ switch (src->stream_type) {
+ case CAMERA_STREAM_OUTPUT:
+ streamType = StreamType::OUTPUT;
+ break;
+ case CAMERA_STREAM_INPUT:
+ streamType = StreamType::INPUT;
+ break;
+ default:
+ ALOGE("%s: Stream %d: Unsupported stream type %d", __FUNCTION__,
+ streamId, config->streams[i]->stream_type);
+ return BAD_VALUE;
+ }
+ dst3_2.id = streamId;
+ dst3_2.streamType = streamType;
+ dst3_2.width = src->width;
+ dst3_2.height = src->height;
+ dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
+ dst3_2.rotation =
+ mapToStreamRotation((camera_stream_rotation_t)src->rotation);
+ // For HidlSession version 3.5 or newer, the format and dataSpace sent
+ // to HAL are original, not the overridden ones.
+ if (mHidlSession_3_5 != nullptr) {
+ dst3_2.format = mapToPixelFormat(cam3stream->isFormatOverridden()
+ ? cam3stream->getOriginalFormat()
+ : src->format);
+ dst3_2.dataSpace =
+ mapToHidlDataspace(cam3stream->isDataSpaceOverridden()
+ ? cam3stream->getOriginalDataSpace()
+ : src->data_space);
+ } else {
+ dst3_2.format = mapToPixelFormat(src->format);
+ dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
+ }
+ dst3_4.v3_2 = dst3_2;
+ dst3_4.bufferSize = bufferSizes[i];
+ if (src->physical_camera_id != nullptr) {
+ dst3_4.physicalCameraId = src->physical_camera_id;
+ }
+ dst3_7.v3_4 = dst3_4;
+ dst3_7.groupId = cam3stream->getHalStreamGroupId();
+ dst3_7.sensorPixelModesUsed.resize(src->sensor_pixel_modes_used.size());
+ size_t j = 0;
+ for (int mode : src->sensor_pixel_modes_used) {
+ dst3_7.sensorPixelModesUsed[j++] =
+ static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
+ }
+ activeStreams.insert(streamId);
+ // Create Buffer ID map if necessary
+ mBufferRecords.tryCreateBufferCache(streamId);
+ }
+ // remove BufferIdMap for deleted streams
+ mBufferRecords.removeInactiveBufferCaches(activeStreams);
+
+ StreamConfigurationMode operationMode;
+ res = mapToStreamConfigurationMode(
+ (camera_stream_configuration_mode_t)config->operation_mode,
+ /*out*/ &operationMode);
+ if (res != OK) {
+ return res;
+ }
+ requestedConfiguration3_7.operationMode = operationMode;
+ size_t sessionParamSize = get_camera_metadata_size(sessionParams);
+ requestedConfiguration3_7.operationMode = operationMode;
+ requestedConfiguration3_7.sessionParams.setToExternal(
+ reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+ sessionParamSize);
+
+ // See which version of HAL we have
+ if (mHidlSession_3_7 != nullptr) {
+ requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
+ requestedConfiguration3_7.multiResolutionInputImage =
+ config->input_is_multi_resolution;
+
+ const camera_metadata_t* rawMetadata = cameraCharacteristics.getAndLock();
+ ::android::hardware::camera::device::V3_2::CameraMetadata hidlChars = {};
+ hidlChars.setToExternal(
+ reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(rawMetadata)),
+ get_camera_metadata_size(rawMetadata));
+ cameraCharacteristics.unlock(rawMetadata);
+
+ sp<hardware::camera::device::V3_7::ICameraInjectionSession>
+ hidlInjectionSession_3_7;
+ auto castInjectionResult_3_7 =
+ device::V3_7::ICameraInjectionSession::castFrom(mHidlSession_3_7);
+ if (castInjectionResult_3_7.isOk()) {
+ hidlInjectionSession_3_7 = castInjectionResult_3_7;
+ } else {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__,
+ castInjectionResult_3_7.description().c_str());
+ return DEAD_OBJECT;
+ }
+
+ auto err = hidlInjectionSession_3_7->configureInjectionStreams(
+ requestedConfiguration3_7, hidlChars);
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__,
+ err.description().c_str());
+ return DEAD_OBJECT;
+ }
+ } else {
+ ALOGE("%s: mHidlSession_3_7 does not exist, the lowest version of injection "
+ "session is 3.7", __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
+ return res;
+}
+
status_t Camera3Device::HalInterface::wrapAsHidlRequest(camera_capture_request_t* request,
/*out*/device::V3_2::CaptureRequest* captureRequest,
/*out*/std::vector<native_handle_t*>* handlesCreated,
@@ -5724,6 +5883,18 @@
return changed;
}
+status_t Camera3Device::RequestThread::setHalInterface(
+ sp<HalInterface> newHalInterface) {
+ if (newHalInterface.get() == nullptr) {
+ ALOGE("%s: The newHalInterface does not exist!", __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
+ mInterface = newHalInterface;
+
+ return OK;
+}
+
/**
* PreparerThread inner class methods
*/
@@ -6367,4 +6538,58 @@
return mRequestThread->setCameraMute(enabled);
}
+status_t Camera3Device::injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) {
+ ALOGI("%s Injection camera: injectedCamId = %s", __FUNCTION__, injectedCamId.string());
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+
+ status_t res = NO_ERROR;
+ if (mInjectionMethods->isInjecting()) {
+ if (injectedCamId == mInjectionMethods->getInjectedCamId()) {
+ return OK;
+ } else {
+ res = mInjectionMethods->stopInjection();
+ if (res != OK) {
+ ALOGE("%s: Failed to stop the injection camera! ret != NO_ERROR: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+ }
+ }
+
+ res = mInjectionMethods->injectionInitialize(injectedCamId, manager, this);
+ if (res != OK) {
+ ALOGE("%s: Failed to initialize the injection camera! ret != NO_ERROR: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+
+ camera3::camera_stream_configuration injectionConfig;
+ std::vector<uint32_t> injectionBufferSizes;
+ mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
+ // When Android's second display is cast to a remote device and the opened camera is also
+ // cast to that display, the camera has already configured its streams at this point, so we
+ // can directly call injectCamera() to replace the internal camera with the injection
+ // camera.
+ if (mOperatingMode >= 0 && injectionConfig.num_streams > 0
+ && injectionBufferSizes.size() > 0) {
+ ALOGV("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+ res = mInjectionMethods->injectCamera(
+ injectionConfig, injectionBufferSizes);
+ if (res != OK) {
+ ALOGE("Can't finish inject camera process!");
+ return res;
+ }
+ }
+
+ return OK;
+}
+
+status_t Camera3Device::stopInjection() {
+ ALOGI("%s: Injection camera: stopInjection", __FUNCTION__);
+ Mutex::Autolock il(mInterfaceLock);
+ return mInjectionMethods->stopInjection();
+}
+
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d9e89fd..f962c78 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -263,6 +263,18 @@
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
/**
+ * The injection camera session to replace the internal camera
+ * session.
+ */
+ status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager);
+
+ /**
+ * Stop the injection camera and restore to internal camera session.
+ */
+ status_t stopInjection();
+
+ /**
* Helper functions to map between framework and HIDL values
*/
static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
@@ -363,6 +375,13 @@
/*inout*/ camera_stream_configuration_t *config,
const std::vector<uint32_t>& bufferSizes);
+ // The injection camera configures its streams to the hal.
+ status_t configureInjectedStreams(
+ const camera_metadata_t* sessionParams,
+ /*inout*/ camera_stream_configuration_t* config,
+ const std::vector<uint32_t>& bufferSizes,
+ const CameraMetadata& cameraCharacteristics);
+
// When the call succeeds, the ownership of acquire fences in requests is transferred to
// HalInterface. More specifically, the current implementation will send the fence to
// HAL process and close the FD in cameraserver process. When the call fails, the ownership
@@ -900,6 +919,9 @@
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
status_t setCameraMute(bool enabled);
+
+ status_t setHalInterface(sp<HalInterface> newHalInterface);
+
protected:
virtual bool threadLoop();
@@ -1321,6 +1343,75 @@
// Whether the HAL supports camera muting via test pattern
bool mSupportCameraMute = false;
+ // Injection camera related methods.
+ class Camera3DeviceInjectionMethods : public virtual RefBase {
+ public:
+ Camera3DeviceInjectionMethods(wp<Camera3Device> parent);
+
+ ~Camera3DeviceInjectionMethods();
+
+ // Initialize the injection camera and generate a hal interface.
+ status_t injectionInitialize(
+ const String8& injectedCamId, sp<CameraProviderManager> manager,
+ const sp<
+ android::hardware::camera::device::V3_2 ::ICameraDeviceCallback>&
+ callback);
+
+ // Injection camera will replace the internal camera and configure streams
+ // when device is IDLE and request thread is paused.
+ status_t injectCamera(
+ camera3::camera_stream_configuration& injectionConfig,
+ std::vector<uint32_t>& injectionBufferSizes);
+
+ // Stop the injection camera and switch back to the backup HAL interface.
+ status_t stopInjection();
+
+ bool isInjecting();
+
+ const String8& getInjectedCamId() const;
+
+ void getInjectionConfig(/*out*/ camera3::camera_stream_configuration* injectionConfig,
+ /*out*/ std::vector<uint32_t>* injectionBufferSizes);
+
+ private:
+ // Configure the injection camera's streams. This must wait until the
+ // output streams have been created and configured for the original camera
+ // before proceeding.
+ status_t injectionConfigureStreams(
+ camera3::camera_stream_configuration& injectionConfig,
+ std::vector<uint32_t>& injectionBufferSizes);
+
+ // Disconnect the injection camera and delete the hal interface.
+ void injectionDisconnectImpl();
+
+ // Replace the original camera HAL interface with the injection camera's,
+ // optionally keeping a backup of the original.
+ status_t replaceHalInterface(sp<HalInterface> newHalInterface,
+ bool keepBackup);
+
+ wp<Camera3Device> mParent;
+
+ // Backup of the original camera HAL interface.
+ sp<HalInterface> mBackupHalInterface;
+
+ // HAL interface generated for the injection camera.
+ sp<HalInterface> mInjectedCamHalInterface;
+
+ // Copy of the internal camera's stream configuration.
+ camera3::camera_stream_configuration mInjectionConfig;
+
+ // Copy of the buffer sizes of the internal camera's output streams.
+ std::vector<uint32_t> mInjectionBufferSizes;
+
+ // Synchronizes access to injection camera between initialize and
+ // disconnect.
+ Mutex mInjectionLock;
+
+ // The injection camera ID.
+ String8 mInjectedCamId;
+ };
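+ // Helper object implementing the injection methods above. A rough call
+ // sequence (sketch based on the declarations, not a verbatim trace):
+ //   mInjectionMethods->injectionInitialize(id, manager, callback);
+ //   mInjectionMethods->injectCamera(config, bufferSizes);  // swap in injection HAL
+ //   mInjectionMethods->stopInjection();                    // restore internal HAL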
+ sp<Camera3DeviceInjectionMethods> mInjectionMethods;
+
}; // class Camera3Device
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
new file mode 100644
index 0000000..f145dac
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3DeviceInjectionMethods"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraProviderManager.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+
+using hardware::camera::device::V3_2::ICameraDeviceSession;
+
+Camera3Device::Camera3DeviceInjectionMethods::Camera3DeviceInjectionMethods(
+ wp<Camera3Device> parent)
+ : mParent(parent) {
+ ALOGV("%s: Created injection camera methods", __FUNCTION__);
+}
+
+Camera3Device::Camera3DeviceInjectionMethods::~Camera3DeviceInjectionMethods() {
+ ALOGV("%s: Removed injection camera methods", __FUNCTION__);
+ injectionDisconnectImpl();
+}
+
+status_t Camera3Device::Camera3DeviceInjectionMethods::injectionInitialize(
+ const String8& injectedCamId, sp<CameraProviderManager> manager,
+ const sp<android::hardware::camera::device::V3_2::ICameraDeviceCallback>&
+ callback) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mInjectionLock);
+
+ if (manager == nullptr) {
+ ALOGE("%s: manager does not exist!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("%s: parent does not exist!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mInjectedCamId = injectedCamId;
+ sp<ICameraDeviceSession> session;
+ ATRACE_BEGIN("Injection CameraHal::openSession");
+ status_t res = manager->openSession(injectedCamId.string(), callback,
+ /*out*/ &session);
+ ATRACE_END();
+ if (res != OK) {
+ ALOGE("Injection camera could not open camera session: %s (%d)",
+ strerror(-res), res);
+ return res;
+ }
+
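+ // Fetch the request metadata fast message queue (FMQ) from the injection
+ // session; the result FMQ obtained further below is installed directly into
+ // the parent device's mResultMetadataQueue.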
+ std::shared_ptr<RequestMetadataQueue> queue;
+ auto requestQueueRet =
+ session->getCaptureRequestMetadataQueue([&queue](const auto& descriptor) {
+ queue = std::make_shared<RequestMetadataQueue>(descriptor);
+ if (!queue->isValid() || queue->availableToWrite() <= 0) {
+ ALOGE("Injection camera HAL returns empty request metadata fmq, not "
+ "use it");
+ queue = nullptr;
+ // don't use the queue onwards.
+ }
+ });
+ if (!requestQueueRet.isOk()) {
+ ALOGE("Injection camera transaction error when getting request metadata fmq: "
+ "%s, not use it", requestQueueRet.description().c_str());
+ return DEAD_OBJECT;
+ }
+
+ std::unique_ptr<ResultMetadataQueue>& resQueue = parent->mResultMetadataQueue;
+ auto resultQueueRet = session->getCaptureResultMetadataQueue(
+ [&resQueue](const auto& descriptor) {
+ resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
+ if (!resQueue->isValid() || resQueue->availableToWrite() <= 0) {
+ ALOGE("Injection camera HAL returns empty result metadata fmq, not use "
+ "it");
+ resQueue = nullptr;
+ // Don't use the resQueue onwards.
+ }
+ });
+ if (!resultQueueRet.isOk()) {
+ ALOGE("Injection camera transaction error when getting result metadata queue "
+ "from camera session: %s", resultQueueRet.description().c_str());
+ return DEAD_OBJECT;
+ }
+ IF_ALOGV() {
+ session->interfaceChain(
+ [](::android::hardware::hidl_vec<::android::hardware::hidl_string>
+ interfaceChain) {
+ ALOGV("Injection camera session interface chain:");
+ for (const auto& iface : interfaceChain) {
+ ALOGV(" %s", iface.c_str());
+ }
+ });
+ }
+
+ ALOGV("%s: Injection camera interface = new HalInterface()", __FUNCTION__);
+ mInjectedCamHalInterface =
+ new HalInterface(session, queue, parent->mUseHalBufManager,
+ parent->mSupportOfflineProcessing);
+ if (mInjectedCamHalInterface == nullptr) {
+ ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
+ return OK;
+}
+
+status_t Camera3Device::Camera3DeviceInjectionMethods::injectCamera(
+ camera3::camera_stream_configuration& injectionConfig,
+ std::vector<uint32_t>& injectionBufferSizes) {
+ status_t res = NO_ERROR;
+ mInjectionConfig = injectionConfig;
+ mInjectionBufferSizes = injectionBufferSizes;
+
+ if (mInjectedCamHalInterface == nullptr) {
+ ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("%s: parent does not exist!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
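+ // If the device is actively streaming, pause it (and the request thread)
+ // before swapping HAL interfaces; activity is restored near the end of this
+ // method once the injection streams are configured.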
+ nsecs_t maxExpectedDuration = parent->getExpectedInFlightDuration();
+ bool wasActive = false;
+ if (parent->mStatus == STATUS_ACTIVE) {
+ ALOGV("%s: Let the device be IDLE and the request thread is paused",
+ __FUNCTION__);
+ parent->mPauseStateNotify = true;
+ res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
+ if (res != OK) {
+ ALOGE("%s: Can't pause captures to inject camera!", __FUNCTION__);
+ return res;
+ }
+ wasActive = true;
+ }
+
+ ALOGV("%s: Injection camera: replaceHalInterface", __FUNCTION__);
+ res = replaceHalInterface(mInjectedCamHalInterface, true);
+ if (res != OK) {
+ ALOGE("%s: Failed to replace the new HalInterface!", __FUNCTION__);
+ injectionDisconnectImpl();
+ return res;
+ }
+
+ res = parent->mRequestThread->setHalInterface(mInjectedCamHalInterface);
+ if (res != OK) {
+ ALOGE("%s: Failed to set new HalInterface in RequestThread!", __FUNCTION__);
+ replaceHalInterface(mBackupHalInterface, false);
+ injectionDisconnectImpl();
+ return res;
+ }
+
+ parent->mNeedConfig = true;
+ res = injectionConfigureStreams(injectionConfig, injectionBufferSizes);
+ parent->mNeedConfig = false;
+ if (res != OK) {
+ ALOGE("Can't injectionConfigureStreams device for streams: %d: %s "
+ "(%d)", parent->mNextStreamId, strerror(-res), res);
+ replaceHalInterface(mBackupHalInterface, false);
+ injectionDisconnectImpl();
+ return res;
+ }
+
+ if (wasActive) {
+ ALOGV("%s: Restarting activity to inject camera", __FUNCTION__);
+ // Reuse current operating mode and session parameters for new stream
+ // config.
+ parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ }
+
+ return OK;
+}
+
+status_t Camera3Device::Camera3DeviceInjectionMethods::stopInjection() {
+ status_t res = NO_ERROR;
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("%s: parent does not exist!", __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
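+ // As in injectCamera(), pause an active device before restoring the backup
+ // HAL interface, then resume it afterwards.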
+ nsecs_t maxExpectedDuration = parent->getExpectedInFlightDuration();
+ bool wasActive = false;
+ if (parent->mStatus == STATUS_ACTIVE) {
+ ALOGV("%s: Let the device be IDLE and the request thread is paused",
+ __FUNCTION__);
+ parent->mPauseStateNotify = true;
+ res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
+ if (res != OK) {
+ ALOGE("%s: Can't pause captures to stop injection!", __FUNCTION__);
+ return res;
+ }
+ wasActive = true;
+ }
+
+ res = replaceHalInterface(mBackupHalInterface, false);
+ if (res != OK) {
+ ALOGE("%s: Failed to restore the backup HalInterface!", __FUNCTION__);
+ injectionDisconnectImpl();
+ return res;
+ }
+ injectionDisconnectImpl();
+
+ if (wasActive) {
+ ALOGV("%s: Restarting activity to stop injection", __FUNCTION__);
+ // Reuse current operating mode and session parameters for new stream
+ // config.
+ parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ }
+
+ return OK;
+}
+
+bool Camera3Device::Camera3DeviceInjectionMethods::isInjecting() {
+ return mInjectedCamHalInterface != nullptr;
+}
+
+const String8& Camera3Device::Camera3DeviceInjectionMethods::getInjectedCamId()
+ const {
+ return mInjectedCamId;
+}
+
+void Camera3Device::Camera3DeviceInjectionMethods::getInjectionConfig(
+ /*out*/ camera3::camera_stream_configuration* injectionConfig,
+ /*out*/ std::vector<uint32_t>* injectionBufferSizes) {
+ if (injectionConfig == nullptr || injectionBufferSizes == nullptr) {
+ ALOGE("%s: Injection configuration arguments must not be null!", __FUNCTION__);
+ return;
+ }
+
+ *injectionConfig = mInjectionConfig;
+ *injectionBufferSizes = mInjectionBufferSizes;
+}
+
+status_t Camera3Device::Camera3DeviceInjectionMethods::injectionConfigureStreams(
+ camera3::camera_stream_configuration& injectionConfig,
+ std::vector<uint32_t>& injectionBufferSizes) {
+ ATRACE_CALL();
+ status_t res = NO_ERROR;
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("%s: parent does not exist!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ if (parent->mOperatingMode < 0) {
+ ALOGE("Invalid operating mode: %d", parent->mOperatingMode);
+ return BAD_VALUE;
+ }
+
+ // Start configuring the streams
+ ALOGV("%s: Injection camera %s: Starting stream configuration", __FUNCTION__,
+ mInjectedCamId.string());
+
+ parent->mPreparerThread->pause();
+
+ // Do the HAL configuration; will potentially touch stream
+ // max_buffers, usage, and priv fields, as well as data_space and format
+ // fields for IMPLEMENTATION_DEFINED formats.
+
+ const camera_metadata_t* sessionBuffer = parent->mSessionParams.getAndLock();
+ res = mInjectedCamHalInterface->configureInjectedStreams(
+ sessionBuffer, &injectionConfig, injectionBufferSizes,
+ parent->mDeviceInfo);
+ parent->mSessionParams.unlock(sessionBuffer);
+
+ if (res == BAD_VALUE) {
+ // HAL rejected this set of streams as unsupported, clean up config
+ // attempt and return to unconfigured state
+ ALOGE("Set of requested outputs not supported by HAL");
+ parent->cancelStreamsConfigurationLocked();
+ return BAD_VALUE;
+ } else if (res != OK) {
+ // Some other kind of error from configure_streams - this is not
+ // expected
+ ALOGE("Unable to configure streams with HAL: %s (%d)", strerror(-res),
+ res);
+ return res;
+ }
+
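+ // Tell the injection HAL interface that every existing output stream has
+ // been re-configured for the new session.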
+ for (size_t i = 0; i < parent->mOutputStreams.size(); i++) {
+ sp<camera3::Camera3OutputStreamInterface> outputStream =
+ parent->mOutputStreams[i];
+ mInjectedCamHalInterface->onStreamReConfigured(outputStream->getId());
+ }
+
+ // Request thread needs to know to avoid using repeat-last-settings protocol
+ // across configure_streams() calls
+ parent->mRequestThread->configurationComplete(
+ parent->mIsConstrainedHighSpeedConfiguration, parent->mSessionParams,
+ parent->mGroupIdPhysicalCameraMap);
+
+ parent->internalUpdateStatusLocked(STATUS_CONFIGURED);
+
+ ALOGV("%s: Injection camera %s: Stream configuration complete", __FUNCTION__,
+ mInjectedCamId.string());
+
+ auto rc = parent->mPreparerThread->resume();
+
+ if (rc != OK) {
+ ALOGE("%s: Injection camera %s: Preparer thread failed to resume!",
+ __FUNCTION__, mInjectedCamId.string());
+ return rc;
+ }
+
+ return OK;
+}
+
+void Camera3Device::Camera3DeviceInjectionMethods::injectionDisconnectImpl() {
+ ATRACE_CALL();
+ ALOGI("%s: Injection camera disconnect", __FUNCTION__);
+
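+ // Tear down in three steps: grab a raw pointer to the injected interface
+ // under mInjectionLock, close it outside the lock (close may block on HAL
+ // callbacks), then clear the strong pointer under the lock again.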
+ mBackupHalInterface = nullptr;
+ HalInterface* interface = nullptr;
+ {
+ Mutex::Autolock lock(mInjectionLock);
+ if (mInjectedCamHalInterface != nullptr) {
+ interface = mInjectedCamHalInterface.get();
+ // Call close() without the internal mutex held, as the HAL close may
+ // need to wait on assorted callbacks, etc., to complete before it can
+ // return.
+ }
+ }
+
+ if (interface != nullptr) {
+ interface->close();
+ }
+
+ {
+ Mutex::Autolock lock(mInjectionLock);
+ if (mInjectedCamHalInterface != nullptr) {
+ mInjectedCamHalInterface->clear();
+ mInjectedCamHalInterface = nullptr;
+ }
+ }
+}
+
+status_t Camera3Device::Camera3DeviceInjectionMethods::replaceHalInterface(
+ sp<HalInterface> newHalInterface, bool keepBackup) {
+ Mutex::Autolock lock(mInjectionLock);
+ if (newHalInterface.get() == nullptr) {
+ ALOGE("%s: The newHalInterface does not exist, to stop replacing.",
+ __FUNCTION__);
+ return DEAD_OBJECT;
+ }
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent == nullptr) {
+ ALOGE("%s: parent does not exist!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
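+ // Take a backup of the current interface only on the first replacement;
+ // when keepBackup is false (used when restoring the backup), the backup
+ // reference is dropped.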
+ if (keepBackup && mBackupHalInterface == nullptr) {
+ mBackupHalInterface = parent->mInterface;
+ } else if (!keepBackup) {
+ mBackupHalInterface = nullptr;
+ }
+ parent->mInterface = newHalInterface;
+
+ return OK;
+}
+
+}; // namespace android
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index 281a4ce..2378f33 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -52,9 +52,6 @@
//
::android::stats::mediametrics::ExtractorData metrics_proto;
- // flesh out the protobuf we'll hand off with our data
- //
-
std::string format;
if (item->getString("android.media.mediaextractor.fmt", &format)) {
metrics_proto.set_format(format);
@@ -86,7 +83,6 @@
metrics_proto.set_entry_point(entry_point);
}
- // android.media.mediaextractor.logSessionId string
std::string log_session_id;
if (item->getString("android.media.mediaextractor.logSessionId", &log_session_id)) {
metrics_proto.set_log_session_id(log_session_id);
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 6cceb06..af2946b 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -39,7 +39,6 @@
bool statsd_mediaparser(const std::shared_ptr<const mediametrics::Item>& item,
const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
- static constexpr bool enabled_statsd = true; // TODO: Remove, dup with dump2StatsdInternal().
if (item == nullptr) return false;
const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
@@ -82,28 +81,25 @@
std::string logSessionId;
item->getString("android.media.mediaparser.logSessionId", &logSessionId);
- if (enabled_statsd) {
- (void) android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
- timestamp_nanos,
- package_name.c_str(),
- package_version_code,
- parserName.c_str(),
- createdByName,
- parserPool.c_str(),
- lastException.c_str(),
- resourceByteCount,
- durationMillis,
- trackMimeTypes.c_str(),
- trackCodecs.c_str(),
- alteredParameters.c_str(),
- videoWidth,
- videoHeight,
- logSessionId.c_str());
- } else {
- ALOGV("NOT sending MediaParser media metrics.");
- }
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
+ timestamp_nanos,
+ package_name.c_str(),
+ package_version_code,
+ parserName.c_str(),
+ createdByName,
+ parserPool.c_str(),
+ lastException.c_str(),
+ resourceByteCount,
+ durationMillis,
+ trackMimeTypes.c_str(),
+ trackCodecs.c_str(),
+ alteredParameters.c_str(),
+ videoWidth,
+ videoHeight,
+ logSessionId.c_str());
+
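+ // The stats_write() return code is recorded in the dumpsys log entry below.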
std::stringstream log;
- log << "result:" << "(result)" << " {"
+ log << "result:" << result << " {"
<< " mediametrics_mediaparser_reported:"
<< android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED
<< " timestamp_nanos:" << timestamp_nanos