Merge "Fix EAC3 bsid parsing"
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index f7863a5..9aafcd3 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -289,7 +289,7 @@
}
camera_status_t
-CameraDevice::allocateCaptureRequest(
+CameraDevice::allocateCaptureRequestLocked(
const ACaptureRequest* request, /*out*/sp<CaptureRequest> &outReq) {
sp<CaptureRequest> req(new CaptureRequest());
req->mCaptureRequest.physicalCameraSettings.resize(1);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index c63b97f..d571585 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -169,7 +169,12 @@
camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
- camera_status_t allocateCaptureRequest(
+ // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
+ // a) This function is called serially.
+ // b) This function is called in accordance with ICameraDeviceUser.submitRequestList,
+    //    otherwise, a capture request might end up with the wrong settings
+    //    metadata associated with it.
+ camera_status_t allocateCaptureRequestLocked(
const ACaptureRequest* request, sp<CaptureRequest>& outReq);
static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
index 7d2304e..8bd5a52 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
+++ b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
@@ -73,7 +73,7 @@
requestsV.setCapacity(numRequests);
for (int i = 0; i < numRequests; i++) {
sp<CaptureRequest> req;
- ret = allocateCaptureRequest(requests[i], req);
+ ret = allocateCaptureRequestLocked(requests[i], req);
// We need to call this method since after submitRequestList is called,
// the request metadata queue might have removed the capture request
// metadata. Therefore we simply add the metadata to its wrapper class,
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index 04dda8f..4d00d35 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -75,7 +75,7 @@
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -84,15 +84,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -103,10 +103,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -191,7 +191,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
(void)me; // TODO: validate
@@ -205,13 +205,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index d1bdf0d..137e775 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -37,29 +37,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -68,15 +68,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 6)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -125,13 +125,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
@@ -323,8 +323,8 @@
return;
}
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(encInfo.confSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(encInfo.confSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
index c591e21..edad75a 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
@@ -47,18 +47,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef AMRNB
MEDIA_MIMETYPE_AUDIO_AMR_NB
#else
@@ -67,13 +67,13 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
#ifdef AMRNB
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
@@ -85,19 +85,19 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
#ifdef AMRNB
- .withDefault(new C2BitrateTuning::input(0u, 4750))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
#else
- .withDefault(new C2BitrateTuning::input(0u, 6600))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
#endif
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
@@ -110,13 +110,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index 8c03257..3d3aa7d 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -36,38 +36,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_NB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter(
@@ -75,8 +75,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 4750))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index 074493c..379cb32 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -38,38 +38,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_WB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 16000))
.withFields({C2F(mSampleRate, value).equalTo(16000)})
.withSetter(
@@ -77,8 +77,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 6600))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 6be1807..4bcc2c6 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -141,7 +141,7 @@
static C2R SizeSetter(bool mayBlock,
const C2P<C2StreamPictureSizeInfo::output>& oldMe,
- C2P<C2VideoSizeStreamInfo::output>& me) {
+ C2P<C2StreamPictureSizeInfo::output>& me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -586,7 +586,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 86cd3d8..9290d74 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -198,7 +198,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -845,7 +845,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index 6ddb9ff..b851908 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -45,36 +45,36 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_VIDEO_AVC))
.build());
addParameter(
- DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2560, 2),
C2F(mSize, height).inRange(2, 2560, 2),
@@ -83,7 +83,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -91,8 +91,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -182,9 +182,9 @@
static C2R ProfileLevelSetter(
bool mayBlock,
C2P<C2StreamProfileLevelInfo::output> &me,
- const C2P<C2VideoSizeStreamTuning::input> &size,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
const C2P<C2StreamFrameRateInfo::output> &frameRate,
- const C2P<C2BitrateTuning::output> &bitrate) {
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
(void)mayBlock;
if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
me.set().profile = PROFILE_AVC_CONSTRAINED_BASELINE;
@@ -325,16 +325,16 @@
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -1332,8 +1332,8 @@
mSpsPpsHeaderReceived = true;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -1492,7 +1492,7 @@
if (IV_IDR_FRAME == s_encode_op.u4_encoded_frame_type) {
ALOGV("IDR frame produced");
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index b158f8f..44f1fe0 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -473,7 +473,7 @@
if (!mOutputBlockPool) {
c2_status_t err = [this] {
// TODO: don't use query_vb
- C2StreamFormatConfig::output outputFormat(0u);
+ C2StreamBufferTypeSetting::output outputFormat(0u);
std::vector<std::unique_ptr<C2Param>> params;
c2_status_t err = intf()->query_vb(
{ &outputFormat },
@@ -485,7 +485,7 @@
return err;
}
C2BlockPool::local_id_t poolId =
- outputFormat.value == C2FormatVideo
+ outputFormat.value == C2BufferData::GRAPHIC
? C2BlockPool::BASIC_GRAPHIC
: C2BlockPool::BASIC_LINEAR;
if (params.size()) {
diff --git a/media/codec2/components/flac/C2SoftFlacDec.cpp b/media/codec2/components/flac/C2SoftFlacDec.cpp
index 86b16e8..10b14ce 100644
--- a/media/codec2/components/flac/C2SoftFlacDec.cpp
+++ b/media/codec2/components/flac/C2SoftFlacDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -99,13 +99,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 4ea35c2..0ce2543 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -34,38 +34,38 @@
: C2InterfaceHelper(helper) {
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::input> mPcmEncodingInfo;
};
@@ -223,8 +223,8 @@
}
if (!mWroteHeader) {
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(mHeaderOffset, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(mHeaderOffset, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 1c71d45..504ca78 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -41,18 +41,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef ALAW
MEDIA_MIMETYPE_AUDIO_G711_ALAW
#else
@@ -61,28 +61,28 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).inRange(8000, 48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).equalTo(64000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/gsm/C2SoftGsmDec.cpp b/media/codec2/components/gsm/C2SoftGsmDec.cpp
index 7101c79..69d4885 100644
--- a/media/codec2/components/gsm/C2SoftGsmDec.cpp
+++ b/media/codec2/components/gsm/C2SoftGsmDec.cpp
@@ -36,44 +36,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MSGSM))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 13200))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 13200))
.withFields({C2F(mBitrate, value).equalTo(13200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -85,13 +85,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/hevc/Android.bp b/media/codec2/components/hevc/Android.bp
index 2a045e1..369bd78 100644
--- a/media/codec2/components/hevc/Android.bp
+++ b/media/codec2/components/hevc/Android.bp
@@ -9,8 +9,17 @@
static_libs: ["libhevcdec"],
- include_dirs: [
- "external/libhevc/decoder",
- "external/libhevc/common",
+}
+
+cc_library_shared {
+ name: "libcodec2_soft_hevcenc",
+ defaults: [
+ "libcodec2_soft-defaults",
+ "libcodec2_soft_sanitize_signed-defaults",
],
+
+ srcs: ["C2SoftHevcEnc.cpp"],
+
+ static_libs: ["libhevcenc"],
+
}
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index f0d7d88..bb8dda0 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -192,7 +192,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -839,7 +839,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
new file mode 100644
index 0000000..2c0a7a0
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -0,0 +1,802 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftHevcEnc"
+#include <log/log.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
+#include <SimpleC2Interface.h>
+#include <util/C2InterfaceHelper.h>
+
+#include "ihevc_typedefs.h"
+#include "itt_video_api.h"
+#include "ihevce_api.h"
+#include "ihevce_plugin.h"
+#include "C2SoftHevcEnc.h"
+
+namespace android {
+
+class C2SoftHevcEnc::IntfImpl : public C2InterfaceHelper {
+ public:
+ explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper>& helper)
+ : C2InterfaceHelper(helper) {
+ setDerivedInstance(this);
+
+ addParameter(
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
+ .build());
+
+ addParameter(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+ MEDIA_MIMETYPE_VIDEO_RAW))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+ MEDIA_MIMETYPE_VIDEO_HEVC))
+ .build());
+
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+ .withConstValue(new C2StreamUsageTuning::input(
+ 0u, (uint64_t)C2MemoryUsage::CPU_READ))
+ .build());
+
+ addParameter(
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(320, 1920, 2),
+ C2F(mSize, height).inRange(128, 1088, 2),
+ })
+ .withSetter(SizeSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
+ .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+ .withFields({C2F(mFrameRate, value).greaterThan(0.)})
+ .withSetter(
+ Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
+ .withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
+ .withSetter(BitrateSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_HEVC_MAIN, LEVEL_HEVC_MAIN_1))
+ .withFields({
+ C2F(mProfileLevel, profile)
+ .oneOf({C2Config::PROFILE_HEVC_MAIN,
+ C2Config::PROFILE_HEVC_MAIN_STILL}),
+ C2F(mProfileLevel, level)
+ .oneOf({LEVEL_HEVC_MAIN_1, LEVEL_HEVC_MAIN_2,
+ LEVEL_HEVC_MAIN_2_1, LEVEL_HEVC_MAIN_3,
+ LEVEL_HEVC_MAIN_3_1, LEVEL_HEVC_MAIN_4,
+ LEVEL_HEVC_MAIN_4_1, LEVEL_HEVC_MAIN_5,
+ LEVEL_HEVC_MAIN_5_1, LEVEL_HEVC_MAIN_5_2}),
+ })
+ .withSetter(ProfileLevelSetter, mSize, mFrameRate, mBitrate)
+ .build());
+
+ addParameter(
+ DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
+ .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
+ .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
+ .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
+ .withDefault(
+ new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
+ .withFields({C2F(mSyncFramePeriod, value).any()})
+ .withSetter(
+ Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
+ .build());
+ }
+
+ static C2R BitrateSetter(bool mayBlock,
+ C2P<C2StreamBitrateInfo::output>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (me.v.value <= 4096) {
+ me.set().value = 4096;
+ }
+ return res;
+ }
+
+ static C2R SizeSetter(bool mayBlock,
+ const C2P<C2StreamPictureSizeInfo::input>& oldMe,
+ C2P<C2StreamPictureSizeInfo::input>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+ me.set().width = oldMe.v.width;
+ }
+ if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+ me.set().height = oldMe.v.height;
+ }
+ return res;
+ }
+
+ static C2R ProfileLevelSetter(
+ bool mayBlock,
+ C2P<C2StreamProfileLevelInfo::output> &me,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
+ const C2P<C2StreamFrameRateInfo::output> &frameRate,
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
+ (void)mayBlock;
+ if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+ me.set().profile = PROFILE_HEVC_MAIN;
+ }
+
+ struct LevelLimits {
+ C2Config::level_t level;
+ uint64_t samplesPerSec;
+ uint64_t samples;
+ uint32_t bitrate;
+ };
+
+ constexpr LevelLimits kLimits[] = {
+ { LEVEL_HEVC_MAIN_1, 552960, 36864, 128000 },
+ { LEVEL_HEVC_MAIN_2, 3686400, 122880, 1500000 },
+ { LEVEL_HEVC_MAIN_2_1, 7372800, 245760, 3000000 },
+ { LEVEL_HEVC_MAIN_3, 16588800, 552960, 6000000 },
+ { LEVEL_HEVC_MAIN_3_1, 33177600, 983040, 10000000 },
+ { LEVEL_HEVC_MAIN_4, 66846720, 2228224, 12000000 },
+ { LEVEL_HEVC_MAIN_4_1, 133693440, 2228224, 20000000 },
+ { LEVEL_HEVC_MAIN_5, 267386880, 8912896, 25000000 },
+ { LEVEL_HEVC_MAIN_5_1, 534773760, 8912896, 40000000 },
+ { LEVEL_HEVC_MAIN_5_2, 1069547520, 8912896, 60000000 },
+ { LEVEL_HEVC_MAIN_6, 1069547520, 35651584, 60000000 },
+ { LEVEL_HEVC_MAIN_6_1, 2139095040, 35651584, 120000000 },
+ { LEVEL_HEVC_MAIN_6_2, 4278190080, 35651584, 240000000 },
+ };
+
+ uint64_t samples = size.v.width * size.v.height;
+ uint64_t samplesPerSec = samples * frameRate.v.value;
+
+ // Check if the supplied level meets the MB / bitrate requirements. If
+ // not, update the level with the lowest level meeting the requirements.
+
+ bool found = false;
+ // By default needsUpdate = false in case the supplied level does meet
+ // the requirements.
+ bool needsUpdate = false;
+ for (const LevelLimits &limit : kLimits) {
+ if (samples <= limit.samples && samplesPerSec <= limit.samplesPerSec &&
+ bitrate.v.value <= limit.bitrate) {
+ // This is the lowest level that meets the requirements, and if
+ // we haven't seen the supplied level yet, that means we don't
+ // need the update.
+ if (needsUpdate) {
+ ALOGD("Given level %x does not cover current configuration: "
+ "adjusting to %x", me.v.level, limit.level);
+ me.set().level = limit.level;
+ }
+ found = true;
+ break;
+ }
+ if (me.v.level == limit.level) {
+ // We break out of the loop when the lowest feasible level is
+ // found. The fact that we're here means that our level doesn't
+ // meet the requirement and needs to be updated.
+ needsUpdate = true;
+ }
+ }
+ if (!found) {
+ // We set to the highest supported level.
+ me.set().level = LEVEL_HEVC_MAIN_5_2;
+ }
+ return C2R::Ok();
+ }
+
+ UWORD32 getProfile_l() const {
+ switch (mProfileLevel->profile) {
+ case PROFILE_HEVC_MAIN: [[fallthrough]];
+ case PROFILE_HEVC_MAIN_STILL: return 1;
+ default:
+ ALOGD("Unrecognized profile: %x", mProfileLevel->profile);
+ return 1;
+ }
+ }
+
+ UWORD32 getLevel_l() const {
+ struct Level {
+ C2Config::level_t c2Level;
+ UWORD32 hevcLevel;
+ };
+ constexpr Level levels[] = {
+ { LEVEL_HEVC_MAIN_1, 30 },
+ { LEVEL_HEVC_MAIN_2, 60 },
+ { LEVEL_HEVC_MAIN_2_1, 63 },
+ { LEVEL_HEVC_MAIN_3, 90 },
+ { LEVEL_HEVC_MAIN_3_1, 93 },
+ { LEVEL_HEVC_MAIN_4, 120 },
+ { LEVEL_HEVC_MAIN_4_1, 123 },
+ { LEVEL_HEVC_MAIN_5, 150 },
+ { LEVEL_HEVC_MAIN_5_1, 153 },
+ { LEVEL_HEVC_MAIN_5_2, 156 },
+ { LEVEL_HEVC_MAIN_6, 180 },
+ { LEVEL_HEVC_MAIN_6_1, 183 },
+ { LEVEL_HEVC_MAIN_6_2, 186 },
+ };
+ for (const Level &level : levels) {
+ if (mProfileLevel->level == level.c2Level) {
+ return level.hevcLevel;
+ }
+ }
+ ALOGD("Unrecognized level: %x", mProfileLevel->level);
+ return 156;
+ }
+ uint32_t getSyncFramePeriod_l() const {
+ if (mSyncFramePeriod->value < 0 ||
+ mSyncFramePeriod->value == INT64_MAX) {
+ return 0;
+ }
+ double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
+ return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+ }
+
+ std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
+ return mSize;
+ }
+ std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
+ return mFrameRate;
+ }
+ std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const {
+ return mBitrate;
+ }
+ std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
+ return mRequestSync;
+ }
+
+ private:
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+ std::shared_ptr<C2StreamUsageTuning::input> mUsage;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+ std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+ std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
+ std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
+};
+constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
+
+static size_t GetCPUCoreCount() {
+ long cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+ cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ // _SC_NPROC_ONLN must be defined...
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+ CHECK(cpuCoreCount >= 1);
+ ALOGV("Number of CPU cores: %ld", cpuCoreCount);
+ return (size_t)cpuCoreCount;
+}
+
+C2SoftHevcEnc::C2SoftHevcEnc(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl)
+ : SimpleC2Component(
+ std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+ mIntf(intfImpl),
+ mIvVideoColorFormat(IV_YUV_420P),
+ mHevcEncProfile(1),
+ mHevcEncLevel(30),
+ mStarted(false),
+ mSpsPpsHeaderReceived(false),
+ mSignalledEos(false),
+ mSignalledError(false),
+ mCodecCtx(nullptr) {
+ // If dump is enabled, then create an empty file
+ GENERATE_FILE_NAMES();
+ CREATE_DUMP_FILE(mInFile);
+ CREATE_DUMP_FILE(mOutFile);
+
+ gettimeofday(&mTimeStart, nullptr);
+ gettimeofday(&mTimeEnd, nullptr);
+}
+
+C2SoftHevcEnc::~C2SoftHevcEnc() {
+ releaseEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onInit() {
+ return initEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onStop() {
+ if (!mStarted) {
+ return C2_OK;
+ }
+ return releaseEncoder();
+}
+
+void C2SoftHevcEnc::onReset() {
+ onStop();
+ initEncoder();
+}
+
+void C2SoftHevcEnc::onRelease() {
+ onStop();
+}
+
+c2_status_t C2SoftHevcEnc::onFlush_sm() {
+ return C2_OK;
+}
+
+static void fillEmptyWork(const std::unique_ptr<C2Work>& work) {
+ uint32_t flags = 0;
+ if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+ flags |= C2FrameData::FLAG_END_OF_STREAM;
+ ALOGV("Signalling EOS");
+ }
+ work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->workletsProcessed = 1u;
+}
+
+c2_status_t C2SoftHevcEnc::initEncParams() {
+ mCodecCtx = nullptr;
+ mNumCores = MIN(GetCPUCoreCount(), CODEC_MAX_CORES);
+ memset(&mEncParams, 0, sizeof(ihevce_static_cfg_params_t));
+
+ // default configuration
+ IHEVCE_PLUGIN_STATUS_T err = ihevce_set_def_params(&mEncParams);
+ if (IHEVCE_EOK != err) {
+ ALOGE("HEVC default init failed : 0x%x", err);
+ return C2_CORRUPTED;
+ }
+
+ // update configuration
+ mEncParams.s_src_prms.i4_width = mSize->width;
+ mEncParams.s_src_prms.i4_height = mSize->height;
+ mEncParams.s_src_prms.i4_frm_rate_denom = 1000;
+ mEncParams.s_src_prms.i4_frm_rate_num = mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0] =
+ mBitrate->value;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0] =
+ mBitrate->value << 1;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_codec_level = mHevcEncLevel;
+ mEncParams.s_coding_tools_prms.i4_max_i_open_gop_period = mIDRInterval;
+ mEncParams.s_coding_tools_prms.i4_max_cra_open_gop_period = mIDRInterval;
+ mIvVideoColorFormat = IV_YUV_420P;
+ mEncParams.s_multi_thrd_prms.i4_max_num_cores = mNumCores;
+ mEncParams.s_out_strm_prms.i4_codec_profile = mHevcEncProfile;
+ mEncParams.s_config_prms.i4_rate_control_mode = 2;
+ mEncParams.s_lap_prms.i4_rc_look_ahead_pics = 0;
+
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::releaseEncoder() {
+ mSpsPpsHeaderReceived = false;
+ mSignalledEos = false;
+ mSignalledError = false;
+ mStarted = false;
+
+ if (mCodecCtx) {
+ IHEVCE_PLUGIN_STATUS_T err = ihevce_close(mCodecCtx);
+ if (IHEVCE_EOK != err) return C2_CORRUPTED;
+ mCodecCtx = nullptr;
+ }
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ (void)drainMode;
+ (void)pool;
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::initEncoder() {
+ CHECK(!mCodecCtx);
+ {
+ IntfImpl::Lock lock = mIntf->lock();
+ mSize = mIntf->getSize_l();
+ mBitrate = mIntf->getBitrate_l();
+ mFrameRate = mIntf->getFrameRate_l();
+ mHevcEncProfile = mIntf->getProfile_l();
+ mHevcEncLevel = mIntf->getLevel_l();
+ mIDRInterval = mIntf->getSyncFramePeriod_l();
+ }
+
+ c2_status_t status = initEncParams();
+
+ if (C2_OK != status) {
+ ALOGE("Failed to initialize encoder params : 0x%x", status);
+ mSignalledError = true;
+ return status;
+ }
+
+ IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+ err = ihevce_init(&mEncParams, &mCodecCtx);
+ if (IHEVCE_EOK != err) {
+ ALOGE("HEVC encoder init failed : 0x%x", err);
+ return C2_CORRUPTED;
+ }
+
+ mStarted = true;
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+ const C2GraphicView* const input,
+ uint64_t timestamp) {
+ ihevce_static_cfg_params_t* params = &mEncParams;
+ memset(ps_encode_ip, 0, sizeof(ihevce_inp_buf_t));
+
+ if (!input) {
+ return C2_OK;
+ }
+
+ if (input->width() < mSize->width ||
+ input->height() < mSize->height) {
+ /* Expect width height to be configured */
+ ALOGW("unexpected Capacity Aspect %d(%d) x %d(%d)", input->width(),
+ mSize->width, input->height(), mSize->height);
+ return C2_BAD_VALUE;
+ }
+
+ const C2PlanarLayout& layout = input->layout();
+ uint8_t* yPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_Y]);
+ uint8_t* uPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_U]);
+ uint8_t* vPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_V]);
+ int32_t yStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+ int32_t uStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ int32_t vStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
+
+ uint32_t width = mSize->width;
+ uint32_t height = mSize->height;
+
+ // width and height are always even (as block size is 16x16)
+ CHECK_EQ((width & 1u), 0u);
+ CHECK_EQ((height & 1u), 0u);
+
+ size_t yPlaneSize = width * height;
+
+ switch (layout.type) {
+ case C2PlanarLayout::TYPE_RGB:
+ [[fallthrough]];
+ case C2PlanarLayout::TYPE_RGBA: {
+ MemoryBlock conversionBuffer =
+ mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+ mConversionBuffersInUse.emplace(conversionBuffer.data(),
+ conversionBuffer);
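+ // lay the converted frame out as contiguous I420: full-size Y plane followed by quarter-size U and V planes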
+ yPlane = conversionBuffer.data();
+ uPlane = yPlane + yPlaneSize;
+ vPlane = uPlane + yPlaneSize / 4;
+ yStride = width;
+ uStride = vStride = yStride / 2;
+ ConvertRGBToPlanarYUV(yPlane, yStride, height,
+ conversionBuffer.size(), *input);
+ break;
+ }
+ case C2PlanarLayout::TYPE_YUV: {
+ if (!IsYUV420(*input)) {
+ ALOGE("input is not YUV420");
+ return C2_BAD_VALUE;
+ }
+
+ if (layout.planes[layout.PLANE_Y].colInc == 1 &&
+ layout.planes[layout.PLANE_U].colInc == 1 &&
+ layout.planes[layout.PLANE_V].colInc == 1 &&
+ uStride == vStride && yStride == 2 * vStride) {
+ // I420 compatible - already set up above
+ break;
+ }
+
+ // copy to I420
+ yStride = width;
+ uStride = vStride = yStride / 2;
+ MemoryBlock conversionBuffer =
+ mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+ mConversionBuffersInUse.emplace(conversionBuffer.data(),
+ conversionBuffer);
+ MediaImage2 img =
+ CreateYUV420PlanarMediaImage2(width, height, yStride, height);
+ status_t err = ImageCopy(conversionBuffer.data(), &img, *input);
+ if (err != OK) {
+ ALOGE("Buffer conversion failed: %d", err);
+ return C2_BAD_VALUE;
+ }
+ yPlane = conversionBuffer.data();
+ uPlane = yPlane + yPlaneSize;
+ vPlane = uPlane + yPlaneSize / 4;
+ break;
+ }
+
+ case C2PlanarLayout::TYPE_YUVA:
+ ALOGE("YUVA plane type is not supported");
+ return C2_BAD_VALUE;
+
+ default:
+ ALOGE("Unrecognized plane type: %d", layout.type);
+ return C2_BAD_VALUE;
+ }
+
+ switch (mIvVideoColorFormat) {
+ case IV_YUV_420P: {
+ // input buffer is supposed to be const but Ittiam API wants bare
+ // pointer.
+ ps_encode_ip->apv_inp_planes[0] = yPlane;
+ ps_encode_ip->apv_inp_planes[1] = uPlane;
+ ps_encode_ip->apv_inp_planes[2] = vPlane;
+
+ ps_encode_ip->ai4_inp_strd[0] = yStride;
+ ps_encode_ip->ai4_inp_strd[1] = uStride;
+ ps_encode_ip->ai4_inp_strd[2] = vStride;
+
+ ps_encode_ip->ai4_inp_size[0] = yStride * height;
+ ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+ ps_encode_ip->ai4_inp_size[2] = vStride * height >> 1;
+ break;
+ }
+
+ case IV_YUV_422ILE: {
+ // TODO
+ break;
+ }
+
+ case IV_YUV_420SP_UV:
+ case IV_YUV_420SP_VU:
+ default: {
+ ps_encode_ip->apv_inp_planes[0] = yPlane;
+ ps_encode_ip->apv_inp_planes[1] = uPlane;
+ ps_encode_ip->apv_inp_planes[2] = nullptr;
+
+ ps_encode_ip->ai4_inp_strd[0] = yStride;
+ ps_encode_ip->ai4_inp_strd[1] = uStride;
+ ps_encode_ip->ai4_inp_strd[2] = 0;
+
+ ps_encode_ip->ai4_inp_size[0] = yStride * height;
+ ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+ ps_encode_ip->ai4_inp_size[2] = 0;
+ break;
+ }
+ }
+
+ ps_encode_ip->i4_curr_bitrate =
+ params->s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0];
+ ps_encode_ip->i4_curr_peak_bitrate =
+ params->s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0];
+ ps_encode_ip->i4_curr_rate_factor = params->s_config_prms.i4_rate_factor;
+ ps_encode_ip->u8_pts = timestamp;
+ return C2_OK;
+}
+
+void C2SoftHevcEnc::process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ // Initialize output work
+ work->result = C2_OK;
+ work->workletsProcessed = 1u;
+ work->worklets.front()->output.flags = work->input.flags;
+
+ if (mSignalledError || mSignalledEos) {
+ work->result = C2_BAD_VALUE;
+ ALOGD("Signalled Error / Signalled Eos");
+ return;
+ }
+ c2_status_t status = C2_OK;
+
+ // Initialize encoder if not already initialized
+ if (!mStarted) {
+ status = initEncoder();
+ if (C2_OK != status) {
+ ALOGE("Failed to initialize encoder : 0x%x", status);
+ mSignalledError = true;
+ work->result = status;
+ return;
+ }
+ }
+
+ std::shared_ptr<const C2GraphicView> view;
+ std::shared_ptr<C2Buffer> inputBuffer = nullptr;
+ bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+ if (!work->input.buffers.empty()) {
+ inputBuffer = work->input.buffers[0];
+ view = std::make_shared<const C2GraphicView>(
+ inputBuffer->data().graphicBlocks().front().map().get());
+ if (view->error() != C2_OK) {
+ ALOGE("graphic view map err = %d", view->error());
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ }
+
+ IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+
+ fillEmptyWork(work);
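+ // emit the stream header once, attached to the first work item as codec config data via configUpdate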
+ if (!mSpsPpsHeaderReceived) {
+ ihevce_out_buf_t s_header_op{};
+ err = ihevce_encode_header(mCodecCtx, &s_header_op);
+ if (err == IHEVCE_EOK && s_header_op.i4_bytes_generated) {
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(
+ s_header_op.i4_bytes_generated, 0u);
+ if (!csd) {
+ ALOGE("CSD allocation failed");
+ mSignalledError = true;
+ work->result = C2_NO_MEMORY;
+ return;
+ }
+ memcpy(csd->m.value, s_header_op.pu1_output_buf,
+ s_header_op.i4_bytes_generated);
+ DUMP_TO_FILE(mOutFile, csd->m.value, csd->flexCount());
+ work->worklets.front()->output.configUpdate.push_back(
+ std::move(csd));
+ mSpsPpsHeaderReceived = true;
+ }
+ if (!inputBuffer) {
+ return;
+ }
+ }
+ ihevce_inp_buf_t s_encode_ip{};
+ ihevce_out_buf_t s_encode_op{};
+ uint64_t timestamp = work->input.ordinal.timestamp.peekull();
+
+ status = setEncodeArgs(&s_encode_ip, view.get(), timestamp);
+ if (C2_OK != status) {
+ mSignalledError = true;
+ ALOGE("setEncodeArgs failed : 0x%x", status);
+ work->result = status;
+ return;
+ }
+
+ uint64_t timeDelay = 0;
+ uint64_t timeTaken = 0;
+ GETTIME(&mTimeStart, nullptr);
+ TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+
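+ // only pass an input descriptor when there is an actual input buffer (EOS-only work carries none)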
+ ihevce_inp_buf_t* ps_encode_ip = (inputBuffer) ? &s_encode_ip : nullptr;
+
+ err = ihevce_encode(mCodecCtx, ps_encode_ip, &s_encode_op);
+ if (IHEVCE_EOK != err) {
+ ALOGE("Encode Frame failed : 0x%x", err);
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+
+ GETTIME(&mTimeEnd, nullptr);
+ /* Compute time taken for encode() */
+ TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+
+ ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", (int)timeTaken,
+ (int)timeDelay, s_encode_op.i4_bytes_generated);
+
+ if (s_encode_op.i4_bytes_generated) {
+ std::shared_ptr<C2LinearBlock> block;
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ status = pool->fetchLinearBlock(s_encode_op.i4_bytes_generated, usage, &block);
+ if (C2_OK != status) {
+ ALOGE("fetchLinearBlock for Output failed with status 0x%x", status);
+ work->result = C2_NO_MEMORY;
+ mSignalledError = true;
+ return;
+ }
+ C2WriteView wView = block->map().get();
+ if (C2_OK != wView.error()) {
+ ALOGE("write view map failed with status 0x%x", wView.error());
+ work->result = wView.error();
+ mSignalledError = true;
+ return;
+ }
+ memcpy(wView.data(), s_encode_op.pu1_output_buf,
+ s_encode_op.i4_bytes_generated);
+
+ std::shared_ptr<C2Buffer> buffer =
+ createLinearBuffer(block, 0, s_encode_op.i4_bytes_generated);
+
+ DUMP_TO_FILE(mOutFile, s_encode_op.pu1_output_buf,
+ s_encode_op.i4_bytes_generated);
+
+ work->worklets.front()->output.ordinal.timestamp = s_encode_op.u8_pts;
+ if (s_encode_op.i4_is_key_frame) {
+ ALOGV("IDR frame produced");
+ buffer->setInfo(
+ std::make_shared<C2StreamPictureTypeMaskInfo::output>(
+ 0u /* stream id */, C2Config::SYNC_FRAME));
+ }
+ work->worklets.front()->output.buffers.push_back(buffer);
+ }
+ if (eos) {
+ mSignalledEos = true;
+ }
+}
+
+class C2SoftHevcEncFactory : public C2ComponentFactory {
+ public:
+ C2SoftHevcEncFactory()
+ : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+ GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(
+ new C2SoftHevcEnc(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(C2ComponentInterface*)> deleter) override {
+ *interface = std::shared_ptr<C2ComponentInterface>(
+ new SimpleInterface<C2SoftHevcEnc::IntfImpl>(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual ~C2SoftHevcEncFactory() override = default;
+
+ private:
+ std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
+ return new ::android::C2SoftHevcEncFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
+ delete factory;
+}
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
new file mode 100644
index 0000000..c22fea2
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_HEVC_ENC_H_
+#define ANDROID_C2_SOFT_HEVC_ENC_H_
+
+#include <map>
+#include <utils/Vector.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <SimpleC2Component.h>
+
+#include "ihevc_typedefs.h"
+
+namespace android {
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/** Get time */
+#define GETTIME(a, b) gettimeofday(a, b);
+
+/** Compute difference between start and end */
+#define TIME_DIFF(start, end, diff) \
+ diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
+ ((end).tv_usec - (start).tv_usec);
+
+#define CODEC_MAX_CORES 4
+
+struct C2SoftHevcEnc : public SimpleC2Component {
+ class IntfImpl;
+
+ C2SoftHevcEnc(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl);
+
+ // From SimpleC2Component
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+ c2_status_t drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+
+ protected:
+ virtual ~C2SoftHevcEnc();
+
+ private:
+ std::shared_ptr<IntfImpl> mIntf;
+ ihevce_static_cfg_params_t mEncParams;
+ size_t mNumCores;
+ UWORD32 mIDRInterval;
+ IV_COLOR_FORMAT_T mIvVideoColorFormat;
+ UWORD32 mHevcEncProfile;
+ UWORD32 mHevcEncLevel;
+ bool mStarted;
+ bool mSpsPpsHeaderReceived;
+ bool mSignalledEos;
+ bool mSignalledError;
+ void* mCodecCtx;
+ MemoryBlockPool mConversionBuffers;
+ std::map<void*, MemoryBlock> mConversionBuffersInUse;
+ // configurations used by component in process
+ // (TODO: keep this in intf but make them internal only)
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+ std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+
+#ifdef FILE_DUMP_ENABLE
+ char mInFile[200];
+ char mOutFile[200];
+#endif /* FILE_DUMP_ENABLE */
+
+ // profile
+ struct timeval mTimeStart;
+ struct timeval mTimeEnd;
+
+ c2_status_t initEncParams();
+ c2_status_t initEncoder();
+ c2_status_t releaseEncoder();
+ c2_status_t setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+ const C2GraphicView* const input,
+ uint64_t timestamp);
+ C2_DO_NOT_COPY(C2SoftHevcEnc);
+};
+
+#ifdef FILE_DUMP_ENABLE
+
+#define INPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define INPUT_DUMP_EXT "yuv"
+#define OUTPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define OUTPUT_DUMP_EXT "h265"
+#define GENERATE_FILE_NAMES() \
+{ \
+ GETTIME(&mTimeStart, NULL); \
+ strcpy(mInFile, ""); \
+ ALOGD("GENERATE_FILE_NAMES"); \
+ sprintf(mInFile, "%s_%ld.%ld.%s", INPUT_DUMP_PATH, mTimeStart.tv_sec, \
+ mTimeStart.tv_usec, INPUT_DUMP_EXT); \
+ strcpy(mOutFile, ""); \
+ sprintf(mOutFile, "%s_%ld.%ld.%s", OUTPUT_DUMP_PATH, \
+ mTimeStart.tv_sec, mTimeStart.tv_usec, OUTPUT_DUMP_EXT); \
+}
+
+#define CREATE_DUMP_FILE(m_filename) \
+{ \
+ FILE* fp = fopen(m_filename, "wb"); \
+ if (fp != NULL) { \
+ ALOGD("Opened file %s", m_filename); \
+ fclose(fp); \
+ } else { \
+ ALOGD("Could not open file %s", m_filename); \
+ } \
+}
+#define DUMP_TO_FILE(m_filename, m_buf, m_size) \
+{ \
+ FILE* fp = fopen(m_filename, "ab"); \
+ if (fp != NULL && m_buf != NULL) { \
+ int i; \
+ ALOGD("Dump to file!"); \
+ i = fwrite(m_buf, 1, m_size, fp); \
+ if (i != (int)m_size) { \
+ ALOGD("Error in fwrite, returned %d", i); \
+ perror("Error in write to file"); \
+ } \
+ fclose(fp); \
+ } else { \
+ ALOGD("Could not write to file %s", m_filename); \
+ if (fp != NULL) fclose(fp); \
+ } \
+}
+#else /* FILE_DUMP_ENABLE */
+#define INPUT_DUMP_PATH
+#define INPUT_DUMP_EXT
+#define OUTPUT_DUMP_PATH
+#define OUTPUT_DUMP_EXT
+#define GENERATE_FILE_NAMES()
+#define CREATE_DUMP_FILE(m_filename)
+#define DUMP_TO_FILE(m_filename, m_buf, m_size)
+#endif /* FILE_DUMP_ENABLE */
+
+} // namespace android
+
+#endif // ANDROID_C2_SOFT_HEVC_ENC_H_
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index c8b8397..9db6d8f 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -40,29 +40,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MPEG))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({8000, 11025, 12000, 16000,
22050, 24000, 32000, 44100, 48000})})
@@ -70,15 +70,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 320000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -90,13 +90,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -555,4 +555,3 @@
ALOGV("in %s", __func__);
delete factory;
}
-
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index da32ec0..290677e 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -180,7 +180,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -892,7 +892,7 @@
ALOGI("Configuring decoder: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
@@ -931,7 +931,7 @@
ALOGI("Configuring decoder out: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 0b89cff..3d4a733 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -186,7 +186,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -574,7 +574,7 @@
PVSetPostProcType(mDecHandle, 0);
if (handleResChange(work)) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
@@ -646,7 +646,7 @@
return;
} else if (resChange) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index c8796f3..89fa59d 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -52,26 +52,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef MPEG4
MEDIA_MIMETYPE_VIDEO_MPEG4
#else
@@ -80,14 +80,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 176, 144))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 176, 144))
.withFields({
#ifdef MPEG4
C2F(mSize, width).inRange(16, 176, 16),
@@ -101,7 +101,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 17.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -110,8 +110,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -217,14 +217,14 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -446,8 +446,8 @@
}
++mNumInputFrames;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(outputSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(outputSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -595,7 +595,7 @@
work->worklets.front()->output.ordinal.timestamp = inputTimeStamp;
if (hintTrack.CodeType == 0) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 3ce1fd6..680712e 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -40,44 +40,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_OPUS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).equalTo(48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 6000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6000))
.withFields({C2F(mBitrate, value).inRange(6000, 510000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -89,13 +89,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
index 68fcea1..a0b2443 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.cpp
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -42,29 +42,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_OPUS))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 48000))
.withFields({C2F(mSampleRate, value).oneOf({
8000, 12000, 16000, 24000, 48000})})
@@ -72,15 +72,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 128000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 128000))
.withFields({C2F(mBitrate, value).inRange(500, 512000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -104,13 +104,13 @@
uint32_t getComplexity() const { return mComplexity->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -423,8 +423,8 @@
int headerLen = WriteOpusHeaders(opusHeader, mSampleRate, header,
sizeof(header), mCodecDelay, mSeekPreRoll);
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(headerLen, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(headerLen, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 5c83481..802caa4 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(8000, 192000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(1, 10000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -98,13 +98,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 48825e4..e7393ee 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -45,44 +45,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_VORBIS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).inRange(8000, 96000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(32000, 500000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 9ba2362..3120f7a 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -215,7 +215,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -700,7 +700,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 155a84f..6509a88 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -633,7 +633,7 @@
std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block);
if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
work->worklets.front()->output.ordinal = work->input.ordinal;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 87ed1a9..5591a49 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -229,26 +229,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef VP9
MEDIA_MIMETYPE_VIDEO_VP9
#else
@@ -257,14 +257,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2048, 2),
C2F(mSize, height).inRange(2, 2048, 2),
@@ -285,7 +285,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -312,8 +312,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
.withSetter(BitrateSetter)
.build());
@@ -416,18 +416,18 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamTemporalLayeringTuning::output> mLayering;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index 1c0e70b..ed730c3 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -66,29 +66,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -97,15 +97,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -116,10 +116,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -203,7 +203,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
uint32_t getBitrate() const { return mBitrate->value; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
@@ -218,13 +218,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
@@ -1067,6 +1067,8 @@
int i_loud_norm;
int i_target_loudness;
unsigned int i_sbr_mode;
+ uint32_t ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
/* Sampling Frequency */
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
@@ -1115,6 +1117,24 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+ if (pv_alloc_ptr == NULL) {
+ ALOGE(" Cannot create requested memory %d", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, nullptr);
diff --git a/media/codec2/core/include/C2Buffer.h b/media/codec2/core/include/C2Buffer.h
index c428122..3d3587c 100644
--- a/media/codec2/core/include/C2Buffer.h
+++ b/media/codec2/core/include/C2Buffer.h
@@ -1994,7 +1994,6 @@
GRAPHIC, ///< the buffer contains a single graphic block
GRAPHIC_CHUNKS, ///< the buffer contains one of more graphic blocks
};
- typedef type_t Type; // deprecated
/**
* Gets the type of this buffer (data).
@@ -2042,23 +2041,6 @@
*/
const C2BufferData data() const;
- /**
- * These will still work if used in onDeathNotify.
- */
-#if 0
- inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
- return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
- return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
- return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
- }
-#endif
-
///@name Pre-destroy notification handling
///@{
@@ -2163,8 +2145,6 @@
*/
static std::shared_ptr<C2Buffer> CreateGraphicBuffer(const C2ConstGraphicBlock &block);
-
-
protected:
// no public constructor
explicit C2Buffer(const std::vector<C2ConstLinearBlock> &blocks);
@@ -2173,7 +2153,6 @@
private:
class Impl;
std::shared_ptr<Impl> mImpl;
-// Type _mType;
};
/**
@@ -2200,109 +2179,6 @@
/// @}
-/// \cond INTERNAL
-
-/// \todo These are no longer used
-
-/// \addtogroup linear
-/// @{
-
-/** \deprecated */
-class C2LinearBuffer
- : public C2Buffer, public _C2LinearRangeAspect,
- public std::enable_shared_from_this<C2LinearBuffer> {
-public:
- /** \todo what is this? */
- const C2Handle *handle() const;
-
-protected:
- inline C2LinearBuffer(const C2ConstLinearBlock &block);
-
-private:
- class Impl;
- Impl *mImpl;
-};
-
-class C2ReadCursor;
-
-class C2WriteCursor {
-public:
- uint32_t remaining() const; // remaining data to be read
- void commit(); // commits the current position. discard data before current position
- void reset() const; // resets position to the last committed position
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2ReadCursor slice(uint32_t size) const;
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2WriteCursor reserve(uint32_t size);
- // bool read(T&);
- // bool write(T&);
- C2Fence waitForSpace(uint32_t size);
-};
-
-/// @}
-
-/// \addtogroup graphic
-/// @{
-
-struct C2ColorSpace {
-//public:
- enum Standard {
- BT601,
- BT709,
- BT2020,
- // TODO
- };
-
- enum Range {
- LIMITED,
- FULL,
- // TODO
- };
-
- enum TransferFunction {
- BT709Transfer,
- BT2020Transfer,
- HybridLogGamma2,
- HybridLogGamma4,
- // TODO
- };
-};
-
-/** \deprecated */
-class C2GraphicBuffer : public C2Buffer {
-public:
- // constant attributes
- inline uint32_t width() const { return mWidth; }
- inline uint32_t height() const { return mHeight; }
- inline uint32_t format() const { return mFormat; }
- inline const C2MemoryUsage usage() const { return mUsage; }
-
- // modifiable attributes
-
-
- virtual const C2ColorSpace colorSpace() const = 0;
- // best effort
- virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
- virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
-
- const C2Handle *handle() const;
-
-protected:
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
- C2MemoryUsage mUsage;
-
- class Impl;
- Impl *mImpl;
-};
-
-/// @}
-
-/// \endcond
-
/// @}
#endif // C2BUFFER_H_
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index fb6edb6..9545c45 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -240,19 +240,6 @@
kParamIndexTimestampGapAdjustment, // input-surface, struct
kParamIndexSurfaceAllocator, // u32
-
- // deprecated indices due to renaming
- kParamIndexAacStreamFormat = kParamIndexAacPackaging,
- kParamIndexCsd = kParamIndexInitData,
- kParamIndexMaxVideoSizeHint = kParamIndexMaxPictureSize,
- kParamIndexMime = kParamIndexMediaType,
- kParamIndexRequestedInfos = kParamIndexSubscribedParamIndices,
-
-
- // deprecated indices due to removal
- kParamIndexSupportedParams = 0xDEAD0000,
- kParamIndexReadOnlyParams,
- kParamIndexTemporal,
};
}
@@ -337,14 +324,8 @@
// read-only
typedef C2GlobalParam<C2Setting, C2SimpleValueStruct<C2Component::domain_t>, kParamIndexDomain>
C2ComponentDomainSetting;
-typedef C2ComponentDomainSetting C2ComponentDomainInfo; // deprecated
-typedef C2Component::domain_t C2DomainKind; // deprecated
constexpr char C2_PARAMKEY_COMPONENT_DOMAIN[] = "component.domain";
-constexpr C2Component::domain_t C2DomainAudio = C2Component::DOMAIN_AUDIO; // deprecated
-constexpr C2Component::domain_t C2DomainOther = C2Component::DOMAIN_OTHER; // deprecate
-constexpr C2Component::domain_t C2DomainVideo = C2Component::DOMAIN_VIDEO; // deprecate
-
/**
* Component attributes.
*
@@ -359,9 +340,6 @@
C2ComponentAttributesSetting;
constexpr char C2_PARAMKEY_COMPONENT_ATTRIBUTES[] = "component.attributes";
-// deprecated
-typedef C2ComponentAttributesSetting C2ComponentTemporalInfo;
-
/**
* Time stretching.
*
@@ -707,7 +685,6 @@
typedef C2StreamParam<C2Info, C2ProfileLevelStruct, kParamIndexProfileLevel>
C2StreamProfileLevelInfo;
constexpr char C2_PARAMKEY_PROFILE_LEVEL[] = "coded.pl";
-#define C2_PARAMKEY_STREAM_PROFILE_LEVEL C2_PARAMKEY_PROFILE_LEVEL
/**
* Codec-specific initialization data.
@@ -719,9 +696,7 @@
* TODO: define for other codecs.
*/
typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexInitData> C2StreamInitDataInfo;
-typedef C2StreamInitDataInfo C2StreamCsdInfo; // deprecated
constexpr char C2_PARAMKEY_INIT_DATA[] = "coded.init-data";
-#define C2_PARAMKEY_STREAM_INIT_DATA C2_PARAMKEY_INIT_DATA
/**
* Supplemental Data.
@@ -781,11 +756,8 @@
* port media type.
*/
typedef C2PortParam<C2Setting, C2StringValue, kParamIndexMediaType> C2PortMediaTypeSetting;
-typedef C2PortMediaTypeSetting C2PortMimeConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_MEDIA_TYPE[] = "input.media-type";
constexpr char C2_PARAMKEY_OUTPUT_MEDIA_TYPE[] = "output.media-type";
-#define C2_NAME_INPUT_PORT_MIME_SETTING C2_PARAMKEY_INPUT_MEDIA_TYPE
-#define C2_NAME_OUTPUT_PORT_MIME_SETTING C2_PARAMKEY_OUTPUT_MEDIA_TYPE
typedef C2StreamParam<C2Setting, C2StringValue, kParamIndexMediaType> C2StreamMediaTypeSetting;
@@ -808,24 +780,20 @@
*/
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest> C2PortRequestedDelayTuning;
-typedef C2PortRequestedDelayTuning C2PortRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY_REQUEST[] = "input.delay.requested";
constexpr char C2_PARAMKEY_OUTPUT_DELAY_REQUEST[] = "output.delay.requested";
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest>
C2RequestedPipelineDelayTuning;
-typedef C2RequestedPipelineDelayTuning C2ComponentRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY_REQUEST[] = "pipeline-delay.requested";
// read-only
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2PortActualDelayTuning;
-typedef C2PortActualDelayTuning C2PortLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY[] = "input.delay.actual";
constexpr char C2_PARAMKEY_OUTPUT_DELAY[] = "output.delay.actual";
// read-only
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2ActualPipelineDelayTuning;
-typedef C2ActualPipelineDelayTuning C2ComponentLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY[] = "algo.delay.actual";
/**
@@ -875,7 +843,6 @@
*/
// private
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountTuning;
-typedef C2PortStreamCountTuning C2PortStreamCountConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_COUNT[] = "input.stream-count";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_COUNT[] = "output.stream-count";
@@ -985,20 +952,9 @@
typedef C2StreamParam<C2Setting, C2SimpleValueStruct<C2EasyEnum<C2BufferData::type_t>>,
kParamIndexBufferType>
C2StreamBufferTypeSetting;
-
-constexpr C2BufferData::type_t C2FormatAudio = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatCompressed = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatVideo = C2BufferData::GRAPHIC; // deprecated
-typedef C2BufferData::type_t C2FormatKind; // deprecated
-
-typedef C2StreamBufferTypeSetting C2StreamFormatConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE[] = "input.buffers.type";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE[] = "output.buffers.type";
-// deprecated
-#define C2_NAME_INPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE
-#define C2_NAME_OUTPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE
-
/**
* Memory usage.
*
@@ -1007,8 +963,6 @@
typedef C2StreamParam<C2Tuning, C2Uint64Value, kParamIndexUsage> C2StreamUsageTuning;
constexpr char C2_PARAMKEY_INPUT_STREAM_USAGE[] = "input.buffers.usage";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_USAGE[] = "output.buffers.usage";
-// deprecated
-#define C2_NAME_INPUT_STREAM_USAGE_SETTING C2_PARAMKEY_INPUT_STREAM_USAGE
/**
* Picture (video or image frame) size.
@@ -1068,8 +1022,6 @@
constexpr char C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE[] = "input.buffers.max-size";
constexpr char C2_PARAMKEY_OUTPUT_MAX_BUFFER_SIZE[] = "output.buffers.max-size";
-#define C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE
-
/* ---------------------------------------- misc. state ---------------------------------------- */
/**
@@ -1170,9 +1122,7 @@
* Bitrate
*/
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexBitrate> C2StreamBitrateInfo;
-typedef C2StreamBitrateInfo C2BitrateTuning; // deprecated
constexpr char C2_PARAMKEY_BITRATE[] = "coded.bitrate";
-#define C2_NAME_STREAM_BITRATE_SETTING C2_PARAMKEY_BITRATE
/**
* Bitrate mode.
@@ -1261,15 +1211,8 @@
*
* This is used for the output of the video decoder, and the input of the video encoder.
*/
-typedef C2PictureSizeStruct C2VideoSizeStruct; // deprecated
-
typedef C2StreamParam<C2Info, C2PictureSizeStruct, kParamIndexPictureSize> C2StreamPictureSizeInfo;
constexpr char C2_PARAMKEY_PICTURE_SIZE[] = "raw.size";
-#define C2_PARAMKEY_STREAM_PICTURE_SIZE C2_PARAMKEY_PICTURE_SIZE
-#define C2_NAME_STREAM_VIDEO_SIZE_INFO C2_PARAMKEY_PICTURE_SIZE
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamInfo; // deprecated
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamTuning; // deprecated
-#define C2_NAME_STREAM_VIDEO_SIZE_SETTING C2_PARAMKEY_PICTURE_SIZE
/**
* Crop rectangle.
@@ -1344,12 +1287,10 @@
kParamIndexScalingMethod>
C2StreamScalingMethodTuning;
constexpr char C2_PARAMKEY_SCALING_MODE[] = "raw.scaling-method";
-#define C2_PARAMKEY_STREAM_SCALING_MODE C2_PARAMKEY_SCALING_MODE
typedef C2StreamParam<C2Tuning, C2PictureSizeStruct, kParamIndexScaledPictureSize>
C2StreamScaledPictureSizeTuning;
constexpr char C2_PARAMKEY_SCALED_PICTURE_SIZE[] = "raw.scaled-size";
-#define C2_PARAMKEY_STREAM_SCALED_PICTURE_SIZE C2_PARAMKEY_SCALED_PICTURE_SIZE
typedef C2StreamParam<C2Tuning, C2RectStruct, kParamIndexScaledCropRect>
C2StreamScaledCropRectTuning;
@@ -1504,15 +1445,8 @@
MATRIX_BT2020_CONSTANT, ///< Rec.ITU-R BT.2020 constant luminance
MATRIX_VENDOR_START = 0x80, ///< vendor-specific matrix coefficient values start here
MATRIX_OTHER = 0xff, ///< max value, reserved for undefined values
-
- MATRIX_SMPTE240M = MATRIX_240M, // deprecated
- MATRIX_BT2020CONSTANT = MATRIX_BT2020_CONSTANT, // deprecated
)
-constexpr C2Color::matrix_t MATRIX_BT470_6M = MATRIX_FCC47_73_682; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT709_5 = MATRIX_BT709; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT601_6 = MATRIX_BT601; // deprecated
-
struct C2ColorAspectsStruct {
C2Color::range_t range;
C2Color::primaries_t primaries;
@@ -1635,7 +1569,6 @@
*/
typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2StreamFrameRateInfo;
constexpr char C2_PARAMKEY_FRAME_RATE[] = "coded.frame-rate";
-#define C2_NAME_STREAM_FRAME_RATE_SETTING C2_PARAMKEY_FRAME_RATE
typedef C2PortParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2PortFrameRateInfo;
constexpr char C2_PARAMKEY_INPUT_FRAME_RATE[] = "input.frame-rate";
@@ -1668,9 +1601,6 @@
B_FRAME = (1 << 3), ///< backward predicted (out-of-order) frame
)
-typedef C2Config::picture_type_t C2PictureTypeMask; // deprecated
-constexpr C2Config::picture_type_t C2PictureTypeKeyFrame = C2Config::SYNC_FRAME; // deprecated
-
/**
* Allowed picture types.
*/
@@ -1750,8 +1680,6 @@
typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexSyncFrameInterval>
C2StreamSyncFrameIntervalTuning;
constexpr char C2_PARAMKEY_SYNC_FRAME_INTERVAL[] = "coding.sync-frame-interval";
-// deprecated
-#define C2_PARAMKEY_SYNC_FRAME_PERIOD C2_PARAMKEY_SYNC_FRAME_INTERVAL
/**
* Temporal layering
@@ -1885,8 +1813,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexSampleRate> C2StreamSampleRateInfo;
constexpr char C2_PARAMKEY_SAMPLE_RATE[] = "raw.sample-rate";
constexpr char C2_PARAMKEY_CODED_SAMPLE_RATE[] = "coded.sample-rate";
-// deprecated
-#define C2_NAME_STREAM_SAMPLE_RATE_SETTING C2_PARAMKEY_SAMPLE_RATE
/**
* Channel count.
@@ -1894,8 +1820,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexChannelCount> C2StreamChannelCountInfo;
constexpr char C2_PARAMKEY_CHANNEL_COUNT[] = "raw.channel-count";
constexpr char C2_PARAMKEY_CODED_CHANNEL_COUNT[] = "coded.channel-count";
-// deprecated
-#define C2_NAME_STREAM_CHANNEL_COUNT_SETTING C2_PARAMKEY_CHANNEL_COUNT
/**
* Max channel count. Used to limit the number of coded or decoded channels.
@@ -2005,16 +1929,10 @@
AAC_PACKAGING_ADTS
)
-typedef C2Config::aac_packaging_t C2AacStreamFormatKind; // deprecated
-// deprecated
-constexpr C2Config::aac_packaging_t C2AacStreamFormatRaw = C2Config::AAC_PACKAGING_RAW;
-constexpr C2Config::aac_packaging_t C2AacStreamFormatAdts = C2Config::AAC_PACKAGING_ADTS;
-
typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2EasyEnum<C2Config::aac_packaging_t>>,
kParamIndexAacPackaging> C2StreamAacPackagingInfo;
typedef C2StreamAacPackagingInfo C2StreamAacFormatInfo;
constexpr char C2_PARAMKEY_AAC_PACKAGING[] = "coded.aac-packaging";
-#define C2_NAME_STREAM_AAC_FORMAT_SETTING C2_PARAMKEY_AAC_PACKAGING
/* ================================ PLATFORM-DEFINED PARAMETERS ================================ */
@@ -2134,7 +2052,6 @@
typedef C2GlobalParam<C2Tuning, C2EasyBoolValue, kParamIndexInputSurfaceEos>
C2InputSurfaceEosTuning;
constexpr char C2_PARAMKEY_INPUT_SURFACE_EOS[] = "input-surface.eos";
-#define C2_NAME_INPUT_SURFACE_EOS_TUNING C2_PARAMKEY_INPUT_SURFACE_EOS
/**
* Start/suspend/resume/stop controls and timestamps for input surface.
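The C2Config.h hunks above retire the deprecated aliases (C2StreamCsdInfo, C2VideoSizeStreamInfo/Tuning, the C2_NAME_*/C2_PARAMKEY_STREAM_* macros and the C2Domain*/C2Format* constants) in favour of the canonical typedefs and C2_PARAMKEY_* keys. A minimal sketch, not part of the patch, of a call site written against the surviving names (the helper function and its error handling are illustrative):

#include <C2Component.h>
#include <C2Config.h>
#include <memory>
#include <vector>

// Query the decoder's output picture size through the canonical
// C2StreamPictureSizeInfo (formerly also reachable as C2VideoSizeStreamInfo).
static c2_status_t queryOutputPictureSize(
        const std::shared_ptr<C2ComponentInterface> &intf,
        uint32_t *width, uint32_t *height) {
    C2StreamPictureSizeInfo::output size(0u, 0, 0);
    std::vector<std::unique_ptr<C2Param>> heapParams;  // unused; no heap indices requested
    c2_status_t res = intf->query_vb({ &size }, {}, C2_DONT_BLOCK, &heapParams);
    if (res == C2_OK) {
        *width = size.width;
        *height = size.height;
    }
    return res;
}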
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index efc5c89..d264bf3 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -1012,15 +1012,6 @@
_mNamedValues(_NamedValuesGetter<B>::getNamedValues()),
_mFieldId(offset) {}
-/*
- template<typename T, typename B=typename std::remove_extent<T>::type>
- inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
- : _mType(this->GetType((B*)nullptr)),
- _mExtent(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mFieldId(offset) {}
-*/
-
/// \deprecated
template<typename T, typename S, class B=typename std::remove_extent<T>::type>
inline C2FieldDescriptor(S*, T S::* field, const char *name)
diff --git a/media/codec2/hidl/1.0/utils/InputSurface.cpp b/media/codec2/hidl/1.0/utils/InputSurface.cpp
index 2cbe64b..85c44c3 100644
--- a/media/codec2/hidl/1.0/utils/InputSurface.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurface.cpp
@@ -45,7 +45,7 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mEos, C2_NAME_INPUT_SURFACE_EOS_TUNING)
+ DefineParam(mEos, C2_PARAMKEY_INPUT_SURFACE_EOS)
.withDefault(new C2InputSurfaceEosTuning(false))
.withFields({C2F(mEos, value).oneOf({true, false})})
.withSetter(EosSetter)
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index 1024f50..c9932ef 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -124,7 +124,7 @@
}
// TODO: read settings properly from the interface
- C2VideoSizeStreamTuning::input inputSize;
+ C2StreamPictureSizeInfo::input inputSize;
C2StreamUsageTuning::input usage;
c2_status_t c2Status = queryFromSink({ &inputSize, &usage },
{},
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 31da111..1f36270 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -93,14 +93,14 @@
std::vector<std::unique_ptr<C2SettingResult>> failures;
for (size_t i = 0; i < updates.size(); ++i) {
C2Param* param = updates[i].get();
- if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
+ if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
csd = true;
} else if ((param->index() ==
C2StreamSampleRateInfo::output::PARAM_TYPE) ||
(param->index() ==
C2StreamChannelCountInfo::output::PARAM_TYPE) ||
(param->index() ==
- C2VideoSizeStreamInfo::output::PARAM_TYPE)) {
+ C2StreamPictureSizeInfo::output::PARAM_TYPE)) {
configParam.push_back(param);
}
}
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
index 95d1b72..7db41c0 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
@@ -229,7 +229,7 @@
// Set Default config param.
bool Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
std::vector<std::unique_ptr<C2SettingResult>> failures;
- C2VideoSizeStreamTuning::input inputSize(0u, nWidth, nHeight);
+ C2StreamPictureSizeInfo::input inputSize(0u, nWidth, nHeight);
std::vector<C2Param*> configParam{&inputSize};
c2_status_t status =
mComponent->config(configParam, C2_DONT_BLOCK, &failures);
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index ff2419d..7a444a3 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -186,7 +186,7 @@
* MediaCodec behavior.
*/
virtual status_t registerCsd(
- const C2StreamCsdInfo::output * /* csd */,
+ const C2StreamInitDataInfo::output * /* csd */,
size_t * /* index */,
sp<MediaCodecBuffer> * /* clientBuffer */) = 0;
@@ -1187,7 +1187,7 @@
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> c2Buffer;
@@ -1286,7 +1286,7 @@
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> newBuffer = new LocalLinearBuffer(
@@ -1592,6 +1592,7 @@
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
mInputMetEos(false) {
+ mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
buffers->reset(new DummyInputBuffers(""));
}
@@ -2153,7 +2154,7 @@
1 << C2PlatformAllocatorStore::BUFFERQUEUE);
if (inputFormat != nullptr) {
- bool graphic = (iStreamFormat.value == C2FormatVideo);
+ bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
std::shared_ptr<C2BlockPool> pool;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -2269,12 +2270,16 @@
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
+ if (outputSurface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
outputGeneration = output->generation;
}
- bool graphic = (oStreamFormat.value == C2FormatVideo);
+ bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
C2BlockPool::local_id_t outputPoolId_;
{
@@ -2447,7 +2452,7 @@
return OK;
}
- C2StreamFormatConfig::output oStreamFormat(0u);
+ C2StreamBufferTypeSetting::output oStreamFormat(0u);
c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
if (err != C2_OK) {
return UNKNOWN_ERROR;
@@ -2638,6 +2643,11 @@
mReorderStash.lock()->setDepth(reorderDepth.value);
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
+ Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
+ if (output->surface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
} else {
ALOGD("[%s] onWorkDone: failed to read reorder depth", mName);
}
@@ -2734,7 +2744,7 @@
// TODO: properly translate these to metadata
switch (info->coreIndex().coreIndex()) {
case C2StreamPictureTypeMaskInfo::CORE_INDEX:
- if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2PictureTypeKeyFrame) {
+ if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2Config::SYNC_FRAME) {
flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
}
break;
@@ -2813,7 +2823,6 @@
sp<IGraphicBufferProducer> producer;
if (newSurface) {
newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- newSurface->setMaxDequeuedBufferCount(mNumOutputSlots + kRenderingDepth);
producer = newSurface->getIGraphicBufferProducer();
producer->setGenerationNumber(generation);
} else {
@@ -2841,6 +2850,7 @@
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ newSurface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
output->surface = newSurface;
output->generation = generation;
}
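The surface-related hunks above stop fixing the dequeue budget at construction time: OutputSurface now carries maxDequeueBuffers, seeded with kSmoothnessFactor + kRenderingDepth and re-applied through setMaxDequeuedBufferCount() whenever the component reports a new reorder depth. The value itself is just a sum of three terms; a sketch using the names from this patch (the free function is illustrative):

// How many output buffers may be dequeued from the Surface at once:
//   numOutputSlots  - buffers the client may hold on to
//   reorderDepth    - frames the codec may withhold for display reordering
//   renderingDepth  - frames already queued for composition (kRenderingDepth)
static int computeMaxDequeueBuffers(int numOutputSlots, int reorderDepth, int renderingDepth) {
    return numOutputSlots + reorderDepth + renderingDepth;
}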
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 9ce886a..1ea29b4 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -250,6 +250,7 @@
struct OutputSurface {
sp<Surface> surface;
uint32_t generation;
+ int maxDequeueBuffers;
};
Mutexed<OutputSurface> mOutputSurface;
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 13b63c9..0fd5731 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -224,6 +224,7 @@
mInitCheck = BAD_VALUE;
return;
}
+ memset(mediaImage, 0, sizeof(*mediaImage));
mAllocatedDepth = layout.planes[0].allocatedDepth;
uint32_t bitDepth = layout.planes[0].bitDepth;
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 0a6a717..6da131f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -101,7 +101,7 @@
{ C2Color::MATRIX_BT709, ColorAspects::MatrixBT709_5 },
{ C2Color::MATRIX_FCC47_73_682, ColorAspects::MatrixBT470_6M },
{ C2Color::MATRIX_BT601, ColorAspects::MatrixBT601_6 },
- { C2Color::MATRIX_SMPTE240M, ColorAspects::MatrixSMPTE240M },
+ { C2Color::MATRIX_240M, ColorAspects::MatrixSMPTE240M },
{ C2Color::MATRIX_BT2020, ColorAspects::MatrixBT2020 },
{ C2Color::MATRIX_BT2020_CONSTANT, ColorAspects::MatrixBT2020Constant },
{ C2Color::MATRIX_OTHER, ColorAspects::MatrixOther },
@@ -855,19 +855,19 @@
switch (primaries) {
case C2Color::PRIMARIES_BT601_525:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_525;
break;
case C2Color::PRIMARIES_BT601_625:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_625;
break;
case C2Color::PRIMARIES_BT2020:
- *dataSpace |= (matrix == C2Color::MATRIX_BT2020CONSTANT
+ *dataSpace |= (matrix == C2Color::MATRIX_BT2020_CONSTANT
? HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE
: HAL_DATASPACE_STANDARD_BT2020);
break;
diff --git a/media/codec2/tests/C2ComponentInterface_test.cpp b/media/codec2/tests/C2ComponentInterface_test.cpp
index e907964..67f733d 100644
--- a/media/codec2/tests/C2ComponentInterface_test.cpp
+++ b/media/codec2/tests/C2ComponentInterface_test.cpp
@@ -182,9 +182,9 @@
return std::make_unique<T>();
}
-template <> std::unique_ptr<C2PortMimeConfig::input> makeParam() {
+template <> std::unique_ptr<C2PortMediaTypeSetting::input> makeParam() {
// TODO(hiroh): Set more precise length.
- return C2PortMimeConfig::input::AllocUnique(100);
+ return C2PortMediaTypeSetting::input::AllocUnique(100);
}
#define TRACED_FAILURE(func) \
@@ -323,17 +323,17 @@
EXPECT_EQ(C2SettingResult::BAD_VALUE, failures[0]->failure);
}
-// There is only used enum type for the field type, that is C2DomainKind.
+// Only one enum type, C2Component::domain_t, is used as a field type here.
// If another field type is added, it is necessary to add function for that.
template <>
void C2CompIntfTest::getTestValues(
const C2FieldSupportedValues &validValueInfos,
- std::vector<C2DomainKind> *const validValues,
- std::vector<C2DomainKind> *const invalidValues) {
+ std::vector<C2Component::domain_t> *const validValues,
+ std::vector<C2Component::domain_t> *const invalidValues) {
UNUSED(validValueInfos);
- validValues->emplace_back(C2DomainVideo);
- validValues->emplace_back(C2DomainAudio);
- validValues->emplace_back(C2DomainOther);
+ validValues->emplace_back(C2Component::DOMAIN_VIDEO);
+ validValues->emplace_back(C2Component::DOMAIN_AUDIO);
+ validValues->emplace_back(C2Component::DOMAIN_OTHER);
// There is no invalid value.
UNUSED(invalidValues);
@@ -634,20 +634,20 @@
std::vector<std::shared_ptr<C2ParamDescriptor>> supportedParams;
ASSERT_EQ(C2_OK, mIntf->querySupportedParams_nb(&supportedParams));
- EACH_TEST_SELF(C2ComponentLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentTemporalInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ActualPipelineDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentAttributesSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentDomainInfo, TEST_ENUM_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentDomainSetting, TEST_ENUM_WRITABLE_FIELD);
// TODO(hiroh): Support parameters based on uint32_t[] and char[].
- // EACH_TEST_INPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
- // EACH_TEST_OUTPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_INPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_OUTPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_INPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_OUTPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
@@ -656,10 +656,10 @@
// EACH_TEST_SELF(C2ReadOnlyParamsInfo, TEST_U32ARRAY_WRITABLE_FIELD);
// EACH_TEST_SELF(C2RequestedInfosInfo, TEST_U32ARRAY_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_INPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_OUTPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
diff --git a/media/codec2/tests/C2SampleComponent_test.cpp b/media/codec2/tests/C2SampleComponent_test.cpp
index cd354ad..9956834 100644
--- a/media/codec2/tests/C2SampleComponent_test.cpp
+++ b/media/codec2/tests/C2SampleComponent_test.cpp
@@ -152,7 +152,7 @@
std::unordered_map<uint32_t, C2Param &> mMyParams;
- C2ComponentDomainInfo mDomainInfo;
+ C2ComponentDomainSetting mDomainInfo;
MyComponentInstance() {
mMyParams.insert({mDomainInfo.index(), mDomainInfo});
@@ -187,12 +187,12 @@
c2_blocking_t mayBlock) const override {
(void)mayBlock;
for (C2FieldSupportedValuesQuery &query : fields) {
- if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::value)) {
+ if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainSetting::value)) {
query.values = C2FieldSupportedValues(
false /* flag */,
&mDomainInfo.value
//,
- //{(int32_t)C2DomainVideo}
+ //{(int32_t)C2Component::DOMAIN_VIDEO}
);
query.status = C2_OK;
} else {
@@ -391,20 +391,20 @@
}
TEST_F(C2SampleComponentTest, ReflectorTest) {
- C2ComponentDomainInfo domainInfo;
+ C2ComponentDomainSetting domainInfo;
std::shared_ptr<MyComponentInstance> myComp(new MyComponentInstance);
std::shared_ptr<C2ComponentInterface> comp = myComp;
std::unique_ptr<C2StructDescriptor> desc{
- myComp->getParamReflector()->describe(C2ComponentDomainInfo::CORE_INDEX)};
+ myComp->getParamReflector()->describe(C2ComponentDomainSetting::CORE_INDEX)};
dumpStruct(*desc);
std::vector<C2FieldSupportedValuesQuery> query = {
- { C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ { C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT },
- C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT),
- C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value)),
+ C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value)),
};
EXPECT_EQ(C2_OK, comp->querySupportedValues_vb(query, C2_DONT_BLOCK));
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index f07d9b0..e075849 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -712,8 +712,8 @@
}
uint32_t mediaTypeIndex =
- traits->kind == C2Component::KIND_ENCODER ? C2PortMimeConfig::output::PARAM_TYPE
- : C2PortMimeConfig::input::PARAM_TYPE;
+ traits->kind == C2Component::KIND_ENCODER ? C2PortMediaTypeSetting::output::PARAM_TYPE
+ : C2PortMediaTypeSetting::input::PARAM_TYPE;
std::vector<std::unique_ptr<C2Param>> params;
res = intf->query_vb({}, { mediaTypeIndex }, C2_MAY_BLOCK, &params);
if (res != C2_OK) {
@@ -724,7 +724,7 @@
ALOGD("failed to query interface: unexpected vector size: %zu", params.size());
return mInit;
}
- C2PortMimeConfig *mediaTypeConfig = C2PortMimeConfig::From(params[0].get());
+ C2PortMediaTypeSetting *mediaTypeConfig = C2PortMediaTypeSetting::From(params[0].get());
if (mediaTypeConfig == nullptr) {
ALOGD("failed to query media type");
return mInit;
@@ -856,6 +856,7 @@
emplace("libcodec2_soft_h263dec.so");
emplace("libcodec2_soft_h263enc.so");
emplace("libcodec2_soft_hevcdec.so");
+ emplace("libcodec2_soft_hevcenc.so");
emplace("libcodec2_soft_mp3dec.so");
emplace("libcodec2_soft_mpeg2dec.so");
emplace("libcodec2_soft_mpeg4dec.so");
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 0f327cf..1ee6930 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -43,7 +43,7 @@
strcpy(mAttributes.tags, "");
}
mStreamType = static_cast<audio_stream_type_t>(parcel->readInt32());
- mGroupId = parcel->readUint32();
+ mGroupId = static_cast<volume_group_t>(parcel->readUint32());
return NO_ERROR;
}
@@ -60,7 +60,7 @@
parcel->writeUtf8AsUtf16(mAttributes.tags);
}
parcel->writeInt32(static_cast<int32_t>(mStreamType));
- parcel->writeUint32(mGroupId);
+ parcel->writeUint32(static_cast<uint32_t>(mGroupId));
return NO_ERROR;
}
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index d9f6e36..1bce16f 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -1167,8 +1167,6 @@
case SET_STREAM_VOLUME:
case REGISTER_POLICY_MIXES:
case SET_MASTER_MONO:
- case START_AUDIO_SOURCE:
- case STOP_AUDIO_SOURCE:
case GET_SURROUND_FORMATS:
case SET_SURROUND_FORMAT_ENABLED:
case SET_ASSISTANT_UID:
diff --git a/media/libaudioclient/include/media/AudioAttributes.h b/media/libaudioclient/include/media/AudioAttributes.h
index edf26eb..0a35e9e 100644
--- a/media/libaudioclient/include/media/AudioAttributes.h
+++ b/media/libaudioclient/include/media/AudioAttributes.h
@@ -17,6 +17,7 @@
#pragma once
+#include <media/AudioCommonTypes.h>
#include <system/audio.h>
#include <system/audio_policy.h>
#include <binder/Parcelable.h>
@@ -28,7 +29,7 @@
public:
AudioAttributes() = default;
AudioAttributes(const audio_attributes_t &attributes) : mAttributes(attributes) {}
- AudioAttributes(uint32_t groupId,
+ AudioAttributes(volume_group_t groupId,
audio_stream_type_t stream,
const audio_attributes_t &attributes) :
mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {}
@@ -39,7 +40,7 @@
status_t writeToParcel(Parcel *parcel) const override;
audio_stream_type_t getStreamType() const { return mStreamType; }
- uint32_t getGroupId() const { return mGroupId; }
+ volume_group_t getGroupId() const { return mGroupId; }
private:
audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -53,7 +54,7 @@
* @brief mGroupId: for future volume management, define groups within a strategy that follows
* the same curves of volume (extension of stream types to manage volume)
*/
- uint32_t mGroupId = 0;
+ volume_group_t mGroupId = VOLUME_GROUP_NONE;
};
} // namespace android
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5188da1..8e446ea 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -27,7 +27,7 @@
const product_strategy_t PRODUCT_STRATEGY_NONE = static_cast<product_strategy_t>(-1);
using AttributesVector = std::vector<audio_attributes_t>;
-using StreamTypes = std::vector<audio_stream_type_t>;
+using StreamTypeVector = std::vector<audio_stream_type_t>;
constexpr bool operator==(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
{
@@ -38,5 +38,9 @@
{
return !(lhs==rhs);
}
+
+enum volume_group_t : uint32_t;
+static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
+
} // namespace android
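volume_group_t above is an opaque enum with a fixed underlying type rather than the plain uint32_t it replaces in AudioAttributes, which is why AudioAttributes.cpp now spells out static_casts when parceling. A small illustration of what the distinct type buys; the snippet mirrors the declaration above and is otherwise illustrative:

#include <stdint.h>

enum volume_group_t : uint32_t;                 // opaque id type, as declared above
static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);

static void volumeGroupTypingExample() {
    volume_group_t group = VOLUME_GROUP_NONE;   // well-defined "no group" sentinel
    // group = 7u;                              // does not compile: no implicit int -> enum conversion
    group = static_cast<volume_group_t>(7u);    // conversions are explicit, as in writeToParcel()
    (void)group;
}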
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index a1e869f..b25f82e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -123,15 +123,13 @@
status_t DeviceHalHidl::setMasterVolume(float volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+ return processReturn("setMasterVolume", mDevice->setMasterVolume(volume));
}
status_t DeviceHalHidl::getMasterVolume(float *volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
+ Return<void> ret = mDevice->getMasterVolume(
[&](Result r, float v) {
retval = r;
if (retval == Result::OK) {
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index fe47d0b..416bdaa 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -15,6 +15,7 @@
*/
#include <assert.h>
#include <inttypes.h>
+#include <iterator>
#include <math.h>
#include <stdlib.h>
#include <string.h>
@@ -102,20 +103,15 @@
printf("\n -M");
printf("\n Mono mode (force all input audio channels to be identical)");
printf("\n -basslvl:<effect_level>");
- printf("\n A value that ranges between 0 - 15 default 0");
+ printf("\n A value that ranges between %d - %d default 0", LVM_BE_MIN_EFFECTLEVEL,
+ LVM_BE_MAX_EFFECTLEVEL);
printf("\n");
printf("\n -eqPreset:<preset Value>");
- printf("\n 0 - Normal");
- printf("\n 1 - Classical");
- printf("\n 2 - Dance");
- printf("\n 3 - Flat");
- printf("\n 4 - Folk");
- printf("\n 5 - Heavy Metal");
- printf("\n 6 - Hip Hop");
- printf("\n 7 - Jazz");
- printf("\n 8 - Pop");
- printf("\n 9 - Rock");
- printf("\n default 0");
+ const size_t numPresetLvls = std::size(gEqualizerPresets);
+ for (size_t i = 0; i < numPresetLvls; ++i) {
+ printf("\n %zu - %s", i, gEqualizerPresets[i].name);
+ }
+ printf("\n default - 0");
printf("\n -bE ");
printf("\n Enable Dynamic Bass Enhancement");
printf("\n");
@@ -619,7 +615,7 @@
std::fill(fp + 1, fp + channelCount, *fp); // replicate ch 0
}
}
-#if 1
+#ifndef BYPASS_EXEC
errCode = lvmExecute(floatIn.data(), floatOut.data(), pContext, plvmConfigParams);
if (errCode) {
printf("\nError: lvmExecute returned with %d\n", errCode);
@@ -689,7 +685,7 @@
lvmConfigParams.monoMode = true;
} else if (!strncmp(argv[i], "-basslvl:", 9)) {
const int bassEffectLevel = atoi(argv[i] + 9);
- if (bassEffectLevel > 15 || bassEffectLevel < 0) {
+ if (bassEffectLevel > LVM_BE_MAX_EFFECTLEVEL || bassEffectLevel < LVM_BE_MIN_EFFECTLEVEL) {
printf("Error: Unsupported Bass Effect Level : %d\n",
bassEffectLevel);
printUsage();
@@ -698,7 +694,8 @@
lvmConfigParams.bassEffectLevel = bassEffectLevel;
} else if (!strncmp(argv[i], "-eqPreset:", 10)) {
const int eqPresetLevel = atoi(argv[i] + 10);
- if (eqPresetLevel > 9 || eqPresetLevel < 0) {
+ const int numPresetLvls = std::size(gEqualizerPresets);
+ if (eqPresetLevel >= numPresetLvls || eqPresetLevel < 0) {
printf("Error: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
printUsage();
return -1;
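Both argument checks above now derive their bounds from the data they guard - LVM_BE_MIN/MAX_EFFECTLEVEL from the LVM headers and the preset count from gEqualizerPresets via std::size - instead of repeating literals. The table-driven form generalizes to any array-backed option list; a sketch with an assumed helper name:

#include <cstddef>
#include <iterator>

// Accept an index only if it names an entry of the option table; std::size keeps the
// bound in sync when entries are added or removed.
template <typename T, size_t N>
static bool isValidTableIndex(int index, const T (&table)[N]) {
    return index >= 0 && static_cast<size_t>(index) < std::size(table);
}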
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 998f096..9d3338b 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -171,11 +171,7 @@
}
struct CodecObserver : public BnOMXObserver {
- CodecObserver() {}
-
- void setNotificationMessage(const sp<AMessage> &msg) {
- mNotify = msg;
- }
+ explicit CodecObserver(const sp<AMessage> &msg) : mNotify(msg) {}
// from IOMXObserver
virtual void onMessages(const std::list<omx_message> &messages) {
@@ -251,7 +247,7 @@
virtual ~CodecObserver() {}
private:
- sp<AMessage> mNotify;
+ const sp<AMessage> mNotify;
DISALLOW_EVIL_CONSTRUCTORS(CodecObserver);
};
@@ -1248,6 +1244,7 @@
info.mRenderInfo = NULL;
info.mGraphicBuffer = graphicBuffer;
info.mNewGraphicBuffer = false;
+ info.mDequeuedAt = mDequeueCounter;
// TODO: We shouldn't need to create MediaCodecBuffer. In metadata mode
// OMX doesn't use the shared memory buffer, but some code still
@@ -6629,7 +6626,8 @@
CHECK(mCodec->mOMXNode == NULL);
- sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
+ sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
+ notify->setInt32("generation", mCodec->mNodeGeneration + 1);
sp<RefBase> obj;
CHECK(msg->findObject("codecInfo", &obj));
@@ -6644,7 +6642,7 @@
AString componentName;
CHECK(msg->findString("componentName", &componentName));
- sp<CodecObserver> observer = new CodecObserver;
+ sp<CodecObserver> observer = new CodecObserver(notify);
sp<IOMX> omx;
sp<IOMXNode> omxNode;
@@ -6675,9 +6673,7 @@
mDeathNotifier.clear();
}
- notify = new AMessage(kWhatOMXMessageList, mCodec);
- notify->setInt32("generation", ++mCodec->mNodeGeneration);
- observer->setNotificationMessage(notify);
+ ++mCodec->mNodeGeneration;
mCodec->mComponentName = componentName;
mCodec->mRenderTracker.setComponentName(componentName);
@@ -8167,6 +8163,10 @@
OMX_CommandPortEnable, kPortIndexOutput);
}
+ // Clear the RenderQueue in which queued GraphicBuffers hold the
+ // actual buffer references in order to free them early.
+ mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
+
if (err == OK) {
err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
@@ -8572,7 +8572,7 @@
}
sp<IOMX> omx = client.interface();
- sp<CodecObserver> observer = new CodecObserver;
+ sp<CodecObserver> observer = new CodecObserver(new AMessage);
sp<IOMXNode> omxNode;
err = omx->allocateNode(name, observer, &omxNode);
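Two details in the ACodec hunks above are easy to miss: the observer's notification message is now fixed at construction (so mNotify can be const and is never observed unset), and that message carries generation mNodeGeneration + 1, the value the counter is bumped to once the node is actually allocated. Incoming callbacks are matched against the current generation; a sketch of that check, assuming ACodec's existing "generation" int32 convention (the helper is illustrative, not a quote of ACodec):

#include <media/stagefright/foundation/AMessage.h>

// True if the OMX callback message belongs to the codec node currently in use;
// stale messages from an earlier, already-freed node are dropped by the caller.
static bool isCurrentGeneration(const sp<AMessage> &msg, int32_t nodeGeneration) {
    int32_t generation = 0;
    return msg->findInt32("generation", &generation) && generation == nodeGeneration;
}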
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 42b98b1..18a6bd8 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -431,7 +431,7 @@
|| !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
if (frameTimeUs < 0) {
- int64_t thumbNailTime;
+ int64_t thumbNailTime = -1ll;
if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
|| thumbNailTime < 0) {
thumbNailTime = 0;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index f34d54c..fa3d372 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -165,6 +165,9 @@
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (!meta) {
+ continue;
+ }
ALOGV("getting track %zu of %zu, meta=%s", i, n, meta->toString().c_str());
const char *mime;
@@ -186,6 +189,9 @@
}
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (!trackMeta) {
+ return NULL;
+ }
if (metaOnly) {
return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
@@ -280,6 +286,9 @@
size_t i;
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (!meta) {
+ continue;
+ }
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -296,6 +305,9 @@
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
i, MediaExtractor::kIncludeExtensiveMetaData);
+ if (!trackMeta) {
+ return UNKNOWN_ERROR;
+ }
if (metaOnly) {
if (outFrame != NULL) {
@@ -529,6 +541,9 @@
String8 timedTextLang;
for (size_t i = 0; i < numTracks; ++i) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (!trackMeta) {
+ continue;
+ }
int64_t durationUs;
if (trackMeta->findInt64(kKeyDuration, &durationUs)) {
@@ -667,8 +682,9 @@
!strcasecmp(fileMIME, "video/x-matroska")) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(0);
const char *trackMIME;
- CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
-
+ if (trackMeta != nullptr) {
+ CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
+ }
if (!strncasecmp("audio/", trackMIME, 6)) {
// The matroska file only contains a single audio track,
// rewrite its mime type.
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 09424b8..16b3319 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -588,6 +588,7 @@
{ "genre", kKeyGenre },
{ "location", kKeyLocation },
{ "lyricist", kKeyWriter },
+ { "manufacturer", kKeyManufacturer },
{ "title", kKeyTitle },
{ "year", kKeyYear },
}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index 8a86a0d..da86758 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1178,6 +1178,8 @@
int i_target_loudness;
unsigned int i_sbr_mode;
int i;
+ int ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
#ifdef ENABLE_MPEG_D_DRC
{
@@ -1228,6 +1230,29 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+
+ if (pv_alloc_ptr == NULL) {
+ ALOGE("Cannot create requested memory %d", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 5c2d96d..e20174f 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -107,6 +107,15 @@
<Limit name="bitrate" range="1-12000000" />
<Feature name="intra-refresh" />
</MediaCodec>
+ <MediaCodec name="c2.android.hevc.encoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="320x128" max="512x512" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-4096" /> <!-- max 512x512 -->
+ <Limit name="blocks-per-second" range="1-122880" />
+ <Limit name="bitrate" range="1-10000000" />
+ </MediaCodec>
<MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
<Alias name="OMX.google.mpeg4.encoder" />
<!-- profiles and levels: ProfileCore : Level2 -->
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 2ecfa43..5e7f90a 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1234,7 +1234,7 @@
const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
ssize_t index = mFetcherInfos.indexOfKey(uri);
if (index < 0) {
- ALOGE("did not find fetcher for uri: %s", uri.c_str());
+ ALOGE("did not find fetcher for uri: %s", uriDebugString(uri).c_str());
return false;
}
@@ -2005,7 +2005,7 @@
if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
ALOGW("swapping stream type %d %s to empty stream",
- stream, mStreams[idx].mUri.c_str());
+ stream, uriDebugString(mStreams[idx].mUri).c_str());
}
mStreams[idx].mUri = mStreams[idx].mNewUri;
mStreams[idx].mNewUri.clear();
@@ -2033,7 +2033,7 @@
CHECK(idx >= 0);
if (mStreams[idx].mNewUri.empty()) {
ALOGW("swapping extra stream type %d %s to empty stream",
- stream, mStreams[idx].mUri.c_str());
+ stream, uriDebugString(mStreams[idx].mUri).c_str());
}
mStreams[idx].mUri = mStreams[idx].mNewUri;
mStreams[idx].mNewUri.clear();
@@ -2138,7 +2138,7 @@
ALOGV("stopping newUri = %s", newUri.c_str());
ssize_t index = mFetcherInfos.indexOfKey(newUri);
if (index < 0) {
- ALOGE("did not find fetcher for newUri: %s", newUri.c_str());
+ ALOGE("did not find fetcher for newUri: %s", uriDebugString(newUri).c_str());
continue;
}
FetcherInfo &info = mFetcherInfos.editValueAt(index);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 4392799..b2361b8 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -1205,8 +1205,7 @@
if (val.size() < 2
|| val.c_str()[0] != '"'
|| val.c_str()[val.size() - 1] != '"') {
- ALOGE("Expected quoted string for URI, got '%s' instead.",
- val.c_str());
+ ALOGE("Expected quoted string for URI.");
return ERROR_MALFORMED;
}
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 562c625..d153598 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -365,10 +365,10 @@
if (err == ERROR_NOT_CONNECTED) {
return ERROR_NOT_CONNECTED;
} else if (err < 0) {
- ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
+ ALOGE("failed to fetch cipher key from '%s'.", uriDebugString(keyURI).c_str());
return ERROR_IO;
} else if (key->size() != 16) {
- ALOGE("key file '%s' wasn't 16 bytes in size.", keyURI.c_str());
+ ALOGE("key file '%s' wasn't 16 bytes in size.", uriDebugString(keyURI).c_str());
return ERROR_MALFORMED;
}
@@ -1366,7 +1366,7 @@
}
if (bytesRead < 0) {
status_t err = bytesRead;
- ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+ ALOGE("failed to fetch .ts segment at url '%s'", uriDebugString(uri).c_str());
notifyError(err);
return;
}
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 437bdb7..a0407af 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -144,6 +144,9 @@
// The language code for this media
kKeyMediaLanguage = 'lang', // cstring
+ // The manufacturer code for this media
+ kKeyManufacturer = 'manu', // cstring
+
// To store the timed text format data
kKeyTextFormatData = 'text', // raw data
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 20cb415..789e62a 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -255,7 +255,7 @@
struct hostent *ent = gethostbyname(host.c_str());
if (ent == NULL) {
- ALOGE("Unknown host %s", host.c_str());
+ ALOGE("Unknown host %s", uriDebugString(host).c_str());
reply->setInt32("result", -ENOENT);
reply->post();
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index c581e9d..9263565 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -80,7 +80,7 @@
return false;
}
- ALOGI("%s", line.c_str());
+ ALOGV("%s", line.c_str());
switch (line.c_str()[0]) {
case 'v':
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 8454ca1..b4515e4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -345,8 +345,7 @@
struct hostent *ent = gethostbyname(mSessionHost.c_str());
if (ent == NULL) {
- ALOGE("Failed to look up address of session host '%s'",
- mSessionHost.c_str());
+ ALOGE("Failed to look up address of session host");
return false;
}
@@ -531,7 +530,7 @@
mSessionURL.append(AStringPrintf("%u", port));
mSessionURL.append(path);
- ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
+ ALOGV("rewritten session url: '%s'", mSessionURL.c_str());
}
sp<AMessage> reply = new AMessage('conn', this);
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 339f622..0b274a7 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -76,7 +76,7 @@
"libbinder",
"libmedia",
"libmedia_omx",
- "libmedia_jni",
+ "libmedia_jni_utils",
"libmediadrm",
"libstagefright",
"libstagefright_foundation",
@@ -84,7 +84,7 @@
"liblog",
"libutils",
"libcutils",
- "libandroid",
+ "libnativewindow",
"libandroid_runtime",
"libbinder",
"libhidlbase",
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 20b1667..1883f63 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -23,7 +23,7 @@
#include "NdkImageReaderPriv.h"
#include <android_media_Utils.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <utils/Log.h>
#include "hardware/camera3.h"
@@ -190,7 +190,7 @@
auto lockedBuffer = std::make_unique<CpuConsumer::LockedBuffer>();
- uint64_t grallocUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ uint64_t grallocUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
status_t ret =
lockImageFromBuffer(mBuffer, grallocUsage, mBuffer->mFence->dup(), lockedBuffer.get());
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index c3eb437..b929f7f 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -26,7 +26,7 @@
#include <utils/Log.h>
#include <android_media_Utils.h>
#include <android_runtime/android_view_Surface.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <grallocusage/GrallocUsageConversion.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
@@ -272,7 +272,7 @@
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
- mHalUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index cd8ecb5..26a6238 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -342,6 +342,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
EXPORT const char* AMEDIAFORMAT_KEY_LOOP = "loop";
EXPORT const char* AMEDIAFORMAT_KEY_LYRICIST = "lyricist";
+EXPORT const char* AMEDIAFORMAT_KEY_MANUFACTURER = "manufacturer";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE = "max-bitrate";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index cc1d9ef..ddf5291 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -214,6 +214,7 @@
extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LOOP __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LYRICIST __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MANUFACTURER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 171167d..7bdd3ad 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -105,6 +105,7 @@
AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
AMEDIAFORMAT_KEY_LOOP; # var introduced=29
AMEDIAFORMAT_KEY_LYRICIST; # var introduced=29
+ AMEDIAFORMAT_KEY_MANUFACTURER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_BIT_RATE; # var introduced=29
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 1c54aec..599c446 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -85,7 +85,7 @@
return false;
}
} else {
- if (appOps.noteOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+ if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
ALOGE("Request denied by app op: %d", op);
return false;
}
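For context on the one-line change above (standard AppOpsManager behaviour): checkOp() only reports whether the operation is currently allowed, whereas noteOp() additionally records an occurrence of the operation against the calling package. A minimal sketch of the non-recording check:

#include <binder/AppOpsManager.h>
#include <utils/String16.h>

// True if the op is permitted for (uid, package); nothing is written to the app-op history.
static bool opAllowed(int32_t op, int32_t uid, const String16 &package) {
    AppOpsManager appOps;
    return appOps.checkOp(op, uid, package) == AppOpsManager::MODE_ALLOWED;
}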
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a8c4bd1..ff33957 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -872,6 +872,22 @@
dprintf(fd, " Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
}
+ if (mLastIoBeginNs > 0) { // MMAP may not set this
+ dprintf(fd, " Last %s occurred (msecs): %lld\n",
+ isOutput() ? "write" : "read",
+ (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
+ }
+
+ if (mProcessTimeMs.getN() > 0) {
+ dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
+ }
+
+ if (mIoJitterMs.getN() > 0) {
+ dprintf(fd, " Hal %s jitter ms stats: %s\n",
+ isOutput() ? "write" : "read",
+ mIoJitterMs.toString().c_str());
+ }
+
if (locked) {
mLock.unlock();
}
@@ -1704,7 +1720,7 @@
// mStreamTypes[] initialized in constructor body
mTracks(type == MIXER),
mOutput(output),
- mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+ mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
mMixerStatusIgnoringFastTracks(MIXER_IDLE),
mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
@@ -1857,8 +1873,6 @@
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
}
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
- dprintf(fd, " Last write occurred (msecs): %llu\n",
- (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -3189,8 +3203,8 @@
Vector< sp<Track> > tracksToRemove;
mStandbyTimeNs = systemTime();
- nsecs_t lastWriteFinished = -1; // time last server write completed
- int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
+ int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
+ int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
// MIXER
nsecs_t lastWarning = 0;
@@ -3236,7 +3250,8 @@
}
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- while (!exitPending())
+ // loopCount is used for statistics and diagnostics.
+ for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
// Log merge requests are performed during AudioFlinger binder transactions, but
// that does not cover audio playback. It's requested here for that reason.
@@ -3394,11 +3409,11 @@
// use the time before we called the HAL write - it is a bit more accurate
// to when the server last read data than the current time here.
//
- // If we haven't written anything, mLastWriteTime will be -1
+ // If we haven't written anything, mLastIoBeginNs will be -1
// and we use systemTime().
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
- ? systemTime() : mLastWriteTime;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
+ ? systemTime() : mLastIoBeginNs;
}
for (const sp<Track> &t : mActiveTracks) {
@@ -3635,43 +3650,68 @@
// mSleepTimeUs == 0 means we must write to audio hardware
if (mSleepTimeUs == 0) {
ssize_t ret = 0;
- // We save lastWriteFinished here, as previousLastWriteFinished,
- // for throttling. On thread start, previousLastWriteFinished will be
- // set to -1, which properly results in no throttling after the first write.
- nsecs_t previousLastWriteFinished = lastWriteFinished;
- nsecs_t delta = 0;
+ // writePeriodNs is updated >= 0 when ret > 0.
+ int64_t writePeriodNs = -1;
if (mBytesRemaining) {
// FIXME rewrite to reduce number of system calls
- mLastWriteTime = systemTime(); // also used for dumpsys
+ const int64_t lastIoBeginNs = systemTime();
ret = threadLoop_write();
- lastWriteFinished = systemTime();
- delta = lastWriteFinished - mLastWriteTime;
+ const int64_t lastIoEndNs = systemTime();
if (ret < 0) {
mBytesRemaining = 0;
- } else {
+ } else if (ret > 0) {
mBytesWritten += ret;
mBytesRemaining -= ret;
- mFramesWritten += ret / mFrameSize;
+ const int64_t frames = ret / mFrameSize;
+ mFramesWritten += frames;
+
+ writePeriodNs = lastIoEndNs - mLastIoEndNs;
+ // process information relating to write time.
+ if (audio_has_proportional_frames(mFormat)) {
+ // we are in a continuous mixing cycle
+ if (mMixerStatus == MIXER_TRACKS_READY &&
+ loopCount == lastLoopCountWritten + 1) {
+
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {frames, writePeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs =
+ (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+
+ // write blocked detection
+ const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
+ if (mType == MIXER && deltaWriteNs > maxPeriod) {
+ mNumDelayedWrites++;
+ if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
+ ATRACE_NAME("underrun");
+ ALOGW("write blocked for %lld msecs, "
+ "%d delayed writes, thread %d",
+ (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
+ mNumDelayedWrites, mId);
+ lastWarning = lastIoEndNs;
+ }
+ }
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountWritten = loopCount;
}
} else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
(mMixerStatus == MIXER_DRAIN_ALL)) {
threadLoop_drain();
}
if (mType == MIXER && !mStandby) {
- // write blocked detection
- if (delta > maxPeriod) {
- mNumDelayedWrites++;
- if ((lastWriteFinished - lastWarning) > kWarningThrottleNs) {
- ATRACE_NAME("underrun");
- ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
- lastWarning = lastWriteFinished;
- }
- }
if (mThreadThrottle
&& mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
- && ret > 0) { // we wrote something
+ && writePeriodNs > 0) { // we have write period info
// Limit MixerThread data processing to no more than twice the
// expected processing rate.
//
@@ -3690,12 +3730,9 @@
// 2. threadLoop_mix (significant for heavy mixing, especially
// on low tier processors)
- // it's OK if deltaMs (and deltaNs) is an overestimate.
- nsecs_t deltaNs;
- // deltaNs = lastWriteFinished - previousLastWriteFinished;
- __builtin_sub_overflow(
- lastWriteFinished,previousLastWriteFinished, &deltaNs);
- const int32_t deltaMs = deltaNs / 1000000;
+ // it's OK if deltaMs is an overestimate.
+
+ const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
@@ -3708,7 +3745,8 @@
mThreadThrottleTimeMs += throttleMs;
// Throttle must be attributed to the previous mixer loop's write time
// to allow back-to-back throttling.
- lastWriteFinished += throttleMs * 1000000;
+ // This also ensures proper timing statistics.
+ mLastIoEndNs = systemTime(); // we fetch the write end time again.
} else {
uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
if (diff > 0) {
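For reference, the throttling decision above reduces to a small budget check; a hedged standalone sketch with hypothetical names follows (the real code sleeps with usleep() and then re-reads the write end time, as shown in the hunk above):

    // Sketch only (hypothetical names): mixer throttling budget.
    // If the last write period took less than half the buffer duration,
    // the thread sleeps for the remainder to cap processing at ~2x real time.
    #include <cstdint>

    int32_t computeThrottleMsSketch(int64_t writePeriodNs, uint32_t halfBufferMs) {
        const int32_t deltaMs = static_cast<int32_t>(writePeriodNs / 1000000);
        const int32_t throttleMs = static_cast<int32_t>(halfBufferMs) - deltaMs;
        if (static_cast<int32_t>(halfBufferMs) >= throttleMs && throttleMs > 0) {
            return throttleMs;  // caller would usleep(throttleMs * 1000)
        }
        return 0;  // no throttle needed
    }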
@@ -6751,8 +6789,10 @@
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
+ int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
+
// loop while there is work to do
- for (;;) {
+ for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
Vector< sp<EffectChain> > effectChains;
// activeTracks accumulates a copy of a subset of mActiveTracks
@@ -6951,6 +6991,7 @@
int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
ssize_t framesRead;
+ const int64_t lastIoBeginNs = systemTime(); // start IO timing
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
@@ -7008,10 +7049,12 @@
}
}
+ const int64_t lastIoEndNs = systemTime(); // end IO timing
+
// Update server timestamp with server stats
// systemTime() is optional if the hardware supports timestamps.
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
// Update server timestamp with kernel stats
if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
@@ -7060,6 +7103,24 @@
ALOG_ASSERT(framesRead > 0);
mFramesRead += framesRead;
+ if (audio_has_proportional_frames(mFormat)
+ && loopCount == lastLoopCountRead + 1) {
+ const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {framesRead, readPeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountRead = loopCount;
+
#ifdef TEE_SINK
(void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
#endif
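The mIoJitterMs/mProcessTimeMs bookkeeping added to both the mixer and record loops above follows one pattern; a minimal standalone sketch of that pattern is below. The names are hypothetical and the jitter formula only approximates what TimestampVerifier::computeJitterMs does (measured I/O period versus the nominal period implied by the frame count and sample rate).

    // Sketch only (hypothetical names): per-iteration I/O timing bookkeeping.
    #include <cstdint>

    struct IoTimingSketch {
        int64_t lastIoBeginNs = -1;
        int64_t lastIoEndNs = -1;
        double ioJitterMs = 0.0;     // stands in for mIoJitterMs.add()
        double processTimeMs = 0.0;  // stands in for mProcessTimeMs.add()

        // Call once per loop iteration that wrote/read 'frames' frames.
        void update(int64_t ioBeginNs, int64_t ioEndNs, int64_t frames, uint32_t sampleRate) {
            if (lastIoEndNs >= 0 && sampleRate != 0) {
                // jitter: measured period between I/O completions minus the
                // nominal period implied by the frame count and sample rate.
                const double periodMs = (ioEndNs - lastIoEndNs) * 1e-6;
                const double expectedMs = frames * 1e3 / sampleRate;
                ioJitterMs = periodMs - expectedMs;
                // processing time: end of previous I/O to start of this one.
                processTimeMs = (ioBeginNs - lastIoEndNs) * 1e-6;
            }
            lastIoBeginNs = ioBeginNs;
            lastIoEndNs = ioEndNs;
        }
    };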
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1131b26..4968829 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -512,6 +512,15 @@
TimestampVerifier< // For timestamp statistics.
int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
audio_devices_t mTimestampCorrectedDevices = AUDIO_DEVICE_NONE;
+
+ // ThreadLoop statistics per iteration.
+ int64_t mLastIoBeginNs = -1;
+ int64_t mLastIoEndNs = -1;
+
+ // This should be read under ThreadBase lock (if not on the threadLoop thread).
+ audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
+ audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
+
bool mIsMsdDevice = false;
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it
@@ -1030,7 +1039,6 @@
float mMasterVolume;
std::atomic<float> mMasterBalance{};
audio_utils::Balance mBalance;
- nsecs_t mLastWriteTime;
int mNumWrites;
int mNumDelayedWrites;
bool mInWrite;
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 5ccc8fd..48b5271 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -20,6 +20,23 @@
#include <utils/Log.h>
#include <math.h>
+namespace android {
+
+/**
+ * VolumeSource is the discriminant for volume management on an output.
+ * Legacy behavior used the stream type; it may now be the volume group, or a volume curve if
+ * we allow more than one curve per volume group.
+ */
+enum VolumeSource : std::underlying_type<audio_stream_type_t>::type;
+static const VolumeSource VOLUME_SOURCE_NONE = static_cast<VolumeSource>(AUDIO_STREAM_DEFAULT);
+
+static inline VolumeSource streamToVolumeSource(audio_stream_type_t stream) {
+ return static_cast<VolumeSource>(stream);
+}
+
+
+} // namespace android
+
// Absolute min volume in dB (can be represented in single precision normal float value)
#define VOLUME_MIN_DB (-758)
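A short usage sketch of the VolumeSource alias introduced above, assuming only the declarations in this header plus <system/audio.h>; the per-source counter map is hypothetical, but mirrors how the VolumeActivities map in AudioOutputDescriptor.h is keyed:

    // Sketch only: a stream type maps onto a VolumeSource key, and per-source
    // state can be kept in an ordinary map keyed by that source.
    #include <map>

    static void volumeSourceUsageSketch() {
        using android::VolumeSource;
        std::map<VolumeSource, int> activityCount;  // hypothetical per-source counter

        const VolumeSource vs = android::streamToVolumeSource(AUDIO_STREAM_MUSIC);
        ++activityCount[vs];  // operator[] creates the entry on first use

        const auto it = activityCount.find(vs);
        const int count = (it != activityCount.end()) ? it->second : 0;
        (void)count;
    }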
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index e5ebab7..c9037a1 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -19,7 +19,6 @@
"src/Serializer.cpp",
"src/SoundTriggerSession.cpp",
"src/TypeConverter.cpp",
- "src/VolumeCurve.cpp",
],
shared_libs: [
"libcutils",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c84636e..cf9519b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -73,7 +73,7 @@
virtual void dump(String8 *dst, int spaces) const
{
- dst->appendFormat("%*s- ActivityCount: %d, StopTime: %" PRId64 " \n", spaces, "",
+ dst->appendFormat("%*s- ActivityCount: %d, StopTime: %" PRId64 ", ", spaces, "",
getActivityCount(), getStopTime());
}
private:
@@ -82,6 +82,37 @@
};
/**
+ * @brief VolumeActivity: tracks the activity for volume policy (volume index, mute count,
+ * previous stop time), and stores the mute state if the device is incompatible with another
+ * strategy.
+ */
+class VolumeActivity : public ActivityTracking
+{
+public:
+ bool isMuted() const { return mMuteCount > 0; }
+ int getMuteCount() const { return mMuteCount; }
+ int incMuteCount() { return ++mMuteCount; }
+ int decMuteCount() { return mMuteCount > 0 ? --mMuteCount : -1; }
+
+ void dump(String8 *dst, int spaces) const override
+ {
+ ActivityTracking::dump(dst, spaces);
+ dst->appendFormat(", Volume: %.03f, MuteCount: %02d\n", mCurVolumeDb, mMuteCount);
+ }
+ void setVolume(float volume) { mCurVolumeDb = volume; }
+ float getVolume() const { return mCurVolumeDb; }
+
+private:
+ int mMuteCount = 0; /**< mute request counter */
+ float mCurVolumeDb = NAN; /**< current volume in dB. */
+};
+/**
+ * Note: volume activities shall be indexed by CurvesId if we want to allow multiple
+ * curves per volume group, implying that mute management or volume balancing between HW and SW
+ * is handled per curve.
+ */
+using VolumeActivities = std::map<VolumeSource, VolumeActivity>;
+
+/**
* @brief The Activity class: it tracks the activity for volume policy (volume index, mute,
* memorize previous stop, and store mute if incompatible device with another strategy.
* Having this class prevents from looping on all attributes (legacy streams) of the strategy
@@ -92,6 +123,10 @@
void setMutedByDevice( bool isMuted) { mIsMutedByDevice = isMuted; }
bool isMutedByDevice() const { return mIsMutedByDevice; }
+ void dump(String8 *dst, int spaces) const override {
+ ActivityTracking::dump(dst, spaces);
+ dst->appendFormat("\n");
+ }
private:
/**
* strategies muted because of incompatible device selection.
@@ -128,15 +163,6 @@
bool force);
/**
- * Changes the stream active count and mActiveClients only.
- * This does not change the client->active() state or the output descriptor's
- * global active count.
- */
- virtual void changeStreamActiveCount(const sp<TrackClientDescriptor>& client, int delta);
- uint32_t streamActiveCount(audio_stream_type_t stream) const
- { return mActiveCount[stream]; }
-
- /**
* @brief setStopTime set the stop time due to the client stoppage or a re routing of this
* client
* @param client to be considered
@@ -148,13 +174,61 @@
* Changes the client->active() state and the output descriptor's global active count,
* along with the stream active count and mActiveClients.
* The client must be previously added by the base class addClient().
+ * In the case of a duplicating thread, the client shall be added on the duplicated thread, not
+ * on the involved outputs, but setClientActive will be called on all outputs to track the
+ * strategy and active client for a given output.
+ * The active ref count of the client is incremented/decremented through the setActive API.
*/
- void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
+ virtual void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
- bool isActive(uint32_t inPastMs = 0) const;
- bool isStreamActive(audio_stream_type_t stream,
- uint32_t inPastMs = 0,
- nsecs_t sysTime = 0) const;
+ bool isActive(uint32_t inPastMs) const;
+ bool isActive(VolumeSource volumeSource = VOLUME_SOURCE_NONE,
+ uint32_t inPastMs = 0,
+ nsecs_t sysTime = 0) const;
+ bool isAnyActive(VolumeSource volumeSourceToIgnore) const;
+
+ std::vector<VolumeSource> getActiveVolumeSources() const {
+ std::vector<VolumeSource> activeList;
+ for (const auto &iter : mVolumeActivities) {
+ if (iter.second.isActive()) {
+ activeList.push_back(iter.first);
+ }
+ }
+ return activeList;
+ }
+ uint32_t getActivityCount(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).getActivityCount() : 0;
+ }
+ bool isMuted(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).isMuted() : false;
+ }
+ int getMuteCount(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).getMuteCount() : 0;
+ }
+ int incMuteCount(VolumeSource vs)
+ {
+ return mVolumeActivities[vs].incMuteCount();
+ }
+ int decMuteCount(VolumeSource vs)
+ {
+ return mVolumeActivities[vs].decMuteCount();
+ }
+ void setCurVolume(VolumeSource vs, float volume)
+ {
+ // Even if no activity is registered for this group yet, create the entry anyway
+ mVolumeActivities[vs].setVolume(volume);
+ }
+ float getCurVolume(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities) ?
+ mVolumeActivities.at(vs).getVolume() : NAN;
+ }
bool isStrategyActive(product_strategy_t ps, uint32_t inPastMs = 0, nsecs_t sysTime = 0) const
{
@@ -195,40 +269,36 @@
// it is possible that when a client is removed, we could remove its
// associated active count by calling changeStreamActiveCount(),
// but that would be hiding a problem, so we log fatal instead.
- auto it2 = mActiveClients.find(client);
- LOG_ALWAYS_FATAL_IF(it2 != mActiveClients.end(),
- "%s(%d) removing client portId %d which is active (count %zu)",
- __func__, mId, portId, it2->second);
+ auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+ LOG_ALWAYS_FATAL_IF(clientIter != mActiveClients.end(),
+ "%s(%d) removing client portId %d which is active (count %d)",
+ __func__, mId, portId, client->getActivityCount());
ClientMapHandler<TrackClientDescriptor>::removeClient(portId);
}
- using ActiveClientMap = std::map<sp<TrackClientDescriptor>, size_t /* count */>;
- // required for duplicating thread
- const ActiveClientMap& getActiveClients() const {
+ const TrackClientVector& getActiveClients() const {
return mActiveClients;
}
DeviceVector mDevices; /**< current devices this output is routed to */
- nsecs_t mStopTime[AUDIO_STREAM_CNT];
- int mMuteCount[AUDIO_STREAM_CNT]; // mute request counter
AudioMix *mPolicyMix = nullptr; // non NULL when used by a dynamic policy
protected:
const sp<AudioPort> mPort;
AudioPolicyClientInterface * const mClientInterface;
- float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume in dB
- uint32_t mActiveCount[AUDIO_STREAM_CNT]; // number of streams of each type active on this output
uint32_t mGlobalActiveCount = 0; // non-client-specific active count
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
- // The ActiveClientMap shows the clients that contribute to the streams counts
+ // The ActiveClients vector shows the clients that contribute to the @VolumeSource counts
// and may include upstream clients from a duplicating thread.
// Compare with the ClientMap (mClients) which are external AudioTrack clients of the
// output descriptor (and do not count internal PatchTracks).
- ActiveClientMap mActiveClients;
+ TrackClientVector mActiveClients;
RoutingActivities mRoutingActivities; /**< track routing activity on this ouput.*/
+
+ VolumeActivities mVolumeActivities; /**< track volume activity on this output.*/
};
// Audio output driven by a software mixer in audio flinger.
@@ -250,8 +320,13 @@
virtual bool isFixedVolume(audio_devices_t device);
sp<SwAudioOutputDescriptor> subOutput1() { return mOutput1; }
sp<SwAudioOutputDescriptor> subOutput2() { return mOutput2; }
- void changeStreamActiveCount(
- const sp<TrackClientDescriptor>& client, int delta) override;
+ void setClientActive(const sp<TrackClientDescriptor>& client, bool active) override;
+ void setAllClientsInactive()
+ {
+ for (const auto &client : clientsList(true)) {
+ setClientActive(client, false);
+ }
+ }
virtual bool setVolume(float volume,
audio_stream_type_t stream,
audio_devices_t device,
@@ -344,25 +419,27 @@
public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
{
public:
- bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return whether a stream is playing remotely, override to change the definition of
+ * return whether any source contributing to VolumeSource is playing remotely, override
+ * to change the definition of
* local/remote playback, used for instance by notification manager to not make
* media players lose audio focus when not playing locally
* For the base implementation, "remotely" means playing during screen mirroring which
* uses an output for playback with a non-empty, non "0" address.
*/
- bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return whether a stream is playing, but not on a "remote" device.
+ * return whether any source contributing to VolumeSource is playing, but not on a "remote"
+ * device.
* Override to change the definition of a local/remote playback.
* Used for instance by policy manager to alter the speaker playback ("speaker safe" behavior)
* when media plays or not locally.
* For the base implementation, "remotely" means playing during screen mirroring.
*/
- bool isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
* @brief isStrategyActiveOnSameModule checks if the given strategy is active (or was active
@@ -409,9 +486,21 @@
sp<SwAudioOutputDescriptor> getPrimaryOutput() const;
/**
- * return true if any output is playing anything besides the stream to ignore
+ * @brief isAnyOutputActive checks if any output is active (aka playing) except the one(s) that
+ * hold the volume source to be ignored
+ * @param volumeSourceToIgnore source not considered in the activity detection
+ * @return true if any output is active for any source except the one to be ignored
*/
- bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+ bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+ {
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+ if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+ return true;
+ }
+ }
+ return false;
+ }
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
@@ -424,12 +513,24 @@
public DefaultKeyedVector< audio_io_handle_t, sp<HwAudioOutputDescriptor> >
{
public:
- bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return true if any output is playing anything besides the stream to ignore
+ * @brief isAnyOutputActive checks if any output is active (aka playing) except the one(s) that
+ * hold the volume source to be ignored
+ * @param volumeSourceToIgnore source not considered in the activity detection
+ * @return true if any output is active for any source except the one to be ignored
*/
- bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+ bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+ {
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+ if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+ return true;
+ }
+ }
+ return false;
+ }
void dump(String8 *dst) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index d52eb3d..2264d8f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -20,7 +20,6 @@
#include <unordered_set>
#include <AudioGain.h>
-#include <VolumeCurve.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
@@ -40,13 +39,11 @@
AudioPolicyConfig(HwModuleCollection &hwModules,
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
- sp<DeviceDescriptor> &defaultOutputDevice,
- VolumeCurvesCollection *volumes = nullptr)
+ sp<DeviceDescriptor> &defaultOutputDevice)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
mDefaultOutputDevice(defaultOutputDevice),
- mVolumeCurves(volumes),
mIsSpeakerDrcEnabled(false)
{}
@@ -58,13 +55,6 @@
mSource = file;
}
- void setVolumes(const VolumeCurvesCollection &volumes)
- {
- if (mVolumeCurves != nullptr) {
- *mVolumeCurves = volumes;
- }
- }
-
void setHwModules(const HwModuleCollection &hwModules)
{
mHwModules = hwModules;
@@ -182,7 +172,6 @@
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
sp<DeviceDescriptor> &mDefaultOutputDevice;
- VolumeCurvesCollection *mVolumeCurves;
// TODO: remove when legacy conf file is removed. true on devices that use DRC on the
// DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
// Note: remove also speaker_drc_enabled from global configuration of XML config file.
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 2e44a60..4bb225d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -28,6 +28,7 @@
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <policy.h>
+#include <Volume.h>
#include "AudioPatch.h"
#include "EffectDescriptor.h"
@@ -62,7 +63,7 @@
mPreferredDeviceId = preferredDeviceId;
}
bool isPreferredDeviceForExclusiveUse() const { return mPreferredDeviceForExclusiveUse; }
- void setActive(bool active) { mActive = active; }
+ virtual void setActive(bool active) { mActive = active; }
bool active() const { return mActive; }
bool hasPreferredDevice(bool activeOnly = false) const {
return mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
@@ -85,12 +86,13 @@
TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
audio_attributes_t attributes, audio_config_base_t config,
audio_port_handle_t preferredDeviceId, audio_stream_type_t stream,
- product_strategy_t strategy, audio_output_flags_t flags,
+ product_strategy_t strategy, VolumeSource volumeSource,
+ audio_output_flags_t flags,
bool isPreferredDeviceForExclusiveUse,
std::vector<wp<SwAudioOutputDescriptor>> secondaryOutputs) :
ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId,
isPreferredDeviceForExclusiveUse),
- mStream(stream), mStrategy(strategy), mFlags(flags),
+ mStream(stream), mStrategy(strategy), mVolumeSource(volumeSource), mFlags(flags),
mSecondaryOutputs(std::move(secondaryOutputs)) {}
~TrackClientDescriptor() override = default;
@@ -104,12 +106,41 @@
const std::vector<wp<SwAudioOutputDescriptor>>& getSecondaryOutputs() const {
return mSecondaryOutputs;
};
+ VolumeSource volumeSource() const { return mVolumeSource; }
+
+ void setActive(bool active) override
+ {
+ int delta = active ? 1 : -1;
+ changeActivityCount(delta);
+ }
+ void changeActivityCount(int delta)
+ {
+ if (delta > 0) {
+ mActivityCount += delta;
+ } else {
+ LOG_ALWAYS_FATAL_IF(!mActivityCount, "%s(%s) invalid delta %d, inactive client",
+ __func__, toShortString().c_str(), delta);
+ LOG_ALWAYS_FATAL_IF(static_cast<int>(mActivityCount) < -delta,
+ "%s(%s) invalid delta %d, active client count %d",
+ __func__, toShortString().c_str(), delta, mActivityCount);
+ mActivityCount += delta;
+ }
+ ClientDescriptor::setActive(mActivityCount > 0);
+ }
+ uint32_t getActivityCount() const { return mActivityCount; }
private:
const audio_stream_type_t mStream;
const product_strategy_t mStrategy;
+ const VolumeSource mVolumeSource;
const audio_output_flags_t mFlags;
const std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
+
+ /**
+ * required for duplicating thread, prevent from removing active client from an output
+ * involved in a duplication.
+ */
+ uint32_t mActivityCount = 0;
};
class RecordClientDescriptor: public ClientDescriptor
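The activity reference count added to TrackClientDescriptor above (changeActivityCount) is a guarded counter; a minimal sketch of that pattern, with the fatal assertions replaced by a boolean result and hypothetical names:

    // Sketch only (hypothetical names): guarded activity refcount, mirroring
    // changeActivityCount() but returning false where the real code aborts.
    #include <cstdint>

    class ActivityRefCountSketch {
    public:
        bool change(int delta) {
            if (delta > 0) {
                mCount += delta;
                return true;
            }
            if (mCount == 0 || static_cast<int>(mCount) < -delta) {
                return false;  // invalid decrement (LOG_ALWAYS_FATAL in the real code)
            }
            mCount += delta;
            return true;
        }
        bool active() const { return mCount > 0; }
        uint32_t count() const { return mCount; }
    private:
        uint32_t mCount = 0;
    };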
@@ -148,7 +179,8 @@
public:
SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
- audio_stream_type_t stream, product_strategy_t strategy);
+ audio_stream_type_t stream, product_strategy_t strategy,
+ VolumeSource volumeSource);
~SourceClientDescriptor() override = default;
sp<AudioPatch> patchDesc() const { return mPatchDesc; }
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
new file mode 100644
index 0000000..d408446
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <Volume.h>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <vector>
+
+namespace android {
+
+class IVolumeCurves
+{
+public:
+ virtual ~IVolumeCurves() = default;
+
+ virtual void clearCurrentVolumeIndex() = 0;
+ virtual void addCurrentVolumeIndex(audio_devices_t device, int index) = 0;
+ virtual bool canBeMuted() const = 0;
+ virtual int getVolumeIndexMin() const = 0;
+ virtual int getVolumeIndex(audio_devices_t device) const = 0;
+ virtual int getVolumeIndexMax() const = 0;
+ virtual float volIndexToDb(device_category device, int indexInUi) const = 0;
+ virtual bool hasVolumeIndexForDevice(audio_devices_t device) const = 0;
+ virtual status_t initVolume(int indexMin, int indexMax) = 0;
+ virtual std::vector<audio_attributes_t> getAttributes() const = 0;
+ virtual std::vector<audio_stream_type_t> getStreamTypes() const = 0;
+ virtual void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const = 0;
+};
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
deleted file mode 100644
index 750da55..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <Volume.h>
-#include <utils/Errors.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class IVolumeCurvesCollection
-{
-public:
- virtual ~IVolumeCurvesCollection() = default;
-
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
- int index) = 0;
- virtual bool canBeMuted(audio_stream_type_t stream) = 0;
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const = 0;
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device) = 0;
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const = 0;
- virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
- int indexInUi) const = 0;
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
-
- virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
- virtual void switchVolumeCurve(audio_stream_type_t src, audio_stream_type_t dst) = 0;
- virtual void restoreOriginVolumeCurve(audio_stream_type_t stream)
- {
- switchVolumeCurve(stream, stream);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const = 0;
-
- virtual void dump(String8 *dst) const = 0;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
deleted file mode 100644
index 76ec198..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "IVolumeCurvesCollection.h"
-#include <policy.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-#include <string>
-#include <utility>
-
-namespace android {
-
-struct CurvePoint
-{
- CurvePoint() {}
- CurvePoint(int index, int attenuationInMb) :
- mIndex(index), mAttenuationInMb(attenuationInMb) {}
- uint32_t mIndex;
- int mAttenuationInMb;
-};
-
-inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
-{
- return lhs.mIndex < rhs.mIndex;
-}
-
-// A volume curve for a given use case and device category
-// It contains of list of points of this curve expressing the attenuation in Millibels for
-// a given volume index from 0 to 100
-class VolumeCurve : public RefBase
-{
-public:
- VolumeCurve(device_category device, audio_stream_type_t stream) :
- mDeviceCategory(device), mStreamType(stream) {}
-
- device_category getDeviceCategory() const { return mDeviceCategory; }
- audio_stream_type_t getStreamType() const { return mStreamType; }
-
- void add(const CurvePoint &point) { mCurvePoints.add(point); }
-
- float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
-
- void dump(String8 *result) const;
-
-private:
- SortedVector<CurvePoint> mCurvePoints;
- device_category mDeviceCategory;
- audio_stream_type_t mStreamType;
-};
-
-// Volume Curves for a given use case indexed by device category
-class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
-{
-public:
- VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
- {
- mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
- }
-
- sp<VolumeCurve> getCurvesFor(device_category device) const
- {
- if (indexOfKey(device) < 0) {
- return 0;
- }
- return valueFor(device);
- }
-
- int getVolumeIndex(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
- if (mIndexCur.indexOfKey(device) < 0) {
- device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
- }
- return mIndexCur.valueFor(device);
- }
-
- bool canBeMuted() const { return mCanBeMuted; }
- void clearCurrentVolumeIndex() { mIndexCur.clear(); }
- void addCurrentVolumeIndex(audio_devices_t device, int index) { mIndexCur.add(device, index); }
-
- void setVolumeIndexMin(int volIndexMin) { mIndexMin = volIndexMin; }
- int getVolumeIndexMin() const { return mIndexMin; }
-
- void setVolumeIndexMax(int volIndexMax) { mIndexMax = volIndexMax; }
- int getVolumeIndexMax() const { return mIndexMax; }
-
- bool hasVolumeIndexForDevice(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- return mIndexCur.indexOfKey(device) >= 0;
- }
-
- const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
- {
- ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
- return mOriginVolumeCurves.valueFor(deviceCategory);
- }
- void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
- {
- ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
- replaceValueFor(deviceCategory, volumeCurve);
- }
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- device_category deviceCategory = volumeCurve->getDeviceCategory();
- ssize_t index = indexOfKey(deviceCategory);
- if (index < 0) {
- // Keep track of original Volume Curves per device category in order to switch curves.
- mOriginVolumeCurves.add(deviceCategory, volumeCurve);
- return KeyedVector::add(deviceCategory, volumeCurve);
- }
- return index;
- }
-
- float volIndexToDb(device_category deviceCat, int indexInUi) const
- {
- sp<VolumeCurve> vc = getCurvesFor(deviceCat);
- if (vc != 0) {
- return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
- } else {
- ALOGE("Invalid device category %d for Volume Curve", deviceCat);
- return 0.0f;
- }
- }
-
- void dump(String8 *dst, int spaces, bool curvePoints = false) const;
-
-private:
- KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
- KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
- int mIndexMin; /**< min volume index. */
- int mIndexMax; /**< max volume index. */
- bool mCanBeMuted; /**< true is the stream can be muted. */
-};
-
-// Collection of Volume Curves indexed by use case
-class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
- public IVolumeCurvesCollection
-{
-public:
- VolumeCurvesCollection()
- {
- // Create an empty collection of curves
- for (ssize_t i = 0 ; i < AUDIO_STREAM_CNT; i++) {
- audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
- KeyedVector::add(stream, VolumeCurvesForStream());
- }
- }
-
- // Once XML has been parsed, must be call first to sanity check table and initialize indexes
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
- {
- editValueAt(stream).setVolumeIndexMin(indexMin);
- editValueAt(stream).setVolumeIndexMax(indexMax);
- return NO_ERROR;
- }
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream)
- {
- editCurvesFor(stream).clearCurrentVolumeIndex();
- }
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index)
- {
- editCurvesFor(stream).addCurrentVolumeIndex(device, index);
- }
- virtual bool canBeMuted(audio_stream_type_t stream) { return getCurvesFor(stream).canBeMuted(); }
-
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMin();
- }
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMax();
- }
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
- {
- return getCurvesFor(stream).getVolumeIndex(device);
- }
- virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
- {
- const VolumeCurvesForStream &sourceCurves = getCurvesFor(streamSrc);
- VolumeCurvesForStream &dstCurves = editCurvesFor(streamDst);
- ALOG_ASSERT(sourceCurves.size() == dstCurves.size(), "device category not aligned");
- for (size_t index = 0; index < sourceCurves.size(); index++) {
- device_category cat = sourceCurves.keyAt(index);
- dstCurves.setVolumeCurve(cat, sourceCurves.getOriginVolumeCurve(cat));
- }
- }
- virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
- {
- return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const
- {
- return getCurvesFor(stream).hasVolumeIndexForDevice(device);
- }
-
- void dump(String8 *dst) const override;
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- audio_stream_type_t streamType = volumeCurve->getStreamType();
- return editCurvesFor(streamType).add(volumeCurve);
- }
- VolumeCurvesForStream &editCurvesFor(audio_stream_type_t stream)
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return editValueAt(stream);
- }
- const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return valueFor(stream);
- }
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 77e7add..7293bc4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,16 +34,8 @@
AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
AudioPolicyClientInterface *clientInterface)
- : mPort(port)
- , mClientInterface(clientInterface)
+ : mPort(port), mClientInterface(clientInterface)
{
- // clear usage count for all stream types
- for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
- mActiveCount[i] = 0;
- mCurVolume[i] = -1.0;
- mMuteCount[i] = 0;
- mStopTime[i] = 0;
- }
if (mPort.get() != nullptr) {
mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
if (mPort->mGains.size() > 0) {
@@ -85,124 +77,73 @@
return hasSameHwModuleAs(outputDesc);
}
-void AudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
- int delta)
-{
- if (delta == 0) return;
- const audio_stream_type_t stream = client->stream();
- if ((delta + (int)mActiveCount[stream]) < 0) {
- // any mismatched active count will abort.
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active stream count %d",
- __func__, client->toShortString().c_str(), delta, mActiveCount[stream]);
- // mActiveCount[stream] = 0;
- // return;
- }
- mActiveCount[stream] += delta;
- mRoutingActivities[client->strategy()].changeActivityCount(delta);
-
- if (delta > 0) {
- mActiveClients[client] += delta;
- } else {
- auto it = mActiveClients.find(client);
- if (it == mActiveClients.end()) { // client not found!
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, inactive client",
- __func__, client->toShortString().c_str(), delta);
- } else if (it->second < -delta) { // invalid delta!
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active client count %zu",
- __func__, client->toShortString().c_str(), delta, it->second);
- }
- it->second += delta;
- if (it->second == 0) {
- (void)mActiveClients.erase(it);
- }
- }
-
- ALOGV("%s stream %d, count %d", __FUNCTION__, stream, mActiveCount[stream]);
-}
-
void AudioOutputDescriptor::setStopTime(const sp<TrackClientDescriptor>& client, nsecs_t sysTime)
{
- mStopTime[client->stream()] = sysTime;
+ mVolumeActivities[client->volumeSource()].setStopTime(sysTime);
mRoutingActivities[client->strategy()].setStopTime(sysTime);
}
void AudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
{
- LOG_ALWAYS_FATAL_IF(getClient(client->portId()) == nullptr,
- "%s(%d) does not exist on output descriptor", __func__, client->portId());
-
- if (active == client->active()) {
- ALOGW("%s(%s): ignored active: %d, current stream count %d",
- __func__, client->toShortString().c_str(),
- active, mActiveCount[client->stream()]);
+ auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+ if (active == (clientIter != end(mActiveClients))) {
+ ALOGW("%s(%s): ignored active: %d, current stream count %d", __func__,
+ client->toShortString().c_str(), active,
+ mRoutingActivities.at(client->strategy()).getActivityCount());
return;
}
+ if (active) {
+ mActiveClients.push_back(client);
+ } else {
+ mActiveClients.erase(clientIter);
+ }
const int delta = active ? 1 : -1;
- changeStreamActiveCount(client, delta);
+ // If ps is unknown, it is time to track it!
+ mRoutingActivities[client->strategy()].changeActivityCount(delta);
+ mVolumeActivities[client->volumeSource()].changeActivityCount(delta);
// Handle non-client-specific activity ref count
int32_t oldGlobalActiveCount = mGlobalActiveCount;
if (!active && mGlobalActiveCount < 1) {
ALOGW("%s(%s): invalid deactivation with globalRefCount %d",
- __func__, client->toShortString().c_str(), mGlobalActiveCount);
+ __func__, client->toShortString().c_str(), mGlobalActiveCount);
mGlobalActiveCount = 1;
}
mGlobalActiveCount += delta;
- if ((oldGlobalActiveCount == 0) && (mGlobalActiveCount > 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
+ if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ if ((oldGlobalActiveCount == 0) || (mGlobalActiveCount == 0)) {
mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
- MIX_STATE_MIXING);
- }
- } else if ((oldGlobalActiveCount > 0) && (mGlobalActiveCount == 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
- MIX_STATE_IDLE);
+ mGlobalActiveCount > 0 ? MIX_STATE_MIXING : MIX_STATE_IDLE);
}
}
-
client->setActive(active);
}
+bool AudioOutputDescriptor::isActive(VolumeSource vs, uint32_t inPastMs, nsecs_t sysTime) const
+{
+ return (vs == VOLUME_SOURCE_NONE) ?
+ isActive(inPastMs) : (mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).isActive(inPastMs, sysTime) : false);
+}
+
bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const
{
nsecs_t sysTime = 0;
if (inPastMs != 0) {
sysTime = systemTime();
}
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
+ for (const auto &iter : mVolumeActivities) {
+ if (iter.first == streamToVolumeSource(AUDIO_STREAM_PATCH)) {
continue;
}
- if (isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+ if (iter.second.isActive(inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
- uint32_t inPastMs,
- nsecs_t sysTime) const
-{
- if (mActiveCount[stream] != 0) {
- return true;
- }
- if (inPastMs == 0) {
- return false;
- }
- if (sysTime == 0) {
- sysTime = systemTime();
- }
- if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
- return true;
- }
- return false;
-}
-
-
bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused)
{
return false;
@@ -217,9 +158,9 @@
// We actually change the volume if:
// - the float value returned by computeVolume() changed
// - the force flag is set
- if (volume != mCurVolume[stream] || force) {
+ if (volume != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
- mCurVolume[stream] = volume;
+ setCurVolume(static_cast<VolumeSource>(stream), volume);
return true;
}
return false;
@@ -266,6 +207,13 @@
return clients;
}
+bool AudioOutputDescriptor::isAnyActive(VolumeSource volumeSourceToIgnore) const
+{
+ return std::find_if(begin(mActiveClients), end(mActiveClients),
+ [&volumeSourceToIgnore](const auto &client) {
+ return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
+}
+
void AudioOutputDescriptor::dump(String8 *dst) const
{
dst->appendFormat(" ID: %d\n", mId);
@@ -274,20 +222,22 @@
dst->appendFormat(" Channels: %08x\n", mChannelMask);
dst->appendFormat(" Devices: %s\n", devices().toString().c_str());
dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
- dst->append(" Stream volume activeCount muteCount\n");
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- dst->appendFormat(" %02d %.03f %02d %02d\n",
- i, mCurVolume[i], streamActiveCount((audio_stream_type_t)i), mMuteCount[i]);
+ for (const auto &iter : mRoutingActivities) {
+ dst->appendFormat(" Product Strategy id: %d", iter.first);
+ iter.second.dump(dst, 4);
+ }
+ for (const auto &iter : mVolumeActivities) {
+ dst->appendFormat(" Volume Activities id: %d", iter.first);
+ iter.second.dump(dst, 4);
}
dst->append(" AudioTrack Clients:\n");
ClientMapHandler<TrackClientDescriptor>::dump(dst);
dst->append("\n");
- if (mActiveClients.size() > 0) {
+ if (!mActiveClients.empty()) {
dst->append(" AudioTrack active (stream) clients:\n");
size_t index = 0;
- for (const auto& clientPair : mActiveClients) {
- dst->appendFormat(" Refcount: %zu", clientPair.second);
- clientPair.first->dump(dst, 2, index++);
+ for (const auto& client : mActiveClients) {
+ client->dump(dst, 2, index++);
}
dst->append(" \n");
}
@@ -388,15 +338,14 @@
}
}
-void SwAudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
- int delta)
+void SwAudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
{
// forward usage count change to attached outputs
if (isDuplicated()) {
- mOutput1->changeStreamActiveCount(client, delta);
- mOutput2->changeStreamActiveCount(client, delta);
+ mOutput1->setClientActive(client, active);
+ mOutput2->setClientActive(client, active);
}
- AudioOutputDescriptor::changeStreamActiveCount(client, delta);
+ AudioOutputDescriptor::setClientActive(client, active);
}
bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device)
@@ -445,19 +394,16 @@
uint32_t delayMs,
bool force)
{
- bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
-
- if (changed) {
- // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
- // enabled
- float volume = Volume::DbToAmpl(mCurVolume[stream]);
- if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
- mClientInterface->setStreamVolume(
- AUDIO_STREAM_VOICE_CALL, volume, mIoHandle, delayMs);
- }
- mClientInterface->setStreamVolume(stream, volume, mIoHandle, delayMs);
+ if (!AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force)) {
+ return false;
}
- return changed;
+ // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is enabled
+ float volumeAmpl = Volume::DbToAmpl(getCurVolume(static_cast<VolumeSource>(stream)));
+ if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+ mClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volumeAmpl, mIoHandle, delayMs);
+ }
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ return true;
}
status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
@@ -660,24 +606,24 @@
}
// SwAudioOutputCollection implementation
-bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool SwAudioOutputCollection::isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)
&& ((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
return true;
}
@@ -685,14 +631,13 @@
return false;
}
-bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
- uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
if (((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
- outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
// do not consider re routing (when the output is going to a dynamic policy)
// as "remote playback"
if (outputDesc->mPolicyMix == NULL) {
@@ -775,22 +720,6 @@
return NULL;
}
-bool SwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
- for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
- if (s == (size_t) streamToIgnore) {
- continue;
- }
- for (size_t i = 0; i < size(); i++) {
- const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
- if (outputDesc->streamActiveCount((audio_stream_type_t)s)!= 0) {
- return true;
- }
- }
- }
- return false;
-}
-
sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
{
for (size_t i = 0; i < size(); i++) {
@@ -825,34 +754,18 @@
}
// HwAudioOutputCollection implementation
-bool HwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool HwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<HwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool HwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
- for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
- if (s == (size_t) streamToIgnore) {
- continue;
- }
- for (size_t i = 0; i < size(); i++) {
- const sp<HwAudioOutputDescriptor> outputDesc = valueAt(i);
- if (outputDesc->streamActiveCount((audio_stream_type_t)s) != 0) {
- return true;
- }
- }
- }
- return false;
-}
-
void HwAudioOutputCollection::dump(String8 *dst) const
{
dst->append("\nOutputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 633c40e..ad07ab1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -20,6 +20,7 @@
#include <sstream>
#include <utils/Log.h>
#include <utils/String8.h>
+#include <TypeConverter.h>
#include "AudioGain.h"
#include "AudioOutputDescriptor.h"
#include "AudioPatch.h"
@@ -45,6 +46,7 @@
mPortId, mSessionId, mUid);
dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+ dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
}
@@ -53,6 +55,7 @@
{
ClientDescriptor::dump(dst, spaces, index);
dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+ dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
}
std::string TrackClientDescriptor::toShortString() const
@@ -82,10 +85,10 @@
SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream,
- product_strategy_t strategy) :
+ product_strategy_t strategy, VolumeSource volumeSource) :
TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE,
- stream, strategy, AUDIO_OUTPUT_FLAG_NONE, false,
+ stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
{} /* Sources do not support secondary outputs*/),
mPatchDesc(patchDesc), mSrcDevice(srcDevice)
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index e0b233d..ec7ff57 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -341,7 +341,7 @@
}
}
if (!allowToCreate) {
- ALOGE("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
+ ALOGV("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
name, deviceType, address);
return nullptr;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 98d375c..81d3968 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -201,25 +201,6 @@
static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
};
-struct VolumeTraits : public AndroidCollectionTraits<VolumeCurve, VolumeCurvesCollection>
-{
- static constexpr const char *tag = "volume";
- static constexpr const char *collectionTag = "volumes";
- static constexpr const char *volumePointTag = "point";
- static constexpr const char *referenceTag = "reference";
-
- struct Attributes
- {
- static constexpr const char *stream = "stream";
- static constexpr const char *deviceCategory = "deviceCategory";
- static constexpr const char *reference = "ref";
- static constexpr const char *referenceName = "name";
- };
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
- // No Children
-};
-
struct SurroundSoundTraits
{
static constexpr const char *tag = "surroundSound";
@@ -703,67 +684,6 @@
return NO_ERROR;
}
-Return<VolumeTraits::Element> VolumeTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
-{
- std::string streamTypeLiteral = getXmlAttribute(cur, Attributes::stream);
- if (streamTypeLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- audio_stream_type_t streamType;
- if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
- ALOGE("%s: Invalid %s", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- std::string deviceCategoryLiteral = getXmlAttribute(cur, Attributes::deviceCategory);
- if (deviceCategoryLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::deviceCategory);
- return Status::fromStatusT(BAD_VALUE);
- }
- device_category deviceCategory;
- if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
- ALOGE("%s: Invalid %s=%s", __func__, Attributes::deviceCategory,
- deviceCategoryLiteral.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
-
- std::string referenceName = getXmlAttribute(cur, Attributes::reference);
- const xmlNode *ref = NULL;
- if (!referenceName.empty()) {
- ref = getReference<VolumeTraits>(cur->parent, referenceName);
- if (ref == NULL) {
- ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
- }
-
- Element volCurve = new VolumeCurve(deviceCategory, streamType);
-
- for (const xmlNode *child = referenceName.empty() ? cur->xmlChildrenNode : ref->xmlChildrenNode;
- child != NULL; child = child->next) {
- if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(volumePointTag))) {
- auto pointDefinition = make_xmlUnique(xmlNodeListGetString(
- child->doc, child->xmlChildrenNode, 1));
- if (pointDefinition == nullptr) {
- return Status::fromStatusT(BAD_VALUE);
- }
- ALOGV("%s: %s=%s",
- __func__, tag, reinterpret_cast<const char*>(pointDefinition.get()));
- std::vector<int32_t> point;
- collectionFromString<DefaultTraits<int32_t>>(
- reinterpret_cast<const char*>(pointDefinition.get()), point, ",");
- if (point.size() != 2) {
- ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
- reinterpret_cast<const char*>(pointDefinition.get()));
- return Status::fromStatusT(BAD_VALUE);
- }
- volCurve->add(CurvePoint(point[0], point[1]));
- }
- }
- return volCurve;
-}
-
status_t SurroundSoundTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
{
config->setDefaultSurroundFormats();
@@ -851,14 +771,6 @@
}
config->setHwModules(modules);
- // deserialize volume section
- VolumeTraits::Collection volumes;
- status = deserializeCollection<VolumeTraits>(root, &volumes, config);
- if (status != NO_ERROR) {
- return status;
- }
- config->setVolumes(volumes);
-
// Global Configuration
GlobalConfigTraits::deserialize(root, config);
diff --git a/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
new file mode 100644
index 0000000..57bd4f8
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Input Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="a2dp input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="a2dp input"
+ sources="BT A2DP In"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 42c52de..b28381b 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!-- Copyright (C) 2015 The Android Open Source Project
+<!-- Copyright (C) 2019 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -173,8 +173,8 @@
</module>
- <!-- A2dp Audio HAL -->
- <xi:include href="a2dp_audio_policy_configuration.xml"/>
+ <!-- A2dp Input Audio HAL -->
+ <xi:include href="a2dp_in_audio_policy_configuration.xml"/>
<!-- Usb Audio HAL -->
<xi:include href="usb_audio_policy_configuration.xml"/>
@@ -182,8 +182,8 @@
<!-- Remote Submix Audio HAL -->
<xi:include href="r_submix_audio_policy_configuration.xml"/>
- <!-- Hearing aid Audio HAL -->
- <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+ <!-- Bluetooth Audio HAL -->
+ <xi:include href="bluetooth_audio_policy_configuration.xml"/>
<!-- MSD Audio HAL (optional) -->
<xi:include href="msd_audio_policy_configuration.xml"/>
@@ -191,7 +191,11 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Keep it here for legacy support.
+ The engine will fall back on these files if none are provided by the engine configuration.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
new file mode 100644
index 0000000..b4cc1d3
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+
+ <!-- Modules section:
+ There is one section per audio HW module present on the platform.
+ Each module section will contain two mandatory tags for the audio HAL: “halVersion” and “name”.
+ The module names are the same as in current .conf file:
+ “primary”, “A2DP”, “remote_submix”, “USB”
+ Each module will contain the following sections:
+ “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+ module.
+ This contains both permanently attached devices and removable devices.
+ “mixPorts”: listing all output and input streams exposed by the audio HAL
+ “routes”: list of possible connections between input and output devices or between stream and
+ devices.
+ "route": is defined by an attribute:
+ -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
+ -"sink": the sink involved in this route
+ -"sources": all the sources than can be connected to the sink via vis route
+ “attachedDevices”: permanently attached devices.
+ The attachedDevices section is a list of device names. The names correspond to device names
+ defined in <devicePorts> section.
+ “defaultOutputDevice”: device to be used by default when no policy rule applies
+ -->
+ <modules>
+ <!-- Primary Audio HAL -->
+ <module name="primary" halVersion="3.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ <item>Built-In Back Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="deep_buffer" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="compressed_offload" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_MP3"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC_LC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="voice_tx" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </mixPort>
+ <mixPort name="voice_rx" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+ <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <gains>
+ <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+ minValueMB="-8400"
+ maxValueMB="4000"
+ defaultValueMB="0"
+ stepValueMB="100"/>
+ </gains>
+ </devicePort>
+ <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ </devicePorts>
+ <!-- route declaration, i.e. list all available sources for a given sink -->
+ <routes>
+ <route type="mix" sink="Earpiece"
+ sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+ <route type="mix" sink="Speaker"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headset"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headphones"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+ <route type="mix" sink="Telephony Tx"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
+ <route type="mix" sink="voice_rx"
+ sources="Telephony Rx"/>
+ </routes>
+
+ </module>
+
+ <!-- A2dp Audio HAL -->
+ <xi:include href="a2dp_audio_policy_configuration.xml"/>
+
+ <!-- Usb Audio HAL -->
+ <xi:include href="usb_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ <!-- Hearing aid Audio HAL -->
+ <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Keep it here for legacy support.
+ The engine will fall back on these files if none are provided by the engine configuration.
+ -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 40dcc22..9ad609d 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -30,7 +30,11 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Keep it here for legacy support.
+ The engine will fall back on these files if none are provided by the engine configuration.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
new file mode 100644
index 0000000..ce78eb0
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000,16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
index 3c48e88..e6e6bdb 100644
--- a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
@@ -2,7 +2,7 @@
<!-- Hearing aid Audio HAL Audio Policy Configuration file -->
<module name="hearing_aid" halVersion="2.0">
<mixPorts>
- <mixPort name="hearing aid output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <mixPort name="hearing aid output" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="24000,16000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 5c33fb3..35d86ee 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -19,6 +19,7 @@
#include <EngineConfig.h>
#include <AudioPolicyManagerInterface.h>
#include <ProductStrategy.h>
+#include <VolumeGroup.h>
namespace android {
namespace audio_policy {
@@ -67,6 +68,26 @@
status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const override;
+ VolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const override;
+
+ VolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const override;
+
+ IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const override
+ {
+ return mVolumeGroups.find(group) != end(mVolumeGroups) ?
+ mVolumeGroups.at(group)->getVolumeCurves() : nullptr;
+ }
+
+ VolumeGroupVector getVolumeGroups() const override;
+
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const override;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const override;
+
+ StreamTypeVector getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const override;
+
+ AttributesVector getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const override;
+
void dump(String8 *dst) const override;
@@ -87,10 +108,20 @@
return is_state_in_call(getPhoneState());
}
-private:
+ VolumeSource toVolumeSource(audio_stream_type_t stream) const
+ {
+ return static_cast<VolumeSource>(stream);
+ }
+
+ status_t switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
+
+ status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+
+ private:
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
+ VolumeGroupMap mVolumeGroups;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
/** current forced use configuration. */
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 72505b2..767a8ed 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -16,6 +16,8 @@
#pragma once
+#include "VolumeGroup.h"
+
#include <system/audio.h>
#include <AudioPolicyManagerInterface.h>
#include <utils/RefBase.h>
@@ -38,7 +40,7 @@
private:
struct AudioAttributes {
audio_stream_type_t mStream = AUDIO_STREAM_DEFAULT;
- uint32_t mGroupId = 0;
+ volume_group_t mVolumeGroup = VOLUME_GROUP_NONE;
audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
};
@@ -85,6 +87,12 @@
audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
+ volume_group_t getDefaultVolumeGroup() const;
+
bool isDefault() const;
void dump(String8 *dst, int spaces = 0) const;
@@ -108,6 +116,10 @@
{
public:
/**
+ * @brief initialize: set default product strategy in cache.
+ */
+ void initialize();
+ /**
* @brief getProductStrategyForAttribute. The order of the vector is dimensionning.
* @param attr
* @return applicable product strategy for the given attribute, default if none applicable.
@@ -136,9 +148,16 @@
std::string getDeviceAddressForProductStrategy(product_strategy_t strategy) const;
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
product_strategy_t getDefault() const;
void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
};
} // namespace android
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
new file mode 100644
index 0000000..54314e3
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "IVolumeCurves.h"
+#include <policy.h>
+#include <AudioPolicyManagerInterface.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+#include <string>
+#include <map>
+#include <utility>
+
+namespace android {
+
+struct CurvePoint
+{
+ CurvePoint() {}
+ CurvePoint(int index, int attenuationInMb) :
+ mIndex(index), mAttenuationInMb(attenuationInMb) {}
+ uint32_t mIndex;
+ int mAttenuationInMb;
+};
+
+inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
+{
+ return lhs.mIndex < rhs.mIndex;
+}
+
+// A volume curve for a given use case and device category
+// It contains a list of points on this curve expressing the attenuation in millibels for
+// a given volume index from 0 to 100.
+class VolumeCurve : public RefBase
+{
+public:
+ VolumeCurve(device_category device) : mDeviceCategory(device) {}
+
+ void add(const CurvePoint &point) { mCurvePoints.add(point); }
+
+ float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const;
+
+ device_category getDeviceCategory() const { return mDeviceCategory; }
+
+private:
+ const device_category mDeviceCategory;
+ SortedVector<CurvePoint> mCurvePoints;
+};
+
+// Volume Curves for a given use case indexed by device category
+class VolumeCurves : public KeyedVector<device_category, sp<VolumeCurve> >,
+ public IVolumeCurves
+{
+public:
+ VolumeCurves(int indexMin = 0, int indexMax = 100) :
+ mIndexMin(indexMin), mIndexMax(indexMax)
+ {
+ addCurrentVolumeIndex(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+ }
+ status_t initVolume(int indexMin, int indexMax) override
+ {
+ mIndexMin = indexMin;
+ mIndexMax = indexMax;
+ return NO_ERROR;
+ }
+
+ sp<VolumeCurve> getCurvesFor(device_category device) const
+ {
+ if (indexOfKey(device) < 0) {
+ return 0;
+ }
+ return valueFor(device);
+ }
+
+ virtual int getVolumeIndex(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
+ if (mIndexCur.find(device) == end(mIndexCur)) {
+ device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
+ }
+ return mIndexCur.at(device);
+ }
+
+ virtual bool canBeMuted() const { return mCanBeMuted; }
+ virtual void clearCurrentVolumeIndex() { mIndexCur.clear(); }
+ void addCurrentVolumeIndex(audio_devices_t device, int index) override
+ {
+ mIndexCur[device] = index;
+ }
+
+ int getVolumeIndexMin() const { return mIndexMin; }
+
+ int getVolumeIndexMax() const { return mIndexMax; }
+
+ bool hasVolumeIndexForDevice(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ return mIndexCur.find(device) != end(mIndexCur);
+ }
+
+ status_t switchCurvesFrom(const VolumeCurves &referenceCurves)
+ {
+ if (size() != referenceCurves.size()) {
+ ALOGE("%s! device category not aligned, cannot switch", __FUNCTION__);
+ return BAD_TYPE;
+ }
+ for (size_t index = 0; index < size(); index++) {
+ device_category cat = keyAt(index);
+ setVolumeCurve(cat, referenceCurves.getOriginVolumeCurve(cat));
+ }
+ return NO_ERROR;
+ }
+ status_t restoreOriginVolumeCurve()
+ {
+ return switchCurvesFrom(*this);
+ }
+
+ const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
+ {
+ ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
+ return mOriginVolumeCurves.valueFor(deviceCategory);
+ }
+ void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
+ {
+ ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
+ replaceValueFor(deviceCategory, volumeCurve);
+ }
+
+ ssize_t add(const sp<VolumeCurve> &volumeCurve)
+ {
+ device_category deviceCategory = volumeCurve->getDeviceCategory();
+ ssize_t index = indexOfKey(deviceCategory);
+ if (index < 0) {
+ // Keep track of original Volume Curves per device category in order to switch curves.
+ mOriginVolumeCurves.add(deviceCategory, volumeCurve);
+ return KeyedVector::add(deviceCategory, volumeCurve);
+ }
+ return index;
+ }
+
+ virtual float volIndexToDb(device_category deviceCat, int indexInUi) const
+ {
+ sp<VolumeCurve> vc = getCurvesFor(deviceCat);
+ if (vc != 0) {
+ return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+ } else {
+ ALOGE("Invalid device category %d for Volume Curve", deviceCat);
+ return 0.0f;
+ }
+ }
+ void addAttributes(const audio_attributes_t &attr)
+ {
+ mAttributes.push_back(attr);
+ }
+ AttributesVector getAttributes() const override { return mAttributes; }
+ void addStreamType(audio_stream_type_t stream)
+ {
+ mStreams.push_back(stream);
+ }
+ StreamTypeVector getStreamTypes() const override { return mStreams; }
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const override;
+
+private:
+ KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
+ std::map<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+ int mIndexMin; /**< min volume index. */
+ int mIndexMax; /**< max volume index. */
+ const bool mCanBeMuted = true; /**< true if the stream can be muted. */
+
+ AttributesVector mAttributes;
+ StreamTypeVector mStreams; /**< Keep it for legacy. */
+};
+
+} // namespace android
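The switchCurvesFrom()/restoreOriginVolumeCurve() pair above relies on add() recording every curve twice: once in the live KeyedVector and once in mOriginVolumeCurves, which is never overwritten afterwards. A minimal, standalone sketch of that bookkeeping, using std::map instead of the Android containers (types and names are illustrative only, not the framework API):

    #include <cassert>
    #include <map>
    #include <string>

    // Toy stand-in for VolumeCurves: keep a pristine copy of every curve so a later
    // switch (e.g. DTMF borrowing VOICE_CALL curves while in call) can be undone by
    // copying the originals back.
    struct ToyCurves {
        std::map<int, std::string> live;    // device category -> current curve
        std::map<int, std::string> origin;  // device category -> original curve

        void add(int deviceCategory, const std::string &curve) {
            if (origin.count(deviceCategory) == 0) {
                origin[deviceCategory] = curve;   // remember the pristine curve once
                live[deviceCategory] = curve;
            }
        }
        bool switchCurvesFrom(const ToyCurves &ref) {
            if (live.size() != ref.live.size()) return false;  // categories must match
            for (auto &entry : live) entry.second = ref.origin.at(entry.first);
            return true;
        }
        void restoreOriginVolumeCurve() { switchCurvesFrom(*this); }
    };

    int main() {
        ToyCurves dtmf, voice;
        dtmf.add(/*SPEAKER*/ 0, "dtmf-speaker");
        voice.add(/*SPEAKER*/ 0, "voice-speaker");
        dtmf.switchCurvesFrom(voice);       // entering a call
        assert(dtmf.live.at(0) == "voice-speaker");
        dtmf.restoreOriginVolumeCurve();    // leaving the call
        assert(dtmf.live.at(0) == "dtmf-speaker");
        return 0;
    }
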
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
new file mode 100644
index 0000000..c34b406
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioPolicyManagerInterface.h>
+#include <VolumeCurve.h>
+#include <system/audio.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <utils/Errors.h>
+
+namespace android {
+
+class VolumeGroup : public virtual RefBase, private HandleGenerator<uint32_t>
+{
+public:
+ VolumeGroup(const std::string &name, int indexMin, int indexMax);
+ std::string getName() const { return mName; }
+ volume_group_t getId() const { return mId; }
+
+ void add(const sp<VolumeCurve> &curve);
+
+ VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
+
+ void addSupportedAttributes(const audio_attributes_t &attr);
+ AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+
+ void addSupportedStream(audio_stream_type_t stream);
+ StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
+
+ void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ const std::string mName;
+ const volume_group_t mId;
+ VolumeCurves mGroupVolumeCurves;
+};
+
+class VolumeGroupMap : public std::map<volume_group_t, sp<VolumeGroup> >
+{
+public:
+ void dump(String8 *dst, int spaces = 0) const;
+};
+
+} // namespace android
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 755f2a8..4d7c4a0 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -15,7 +15,7 @@
*/
#define LOG_TAG "APM::AudioPolicyEngine/Base"
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
@@ -55,8 +55,10 @@
if (!is_state_in_call(oldState) && is_state_in_call(state)) {
ALOGV(" Entering call in setPhoneState()");
+ switchVolumeCurve(AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_DTMF);
} else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
ALOGV(" Exiting call in setPhoneState()");
+ restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
}
return NO_ERROR;
}
@@ -94,28 +96,60 @@
engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
{
auto loadProductStrategies =
- [](auto& strategyConfigs, auto& productStrategies) {
- uint32_t groupid = 0;
+ [](auto& strategyConfigs, auto& productStrategies, auto& volumeGroups) {
for (auto& strategyConfig : strategyConfigs) {
sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
for (const auto &group : strategyConfig.attributesGroups) {
- for (const auto &attr : group.attributesVect) {
- strategy->addAttributes({group.stream, groupid, attr});
+ const auto &iter = std::find_if(begin(volumeGroups), end(volumeGroups),
+ [&group](const auto &volumeGroup) {
+ return group.volumeGroup == volumeGroup.second->getName(); });
+ ALOG_ASSERT(iter != end(volumeGroups), "Invalid Volume Group Name %s",
+ group.volumeGroup.c_str());
+ if (group.stream != AUDIO_STREAM_DEFAULT) {
+ iter->second->addSupportedStream(group.stream);
}
- groupid += 1;
+ for (const auto &attr : group.attributesVect) {
+ strategy->addAttributes({group.stream, iter->second->getId(), attr});
+ iter->second->addSupportedAttributes(attr);
+ }
}
product_strategy_t strategyId = strategy->getId();
productStrategies[strategyId] = strategy;
}
};
+ auto loadVolumeGroups = [](auto &volumeConfigs, auto &volumeGroups) {
+ for (auto &volumeConfig : volumeConfigs) {
+ sp<VolumeGroup> volumeGroup = new VolumeGroup(volumeConfig.name, volumeConfig.indexMin,
+ volumeConfig.indexMax);
+ volumeGroups[volumeGroup->getId()] = volumeGroup;
+ for (auto &configCurve : volumeConfig.volumeCurves) {
+ device_category deviceCat = DEVICE_CATEGORY_SPEAKER;
+ if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory, deviceCat)) {
+ ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
+ continue;
+ }
+ sp<VolumeCurve> curve = new VolumeCurve(deviceCat);
+ for (auto &point : configCurve.curvePoints) {
+ curve->add({point.index, point.attenuationInMb});
+ }
+ volumeGroup->add(curve);
+ }
+ }
+ };
auto result = engineConfig::parse();
if (result.parsedConfig == nullptr) {
ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
- result = {std::make_unique<engineConfig::Config>(gDefaultEngineConfig), 0};
+ engineConfig::Config config = gDefaultEngineConfig;
+ android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
+ result = {std::make_unique<engineConfig::Config>(config),
+ static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
}
ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
- loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies);
+ loadVolumeGroups(result.parsedConfig->volumeGroups, mVolumeGroups);
+ loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies,
+ mVolumeGroups);
+ mProductStrategies.initialize();
return result;
}
@@ -173,9 +207,77 @@
return NO_ERROR;
}
+VolumeCurves *EngineBase::getVolumeCurvesForAttributes(const audio_attributes_t &attr) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForAttributes(attr);
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s", toString(attr).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForStreamType(audio_stream_type_t stream) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForStreamType(stream);
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s",
+ toString(stream).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+status_t EngineBase::switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+{
+ auto srcCurves = getVolumeCurvesForStreamType(streamSrc);
+ auto dstCurves = getVolumeCurvesForStreamType(streamDst);
+
+ if (srcCurves == nullptr || dstCurves == nullptr) {
+ return BAD_VALUE;
+ }
+ return dstCurves->switchCurvesFrom(*srcCurves);
+}
+
+status_t EngineBase::restoreOriginVolumeCurve(audio_stream_type_t stream)
+{
+ VolumeCurves *curves = getVolumeCurvesForStreamType(stream);
+ return curves != nullptr ? curves->switchCurvesFrom(*curves) : BAD_VALUE;
+}
+
+VolumeGroupVector EngineBase::getVolumeGroups() const
+{
+ VolumeGroupVector group;
+ for (const auto &iter : mVolumeGroups) {
+ group.push_back(iter.first);
+ }
+ return group;
+}
+
+volume_group_t EngineBase::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ return mProductStrategies.getVolumeGroupForAttributes(attr);
+}
+
+volume_group_t EngineBase::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ return mProductStrategies.getVolumeGroupForStreamType(stream);
+}
+
+StreamTypeVector EngineBase::getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const
+{
+ // @TODO default music stream to control volume if no group?
+ return (mVolumeGroups.find(volumeGroup) != end(mVolumeGroups)) ?
+ mVolumeGroups.at(volumeGroup)->getStreamTypes() :
+ StreamTypeVector({AUDIO_STREAM_MUSIC});
+}
+
+AttributesVector EngineBase::getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const
+{
+ return (mVolumeGroups.find(volumeGroup) != end(mVolumeGroups)) ?
+ mVolumeGroups.at(volumeGroup)->getSupportedAttributes() : AttributesVector();
+}
+
void EngineBase::dump(String8 *dst) const
{
mProductStrategies.dump(dst, 2);
+ mVolumeGroups.dump(dst, 2);
}
} // namespace audio_policy
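The reworked loadProductStrategies lambda above no longer hands out a running numeric group id per attributes group; it looks the volume group up by name in the freshly loaded mVolumeGroups, records that group's id on the strategy, and registers the group's supported stream and attributes. A minimal sketch of that name-to-id resolution, assuming hypothetical simplified types (not the framework classes):

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins: a parsed attributes group names its volume group;
    // loading resolves that name to a runtime entry and attaches the stream to it.
    struct AttributesGroupCfg { std::string stream; std::string volumeGroup; };
    struct VolumeGroupRt { std::string name; std::vector<std::string> supportedStreams; };

    int main() {
        std::map<int, VolumeGroupRt> volumeGroups = {
            {1, {"voice_call", {}}},
            {2, {"music", {}}},
        };
        std::vector<AttributesGroupCfg> groups = {{"AUDIO_STREAM_VOICE_CALL", "voice_call"},
                                                  {"AUDIO_STREAM_MUSIC", "music"}};
        for (const auto &group : groups) {
            // Same lookup shape as in EngineBase::loadAudioPolicyEngineConfig():
            // find the volume group whose name matches the config entry.
            auto iter = std::find_if(volumeGroups.begin(), volumeGroups.end(),
                                     [&group](const auto &vg) {
                                         return vg.second.name == group.volumeGroup; });
            if (iter == volumeGroups.end()) {
                std::printf("unknown volume group '%s'\n", group.volumeGroup.c_str());
                continue;
            }
            iter->second.supportedStreams.push_back(group.stream);
            std::printf("stream %s -> volume group id %d ('%s')\n",
                        group.stream.c_str(), iter->first, iter->second.name.c_str());
        }
        return 0;
    }
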
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 3940c0c..fede0d9 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -25,11 +25,11 @@
const engineConfig::ProductStrategies gOrderedStrategies = {
{"STRATEGY_PHONE",
{
- {"phone", AUDIO_STREAM_VOICE_CALL,
+ {"phone", AUDIO_STREAM_VOICE_CALL, "AUDIO_STREAM_VOICE_CALL",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT, 0,
""}},
},
- {"sco", AUDIO_STREAM_BLUETOOTH_SCO,
+ {"sco", AUDIO_STREAM_BLUETOOTH_SCO, "AUDIO_STREAM_BLUETOOTH_SCO",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_SCO,
""}},
}
@@ -37,18 +37,18 @@
},
{"STRATEGY_SONIFICATION",
{
- {"ring", AUDIO_STREAM_RING,
+ {"ring", AUDIO_STREAM_RING, "AUDIO_STREAM_RING",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
AUDIO_SOURCE_DEFAULT, 0, ""}}
},
- {"alarm", AUDIO_STREAM_ALARM,
+ {"alarm", AUDIO_STREAM_ALARM, "AUDIO_STREAM_ALARM",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, 0, ""}},
}
},
},
{"STRATEGY_ENFORCED_AUDIBLE",
{
- {"", AUDIO_STREAM_ENFORCED_AUDIBLE,
+ {"", AUDIO_STREAM_ENFORCED_AUDIBLE, "AUDIO_STREAM_ENFORCED_AUDIBLE",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
AUDIO_FLAG_AUDIBILITY_ENFORCED, ""}}
}
@@ -56,7 +56,7 @@
},
{"STRATEGY_ACCESSIBILITY",
{
- {"", AUDIO_STREAM_ACCESSIBILITY,
+ {"", AUDIO_STREAM_ACCESSIBILITY, "AUDIO_STREAM_ACCESSIBILITY",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
AUDIO_SOURCE_DEFAULT, 0, ""}}
}
@@ -64,7 +64,7 @@
},
{"STRATEGY_SONIFICATION_RESPECTFUL",
{
- {"", AUDIO_STREAM_NOTIFICATION,
+ {"", AUDIO_STREAM_NOTIFICATION, "AUDIO_STREAM_NOTIFICATION",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT, 0, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
@@ -81,7 +81,7 @@
},
{"STRATEGY_MEDIA",
{
- {"music", AUDIO_STREAM_MUSIC,
+ {"music", AUDIO_STREAM_MUSIC, "AUDIO_STREAM_MUSIC",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT, 0, ""},
@@ -91,7 +91,7 @@
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}
},
},
- {"system", AUDIO_STREAM_SYSTEM,
+ {"system", AUDIO_STREAM_SYSTEM, "AUDIO_STREAM_SYSTEM",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_SONIFICATION,
AUDIO_SOURCE_DEFAULT, 0, ""}}
}
@@ -99,7 +99,7 @@
},
{"STRATEGY_DTMF",
{
- {"", AUDIO_STREAM_DTMF,
+ {"", AUDIO_STREAM_DTMF, "AUDIO_STREAM_DTMF",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
AUDIO_SOURCE_DEFAULT, 0, ""}
@@ -109,7 +109,7 @@
},
{"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
{
- {"", AUDIO_STREAM_TTS,
+ {"", AUDIO_STREAM_TTS, "AUDIO_STREAM_TTS",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
AUDIO_FLAG_BEACON, ""}}
}
@@ -117,14 +117,14 @@
},
{"STRATEGY_REROUTING",
{
- {"", AUDIO_STREAM_REROUTING,
+ {"", AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
}
},
},
{"STRATEGY_PATCH",
{
- {"", AUDIO_STREAM_PATCH,
+ {"", AUDIO_STREAM_PATCH, "AUDIO_STREAM_PATCH",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
}
},
@@ -135,6 +135,7 @@
1.0,
gOrderedStrategies,
{},
+ {},
{}
};
} // namespace android
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index 71607d1..16e6690 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -44,7 +44,7 @@
{
std::vector<android::AudioAttributes> androidAa;
for (const auto &attr : mAttributesVector) {
- androidAa.push_back({attr.mGroupId, attr.mStream, attr.mAttributes});
+ androidAa.push_back({attr.mVolumeGroup, attr.mStream, attr.mAttributes});
}
return androidAa;
}
@@ -69,7 +69,8 @@
}) != end(mAttributesVector);
}
-audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(const audio_attributes_t &attr) const
+audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(
+ const audio_attributes_t &attr) const
{
const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
[&attr](const auto &supportedAttr) {
@@ -110,6 +111,33 @@
return supportedAttr.mStream == streamType; }) != end(mAttributesVector);
}
+volume_group_t ProductStrategy::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr)) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (supportedAttr.mStream == stream) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getDefaultVolumeGroup() const
+{
+ const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [](const auto &attr) {return attr.mAttributes == defaultAttr;});
+ return iter != end(mAttributesVector) ? iter->mVolumeGroup : VOLUME_GROUP_NONE;
+}
+
void ProductStrategy::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
@@ -121,7 +149,7 @@
deviceLiteral.c_str(), mDeviceAddress.c_str());
for (const auto &attr : mAttributesVector) {
- dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mGroupId,
+ dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mVolumeGroup,
android::toString(attr.mStream).c_str());
dst->appendFormat("%*s Attributes: ", spaces + 3, "");
std::string attStr =
@@ -172,6 +200,9 @@
product_strategy_t ProductStrategyMap::getDefault() const
{
+ if (mDefaultStrategy != PRODUCT_STRATEGY_NONE) {
+ return mDefaultStrategy;
+ }
for (const auto &iter : *this) {
if (iter.second->isDefault()) {
ALOGV("%s: using default %s", __FUNCTION__, iter.second->getName().c_str());
@@ -231,6 +262,42 @@
return at(psId)->getDeviceAddress();
}
+volume_group_t ProductStrategyMap::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForAttributes(attr);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return VOLUME_GROUP_NONE;
+ }
+ return at(defaultStrategy)->getDefaultVolumeGroup();
+}
+
+volume_group_t ProductStrategyMap::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForStreamType(stream);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return VOLUME_GROUP_NONE;
+ }
+ return at(defaultStrategy)->getDefaultVolumeGroup();
+}
+
+void ProductStrategyMap::initialize()
+{
+ mDefaultStrategy = getDefault();
+ ALOG_ASSERT(mDefaultStrategy != PRODUCT_STRATEGY_NONE, "No default product strategy found");
+}
+
void ProductStrategyMap::dump(String8 *dst, int spaces) const
{
dst->appendFormat("%*sProduct Strategies dump:", spaces, "");
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
similarity index 61%
rename from services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
rename to services/audiopolicy/engine/common/src/VolumeCurve.cpp
index 2625733..c352578 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -19,13 +19,17 @@
#include "VolumeCurve.h"
#include "TypeConverter.h"
+#include <media/TypeConverter.h>
namespace android {
float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
{
ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
-
+ if (volIndexMin < 0 || volIndexMax < 0) {
+ // In order to let AudioService initialize the min and max, the convention is to use -1
+ return NAN;
+ }
if (indexInUi < volIndexMin) {
// an index of 0 means mute request when volIndexMin > 0
if (indexInUi == 0) {
@@ -64,8 +68,7 @@
((float)(mCurvePoints[indexInUiPosition].mIndex -
mCurvePoints[indexInUiPosition - 1].mIndex)) );
- ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
- mDeviceCategory, mStreamType,
+ ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
mCurvePoints[indexInUiPosition].mIndex,
((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
@@ -74,55 +77,52 @@
return decibels;
}
-void VolumeCurve::dump(String8 *dst) const
+void VolumeCurve::dump(String8 *dst, int spaces, bool curvePoints) const
{
+ if (!curvePoints) {
+ return;
+ }
dst->append(" {");
for (size_t i = 0; i < mCurvePoints.size(); i++) {
- dst->appendFormat("(%3d, %5d)",
- mCurvePoints[i].mIndex, mCurvePoints[i].mAttenuationInMb);
- dst->append(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
+ dst->appendFormat("%*s(%3d, %5d)", spaces, "", mCurvePoints[i].mIndex,
+ mCurvePoints[i].mAttenuationInMb);
+ dst->appendFormat(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
}
}
-void VolumeCurvesForStream::dump(String8 *dst, int spaces = 0, bool curvePoints) const
+void VolumeCurves::dump(String8 *dst, int spaces, bool curvePoints) const
{
if (!curvePoints) {
- dst->appendFormat("%s %02d %02d ",
- mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
- for (size_t i = 0; i < mIndexCur.size(); i++) {
- dst->appendFormat("%04x : %02d, ", mIndexCur.keyAt(i), mIndexCur.valueAt(i));
+// dst->appendFormat("%*s%02d %s %03d %03d ", spaces, "",
+// mStream, mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ dst->appendFormat("%*s Can be muted Index Min Index Max Index Cur [device : index]...\n",
+ spaces + 1, "");
+ dst->appendFormat("%*s %s %02d %02d ", spaces + 1, "",
+ mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ for (const auto &pair : mIndexCur) {
+ dst->appendFormat("%04x : %02d, ", pair.first, pair.second);
}
- dst->append("\n");
+ dst->appendFormat("\n");
return;
}
-
+ std::string streamNames;
+ for (const auto &stream : mStreams) {
+ streamNames += android::toString(stream) + "("+std::to_string(stream)+") ";
+ }
+ dst->appendFormat("%*sVolume Curves Streams/Attributes, Curve points Streams for device"
+ " category (index, attenuation in millibel)\n", spaces, "");
+ dst->appendFormat("%*s Streams: %s \n", spaces, "", streamNames.c_str());
+ if (!mAttributes.empty()) dst->appendFormat("%*s Attributes:", spaces, "");
+ for (const auto &attributes : mAttributes) {
+ std::string attStr = attributes == defaultAttr ? "{ Any }" : android::toString(attributes);
+ dst->appendFormat("%*s %s\n", attributes == mAttributes.front() ? 0 : spaces + 13, "",
+ attStr.c_str());
+ }
for (size_t i = 0; i < size(); i++) {
std::string deviceCatLiteral;
DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
- dst->appendFormat("%*s %s :",
- spaces, "", deviceCatLiteral.c_str());
- valueAt(i)->dump(dst);
- }
- dst->append("\n");
-}
-
-void VolumeCurvesCollection::dump(String8 *dst) const
-{
- dst->append("\nStreams dump:\n");
- dst->append(
- " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat(" %02zu ", i);
- valueAt(i).dump(dst);
- }
- dst->append("\nVolume Curves for Use Cases (aka Stream types) dump:\n");
- for (size_t i = 0; i < size(); i++) {
- std::string streamTypeLiteral;
- StreamTypeConverter::toString(keyAt(i), streamTypeLiteral);
- dst->appendFormat(
- " %s (%02zu): Curve points for device category (index, attenuation in millibel)\n",
- streamTypeLiteral.c_str(), i);
- valueAt(i).dump(dst, 2, true);
+ dst->appendFormat("%*s %s :", spaces, "", deviceCatLiteral.c_str());
+ valueAt(i)->dump(dst, 1, true);
}
}
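The interpolation performed by volIndexToDb() is unchanged by this move: the UI index is first rescaled into the [0..100] range spanned by the curve points, the attenuation in millibels is interpolated linearly between the two surrounding points, and the result is returned in dB. A self-contained simplification of that math (mute handling and logging omitted; curve values and index range are illustrative):

    #include <cstdio>
    #include <vector>

    struct Point { int index; int attenuationMb; };  // curve point: position [0..100] -> millibels

    // Simplified stand-in for VolumeCurve::volIndexToDb(): rescale the UI index to the
    // curve's [0..100] space, then interpolate linearly between the surrounding points.
    float volIndexToDb(const std::vector<Point> &curve, int indexInUi, int indexMin, int indexMax) {
        const float nbSteps = 1.0f + curve.back().index - curve.front().index;
        int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);

        if (volIdx <= curve.front().index) return curve.front().attenuationMb / 100.0f;
        size_t i = 1;
        while (i < curve.size() - 1 && volIdx > curve[i].index) ++i;
        const Point &lo = curve[i - 1], &hi = curve[i];
        float frac = float(volIdx - lo.index) / float(hi.index - lo.index);
        return (lo.attenuationMb + frac * (hi.attenuationMb - lo.attenuationMb)) / 100.0f;
    }

    int main() {
        // Typical speaker-like shape: about -60 dB near the bottom, 0 dB at full volume.
        std::vector<Point> curve = {{1, -6000}, {33, -3000}, {66, -1500}, {100, 0}};
        std::printf("index  7/15 -> %.1f dB\n", volIndexToDb(curve, 7, 0, 15));
        std::printf("index 15/15 -> %.1f dB\n", volIndexToDb(curve, 15, 0, 15));
        return 0;
    }
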
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
new file mode 100644
index 0000000..e189807
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/VolumeGroup"
+//#define LOG_NDEBUG 0
+
+#include "VolumeGroup.h"
+#include <media/TypeConverter.h>
+#include <utils/String8.h>
+#include <cstdint>
+#include <string>
+
+#include <log/log.h>
+
+
+namespace android {
+
+//
+// VolumeGroup implementation
+//
+VolumeGroup::VolumeGroup(const std::string &name, int indexMin, int indexMax) :
+ mName(name), mId(static_cast<volume_group_t>(HandleGenerator<uint32_t>::getNextHandle())),
+ mGroupVolumeCurves(VolumeCurves(indexMin, indexMax))
+{
+}
+
+void VolumeGroup::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
+ mGroupVolumeCurves.dump(dst, spaces + 2, true);
+ mGroupVolumeCurves.dump(dst, spaces + 2, false);
+ dst->appendFormat("\n");
+}
+
+void VolumeGroup::add(const sp<VolumeCurve> &curve)
+{
+ mGroupVolumeCurves.add(curve);
+}
+
+void VolumeGroup::addSupportedAttributes(const audio_attributes_t &attr)
+{
+ mGroupVolumeCurves.addAttributes(attr);
+}
+
+void VolumeGroup::addSupportedStream(audio_stream_type_t stream)
+{
+ mGroupVolumeCurves.addStreamType(stream);
+}
+
+//
+// VolumeGroupMap implementation
+//
+void VolumeGroupMap::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*sVolume Groups dump:", spaces, "");
+ for (const auto &iter : *this) {
+ iter.second->dump(dst, spaces + 2);
+ }
+}
+
+} // namespace android
+
diff --git a/services/audiopolicy/engine/config/Android.mk b/services/audiopolicy/engine/config/Android.mk
index fe7d961..0b292a5 100644
--- a/services/audiopolicy/engine/config/Android.mk
+++ b/services/audiopolicy/engine/config/Android.mk
@@ -23,7 +23,8 @@
libandroidicu \
libxml2 \
libutils \
- liblog
+ liblog \
+ libcutils
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
index e18f687..7f5ed5e 100644
--- a/services/audiopolicy/engine/config/include/EngineConfig.h
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -40,11 +40,32 @@
struct AttributesGroup {
std::string name;
audio_stream_type_t stream;
+ std::string volumeGroup;
AttributesVector attributesVect;
};
using AttributesGroups = std::vector<AttributesGroup>;
+struct CurvePoint {
+ int index;
+ int attenuationInMb;
+};
+using CurvePoints = std::vector<CurvePoint>;
+
+struct VolumeCurve {
+ std::string deviceCategory;
+ CurvePoints curvePoints;
+};
+using VolumeCurves = std::vector<VolumeCurve>;
+
+struct VolumeGroup {
+ std::string name;
+ int indexMin;
+ int indexMax;
+ VolumeCurves volumeCurves;
+};
+using VolumeGroups = std::vector<VolumeGroup>;
+
struct ProductStrategy {
std::string name;
AttributesGroups attributesGroups;
@@ -78,6 +99,7 @@
ProductStrategies productStrategies;
Criteria criteria;
CriterionTypes criterionTypes;
+ VolumeGroups volumeGroups;
};
/** Result of `parse(const char*)` */
@@ -91,6 +113,7 @@
* @return audio policy usage @see Config
*/
ParsingResult parse(const char* path = DEFAULT_PATH);
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups);
} // namespace engineConfig
} // namespace android
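The new structs above are plain aggregates that the parser fills in. A hand-built example of the data one <volumeGroup> element would yield, with local mirror definitions so the sketch compiles on its own (the curve values and index range are illustrative, not taken from any shipped configuration):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Local mirrors of the aggregates declared in EngineConfig.h, kept here only so
    // this sketch is self-contained; field names follow the header.
    struct CurvePoint { int index; int attenuationInMb; };
    using CurvePoints = std::vector<CurvePoint>;
    struct VolumeCurve { std::string deviceCategory; CurvePoints curvePoints; };
    using VolumeCurves = std::vector<VolumeCurve>;
    struct VolumeGroup { std::string name; int indexMin; int indexMax; VolumeCurves volumeCurves; };
    using VolumeGroups = std::vector<VolumeGroup>;

    int main() {
        // What the parser would typically produce for one <volumeGroup> element:
        // a name, an index range, and one curve per device category.
        VolumeGroups groups = {
            {"AUDIO_STREAM_MUSIC", 0, 25, {
                {"DEVICE_CATEGORY_SPEAKER", {{1, -5800}, {20, -2900}, {60, -1200}, {100, 0}}},
                {"DEVICE_CATEGORY_HEADSET", {{1, -4950}, {33, -3350}, {66, -1700}, {100, 0}}},
            }},
        };
        for (const auto &g : groups) {
            std::printf("%s [%d..%d], %zu curve(s)\n",
                        g.name.c_str(), g.indexMin, g.indexMax, g.volumeCurves.size());
        }
        return 0;
    }
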
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 3aa38cf..1ad7739 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -19,6 +19,7 @@
#include "EngineConfig.h"
#include <policy.h>
+#include <cutils/properties.h>
#include <media/TypeConverter.h>
#include <media/convert.h>
#include <utils/Log.h>
@@ -26,6 +27,7 @@
#include <libxml/xinclude.h>
#include <string>
#include <vector>
+#include <map>
#include <sstream>
#include <istream>
@@ -57,6 +59,7 @@
struct Attributes {
static constexpr const char *name = "name";
static constexpr const char *streamType = "streamType";
+ static constexpr const char *volumeGroup = "volumeGroup";
};
static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
};
@@ -107,6 +110,34 @@
static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
Collection &collection);
};
+struct VolumeTraits : public BaseSerializerTraits<VolumeCurve, VolumeCurves> {
+ static constexpr const char *tag = "volume";
+ static constexpr const char *collectionTag = "volumes";
+ static constexpr const char *volumePointTag = "point";
+
+ struct Attributes {
+ static constexpr const char *deviceCategory = "deviceCategory";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *reference = "ref"; /**< For volume curves factorization. */
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct VolumeGroupTraits : public BaseSerializerTraits<VolumeGroup, VolumeGroups> {
+ static constexpr const char *tag = "volumeGroup";
+ static constexpr const char *collectionTag = "volumeGroups";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *indexMin = "indexMin";
+ static constexpr const char *indexMax = "indexMax";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
using xmlCharUnique = std::unique_ptr<xmlChar, decltype(xmlFree)>;
@@ -273,6 +304,12 @@
}
ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+ std::string volumeGroup = getXmlAttribute(child, Attributes::volumeGroup);
+ if (volumeGroup.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::volumeGroup);
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::volumeGroup, volumeGroup.c_str());
+
audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
std::string streamTypeXml = getXmlAttribute(child, Attributes::streamType);
if (streamTypeXml.empty()) {
@@ -287,7 +324,7 @@
AttributesVector attributesVect;
deserializeAttributesCollection(doc, child, attributesVect);
- attributesGroup.push_back({name, streamType, attributesVect});
+ attributesGroup.push_back({name, streamType, volumeGroup, attributesVect});
return NO_ERROR;
}
@@ -383,6 +420,189 @@
return NO_ERROR;
}
+status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string deviceCategory = getXmlAttribute(root, Attributes::deviceCategory);
+ if (deviceCategory.empty()) {
+ ALOGW("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
+ }
+ std::string referenceName = getXmlAttribute(root, Attributes::reference);
+ const _xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, collectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ }
+ // Retrieve curve point from reference element if found or directly from current curve
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ root->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, tag, reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ volumes.push_back({ deviceCategory, curvePoints });
+ return NO_ERROR;
+}
+
+status_t VolumeGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string name;
+ int indexMin = 0;
+ int indexMax = 0;
+ StreamVector streams = {};
+ AttributesVector attributesVect = {};
+
+ for (const xmlNode *child = root->xmlChildrenNode; child != NULL; child = child->next) {
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::name)) {
+ xmlCharUnique nameXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (nameXml == nullptr) {
+ return BAD_VALUE;
+ }
+ name = reinterpret_cast<const char*>(nameXml.get());
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMin)) {
+ xmlCharUnique indexMinXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMinXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMinLiteral(reinterpret_cast<const char*>(indexMinXml.get()));
+ if (!convertTo(indexMinLiteral, indexMin)) {
+ return BAD_VALUE;
+ }
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMax)) {
+ xmlCharUnique indexMaxXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMaxXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMaxLiteral(reinterpret_cast<const char*>(indexMaxXml.get()));
+ if (!convertTo(indexMaxLiteral, indexMax)) {
+ return BAD_VALUE;
+ }
+ }
+ }
+ deserializeAttributesCollection(doc, root, attributesVect);
+
+ std::string streamNames;
+ for (const auto &stream : streams) {
+ streamNames += android::toString(stream) + " ";
+ }
+ std::string attrmNames;
+ for (const auto &attr : attributesVect) {
+ attrmNames += android::toString(attr) + "\n";
+ }
+ ALOGV("%s: group=%s indexMin=%d, indexMax=%d streams=%s attributes=%s",
+          __func__, name.c_str(), indexMin, indexMax, streamNames.c_str(), attrmNames.c_str());
+
+ VolumeCurves groupVolumeCurves;
+ size_t skipped = 0;
+ deserializeCollection<VolumeTraits>(doc, root, groupVolumeCurves, skipped);
+ volumes.push_back({ name, indexMin, indexMax, groupVolumeCurves });
+ return NO_ERROR;
+}
+
+static constexpr const char *legacyVolumecollectionTag = "volumes";
+static constexpr const char *legacyVolumeTag = "volume";
+
+status_t deserializeLegacyVolume(_xmlDoc *doc, const _xmlNode *cur,
+ std::map<std::string, VolumeCurves> &legacyVolumes)
+{
+ std::string streamTypeLiteral = getXmlAttribute(cur, "stream");
+ if (streamTypeLiteral.empty()) {
+ ALOGE("%s: No attribute stream found", __func__);
+ return BAD_VALUE;
+ }
+ std::string deviceCategoryLiteral = getXmlAttribute(cur, "deviceCategory");
+ if (deviceCategoryLiteral.empty()) {
+ ALOGE("%s: No attribute deviceCategory found", __func__);
+ return BAD_VALUE;
+ }
+ std::string referenceName = getXmlAttribute(cur, "ref");
+ const xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, legacyVolumecollectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ ALOGV("%s: reference found for %s", __func__, referenceName.c_str());
+ }
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ cur->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)VolumeTraits::volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, legacyVolumeTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, VolumeTraits::volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ legacyVolumes[streamTypeLiteral].push_back({ deviceCategoryLiteral, curvePoints });
+ return NO_ERROR;
+}
+
+static status_t deserializeLegacyVolumeCollection(_xmlDoc *doc, const _xmlNode *cur,
+ VolumeGroups &volumeGroups,
+ size_t &nbSkippedElement)
+{
+ std::map<std::string, VolumeCurves> legacyVolumeMap;
+ for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if (xmlStrcmp(cur->name, (const xmlChar *)legacyVolumecollectionTag)) {
+ continue;
+ }
+ const xmlNode *child = cur->xmlChildrenNode;
+ for (; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)legacyVolumeTag)) {
+
+ status_t status = deserializeLegacyVolume(doc, child, legacyVolumeMap);
+ if (status != NO_ERROR) {
+ nbSkippedElement += 1;
+ }
+ }
+ }
+ }
+ for (const auto &volumeMapIter : legacyVolumeMap) {
+        // To let AudioService set the min and max indexes (compatibility), set Min and Max
+        // to -1 except for private streams
+ audio_stream_type_t streamType;
+ if (!StreamTypeConverter::fromString(volumeMapIter.first, streamType)) {
+ ALOGE("%s: Invalid stream %s", __func__, volumeMapIter.first.c_str());
+ return BAD_VALUE;
+ }
+ int indexMin = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 0 : -1;
+ int indexMax = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 100 : -1;
+ volumeGroups.push_back({ volumeMapIter.first, indexMin, indexMax, volumeMapIter.second });
+ }
+ return NO_ERROR;
+}
+
ParsingResult parse(const char* path) {
xmlDocPtr doc;
doc = xmlParseFile(path);
@@ -414,8 +634,66 @@
doc, cur, config->criteria, nbSkippedElements);
deserializeCollection<CriterionTypeTraits>(
doc, cur, config->criterionTypes, nbSkippedElements);
+ deserializeCollection<VolumeGroupTraits>(
+ doc, cur, config->volumeGroups, nbSkippedElements);
+
return {std::move(config), nbSkippedElements};
}
+android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
+ xmlDocPtr doc;
+ doc = xmlParseFile(path);
+ if (doc == NULL) {
+ ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ return BAD_VALUE;
+ }
+ xmlNodePtr cur = xmlDocGetRootElement(doc);
+ if (cur == NULL) {
+ ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return BAD_VALUE;
+ }
+ if (xmlXIncludeProcess(doc) < 0) {
+ ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+ return BAD_VALUE;
+ }
+ size_t nbSkippedElements = 0;
+ return deserializeLegacyVolumeCollection(doc, cur, volumeGroups, nbSkippedElements);
+}
+
+static const char *kConfigLocationList[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
+static const int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+static const int gApmXmlConfigFilePathMaxLength = 128;
+
+static constexpr const char *apmXmlConfigFileName = "audio_policy_configuration.xml";
+static constexpr const char *apmA2dpOffloadDisabledXmlConfigFileName =
+ "audio_policy_configuration_a2dp_offload_disabled.xml";
+
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups) {
+ char audioPolicyXmlConfigFile[gApmXmlConfigFilePathMaxLength];
+ std::vector<const char *> fileNames;
+ status_t ret;
+
+ if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
+ fileNames.push_back(apmA2dpOffloadDisabledXmlConfigFileName);
+ }
+ fileNames.push_back(apmXmlConfigFileName);
+
+ for (const char* fileName : fileNames) {
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile),
+ "%s/%s", kConfigLocationList[i], fileName);
+ ret = parseLegacyVolumeFile(audioPolicyXmlConfigFile, volumeGroups);
+ if (ret == NO_ERROR) {
+ return ret;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
} // namespace engineConfig
} // namespace android
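
A minimal sketch of how a policy engine might consume the new engineConfig entry points added
above. The header name, the ParsingResult member name parsedConfig and the configuration path are
illustrative assumptions; only parse(), parseLegacyVolumes(), VolumeGroups and
config->volumeGroups are introduced by this change.

    // Sketch only: load the engine configuration, falling back to the legacy per-stream
    // <volume> curves of the APM configuration file when no engine volume groups are found.
    #include <EngineConfig.h>   // assumed header exposing android::engineConfig

    android::status_t loadVolumeConfiguration(android::engineConfig::VolumeGroups &groups) {
        using namespace android;
        auto result = engineConfig::parse("/vendor/etc/audio_policy_engine_configuration.xml");
        if (result.parsedConfig != nullptr) {            // assumption: null config means parse failure
            groups = result.parsedConfig->volumeGroups;  // volume groups parsed from the engine config
            return NO_ERROR;
        }
        // Engine configuration missing or invalid: reuse the legacy curves keyed by stream type.
        return engineConfig::parseLegacyVolumes(groups);
    }
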
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index 498cc3b..07acd2e 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -18,6 +18,7 @@
#include <AudioPolicyManagerObserver.h>
#include <media/AudioProductStrategy.h>
+#include <IVolumeCurves.h>
#include <policy.h>
#include <Volume.h>
#include <HwModule.h>
@@ -31,7 +32,7 @@
using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
using StrategyVector = std::vector<product_strategy_t>;
-
+using VolumeGroupVector = std::vector<volume_group_t>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -181,6 +182,7 @@
/**
* @brief getAttributesForStream get the audio attributes from legacy stream type
+     * The attributes returned may only be used for routing decisions, not for volume decisions.
* @param stream to consider
* @return audio attributes matching the legacy stream type
*/
@@ -234,6 +236,56 @@
*/
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const = 0;
+ /**
+ * @brief getVolumeCurvesForAttributes retrieves the Volume Curves interface for the
+ * requested Audio Attributes.
+ * @param attr to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForStreamType retrieves the Volume Curves interface for the stream
+ * @param stream to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForVolumeGroup retrieves the Volume Curves interface for volume group
+ * @param group to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const = 0;
+
+ /**
+ * @brief getVolumeGroups retrieves the collection of volume groups.
+ * @return vector of volume groups
+ */
+ virtual VolumeGroupVector getVolumeGroups() const = 0;
+
+ /**
+ * @brief getVolumeGroupForAttributes gets the appropriate volume group to be used for a given
+ * Audio Attributes.
+ * @param attr to be considered
+     * @return volume group associated with the given audio attributes, default group if none
+ * applicable, VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeGroupForStreamType gets the appropriate volume group to be used for a given
+ * legacy stream type
+ * @param stream type to be considered
+     * @return volume group associated with the given stream type, default group if none applicable,
+ * VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const = 0;
+
+ virtual StreamTypeVector getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const = 0;
+
+ virtual AttributesVector getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const = 0;
+
virtual void dump(String8 *dst) const = 0;
protected:
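
A hedged illustration of how the policy manager could use the volume-group accessors declared
above when resolving a volume. The IVolumeCurves::volIndexToDb() signature and the
Volume::getDeviceCategory() helper are assumptions; only the engine interface methods come from
this change.

    // Sketch only: resolve the volume curves for given audio attributes and convert an index to dB.
    using namespace android;

    float volumeDbForAttributes(AudioPolicyManagerInterface *engine, const audio_attributes_t &attr,
                                audio_devices_t device, int index) {
        volume_group_t group = engine->getVolumeGroupForAttributes(attr);
        if (group == VOLUME_GROUP_NONE) {   // no default group defined for these attributes
            return 0.0f;
        }
        IVolumeCurves *curves = engine->getVolumeCurvesForVolumeGroup(group);
        if (curves == nullptr) {
            return 0.0f;
        }
        // assumed IVolumeCurves helper mapping a UI volume index to an attenuation in dB
        return curves->volIndexToDb(Volume::getDeviceCategory(device), index);
    }
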
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index b7902cf..43ba625 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
#pragma once
-#include <IVolumeCurvesCollection.h>
#include <AudioGain.h>
#include <AudioPort.h>
#include <AudioPatch.h>
@@ -51,8 +50,6 @@
virtual const DeviceVector &getAvailableInputDevices() const = 0;
- virtual IVolumeCurvesCollection &getVolumeCurves() = 0;
-
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
protected:
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index bbd9688..4eff6e6 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -12,6 +12,8 @@
src/EngineInstance.cpp \
src/Stream.cpp \
src/InputSource.cpp \
+ ../engine/common/src/VolumeCurve.cpp \
+ ../engine/common/src/VolumeGroup.cpp \
../engine/common/src/ProductStrategy.cpp \
../engine/common/src/EngineBase.cpp
diff --git a/services/audiopolicy/engineconfigurable/config/example/Android.mk b/services/audiopolicy/engineconfigurable/config/example/Android.mk
index 95a2ecc..45419f0 100644
--- a/services/audiopolicy/engineconfigurable/config/example/Android.mk
+++ b/services/audiopolicy/engineconfigurable/config/example/Android.mk
@@ -20,6 +20,8 @@
LOCAL_REQUIRED_MODULES := \
audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml \
audio_policy_engine_criteria.xml \
audio_policy_engine_criterion_types.xml
@@ -34,6 +36,22 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
@@ -54,7 +72,8 @@
LOCAL_REQUIRED_MODULES := \
audio_policy_engine_product_strategies_automotive.xml \
audio_policy_engine_criteria.xml \
- audio_policy_engine_criterion_types.xml
+ audio_policy_engine_criterion_types.xml \
+ audio_policy_engine_volumes.xml
include $(BUILD_PREBUILT)
@@ -71,6 +90,14 @@
LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
index e2fb02b..28a140a 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
@@ -19,6 +19,7 @@
<xi:include href="audio_policy_engine_product_strategies.xml"/>
<xi:include href="audio_policy_engine_criterion_types.xml"/>
<xi:include href="audio_policy_engine_criteria.xml"/>
+ <xi:include href="audio_policy_engine_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
index 543a2f0..c487da9 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
@@ -31,7 +31,7 @@
-->
<ProductStrategy name="oem_traffic_anouncement">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_traffic_anouncement">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<!-- traffic_annoucement = 1 -->
@@ -39,14 +39,14 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="oem_strategy_1">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_adas_2">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<Bundle key="oem" value="2"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="oem_strategy_2">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_adas_3">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<Bundle key="oem" value="3"/>
@@ -70,21 +70,21 @@
( type == CAR_AUDIO_TYPE_RADIO ) )
-->
<ProductStrategy name="radio">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="media_car_audio_type_3">
<ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
<Usage value="AUDIO_USAGE_MEDIA"/>
<Bundle key="car_audio_type" value="3"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="ext_audio_source">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="media_car_audio_type_7">
<ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
<Usage value="AUDIO_USAGE_MEDIA"/>
<Bundle key="car_audio_type" value="7"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="voice_command">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="speech">
<Attributes>
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
@@ -96,7 +96,7 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="safety_alert">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="system">
<ContentType value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
<Usage value="AUDIO_USAGE_NOTIFICATION"/>
<!-- CAR_AUDIO_TYPE_SAFETY_ALERT = 2 -->
@@ -112,7 +112,7 @@
<!-- Generic Usages -->
<ProductStrategy name="music">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="media">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<!-- Default product strategy has empty attributes -->
@@ -121,29 +121,31 @@
</ProductStrategy>
<ProductStrategy name="nav_guidance">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="speech">
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="voice_call">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="phone">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="phone">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="alarm">
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="ring">
<Usage value="AUDIO_USAGE_ALARM"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="ring">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="notification">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
@@ -152,10 +154,17 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="system">
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
</AttributesGroup>
</ProductStrategy>
-
+ <ProductStrategy name="tts">
+    <!-- The TTS stream MUST BE MANAGED OUTSIDE the default product strategy if there is NO
+         DEDICATED OUTPUT for TTS; otherwise, when a beacon plays, the default strategy gets
+         muted. If that strategy is media, this is annoying. -->
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
+ <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
</ProductStrategies>
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
new file mode 100644
index 0000000..b326b50
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
@@ -0,0 +1,192 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<!-- Volume group tables included by the Audio Policy Configuration file -->
+<!-- Note:
+        It is VALID to have a group without attributes if a product strategy refers to
+        this group for all of its attributes.
+        Otherwise, attributes must be specified.
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>oem_traffic_anouncement</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- OEM ADAS is a volume group that has a single port gain (this is the reason why it is a group)
+     but may host different streams.
+     A priority must be given among them: either they are mutually exclusive, so the volume
+     will be the one of the currently active stream, or a priority must be given by
+     some other means. -->
+ <volumeGroup>
+ <name>oem_adas_2</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>oem_adas_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- MEDIA is a volume group that has a single port gain (this is the reason why it is a group)
+     but may host different streams.
+     A priority must be given among them: either they are mutually exclusive, so the volume
+     will be the one of the active stream with the highest priority (ORDER MATTERS), or the
+     curves followed will be the curves for the requested attributes. -->
+ <volumeGroup>
+ <name>media_car_audio_type_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media_car_audio_type_7</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>speech</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>phone</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-0</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+</volumeGroups>
+
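
The <point>index,millibel</point> pairs above define a piecewise-linear curve from a volume index
(0..100 in the XML) to an attenuation in millibels. A simplified illustration of that
interpolation follows; the real implementation lives in engine/common (VolumeCurve.cpp) and
additionally rescales against indexMin/indexMax, and the type and function names here are
hypothetical.

    #include <vector>

    struct CurvePoint { int index; int attenuationMb; };  // e.g. {0, -4200} ... {100, 0}

    // Map a curve index in [0,100] to decibels by linear interpolation between points.
    float curveIndexToDb(const std::vector<CurvePoint> &points, int index) {
        if (points.empty()) return 0.0f;
        if (index <= points.front().index) return points.front().attenuationMb / 100.0f;
        if (index >= points.back().index) return points.back().attenuationMb / 100.0f;
        for (size_t i = 1; i < points.size(); ++i) {
            if (index <= points[i].index) {
                const CurvePoint &lo = points[i - 1];
                const CurvePoint &hi = points[i];
                float t = float(index - lo.index) / float(hi.index - lo.index);
                return (lo.attenuationMb + t * (hi.attenuationMb - lo.attenuationMb)) / 100.0f;
            }
        }
        return points.back().attenuationMb / 100.0f;  // not reached for sorted points
    }

For instance, the speaker curve of the "media" group above would map index 50 to roughly -12 dB.
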
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
index ab61d8a..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
@@ -17,6 +17,8 @@
<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device categories -->
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index f72e379..9398743 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -25,37 +25,37 @@
enforced. -->
<ProductStrategy name="STRATEGY_PHONE">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
<Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
- <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
<Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ACCESSIBILITY">
- <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
@@ -65,20 +65,20 @@
</ProductStrategy>
<ProductStrategy name="STRATEGY_MEDIA">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
<Attributes></Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_DTMF">
- <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
@@ -86,21 +86,21 @@
<!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
(TTS) of the device -->
<ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
- <AttributesGroup streamType="AUDIO_STREAM_TTS">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
<Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Routing Strategy rerouting may be removed as following media??? -->
<ProductStrategy name="STRATEGY_REROUTING">
- <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Default product strategy has empty attributes -->
<ProductStrategy name="STRATEGY_PATCH">
- <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in Millibels for a given
+volume index from 0 to 100.
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index f486dca..89a1694 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -113,7 +113,7 @@
const audio_stream_type_t &profile)
{
if (setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream, profile)) {
- getApmObserver()->getVolumeCurves().switchVolumeCurve(profile, stream);
+ switchVolumeCurve(profile, stream);
return true;
}
return false;
@@ -224,16 +224,16 @@
audio_devices_t devices = AUDIO_DEVICE_NONE;
if (ps == getProductStrategyForStream(AUDIO_STREAM_NOTIFICATION) &&
!is_state_in_call(getPhoneState()) &&
- !outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
- SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
- outputs.isStreamActive(AUDIO_STREAM_MUSIC,
- SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+ !outputs.isActiveRemotely(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
product_strategy_t strategyForMedia =
getProductStrategyForStream(AUDIO_STREAM_MUSIC);
devices = productStrategies.getDeviceTypesForProductStrategy(strategyForMedia);
} else if (ps == getProductStrategyForStream(AUDIO_STREAM_ACCESSIBILITY) &&
- (outputs.isStreamActive(AUDIO_STREAM_RING) ||
- outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
+ (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM)))) {
// do not route accessibility prompts to a digital output currently configured with a
// compressed format as they would likely not be mixed and dropped.
// Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreacheable.
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index 95eac1c..ebf383b 100644
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -8,8 +8,10 @@
LOCAL_SRC_FILES := \
src/Engine.cpp \
src/EngineInstance.cpp \
+ ../engine/common/src/VolumeCurve.cpp \
../engine/common/src/ProductStrategy.cpp \
- ../engine/common/src/EngineBase.cpp
+ ../engine/common/src/EngineBase.cpp \
+ ../engine/common/src/VolumeGroup.cpp
audio_policy_engine_includes_common := \
$(LOCAL_PATH)/include
diff --git a/services/audiopolicy/enginedefault/config/example/Android.mk b/services/audiopolicy/enginedefault/config/example/Android.mk
index 866466f..f06ee4c 100644
--- a/services/audiopolicy/enginedefault/config/example/Android.mk
+++ b/services/audiopolicy/enginedefault/config/example/Android.mk
@@ -16,7 +16,9 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
LOCAL_REQUIRED_MODULES := \
- audio_policy_engine_product_strategies_phone.xml
+ audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml
include $(BUILD_PREBUILT)
@@ -29,4 +31,20 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
index ab61d8a..4ca33b4 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
@@ -17,6 +17,8 @@
<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device categories -->
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index f72e379..9398743 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -25,37 +25,37 @@
enforced. -->
<ProductStrategy name="STRATEGY_PHONE">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
<Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
- <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
<Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ACCESSIBILITY">
- <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
@@ -65,20 +65,20 @@
</ProductStrategy>
<ProductStrategy name="STRATEGY_MEDIA">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
<Attributes></Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_DTMF">
- <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
@@ -86,21 +86,21 @@
<!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
(TTS) of the device -->
<ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
- <AttributesGroup streamType="AUDIO_STREAM_TTS">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
<Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Routing Strategy rerouting may be removed as following media??? -->
<ProductStrategy name="STRATEGY_REROUTING">
- <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Default product strategy has empty attributes -->
<ProductStrategy name="STRATEGY_PATCH">
- <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in millibels for a given
+volume index from 0 to 100.
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
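Note (illustrative, not part of the change): the curves above are lists of <volume index, attenuation in millibels> points, with indexes running 0..100 and 100 millibels equal to 1 dB. A minimal, self-contained sketch of how such points can be linearly interpolated into a gain in dB for an arbitrary index; the helper name and rounding are assumptions, not the engine's actual volIndexToDb() implementation.

    #include <utility>
    #include <vector>

    // Hypothetical helper: linear interpolation between <index, millibel> points,
    // e.g. DEFAULT_MEDIA_VOLUME_CURVE = {0,-5800}, {20,-4000}, {60,-1700}, {100,0}.
    float curveIndexToDb(const std::vector<std::pair<int, int>>& points, int index) {
        if (points.empty()) return 0.0f;
        if (index <= points.front().first) return points.front().second / 100.0f;
        if (index >= points.back().first) return points.back().second / 100.0f;
        for (size_t i = 1; i < points.size(); i++) {
            if (index <= points[i].first) {
                int i0 = points[i - 1].first, mb0 = points[i - 1].second;
                int i1 = points[i].first,     mb1 = points[i].second;
                float t = float(index - i0) / float(i1 - i0);
                return (mb0 + t * (mb1 - mb0)) / 100.0f;  // millibels -> dB
            }
        }
        return points.back().second / 100.0f;
    }
    // Example: with DEFAULT_MEDIA_VOLUME_CURVE, index 10 -> -49.0 dB and index 80 -> -8.5 dB.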
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 93af8a6..f191738 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -66,35 +66,6 @@
}
}
-status_t Engine::setPhoneState(audio_mode_t state)
-{
- ALOGV("setPhoneState() state %d", state);
-
- if (state < 0 || state >= AUDIO_MODE_CNT) {
- ALOGW("setPhoneState() invalid state %d", state);
- return BAD_VALUE;
- }
-
- if (state == getPhoneState()) {
- ALOGW("setPhoneState() setting same state %d", state);
- return BAD_VALUE;
- }
-
- // store previous phone state for management of sonification strategy below
- int oldState = getPhoneState();
- EngineBase::setPhoneState(state);
-
- if (!is_state_in_call(oldState) && is_state_in_call(state)) {
- ALOGV(" Entering call in setPhoneState()");
- getApmObserver()->getVolumeCurves().switchVolumeCurve(AUDIO_STREAM_VOICE_CALL,
- AUDIO_STREAM_DTMF);
- } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
- ALOGV(" Exiting call in setPhoneState()");
- getApmObserver()->getVolumeCurves().restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
- }
- return NO_ERROR;
-}
-
status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
{
switch(usage) {
@@ -182,16 +153,17 @@
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
- if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+ if (isInCall() || outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
device = getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
} else {
bool media_active_locally =
- outputs.isStreamActiveLocally(
- AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
- || outputs.isStreamActiveLocally(
- AUDIO_STREAM_ACCESSIBILITY, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
+ outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
+ || outputs.isActiveLocally(
+ streamToVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
// routing is same as media without the "remote" device
device = getDeviceForStrategyInt(STRATEGY_MEDIA,
availableOutputDevices,
@@ -324,7 +296,8 @@
case STRATEGY_SONIFICATION:
// If incall, just select the STRATEGY_PHONE device
- if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+ if (isInCall() ||
+ outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -397,8 +370,8 @@
}
availableOutputDevices =
availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
- if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
- outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+ if (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM))) {
return getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -429,7 +402,9 @@
outputDeviceTypesToIgnore);
break;
}
- if (device2 == AUDIO_DEVICE_NONE) {
+ // FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
+ if ((device2 == AUDIO_DEVICE_NONE) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
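Note (illustrative, not part of the change): the Engine and AudioPolicyManager hunks replace per-stream activity checks (isStreamActive, mMuteCount[stream]) with checks keyed by a volume source, i.e. the volume group declared in the engine XML above. A self-contained sketch of that mapping under assumed names; the real streamToVolumeSource() asks the engine for the group attached to the stream's product strategy.

    #include <map>
    #include <string>

    // Assumed, simplified types -- not the actual AudioPolicy interfaces.
    enum class Stream { VoiceCall, Ring, Alarm, Music, Accessibility };
    using VolumeSource = std::string;  // volume group name from audio_policy_engine_*_volumes.xml

    VolumeSource toVolumeSource(Stream s) {
        static const std::map<Stream, VolumeSource> kGroups = {
            {Stream::VoiceCall, "voice_call"}, {Stream::Ring, "ring"},
            {Stream::Alarm, "alarm"}, {Stream::Music, "music"},
            {Stream::Accessibility, "accessibility"},
        };
        return kGroups.at(s);
    }
    // Activity, mute counts and volume indexes are then tracked per group, so e.g.
    // isActive(toVolumeSource(Stream::Ring)) covers every attribute routed to the "ring" group.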
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 15fc358..d8a3698 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -55,8 +55,6 @@
///
/// from EngineBase, so from AudioPolicyManagerInterface
///
- status_t setPhoneState(audio_mode_t mode) override;
-
status_t setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config) override;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 32cc380..4540c4d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,8 +33,8 @@
#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
#define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
"audio_policy_configuration_a2dp_offload_disabled.xml"
-#define AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME \
- "audio_policy_configuration_bluetooth_hal_enabled.xml"
+#define AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME \
+ "audio_policy_configuration_bluetooth_legacy_hal.xml"
#include <inttypes.h>
#include <math.h>
@@ -209,7 +209,26 @@
return BAD_VALUE;
}
- checkForDeviceAndOutputChanges([&]() {
+    // No need to evaluate playback routing when connecting a remote submix
+    // output device used by a dynamic policy of type recorder, as no
+    // playback use case is affected.
+ bool doCheckForDeviceAndOutputChanges = true;
+ if (device->type() == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ && strncmp(device_address, "0", AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) {
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+ if (desc->mPolicyMix != nullptr
+ && desc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS
+ && strncmp(device_address,
+ desc->mPolicyMix->mDeviceAddress.string(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ doCheckForDeviceAndOutputChanges = false;
+ break;
+ }
+ }
+ }
+
+ auto checkCloseOutputs = [&]() {
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
for (audio_io_handle_t output : outputs) {
@@ -218,7 +237,7 @@
// been opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
+ (desc->mDirectOpenCount == 0))) {
closeOutput(output);
}
}
@@ -226,7 +245,13 @@
return true;
}
return false;
- });
+ };
+
+ if (doCheckForDeviceAndOutputChanges) {
+ checkForDeviceAndOutputChanges(checkCloseOutputs);
+ } else {
+ checkCloseOutputs();
+ }
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
@@ -352,7 +377,7 @@
(strlen(device_address) != 0)/*matchAddress*/);
if (devDesc == 0) {
- ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
+ ALOGV("getDeviceConnectionState() undeclared device, type %08x, address: %s",
device, device_address);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
@@ -1078,6 +1103,7 @@
new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
sanitizedRequestedPortId, *stream,
mEngine->getProductStrategyForAttributes(resultAttr),
+ streamToVolumeSource(*stream),
*flags, isRequestedDeviceForExclusiveUse,
std::move(weakSecondaryOutputDescs));
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
@@ -1569,11 +1595,13 @@
*delayMs = 0;
audio_stream_type_t stream = client->stream();
+ auto clientVolSrc = client->volumeSource();
auto clientStrategy = client->strategy();
auto clientAttr = client->attributes();
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
- if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+ if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(
+ streamToVolumeSource(AUDIO_STREAM_TTS) /*sourceToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -1628,7 +1656,7 @@
selectOutputForMusicEffects();
}
- if (outputDesc->streamActiveCount(stream) == 1 || !devices.isEmpty()) {
+ if (outputDesc->getActivityCount(clientVolSrc) == 1 || !devices.isEmpty()) {
// starting an output being rerouted?
if (devices.isEmpty()) {
devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
@@ -1682,7 +1710,7 @@
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, outputDesc->devices().types()),
+ getVolumeCurves(stream).getVolumeIndex(outputDesc->devices().types()),
outputDesc,
outputDesc->devices().types());
@@ -1754,11 +1782,12 @@
{
// always handle stream stop, check which stream type is stopping
audio_stream_type_t stream = client->stream();
+ auto clientVolSrc = client->volumeSource();
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
- if (outputDesc->streamActiveCount(stream) > 0) {
- if (outputDesc->streamActiveCount(stream) == 1) {
+ if (outputDesc->getActivityCount(clientVolSrc) > 0) {
+ if (outputDesc->getActivityCount(clientVolSrc) == 1) {
// Automatically disable the remote submix input when output is stopped on a
// re routing mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
@@ -1780,7 +1809,7 @@
outputDesc->setClientActive(client, false);
// store time at which the stream was stopped - see isStreamActive()
- if (outputDesc->streamActiveCount(stream) == 0 || forceDeviceUpdate) {
+ if (outputDesc->getActivityCount(clientVolSrc) == 0 || forceDeviceUpdate) {
outputDesc->setStopTime(client, systemTime());
DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
// delay the device switch by twice the latency because stopOutput() is executed when
@@ -2344,23 +2373,21 @@
}
}
-void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax)
+void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
if (indexMin < 0 || indexMax < 0) {
ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
return;
}
- mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
+ getVolumeCurves(stream).initVolume(indexMin, indexMax);
// initialize other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
+ getVolumeCurves((audio_stream_type_t)curStream).initVolume(indexMin, indexMax);
}
}
@@ -2368,13 +2395,13 @@
int index,
audio_devices_t device)
{
-
+ auto &curves = getVolumeCurves(stream);
// VOICE_CALL and BLUETOOTH_SCO stream have minVolumeIndex > 0 but
// can be muted directly by an app that has MODIFY_PHONE_STATE permission.
- if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
+ if (((index < curves.getVolumeIndexMin()) &&
!((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
index == 0)) ||
- (index > mVolumeCurves->getVolumeIndexMax(stream))) {
+ (index > curves.getVolumeIndexMax())) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
@@ -2382,7 +2409,7 @@
}
// Force max volume if stream cannot be muted
- if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
+ if (!curves.canBeMuted()) index = curves.getVolumeIndexMax();
ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
stream, device, index);
@@ -2392,7 +2419,8 @@
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
+ auto &curCurves = getVolumeCurves(static_cast<audio_stream_type_t>(curStream));
+ curCurves.addCurrentVolumeIndex(device, index);
}
// update volume on all outputs and streams matching the following:
@@ -2411,7 +2439,7 @@
if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
continue;
}
- if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
+ if (!(desc->isActive(streamToVolumeSource((audio_stream_type_t)curStream)) || isInCall())) {
continue;
}
audio_devices_t curStreamDevice = Volume::getDeviceForVolume(
@@ -2426,8 +2454,7 @@
curStreamDevice |= device;
applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
} else {
- applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
- stream, curStreamDevice);
+ applyVolume = !curves.hasVolumeIndexForDevice(curStreamDevice);
}
// rescale index before applying to curStream as ranges may be different for
// stream and curStream
@@ -2436,9 +2463,10 @@
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
- status_t volStatus =
- checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
- (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
+ status_t volStatus = checkAndSetVolume(
+ (audio_stream_type_t)curStream, idx, desc, curDevice,
+ (stream == AUDIO_STREAM_SYSTEM) ?
+ TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
}
@@ -2465,7 +2493,7 @@
}
device = Volume::getDeviceForVolume(device);
- *index = mVolumeCurves->getVolumeIndex(stream, device);
+ *index = getVolumeCurves(stream).getVolumeIndex(device);
ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
return NO_ERROR;
}
@@ -2499,7 +2527,7 @@
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
- if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
+ if (activeOnly && !desc->isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC))) {
continue;
}
ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
@@ -2593,14 +2621,14 @@
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
+ active = mOutputs.isActive(streamToVolumeSource((audio_stream_type_t)curStream), inPastMs);
}
return active;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isStreamActiveRemotely(stream, inPastMs);
+ return mOutputs.isActiveRemotely(streamToVolumeSource((audio_stream_type_t)stream), inPastMs);
}
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -2898,7 +2926,6 @@
mHwModulesAll.dump(dst);
mOutputs.dump(dst);
mInputs.dump(dst);
- mVolumeCurves->dump(dst);
mEffects.dump(dst);
mAudioPatches.dump(dst);
mPolicyMixes.dump(dst);
@@ -3630,10 +3657,11 @@
struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
- sp<SourceClientDescriptor> sourceDesc =
- new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDevice,
- mEngine->getStreamTypeForAttributes(*attributes),
- mEngine->getProductStrategyForAttributes(*attributes));
+ sp<SourceClientDescriptor> sourceDesc = new SourceClientDescriptor(
+ *portId, uid, *attributes, patchDesc, srcDevice,
+ mEngine->getStreamTypeForAttributes(*attributes),
+ mEngine->getProductStrategyForAttributes(*attributes),
+ streamToVolumeSource(mEngine->getStreamTypeForAttributes(*attributes)));
status_t status = connectAudioSource(sourceDesc);
if (status == NO_ERROR) {
@@ -4048,14 +4076,18 @@
std::vector<const char*> fileNames;
status_t ret;
- if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
- property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
- // A2DP offload supported but disabled: try to use special XML file
- if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
- fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
- } else {
+ if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) {
+ if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+            // Both BluetoothAudio@2.0 and BluetoothA2dp@1.0 (Offload) are disabled, so use
+            // the legacy hardware module for A2DP and hearing aid.
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
+ } else if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
}
+ } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false)) {
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
}
fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
@@ -4080,9 +4112,7 @@
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
- mVolumeCurves(new VolumeCurvesCollection()),
- mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
- mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
+ mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices, mDefaultOutputDevice),
mAudioPortGeneration(1),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
@@ -4116,8 +4146,6 @@
}
status_t AudioPolicyManager::initialize() {
- mVolumeCurves->initializeVolumeCurves(getConfig().isSpeakerDrcEnabled());
-
// Once policy config has been parsed, retrieve an instance of the engine and initialize it.
audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
if (!engineInstance) {
@@ -4704,37 +4732,31 @@
{
ALOGV("closeOutput(%d)", output);
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc == NULL) {
+ sp<SwAudioOutputDescriptor> closingOutput = mOutputs.valueFor(output);
+ if (closingOutput == NULL) {
ALOGW("closeOutput() unknown output %d", output);
return;
}
- mPolicyMixes.closeOutput(outputDesc);
+ mPolicyMixes.closeOutput(closingOutput);
// look for duplicated outputs connected to the output being removed.
for (size_t i = 0; i < mOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i);
- if (dupOutputDesc->isDuplicated() &&
- (dupOutputDesc->mOutput1 == outputDesc ||
- dupOutputDesc->mOutput2 == outputDesc)) {
- sp<SwAudioOutputDescriptor> outputDesc2;
- if (dupOutputDesc->mOutput1 == outputDesc) {
- outputDesc2 = dupOutputDesc->mOutput2;
- } else {
- outputDesc2 = dupOutputDesc->mOutput1;
- }
+ sp<SwAudioOutputDescriptor> dupOutput = mOutputs.valueAt(i);
+ if (dupOutput->isDuplicated() &&
+ (dupOutput->mOutput1 == closingOutput || dupOutput->mOutput2 == closingOutput)) {
+ sp<SwAudioOutputDescriptor> remainingOutput =
+ dupOutput->mOutput1 == closingOutput ? dupOutput->mOutput2 : dupOutput->mOutput1;
// As all active tracks on duplicated output will be deleted,
// and as they were also referenced on the other output, the reference
// count for their stream type must be adjusted accordingly on
// the other output.
- const bool wasActive = outputDesc2->isActive();
- for (const auto &clientPair : dupOutputDesc->getActiveClients()) {
- outputDesc2->changeStreamActiveCount(clientPair.first, -clientPair.second);
- }
+ const bool wasActive = remainingOutput->isActive();
+            // Note: no-op on the closing output where all clients have already been set inactive
+ dupOutput->setAllClientsInactive();
// stop() will be a no op if the output is still active but is needed in case all
// active streams refcounts where cleared above
if (wasActive) {
- outputDesc2->stop();
+ remainingOutput->stop();
}
audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
@@ -4746,7 +4768,7 @@
nextAudioPortGeneration();
- ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
+ ssize_t index = mAudioPatches.indexOfKey(closingOutput->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
(void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -4754,7 +4776,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- outputDesc->close();
+ closingOutput->close();
removeOutput(output);
mPreviousOutputs = mOutputs;
@@ -5105,7 +5127,7 @@
devices.merge(curDevices);
for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
+ if (outputDesc->isActive(streamToVolumeSource((audio_stream_type_t)curStream))) {
activeDevices.merge(outputDesc->devices());
}
}
@@ -5504,7 +5526,8 @@
int index,
audio_devices_t device)
{
- float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
+ auto &curves = getVolumeCurves(stream);
+ float volumeDB = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
// handle the case of accessibility active while a ringtone is playing: if the ringtone is much
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
@@ -5519,7 +5542,7 @@
// in-call: always cap volume by voice volume + some low headroom
if ((stream != AUDIO_STREAM_VOICE_CALL) &&
- (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
+ (isInCall() || mOutputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL)))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
@@ -5529,8 +5552,7 @@
case AUDIO_STREAM_ENFORCED_AUDIBLE:
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
- int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
+ int voiceVolumeIndex = getVolumeCurves(AUDIO_STREAM_VOICE_CALL).getVolumeIndex(device);
const float maxVoiceVolDb =
computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
@@ -5564,7 +5586,7 @@
|| ((stream == AUDIO_STREAM_ENFORCED_AUDIBLE) &&
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
AUDIO_POLICY_FORCE_NONE))) &&
- mVolumeCurves->canBeMuted(stream)) {
+ getVolumeCurves(stream).canBeMuted()) {
// when the phone is ringing we must consider that music could have been paused just before
// by the music application and behave as if music was active if the last music track was
// just stopped
@@ -5575,9 +5597,8 @@
mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
nullptr, true /*fromCache*/).types();
float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
- musicDevice),
- musicDevice);
+ getVolumeCurves(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice),
+ musicDevice);
float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
if (volumeDB > minVolDB) {
@@ -5612,10 +5633,12 @@
if (srcStream == dstStream) {
return srcIndex;
}
- float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
- float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
- float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
- float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+ auto &srcCurves = getVolumeCurves(srcStream);
+ auto &dstCurves = getVolumeCurves(dstStream);
+ float minSrc = (float)srcCurves.getVolumeIndexMin();
+ float maxSrc = (float)srcCurves.getVolumeIndexMax();
+ float minDst = (float)dstCurves.getVolumeIndexMin();
+ float maxDst = (float)dstCurves.getVolumeIndexMax();
// preserve mute request or correct range
if (srcIndex < minSrc) {
@@ -5630,16 +5653,15 @@
}
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
- int index,
- const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
- int delayMs,
- bool force)
+ int index,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ audio_devices_t device,
+ int delayMs,
+ bool force)
{
// do not change actual stream volume if the stream is muted
- if (outputDesc->mMuteCount[stream] != 0) {
- ALOGVV("checkAndSetVolume() stream %d muted count %d",
- stream, outputDesc->mMuteCount[stream]);
+ if (outputDesc->isMuted(streamToVolumeSource(stream))) {
+ ALOGVV("%s() stream %d muted count %d", __func__, stream, outputDesc->getMuteCount(stream));
return NO_ERROR;
}
audio_policy_forced_cfg_t forceUseForComm =
@@ -5671,7 +5693,7 @@
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
if (stream == AUDIO_STREAM_VOICE_CALL) {
- voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
+ voiceVolume = (float)index/(float)getVolumeCurves(stream).getVolumeIndexMax();
} else {
voiceVolume = 1.0;
}
@@ -5694,7 +5716,7 @@
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
checkAndSetVolume((audio_stream_type_t)stream,
- mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
+ getVolumeCurves((audio_stream_type_t)stream).getVolumeIndex(device),
outputDesc,
device,
delayMs,
@@ -5726,26 +5748,26 @@
}
ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
- stream, on, outputDesc->mMuteCount[stream], device);
-
+ stream, on, outputDesc->getMuteCount(stream), device);
+ auto &curves = getVolumeCurves(stream);
if (on) {
- if (outputDesc->mMuteCount[stream] == 0) {
- if (mVolumeCurves->canBeMuted(stream) &&
+ if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
+ if (curves.canBeMuted() &&
((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
}
}
// increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
- outputDesc->mMuteCount[stream]++;
+ outputDesc->incMuteCount(streamToVolumeSource(stream));
} else {
- if (outputDesc->mMuteCount[stream] == 0) {
+ if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
ALOGV("setStreamMute() unmuting non muted stream!");
return;
}
- if (--outputDesc->mMuteCount[stream] == 0) {
+ if (outputDesc->decMuteCount(streamToVolumeSource(stream)) == 0) {
checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, device),
+ curves.getVolumeIndex(device),
outputDesc,
device,
delayMs);
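Note (illustrative, not part of the change): the index-rescaling hunk above (the one reading minSrc/maxSrc and minDst/maxDst from the engine curves) keeps the mapping itself a plain linear rescale between the source and destination index ranges. A self-contained sketch of that arithmetic; the clamping and rounding details are assumptions.

    #include <algorithm>

    // Hypothetical standalone helper mirroring the shape of the rescale above.
    int rescaleIndex(int srcIndex, int minSrc, int maxSrc, int minDst, int maxDst) {
        if (maxSrc == minSrc) return minDst;
        srcIndex = std::min(std::max(srcIndex, minSrc), maxSrc);  // clamp to the source range
        float pos = float(srcIndex - minSrc) / float(maxSrc - minSrc);
        return minDst + int(pos * (maxDst - minDst) + 0.5f);      // round to nearest index
    }
    // Example: a "music" index of 20 on a 0..25 range maps to 6 on the 0..7 "ring" range.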
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 70ad6ac..3e4d885 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -49,7 +49,7 @@
#include <AudioPolicyMix.h>
#include <EffectDescriptor.h>
#include <SoundTriggerSession.h>
-#include <VolumeCurve.h>
+#include "TypeConverter.h"
namespace android {
@@ -143,9 +143,17 @@
virtual status_t stopInput(audio_port_handle_t portId);
virtual void releaseInput(audio_port_handle_t portId);
virtual void closeAllInputs();
- virtual void initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax);
+ /**
+     * @brief initStreamVolume: even if the engine volume files provide min and max, keep this
+     * API for compatibility reasons.
+     * AudioServer will get the min and max and may overwrite them if:
+     *   - set via property (highest priority)
+     *   - not defined (-1 by convention), i.e. when still using the APM volume table XML files
+ * @param stream to be considered
+ * @param indexMin to set
+ * @param indexMax to set
+ */
+ virtual void initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device);
@@ -310,12 +318,24 @@
{
return mAvailableInputDevices;
}
- virtual IVolumeCurvesCollection &getVolumeCurves() { return *mVolumeCurves; }
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
{
return mDefaultOutputDevice;
}
+ IVolumeCurves &getVolumeCurves(const audio_attributes_t &attr)
+ {
+ auto *curves = mEngine->getVolumeCurvesForAttributes(attr);
+ ALOG_ASSERT(curves != nullptr, "No curves for attributes %s", toString(attr).c_str());
+ return *curves;
+ }
+ IVolumeCurves &getVolumeCurves(audio_stream_type_t stream)
+ {
+ auto *curves = mEngine->getVolumeCurvesForStreamType(stream);
+ ALOG_ASSERT(curves != nullptr, "No curves for stream %s", toString(stream).c_str());
+ return *curves;
+ }
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
@@ -624,12 +644,12 @@
float mLastVoiceVolume; // last voice volume value sent to audio HAL
bool mA2dpSuspended; // true if A2DP output is suspended
- std::unique_ptr<IVolumeCurvesCollection> mVolumeCurves; // Volume Curves per use case and device category
EffectDescriptorCollection mEffects; // list of registered audio effects
sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
// dumps
+
AudioPolicyConfig mConfig;
std::atomic<uint32_t> mAudioPortGeneration;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 8ddf824..d31ce53 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -21,6 +21,7 @@
#include "TypeConverter.h"
#include <media/MediaAnalyticsItem.h>
#include <mediautils/ServiceUtilities.h>
+#include <media/AudioPolicy.h>
#include <utils/Log.h>
namespace android {
@@ -1032,9 +1033,14 @@
status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration)
{
Mutex::Autolock _l(mLock);
- if(!modifyAudioRoutingAllowed()) {
+
+    // loopback|render mixes only need a MediaProjection (checked in caller AudioService.java)
+ bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
+ return !is_mix_loopback_render(mix.mRouteFlags); });
+ if (needModifyAudioRouting && !modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
}
+
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 2ca8356..7ec0e4c 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -94,6 +94,7 @@
"libsensorprivacy",
"libstagefright",
"libstagefright_foundation",
+ "libyuv",
"android.frameworks.cameraservice.common@2.0",
"android.frameworks.cameraservice.service@2.0",
"android.frameworks.cameraservice.device@2.0",
@@ -137,6 +138,7 @@
name: "libdepthphoto",
srcs: [
+ "utils/ExifUtils.cpp",
"common/DepthPhotoProcessor.cpp",
],
@@ -150,6 +152,8 @@
"libcutils",
"libjpeg",
"libmemunreachable",
+ "libexif",
+ "libcamera_client",
],
include_dirs: [
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index e002e18..162b50f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1767,14 +1767,13 @@
case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
__FUNCTION__, errorCode, resultExtras.requestId);
+ mCaptureSequencer->notifyError(errorCode, resultExtras);
return;
default:
err = CAMERA_ERROR_UNKNOWN;
break;
}
- mCaptureSequencer->notifyError(errorCode, resultExtras);
-
ALOGE("%s: Error condition %d reported by HAL, requestId %" PRId32, __FUNCTION__, errorCode,
resultExtras.requestId);
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 2eec0f7..9525ad2 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -339,6 +339,21 @@
} else {
depthPhoto.mIsLensDistortionValid = 0;
}
+ entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
+ if (entry.count > 0) {
+        // The camera jpeg orientation value must be one of: 0, 90, 180, 270.
+ switch (entry.data.i32[0]) {
+ case 0:
+ case 90:
+ case 180:
+ case 270:
+ depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
+ break;
+ default:
+ ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
+ __FUNCTION__, entry.data.i32[0]);
+ }
+ }
size_t actualJpegSize = 0;
res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index a61cdee..9fd0e8b 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -23,6 +23,7 @@
#include <sys/syscall.h>
#include <android/hardware/camera/device/3.5/types.h>
+#include <libyuv.h>
#include <gui/Surface.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -192,6 +193,7 @@
return res;
}
+ initCopyRowFunction(width);
return res;
}
@@ -1373,7 +1375,7 @@
for (auto row = top; row < top+height; row++) {
uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::Y].mOffset +
imageInfo->mPlane[MediaImage2::Y].mRowInc * (row - top);
- memcpy(dst, yuvBuffer.data+row*yuvBuffer.stride+left, width);
+ mFnCopyRow(yuvBuffer.data+row*yuvBuffer.stride+left, dst, width);
}
// U is Cb, V is Cr
@@ -1406,24 +1408,25 @@
for (auto row = top/2; row < (top+height)/2; row++) {
uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);
- memcpy(dst, src+row*yuvBuffer.chromaStride+left, width);
+ mFnCopyRow(src+row*yuvBuffer.chromaStride+left, dst, width);
}
} else if (isCodecUvPlannar && yuvBuffer.chromaStep == 1) {
// U plane
for (auto row = top/2; row < (top+height)/2; row++) {
uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::U].mOffset +
imageInfo->mPlane[MediaImage2::U].mRowInc * (row - top/2);
- memcpy(dst, yuvBuffer.dataCb+row*yuvBuffer.chromaStride+left/2, width/2);
+ mFnCopyRow(yuvBuffer.dataCb+row*yuvBuffer.chromaStride+left/2, dst, width/2);
}
// V plane
for (auto row = top/2; row < (top+height)/2; row++) {
uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::V].mOffset +
imageInfo->mPlane[MediaImage2::V].mRowInc * (row - top/2);
- memcpy(dst, yuvBuffer.dataCr+row*yuvBuffer.chromaStride+left/2, width/2);
+ mFnCopyRow(yuvBuffer.dataCr+row*yuvBuffer.chromaStride+left/2, dst, width/2);
}
} else {
- // Convert between semiplannar and plannar
+        // Convert between semi-planar and planar, or when UV orders are
+        // different.
uint8_t *dst = codecBuffer->data();
for (auto row = top/2; row < (top+height)/2; row++) {
for (auto col = left/2; col < (left+width)/2; col++) {
@@ -1446,6 +1449,38 @@
return OK;
}
+void HeicCompositeStream::initCopyRowFunction(int32_t width)
+{
+ using namespace libyuv;
+
+ mFnCopyRow = CopyRow_C;
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ mFnCopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ mFnCopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ mFnCopyRow = CopyRow_MIPS;
+ }
+#endif
+}
+
size_t HeicCompositeStream::calcAppSegmentMaxSize(const CameraMetadata& info) {
camera_metadata_ro_entry_t entry = info.find(ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT);
size_t maxAppsSegment = 1;
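Note (illustrative, not part of the change): initCopyRowFunction() selects a libyuv row-copy kernel once, based on CPU features and tile-width alignment, and copyOneYuvTile() then invokes it once per row. A minimal sketch of the same dispatch pattern with a plain memcpy standing in for the libyuv kernels; the helper names here are illustrative only.

    #include <cstdint>
    #include <cstring>

    // Same function-pointer shape as mFnCopyRow in the change above.
    using CopyRowFn = void (*)(const uint8_t* src, uint8_t* dst, int width);

    static void copyRowC(const uint8_t* src, uint8_t* dst, int width) {
        memcpy(dst, src, width);  // stand-in for a CopyRow_* kernel
    }

    // Copy a width x height tile between two strided planes, one row at a time.
    static void copyPlaneTile(CopyRowFn copyRow, const uint8_t* src, int srcStride,
                              uint8_t* dst, int dstStride, int width, int height) {
        for (int row = 0; row < height; row++) {
            copyRow(src + row * srcStride, dst + row * dstStride, width);
        }
    }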
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 4cd9af0..2aa3c38 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -195,6 +195,7 @@
status_t copyOneYuvTile(sp<MediaCodecBuffer>& codecBuffer,
const CpuConsumer::LockedBuffer& yuvBuffer,
size_t top, size_t left, size_t width, size_t height);
+ void initCopyRowFunction(int32_t width);
static size_t calcAppSegmentMaxSize(const CameraMetadata& info);
static const nsecs_t kWaitDuration = 10000000; // 10 ms
@@ -244,6 +245,9 @@
// In most common use case, entries are accessed in order.
std::map<int64_t, InputFrame> mPendingInputFrames;
+
+    // Function pointer to the libyuv row-copy routine.
+ void (*mFnCopyRow)(const uint8_t* src, uint8_t* dst, int width);
};
}; // namespace camera3
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index a945aca..6d96163 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -32,9 +32,12 @@
#include <dynamic_depth/profile.h>
#include <dynamic_depth/profiles.h>
#include <jpeglib.h>
+#include <libexif/exif-data.h>
+#include <libexif/exif-system.h>
#include <math.h>
#include <sstream>
#include <utils/Errors.h>
+#include <utils/ExifUtils.h>
#include <utils/Log.h>
#include <xmpmeta/xmp_data.h>
#include <xmpmeta/xmp_writer.h>
@@ -61,8 +64,44 @@
namespace android {
namespace camera3 {
+ExifOrientation getExifOrientation(const unsigned char *jpegBuffer, size_t jpegBufferSize) {
+ if ((jpegBuffer == nullptr) || (jpegBufferSize == 0)) {
+ return ExifOrientation::ORIENTATION_UNDEFINED;
+ }
+
+ auto exifData = exif_data_new();
+ exif_data_load_data(exifData, jpegBuffer, jpegBufferSize);
+ ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+ EXIF_TAG_ORIENTATION);
+ if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+ ALOGV("%s: Orientation EXIF entry invalid!", __FUNCTION__);
+ exif_data_unref(exifData);
+ return ExifOrientation::ORIENTATION_0_DEGREES;
+ }
+
+ auto orientationValue = exif_get_short(orientation->data, exif_data_get_byte_order(exifData));
+ ExifOrientation ret;
+ switch (orientationValue) {
+ case ExifOrientation::ORIENTATION_0_DEGREES:
+ case ExifOrientation::ORIENTATION_90_DEGREES:
+ case ExifOrientation::ORIENTATION_180_DEGREES:
+ case ExifOrientation::ORIENTATION_270_DEGREES:
+ ret = static_cast<ExifOrientation> (orientationValue);
+ break;
+ default:
+ ALOGE("%s: Unexpected EXIF orientation value: %d, defaulting to 0 degrees",
+ __FUNCTION__, orientationValue);
+ ret = ExifOrientation::ORIENTATION_0_DEGREES;
+ }
+
+ exif_data_unref(exifData);
+
+ return ret;
+}
+
status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
- const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize) {
+ const size_t maxOutSize, uint8_t jpegQuality, ExifOrientation exifOrientation,
+ size_t &actualSize) {
status_t ret;
// libjpeg is a C library so we use C-style "inheritance" by
// putting libjpeg's jpeg_destination_mgr first in our custom
@@ -151,6 +190,23 @@
jpeg_start_compress(&cinfo, TRUE);
+ if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+ std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+ utils->initializeEmpty();
+ utils->setImageWidth(width);
+ utils->setImageHeight(height);
+ utils->setOrientationValue(exifOrientation);
+
+ if (utils->generateApp1()) {
+ const uint8_t* exifBuffer = utils->getApp1Buffer();
+ size_t exifBufferSize = utils->getApp1Length();
+ jpeg_write_marker(&cinfo, JPEG_APP0 + 1, static_cast<const JOCTET*>(exifBuffer),
+ exifBufferSize);
+ } else {
+ ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
+ }
+ }
+
for (size_t i = 0; i < cinfo.image_height; i++) {
auto currentRow = static_cast<JSAMPROW>(in + i*width);
jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
@@ -168,8 +224,106 @@
return ret;
}
+inline void unpackDepth16(uint16_t value, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ // Android densely packed depth map. The units for the range are in
+ // millimeters and need to be scaled to meters.
+ // The confidence value is encoded in the 3 most significant bits.
+ // The confidence data needs to be additionally normalized with
+ // values 1.0f, 0.0f representing maximum and minimum confidence
+ // respectively.
+ auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+ points->push_back(point);
+
+ auto conf = (value >> 13) & 0x7;
+ float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+ confidence->push_back(normConfidence);
+
+ if (*near > point) {
+ *near = point;
+ }
+ if (*far < point) {
+ *far = point;
+ }
+}
+
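+// Illustrative worked example (an added note, not part of the upstream change):
+// for a sample DEPTH16 value of 0x7D01, the low 13 bits are 0x1D01 = 7425,
+// i.e. a range of 7.425 m, and the top 3 bits are 0b011 = 3, giving a
+// normalized confidence of (3 - 1) / 7 ~= 0.29; a confidence field of 0 means
+// "unknown" and maps to full confidence (1.0).
+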
+// Trivial case, read forward from the top-left corner.
+void rotate0AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
+ for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 90 degrees CW rotation can be applied by starting to read from the bottom-left corner,
+// transposing rows and columns.
+void rotate90AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapWidth; i++) {
+ for (ssize_t j = inputFrame.mDepthMapHeight-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 180 degrees CW rotation can be applied by starting to read backwards from the bottom-right corner.
+void rotate180AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapHeight-1; i >= 0; i--) {
+ for (ssize_t j = inputFrame.mDepthMapWidth-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 270 degrees CW rotation can be applied by starting to read from the top-right corner,
+// transposing rows and columns.
+void rotate270AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapWidth-1; i >= 0; i--) {
+ for (size_t j = 0; j < inputFrame.mDepthMapHeight; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+bool rotateAndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ switch (inputFrame.mOrientation) {
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES:
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES:
+ rotate90AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES:
+ rotate180AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES:
+ rotate270AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ default:
+ ALOGE("%s: Unsupported depth photo rotation: %d, default to 0", __FUNCTION__,
+ inputFrame.mOrientation);
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ }
+
+ return false;
+}
+
std::unique_ptr<dynamic_depth::DepthMap> processDepthMapFrame(DepthPhotoInputFrame inputFrame,
- std::vector<std::unique_ptr<Item>> *items /*out*/) {
+ ExifOrientation exifOrientation, std::vector<std::unique_ptr<Item>> *items /*out*/,
+ bool *switchDimensions /*out*/) {
+ if ((items == nullptr) || (switchDimensions == nullptr)) {
+ return nullptr;
+ }
+
std::vector<float> points, confidence;
size_t pointCount = inputFrame.mDepthMapWidth * inputFrame.mDepthMapHeight;
@@ -177,29 +331,21 @@
confidence.reserve(pointCount);
float near = UINT16_MAX;
float far = .0f;
- for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
- for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
- // Android densely packed depth map. The units for the range are in
- // millimeters and need to be scaled to meters.
- // The confidence value is encoded in the 3 most significant bits.
- // The confidence data needs to be additionally normalized with
- // values 1.0f, 0.0f representing maximum and minimum confidence
- // respectively.
- auto value = inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j];
- auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
- points.push_back(point);
+ *switchDimensions = false;
+ // Physical rotation of the depth and confidence maps may be needed when the
+ // EXIF orientation is set to 0 degrees while the depth photo orientation
+ // (taken from the source color image) has a different value.
+ if (exifOrientation == ExifOrientation::ORIENTATION_0_DEGREES) {
+ *switchDimensions = rotateAndUnpack(inputFrame, &points, &confidence, &near, &far);
+ } else {
+ rotate0AndUnpack(inputFrame, &points, &confidence, &near, &far);
+ }
- auto conf = (value >> 13) & 0x7;
- float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
- confidence.push_back(normConfidence);
-
- if (near > point) {
- near = point;
- }
- if (far < point) {
- far = point;
- }
- }
+ size_t width = inputFrame.mDepthMapWidth;
+ size_t height = inputFrame.mDepthMapHeight;
+ if (*switchDimensions) {
+ width = inputFrame.mDepthMapHeight;
+ height = inputFrame.mDepthMapWidth;
}
if (near == far) {
@@ -225,18 +371,18 @@
depthParams.depth_image_data.resize(inputFrame.mMaxJpegSize);
depthParams.confidence_data.resize(inputFrame.mMaxJpegSize);
size_t actualJpegSize;
- auto ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
- pointsQuantized.data(), depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
- inputFrame.mJpegQuality, actualJpegSize);
+ auto ret = encodeGrayscaleJpeg(width, height, pointsQuantized.data(),
+ depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
if (ret != NO_ERROR) {
ALOGE("%s: Depth map compression failed!", __FUNCTION__);
return nullptr;
}
depthParams.depth_image_data.resize(actualJpegSize);
- ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
- confidenceQuantized.data(), depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
- inputFrame.mJpegQuality, actualJpegSize);
+ ret = encodeGrayscaleJpeg(width, height, confidenceQuantized.data(),
+ depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
if (ret != NO_ERROR) {
ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
return nullptr;
@@ -262,7 +408,12 @@
return BAD_VALUE;
}
- cameraParams->depth_map = processDepthMapFrame(inputFrame, &items);
+ ExifOrientation exifOrientation = getExifOrientation(
+ reinterpret_cast<const unsigned char*> (inputFrame.mMainJpegBuffer),
+ inputFrame.mMainJpegSize);
+ bool switchDimensions;
+ cameraParams->depth_map = processDepthMapFrame(inputFrame, exifOrientation, &items,
+ &switchDimensions);
if (cameraParams->depth_map == nullptr) {
ALOGE("%s: Depth map processing failed!", __FUNCTION__);
return BAD_VALUE;
@@ -274,7 +425,13 @@
// [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
inputFrame.mInstrinsicCalibration[1]);
- const Dimension imageSize(inputFrame.mMainJpegWidth, inputFrame.mMainJpegHeight);
+ size_t width = inputFrame.mMainJpegWidth;
+ size_t height = inputFrame.mMainJpegHeight;
+ if (switchDimensions) {
+ width = inputFrame.mMainJpegHeight;
+ height = inputFrame.mMainJpegWidth;
+ }
+ const Dimension imageSize(width, height);
ImagingModelParams imagingParams(focalLength, imageSize);
imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
index 19889a1..6a2fbff 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.h
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -23,19 +23,27 @@
namespace android {
namespace camera3 {
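+// Rotation, in degrees (clockwise), to physically apply to the depth and
+// confidence maps during depth photo processing.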
+enum DepthPhotoOrientation {
+ DEPTH_ORIENTATION_0_DEGREES = 0,
+ DEPTH_ORIENTATION_90_DEGREES = 90,
+ DEPTH_ORIENTATION_180_DEGREES = 180,
+ DEPTH_ORIENTATION_270_DEGREES = 270,
+};
+
struct DepthPhotoInputFrame {
- const char* mMainJpegBuffer;
- size_t mMainJpegSize;
- size_t mMainJpegWidth, mMainJpegHeight;
- uint16_t* mDepthMapBuffer;
- size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
- size_t mMaxJpegSize;
- uint8_t mJpegQuality;
- uint8_t mIsLogical;
- float mInstrinsicCalibration[5];
- uint8_t mIsInstrinsicCalibrationValid;
- float mLensDistortion[5];
- uint8_t mIsLensDistortionValid;
+ const char* mMainJpegBuffer;
+ size_t mMainJpegSize;
+ size_t mMainJpegWidth, mMainJpegHeight;
+ uint16_t* mDepthMapBuffer;
+ size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
+ size_t mMaxJpegSize;
+ uint8_t mJpegQuality;
+ uint8_t mIsLogical;
+ float mInstrinsicCalibration[5];
+ uint8_t mIsInstrinsicCalibrationValid;
+ float mLensDistortion[5];
+ uint8_t mIsLensDistortionValid;
+ DepthPhotoOrientation mOrientation;
DepthPhotoInputFrame() :
mMainJpegBuffer(nullptr),
@@ -52,7 +60,8 @@
mInstrinsicCalibration{0.f},
mIsInstrinsicCalibrationValid(0),
mLensDistortion{0.f},
- mIsLensDistortionValid(0) {}
+ mIsLensDistortionValid(0),
+ mOrientation(DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES) {}
};
static const char *kDepthPhotoLibrary = "libdepthphoto.so";
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index f9ef996..923d17a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2151,7 +2151,11 @@
// Pause to reconfigure
status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
- mRequestThread->setPaused(true);
+ if (mRequestThread.get() != nullptr) {
+ mRequestThread->setPaused(true);
+ } else {
+ return NO_INIT;
+ }
ALOGV("%s: Camera %s: Internal wait until idle (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
maxExpectedDuration);
@@ -4558,7 +4562,7 @@
return;
}
- auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter);
+ auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter - 1);
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
return;
@@ -5890,16 +5894,16 @@
if (mPaused == false) {
ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
mPaused = true;
- // Let the tracker know
- sp<StatusTracker> statusTracker = mStatusTracker.promote();
- if (statusTracker != 0) {
- statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
- }
if (mNotifyPipelineDrain) {
mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
mNotifyPipelineDrain = false;
mStreamIdsToBeDrained.clear();
}
+ // Let the tracker know
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+ }
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
parent->mRequestBufferSM.onRequestThreadPaused();
@@ -5983,16 +5987,16 @@
if (mPaused == false) {
mPaused = true;
ALOGV("%s: RequestThread: Paused", __FUNCTION__);
- // Let the tracker know
- sp<StatusTracker> statusTracker = mStatusTracker.promote();
- if (statusTracker != 0) {
- statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
- }
if (mNotifyPipelineDrain) {
mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
mNotifyPipelineDrain = false;
mStreamIdsToBeDrained.clear();
}
+ // Let the tracker know
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+ }
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
parent->mRequestBufferSM.onRequestThreadPaused();
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index d777ca1..b4e7c32 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -27,6 +27,8 @@
libcamera_client \
libcamera_metadata \
libutils \
+ libjpeg \
+ libexif \
android.hardware.camera.common@1.0 \
android.hardware.camera.provider@2.4 \
android.hardware.camera.provider@2.5 \
@@ -36,6 +38,8 @@
LOCAL_C_INCLUDES += \
system/media/private/camera/include \
+ external/dynamic_depth/includes \
+ external/dynamic_depth/internal \
LOCAL_CFLAGS += -Wall -Wextra -Werror
diff --git a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
new file mode 100644
index 0000000..2162514
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "DepthProcessorTest"
+
+#include <array>
+#include <random>
+
+#include <dlfcn.h>
+#include <gtest/gtest.h>
+
+#include "../common/DepthPhotoProcessor.h"
+#include "../utils/ExifUtils.h"
+#include "NV12Compressor.h"
+
+using namespace android;
+using namespace android::camera3;
+
+static const size_t kTestBufferWidth = 640;
+static const size_t kTestBufferHeight = 480;
+static const size_t kTestBufferNV12Size ((((kTestBufferWidth) * (kTestBufferHeight)) * 3) / 2);
+static const size_t kTestBufferDepthSize (kTestBufferWidth * kTestBufferHeight);
+static const size_t kSeed = 1234;
+
+void linkToDepthPhotoLibrary(void **libHandle /*out*/,
+ process_depth_photo_frame *processFrameFunc /*out*/) {
+ ASSERT_NE(libHandle, nullptr);
+ ASSERT_NE(processFrameFunc, nullptr);
+
+ *libHandle = dlopen(kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (*libHandle != nullptr) {
+ *processFrameFunc = reinterpret_cast<camera3::process_depth_photo_frame> (
+ dlsym(*libHandle, kDepthPhotoProcessFunction));
+ ASSERT_NE(*processFrameFunc, nullptr);
+ }
+}
+
+void generateColorJpegBuffer(int jpegQuality, ExifOrientation orientationValue, bool includeExif,
+ bool switchDimensions, std::vector<uint8_t> *colorJpegBuffer /*out*/) {
+ ASSERT_NE(colorJpegBuffer, nullptr);
+
+ std::array<uint8_t, kTestBufferNV12Size> colorSourceBuffer;
+ std::default_random_engine gen(kSeed);
+ std::uniform_int_distribution<int> uniDist(0, UINT8_MAX - 1);
+ for (size_t i = 0; i < colorSourceBuffer.size(); i++) {
+ colorSourceBuffer[i] = uniDist(gen);
+ }
+
+ size_t width = kTestBufferWidth;
+ size_t height = kTestBufferHeight;
+ if (switchDimensions) {
+ width = kTestBufferHeight;
+ height = kTestBufferWidth;
+ }
+
+ NV12Compressor jpegCompressor;
+ if (includeExif) {
+ ASSERT_TRUE(jpegCompressor.compressWithExifOrientation(
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality, orientationValue));
+ } else {
+ ASSERT_TRUE(jpegCompressor.compress(
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality));
+ }
+
+ *colorJpegBuffer = std::move(jpegCompressor.getCompressedData());
+ ASSERT_FALSE(colorJpegBuffer->empty());
+}
+
+void generateDepth16Buffer(std::array<uint16_t, kTestBufferDepthSize> *depth16Buffer /*out*/) {
+ ASSERT_NE(depth16Buffer, nullptr);
+ std::default_random_engine gen(kSeed+1);
+ std::uniform_int_distribution<int> uniDist(0, UINT16_MAX - 1);
+ for (size_t i = 0; i < depth16Buffer->size(); i++) {
+ (*depth16Buffer)[i] = uniDist(gen);
+ }
+}
+
+TEST(DepthProcessorTest, LinkToLibrary) {
+ void *libHandle;
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle != nullptr) {
+ dlclose(libHandle);
+ }
+}
+
+TEST(DepthProcessorTest, BadInput) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ DepthPhotoInputFrame inputFrame;
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), nullptr,
+ &actualDepthPhotoSize), 0);
+
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(), nullptr),
+ 0);
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, BasicDepthPhotoValidation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) && (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ // The final depth photo must consist of three jpeg images:
+ // - the main color image
+ // - the depth map image
+ // - the confidence map image
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoExifOrientation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ ExifOrientation exifOrientations[] = { ExifOrientation::ORIENTATION_UNDEFINED,
+ ExifOrientation::ORIENTATION_0_DEGREES, ExifOrientation::ORIENTATION_90_DEGREES,
+ ExifOrientation::ORIENTATION_180_DEGREES, ExifOrientation::ORIENTATION_270_DEGREES };
+ for (auto exifOrientation : exifOrientations) {
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+ /*switchDimensions*/ false, &colorJpegBuffer);
+ if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+ auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(),
+ colorJpegBuffer.size(), &jpegExifOrientation), OK);
+ ASSERT_EQ(exifOrientation, jpegExifOrientation);
+ }
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+ (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+ size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+ // Depth and confidence images must have the same EXIF orientation as the source.
+ auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthJpegExifOrientation), OK);
+ if (exifOrientation == ExifOrientation::ORIENTATION_UNDEFINED) {
+ // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+ // depth map.
+ ASSERT_EQ(depthJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+ } else {
+ ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+ }
+
+ auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize,
+ confidenceMapSize, &confidenceJpegExifOrientation), OK);
+ if (exifOrientation == ExifOrientation::ORIENTATION_UNDEFINED) {
+ // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+ // confidence map.
+ ASSERT_EQ(confidenceJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+ } else {
+ ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+ }
+ }
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoPhysicalRotation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ // In case of physical rotation, the EXIF orientation must always be 0.
+ auto exifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+ DepthPhotoOrientation depthOrientations[] = {
+ DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES };
+ for (auto depthOrientation : depthOrientations) {
+ std::vector<uint8_t> colorJpegBuffer;
+ bool switchDimensions = false;
+ size_t expectedWidth = kTestBufferWidth;
+ size_t expectedHeight = kTestBufferHeight;
+ if ((depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES) ||
+ (depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES)) {
+ switchDimensions = true;
+ expectedWidth = kTestBufferHeight;
+ expectedHeight = kTestBufferWidth;
+ }
+ generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+ switchDimensions, &colorJpegBuffer);
+ auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(), colorJpegBuffer.size(),
+ &jpegExifOrientation), OK);
+ ASSERT_EQ(exifOrientation, jpegExifOrientation);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+ inputFrame.mOrientation = depthOrientation;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+ (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+ size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+ // Depth and confidence images must have the same EXIF orientation as the source.
+ auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthJpegExifOrientation), OK);
+ ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+ size_t depthMapWidth, depthMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthMapWidth, &depthMapHeight), OK);
+ ASSERT_EQ(depthMapWidth, expectedWidth);
+ ASSERT_EQ(depthMapHeight, expectedHeight);
+
+ auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceJpegExifOrientation), OK);
+ ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+ size_t confidenceMapWidth, confidenceMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceMapWidth, &confidenceMapHeight), OK);
+ ASSERT_EQ(confidenceMapWidth, expectedWidth);
+ ASSERT_EQ(confidenceMapHeight, expectedHeight);
+ }
+
+ dlclose(libHandle);
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.cpp b/services/camera/libcameraservice/tests/NV12Compressor.cpp
new file mode 100644
index 0000000..0a41a1f
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.cpp
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "Test_NV12Compressor"
+
+#include "NV12Compressor.h"
+
+#include <libexif/exif-data.h>
+#include <netinet/in.h>
+
+using namespace android;
+using namespace android::camera3;
+
+namespace std {
+template <>
+struct default_delete<ExifEntry> {
+ inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+template <>
+struct default_delete<ExifData> {
+ inline void operator()(ExifData* data) const { exif_data_unref(data); }
+};
+
+} // namespace std
+
+bool NV12Compressor::compress(const unsigned char* data, int width, int height, int quality) {
+ if (!configureCompressor(width, height, quality)) {
+ // the method will have logged a more detailed error message than we can
+ // provide here so just return.
+ return false;
+ }
+
+ return compressData(data, /*exifData*/ nullptr);
+}
+
+bool NV12Compressor::compressWithExifOrientation(const unsigned char* data, int width, int height,
+ int quality, android::camera3::ExifOrientation exifValue) {
+ std::unique_ptr<ExifData> exifData(exif_data_new());
+ if (exifData.get() == nullptr) {
+ return false;
+ }
+
+ exif_data_set_option(exifData.get(), EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exifData.get(), EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exifData.get(), EXIF_BYTE_ORDER_INTEL);
+ std::unique_ptr<ExifEntry> exifEntry(exif_entry_new());
+ if (exifEntry.get() == nullptr) {
+ return false;
+ }
+
+ exifEntry->tag = EXIF_TAG_ORIENTATION;
+ exif_content_add_entry(exifData->ifd[EXIF_IFD_0], exifEntry.get());
+ exif_entry_initialize(exifEntry.get(), exifEntry->tag);
+ exif_set_short(exifEntry->data, EXIF_BYTE_ORDER_INTEL, exifValue);
+
+ if (!configureCompressor(width, height, quality)) {
+ return false;
+ }
+
+ return compressData(data, exifData.get());
+}
+
+const std::vector<uint8_t>& NV12Compressor::getCompressedData() const {
+ return mDestManager.mBuffer;
+}
+
+bool NV12Compressor::configureCompressor(int width, int height, int quality) {
+ mCompressInfo.err = jpeg_std_error(&mErrorManager);
+ // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+ // The compiler will not generate code to destroy them during the return
+ // below so they will leak. Additionally, do not place any calls to libjpeg
+ // that can fail above this line or any error will cause undefined behavior.
+ if (setjmp(mErrorManager.mJumpBuffer)) {
+ // This is where the error handler will jump in case setup fails
+ // The error manager will ALOG an appropriate error message
+ return false;
+ }
+
+ jpeg_create_compress(&mCompressInfo);
+
+ mCompressInfo.image_width = width;
+ mCompressInfo.image_height = height;
+ mCompressInfo.input_components = 3;
+ mCompressInfo.in_color_space = JCS_YCbCr;
+ jpeg_set_defaults(&mCompressInfo);
+
+ jpeg_set_quality(&mCompressInfo, quality, TRUE);
+ // It may seem weird to set color space here again but this will also set
+ // other fields. These fields might be overwritten by jpeg_set_defaults
+ jpeg_set_colorspace(&mCompressInfo, JCS_YCbCr);
+ mCompressInfo.raw_data_in = TRUE;
+ mCompressInfo.dct_method = JDCT_IFAST;
+ // Set sampling factors
+ mCompressInfo.comp_info[0].h_samp_factor = 2;
+ mCompressInfo.comp_info[0].v_samp_factor = 2;
+ mCompressInfo.comp_info[1].h_samp_factor = 1;
+ mCompressInfo.comp_info[1].v_samp_factor = 1;
+ mCompressInfo.comp_info[2].h_samp_factor = 1;
+ mCompressInfo.comp_info[2].v_samp_factor = 1;
+
+ mCompressInfo.dest = &mDestManager;
+
+ return true;
+}
+
+static void deinterleave(const uint8_t* vuPlanar, std::vector<uint8_t>& uRows,
+ std::vector<uint8_t>& vRows, int rowIndex, int width, int height, int stride) {
+ int numRows = (height - rowIndex) / 2;
+ if (numRows > 8) numRows = 8;
+ for (int row = 0; row < numRows; ++row) {
+ int offset = ((rowIndex >> 1) + row) * stride;
+ const uint8_t* vu = vuPlanar + offset;
+ for (int i = 0; i < (width >> 1); ++i) {
+ int index = row * (width >> 1) + i;
+ uRows[index] = vu[1];
+ vRows[index] = vu[0];
+ vu += 2;
+ }
+ }
+}
+
+bool NV12Compressor::compressData(const unsigned char* data, ExifData* exifData) {
+ const uint8_t* y[16];
+ const uint8_t* cb[8];
+ const uint8_t* cr[8];
+ const uint8_t** planes[3] = { y, cb, cr };
+
+ int i, offset;
+ int width = mCompressInfo.image_width;
+ int height = mCompressInfo.image_height;
+ const uint8_t* yPlanar = data;
+ const uint8_t* vuPlanar = data + (width * height);
+ std::vector<uint8_t> uRows(8 * (width >> 1));
+ std::vector<uint8_t> vRows(8 * (width >> 1));
+
+ // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+ // The compiler will not generate code to destroy them during the return
+ // below so they will leak. Additionally, do not place any calls to libjpeg
+ // that can fail above this line or any error will cause undefined behavior.
+ if (setjmp(mErrorManager.mJumpBuffer)) {
+ // This is where the error handler will jump in case compression fails
+ // The error manager will ALOG an appropriate error message
+ return false;
+ }
+
+ jpeg_start_compress(&mCompressInfo, TRUE);
+
+ attachExifData(exifData);
+
+ // process 16 lines of Y and 8 lines of U/V each time.
+ while (mCompressInfo.next_scanline < mCompressInfo.image_height) {
+ // deinterleave u and v
+ deinterleave(vuPlanar, uRows, vRows, mCompressInfo.next_scanline,
+ width, height, width);
+
+ // Jpeg library ignores the rows whose indices are greater than height.
+ for (i = 0; i < 16; i++) {
+ // y row
+ y[i] = yPlanar + (mCompressInfo.next_scanline + i) * width;
+
+ // construct u row and v row
+ if ((i & 1) == 0) {
+ // height and width are both halved because of downsampling
+ offset = (i >> 1) * (width >> 1);
+ cb[i/2] = &uRows[offset];
+ cr[i/2] = &vRows[offset];
+ }
+ }
+ jpeg_write_raw_data(&mCompressInfo, const_cast<JSAMPIMAGE>(planes), 16);
+ }
+
+ jpeg_finish_compress(&mCompressInfo);
+ jpeg_destroy_compress(&mCompressInfo);
+
+ return true;
+}
+
+bool NV12Compressor::attachExifData(ExifData* exifData) {
+ if (exifData == nullptr) {
+ // This is not an error; we don't require EXIF data
+ return true;
+ }
+
+ // Save the EXIF data to memory
+ unsigned char* rawData = nullptr;
+ unsigned int size = 0;
+ exif_data_save_data(exifData, &rawData, &size);
+ if (rawData == nullptr) {
+ ALOGE("Failed to create EXIF data block");
+ return false;
+ }
+
+ jpeg_write_marker(&mCompressInfo, JPEG_APP0 + 1, rawData, size);
+ free(rawData);
+ return true;
+}
+
+NV12Compressor::ErrorManager::ErrorManager() {
+ error_exit = &onJpegError;
+}
+
+void NV12Compressor::ErrorManager::onJpegError(j_common_ptr cinfo) {
+ // NOTE! Do not construct any non-trivial objects in this method at the top
+ // scope. Their destructors will not be called. If you do need such an
+ // object, create a local scope that does not include the longjmp call;
+ // that ensures the object is destroyed before longjmp is called.
+ ErrorManager* errorManager = reinterpret_cast<ErrorManager*>(cinfo->err);
+
+ // Format and log error message
+ char errorMessage[JMSG_LENGTH_MAX];
+ (*errorManager->format_message)(cinfo, errorMessage);
+ errorMessage[sizeof(errorMessage) - 1] = '\0';
+ ALOGE("JPEG compression error: %s", errorMessage);
+ jpeg_destroy(cinfo);
+
+ // And through the looking glass we go
+ longjmp(errorManager->mJumpBuffer, 1);
+}
+
+NV12Compressor::DestinationManager::DestinationManager() {
+ init_destination = &initDestination;
+ empty_output_buffer = &emptyOutputBuffer;
+ term_destination = &termDestination;
+}
+
+void NV12Compressor::DestinationManager::initDestination(j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Start out with some arbitrary but not too large buffer size
+ manager->mBuffer.resize(16 * 1024);
+ manager->next_output_byte = &manager->mBuffer[0];
+ manager->free_in_buffer = manager->mBuffer.size();
+}
+
+boolean NV12Compressor::DestinationManager::emptyOutputBuffer(
+ j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Keep doubling the size of the buffer so that the amortized cost of the
+ // allocations stays very low.
+ size_t oldSize = manager->mBuffer.size();
+ manager->mBuffer.resize(oldSize * 2);
+ manager->next_output_byte = &manager->mBuffer[oldSize];
+ manager->free_in_buffer = manager->mBuffer.size() - oldSize;
+ return manager->free_in_buffer != 0;
+}
+
+void NV12Compressor::DestinationManager::termDestination(j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Resize down to the exact size of the output, that is, remove as many
+ // unused bytes as are left in the buffer.
+ manager->mBuffer.resize(manager->mBuffer.size() - manager->free_in_buffer);
+}
+
+status_t NV12Compressor::findJpegSize(uint8_t *jpegBuffer, size_t maxSize, size_t *size /*out*/) {
+ if ((size == nullptr) || (jpegBuffer == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ if (checkJpegStart(jpegBuffer) == 0) {
+ return BAD_VALUE;
+ }
+
+ // Read JFIF segment markers, skip over segment data
+ *size = kMarkerLength; // skip over the Start Of Image marker
+ while (*size <= maxSize - kMarkerLength) {
+ segment_t *segment = (segment_t*)(jpegBuffer + *size);
+ uint8_t type = checkJpegMarker(segment->marker);
+ if (type == 0) { // invalid marker, no more segments, begin JPEG data
+ break;
+ }
+ if (type == kEndOfImage || *size > maxSize - sizeof(segment_t)) {
+ return BAD_VALUE;
+ }
+
+ size_t length = ntohs(segment->length);
+ *size += length + kMarkerLength;
+ }
+
+ // Find End of Image
+ // Scan JPEG buffer until End of Image
+ bool foundEnd = false;
+ for ( ; *size <= maxSize - kMarkerLength; (*size)++) {
+ if (checkJpegEnd(jpegBuffer + *size)) {
+ foundEnd = true;
+ *size += kMarkerLength;
+ break;
+ }
+ }
+
+ if (!foundEnd) {
+ return BAD_VALUE;
+ }
+
+ if (*size > maxSize) {
+ *size = maxSize;
+ }
+
+ return OK;
+}
+
+status_t NV12Compressor::getJpegImageDimensions(uint8_t *jpegBuffer,
+ size_t jpegBufferSize, size_t *width /*out*/, size_t *height /*out*/) {
+ if ((jpegBuffer == nullptr) || (width == nullptr) || (height == nullptr) ||
+ (jpegBufferSize == 0u)) {
+ return BAD_VALUE;
+ }
+
+ // Scan JPEG buffer until Start of Frame
+ bool foundSOF = false;
+ size_t currentPos;
+ for (currentPos = 0; currentPos <= jpegBufferSize - kMarkerLength; currentPos++) {
+ if (checkStartOfFrame(jpegBuffer + currentPos)) {
+ foundSOF = true;
+ currentPos += kMarkerLength;
+ break;
+ }
+ }
+
+ if (!foundSOF) {
+ ALOGE("%s: Start of Frame not found", __func__);
+ return BAD_VALUE;
+ }
+
+ sof_t *startOfFrame = reinterpret_cast<sof_t *> (jpegBuffer + currentPos);
+ *width = ntohs(startOfFrame->width);
+ *height = ntohs(startOfFrame->height);
+
+ return OK;
+}
+
+status_t NV12Compressor::getExifOrientation(uint8_t *jpegBuffer, size_t jpegBufferSize,
+ ExifOrientation *exifValue /*out*/) {
+ if ((jpegBuffer == nullptr) || (exifValue == nullptr) || (jpegBufferSize == 0u)) {
+ return BAD_VALUE;
+ }
+
+ std::unique_ptr<ExifData> exifData(exif_data_new());
+ exif_data_load_data(exifData.get(), jpegBuffer, jpegBufferSize);
+ ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+ EXIF_TAG_ORIENTATION);
+ if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+ return BAD_VALUE;
+ }
+
+ auto orientationValue = exif_get_short(orientation->data,
+ exif_data_get_byte_order(exifData.get()));
+ status_t ret;
+ switch (orientationValue) {
+ case ExifOrientation::ORIENTATION_0_DEGREES:
+ case ExifOrientation::ORIENTATION_90_DEGREES:
+ case ExifOrientation::ORIENTATION_180_DEGREES:
+ case ExifOrientation::ORIENTATION_270_DEGREES:
+ *exifValue = static_cast<ExifOrientation> (orientationValue);
+ ret = OK;
+ break;
+ default:
+ ALOGE("%s: Unexpected EXIF orientation value: %u", __FUNCTION__, orientationValue);
+ ret = BAD_VALUE;
+ }
+
+ return ret;
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
new file mode 100644
index 0000000..ee22d5e
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+#define TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
+#include <setjmp.h>
+#include <stdlib.h>
+extern "C" {
+#include <jpeglib.h>
+#include <jerror.h>
+}
+
+#include <utils/Errors.h>
+#include <vector>
+
+#include "../utils/ExifUtils.h"
+
+struct _ExifData;
+typedef _ExifData ExifData;
+
+class NV12Compressor {
+public:
+ NV12Compressor() {}
+
+ /* Compress |data| which represents raw NV21 encoded data of dimensions
+ * |width| * |height|.
+ */
+ bool compress(const unsigned char* data, int width, int height, int quality);
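+ /* Same as compress(), but also embeds an EXIF APP1 segment carrying the given
+ * orientation value in the compressed output.
+ */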
+ bool compressWithExifOrientation(const unsigned char* data, int width, int height, int quality,
+ android::camera3::ExifOrientation exifValue);
+
+ /* Get a reference to the compressed data; this will return an empty vector
+ * if compress has not been called yet.
+ */
+ const std::vector<unsigned char>& getCompressedData() const;
+
+ // Utility methods
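+ /* Scan |jpegBuffer| (at most |maxSize| bytes) and report in |size| how many bytes
+ * the first complete jpeg image occupies, up to and including its End of Image marker.
+ */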
+ static android::status_t findJpegSize(uint8_t *jpegBuffer, size_t maxSize,
+ size_t *size /*out*/);
+
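+ /* Parse the EXIF orientation tag, if any, from |jpegBuffer| into |exifValue|.
+ * Returns BAD_VALUE when the tag is missing or holds an unexpected value.
+ */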
+ static android::status_t getExifOrientation(uint8_t *jpegBuffer,
+ size_t jpegBufferSize, android::camera3::ExifOrientation *exifValue /*out*/);
+
+ /* Get the jpeg image dimensions from the first Start Of Frame. Please note that, due to the
+ * way the jpeg buffer is scanned, if the image contains a thumbnail then the dimensions
+ * returned will be those of the thumbnail and not the main image.
+ */
+ static android::status_t getJpegImageDimensions(uint8_t *jpegBuffer, size_t jpegBufferSize,
+ size_t *width /*out*/, size_t *height /*out*/);
+
+private:
+
+ struct DestinationManager : jpeg_destination_mgr {
+ DestinationManager();
+
+ static void initDestination(j_compress_ptr cinfo);
+ static boolean emptyOutputBuffer(j_compress_ptr cinfo);
+ static void termDestination(j_compress_ptr cinfo);
+
+ std::vector<unsigned char> mBuffer;
+ };
+
+ struct ErrorManager : jpeg_error_mgr {
+ ErrorManager();
+
+ static void onJpegError(j_common_ptr cinfo);
+
+ jmp_buf mJumpBuffer;
+ };
+
+ static const size_t kMarkerLength = 2; // length of a marker
+ static const uint8_t kMarker = 0xFF; // First byte of marker
+ static const uint8_t kStartOfImage = 0xD8; // Start of Image
+ static const uint8_t kEndOfImage = 0xD9; // End of Image
+ static const uint8_t kStartOfFrame = 0xC0; // Start of Frame
+
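+ // Generic jpeg marker segment header; |length| is stored big-endian in the
+ // jpeg stream and must be byte-swapped (e.g. with ntohs) before use.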
+ struct __attribute__((packed)) segment_t {
+ uint8_t marker[kMarkerLength];
+ uint16_t length;
+ };
+
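+ // Start of Frame payload; |height| and |width| are stored big-endian as well.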
+ struct __attribute__((packed)) sof_t {
+ uint16_t length;
+ uint8_t precision;
+ uint16_t height;
+ uint16_t width;
+ };
+
+ // check for Start of Frame marker
+ static bool checkStartOfFrame(uint8_t* buf) {
+ return buf[0] == kMarker && buf[1] == kStartOfFrame;
+ }
+
+ // check for start of image marker
+ static bool checkJpegStart(uint8_t* buf) {
+ return buf[0] == kMarker && buf[1] == kStartOfImage;
+ }
+
+ // check for End of Image marker
+ static bool checkJpegEnd(uint8_t *buf) {
+ return buf[0] == kMarker && buf[1] == kEndOfImage;
+ }
+
+ // check for arbitrary marker, returns marker type (second byte)
+ // returns 0 if no marker found. Note: 0x00 is not a valid marker type
+ static uint8_t checkJpegMarker(uint8_t *buf) {
+ return (buf[0] == kMarker) ? buf[1] : 0;
+ }
+
+ jpeg_compress_struct mCompressInfo;
+ DestinationManager mDestManager;
+ ErrorManager mErrorManager;
+
+ bool configureCompressor(int width, int height, int quality);
+ bool compressData(const unsigned char* data, ExifData* exifData);
+ bool attachExifData(ExifData* exifData);
+};
+
+#endif // TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
index 4dea8b5..c0afdc1 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.cpp
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -55,6 +55,7 @@
// Initialize() can be called multiple times. The setting of Exif tags will be
// cleared.
virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize);
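+ // Initialize with an empty EXIF data structure, without importing an existing
+ // APP1 segment. The setting of Exif tags will be cleared.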
+ virtual bool initializeEmpty();
// set all known fields from a metadata structure
virtual bool setFromMetadata(const CameraMetadata& metadata,
@@ -150,7 +151,11 @@
// sets image orientation.
// Returns false if memory allocation fails.
- virtual bool setOrientation(uint16_t orientation);
+ virtual bool setOrientation(uint16_t degrees);
+
+ // sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientationValue(ExifOrientation orientationValue);
// sets the shutter speed.
// Returns false if memory allocation fails.
@@ -314,6 +319,26 @@
return true;
}
+bool ExifUtilsImpl::initializeEmpty() {
+ reset();
+ exif_data_ = exif_data_new();
+ if (exif_data_ == nullptr) {
+ ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+ return false;
+ }
+ // set the image options.
+ exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+ // set exif version to 2.2.
+ if (!setExifVersion("0220")) {
+ return false;
+ }
+
+ return true;
+}
+
bool ExifUtilsImpl::setAperture(float aperture) {
float apexValue = convertToApex(aperture);
SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_APERTURE_VALUE,
@@ -609,32 +634,26 @@
return true;
}
-bool ExifUtilsImpl::setOrientation(uint16_t orientation) {
- /*
- * Orientation value:
- * 1 2 3 4 5 6 7 8
- *
- * 888888 888888 88 88 8888888888 88 88 8888888888
- * 88 88 88 88 88 88 88 88 88 88 88 88
- * 8888 8888 8888 8888 88 8888888888 8888888888 88
- * 88 88 88 88
- * 88 88 888888 888888
- */
- int value = 1;
- switch (orientation) {
+bool ExifUtilsImpl::setOrientation(uint16_t degrees) {
+ ExifOrientation value = ExifOrientation::ORIENTATION_0_DEGREES;
+ switch (degrees) {
case 90:
- value = 6;
+ value = ExifOrientation::ORIENTATION_90_DEGREES;
break;
case 180:
- value = 3;
+ value = ExifOrientation::ORIENTATION_180_DEGREES;
break;
case 270:
- value = 8;
+ value = ExifOrientation::ORIENTATION_270_DEGREES;
break;
default:
break;
}
- SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
+ return setOrientationValue(value);
+}
+
+bool ExifUtilsImpl::setOrientationValue(ExifOrientation orientationValue) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, orientationValue);
return true;
}
diff --git a/services/camera/libcameraservice/utils/ExifUtils.h b/services/camera/libcameraservice/utils/ExifUtils.h
index c78bab9..f1d0205 100644
--- a/services/camera/libcameraservice/utils/ExifUtils.h
+++ b/services/camera/libcameraservice/utils/ExifUtils.h
@@ -22,6 +22,24 @@
namespace android {
namespace camera3 {
+/*
+ * Orientation value:
+ * 1 2 3 4 5 6 7 8
+ *
+ * 888888 888888 88 88 8888888888 88 88 8888888888
+ * 88 88 88 88 88 88 88 88 88 88 88 88
+ * 8888 8888 8888 8888 88 8888888888 8888888888 88
+ * 88 88 88 88
+ * 88 88 888888 888888
+ */
+enum ExifOrientation : uint16_t {
+ ORIENTATION_UNDEFINED = 0x0,
+ ORIENTATION_0_DEGREES = 0x1,
+ ORIENTATION_90_DEGREES = 0x6,
+ ORIENTATION_180_DEGREES = 0x3,
+ ORIENTATION_270_DEGREES = 0x8,
+};
+
// This is based on the camera HIDL shim implementation, which was in turned
// based on original ChromeOS ARC implementation of a V4L2 HAL
@@ -49,6 +67,7 @@
// Initialize() can be called multiple times. The setting of Exif tags will be
// cleared.
virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize) = 0;
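+ // Initialize with an empty EXIF data structure, without importing an existing APP1 segment.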
+ virtual bool initializeEmpty() = 0;
// Set all known fields from a metadata structure
virtual bool setFromMetadata(const CameraMetadata& metadata,
@@ -142,7 +161,11 @@
// Sets image orientation.
// Returns false if memory allocation fails.
- virtual bool setOrientation(uint16_t orientation) = 0;
+ virtual bool setOrientation(uint16_t degrees) = 0;
+
+ // Sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientationValue(ExifOrientation orientationValue) = 0;
// Sets the shutter speed.
// Returns false if memory allocation fails.
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 80d3630..1470de2 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -28,6 +28,7 @@
"libcodec2_soft_amrwbdec",
"libcodec2_soft_amrwbenc",
"libcodec2_soft_hevcdec",
+ "libcodec2_soft_hevcenc",
"libcodec2_soft_g711alawdec",
"libcodec2_soft_g711mlawdec",
"libcodec2_soft_mpeg2dec",