Merge "Add OnMediaTimeDiscontinuity notification" into pi-dev
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index a1a8cd6..c59d0e7 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -221,7 +221,7 @@
mCallbacks.erase(cb);
}
-void CameraManagerGlobal::getCameraIdList(std::vector<String8> *cameraIds) {
+void CameraManagerGlobal::getCameraIdList(std::vector<String8>* cameraIds) {
// Ensure that we have initialized/refreshed the list of available devices
auto cs = getCameraService();
Mutex::Autolock _l(mLock);
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 4a172f3..cc42f77 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -19,6 +19,7 @@
#include <camera/NdkCameraManager.h>
+#include <android-base/parseint.h>
#include <android/hardware/ICameraService.h>
#include <android/hardware/BnCameraServiceListener.h>
#include <camera/CameraMetadata.h>
@@ -140,8 +141,29 @@
static bool validStatus(int32_t status);
static bool isStatusAvailable(int32_t status);
+ // The sort logic must match the logic in
+ // libcameraservice/common/CameraProviderManager.cpp::getAPI1CompatibleCameraDeviceIds
+ struct CameraIdComparator {
+ bool operator()(const String8& a, const String8& b) const {
+ uint32_t aUint = 0, bUint = 0;
+ bool aIsUint = base::ParseUint(a.c_str(), &aUint);
+ bool bIsUint = base::ParseUint(b.c_str(), &bUint);
+
+ // Uint device IDs first
+ if (aIsUint && bIsUint) {
+ return aUint < bUint;
+ } else if (aIsUint) {
+ return true;
+ } else if (bIsUint) {
+ return false;
+ }
+ // Simple string compare if both ids are not uint
+ return a < b;
+ }
+ };
+
// Map camera_id -> status
- std::map<String8, int32_t> mDeviceStatusMap;
+ std::map<String8, int32_t, CameraIdComparator> mDeviceStatusMap;
// For the singleton instance
static Mutex sLock;
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2829b90..c7d2545 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1913,8 +1913,8 @@
* the thumbnail data will also be rotated.</p>
* <p>Note that this orientation is relative to the orientation of the camera sensor, given
* by ACAMERA_SENSOR_ORIENTATION.</p>
- * <p>To translate from the device orientation given by the Android sensor APIs, the following
- * sample code may be used:</p>
+ * <p>To translate from the device orientation given by the Android sensor APIs for camera
+ * sensors which are not EXTERNAL, the following sample code may be used:</p>
* <pre><code>private int getJpegOrientation(CameraCharacteristics c, int deviceOrientation) {
* if (deviceOrientation == android.view.OrientationEventListener.ORIENTATION_UNKNOWN) return 0;
* int sensorOrientation = c.get(CameraCharacteristics.SENSOR_ORIENTATION);
@@ -1933,6 +1933,8 @@
* return jpegOrientation;
* }
* </code></pre>
+ * <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
+ * also be set to EXTERNAL. The above code is not relevant in such a case.</p>
*
* @see ACAMERA_SENSOR_ORIENTATION
*/
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 40aeb9f..73ecda1 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -341,10 +341,10 @@
return OK;
}
- CryptoPlugin::SubSample *subSamples =
- new CryptoPlugin::SubSample[numSubSamples];
+ std::unique_ptr<CryptoPlugin::SubSample[]> subSamples =
+ std::make_unique<CryptoPlugin::SubSample[]>(numSubSamples);
- data.read(subSamples,
+ data.read(subSamples.get(),
sizeof(CryptoPlugin::SubSample) * numSubSamples);
DestinationBuffer destination;
@@ -402,7 +402,7 @@
result = -EINVAL;
} else {
result = decrypt(key, iv, mode, pattern, source, offset,
- subSamples, numSubSamples, destination, &errorDetailMsg);
+ subSamples.get(), numSubSamples, destination, &errorDetailMsg);
}
reply->writeInt32(result);
@@ -421,9 +421,7 @@
}
}
- delete[] subSamples;
- subSamples = NULL;
-
+ subSamples.reset();
return OK;
}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index 300c688..d51e29d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -400,7 +400,7 @@
if (level > SecurityLevel::SW_SECURE_CRYPTO) {
ALOGE("Cannot set security level > max");
- return Status::BAD_VALUE;
+ return Status::ERROR_DRM_CANNOT_HANDLE;
}
std::vector<uint8_t> sid = toVector(sessionId);
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 07ef0e3..99f32d5 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1123,19 +1123,33 @@
case FOURCC('t', 'r', 'e', 'f'):
{
- *offset += chunk_size;
-
- if (mLastTrack == NULL) {
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (*offset != stop_offset) {
return ERROR_MALFORMED;
}
+ break;
+ }
- // Skip thumbnail track for now since we don't have an
- // API to retrieve it yet.
- // The thumbnail track can't be accessed by negative index or time,
- // because each timed sample has its own corresponding thumbnail
- // in the thumbnail track. We'll need a dedicated API to retrieve
- // thumbnail at time instead.
- mLastTrack->skipTrack = true;
+ case FOURCC('t', 'h', 'm', 'b'):
+ {
+ *offset += chunk_size;
+
+ if (mLastTrack != NULL) {
+ // Skip thumbnail track for now since we don't have an
+ // API to retrieve it yet.
+ // The thumbnail track can't be accessed by negative index or time,
+ // because each timed sample has its own corresponding thumbnail
+ // in the thumbnail track. We'll need a dedicated API to retrieve
+ // thumbnail at time instead.
+ mLastTrack->skipTrack = true;
+ }
break;
}
@@ -2353,7 +2367,9 @@
// This means that the file should have moov box.
// It could be any iso files (mp4, heifs, etc.)
mHasMoovBox = true;
- ALOGV("identified HEIF image with other tracks");
+ if (mIsHeif) {
+ ALOGV("identified HEIF image with other tracks");
+ }
}
}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 50c1295..ac2e46e 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -2248,6 +2248,16 @@
staticPosition = mStaticProxy->getPosition().unsignedValue();
}
+ // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
+ // causes a lot of churn on the service side, and it can reject starting
+ // playback of a previously created track. May also apply to other cases.
+ const int INITIAL_RETRIES = 3;
+ int retries = INITIAL_RETRIES;
+retry:
+ if (retries < INITIAL_RETRIES) {
+ // See the comment for clearAudioConfigCache at the start of the function.
+ AudioSystem::clearAudioConfigCache();
+ }
mFlags = mOrigFlags;
// If a new IAudioTrack is successfully created, createTrack_l() will modify the
@@ -2256,7 +2266,10 @@
// If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
status_t result = createTrack_l();
- if (result == NO_ERROR) {
+ if (result != NO_ERROR) {
+ ALOGW("%s(): createTrack_l failed, do not retry", __func__);
+ retries = 0;
+ } else {
// take the frames that will be lost by track recreation into account in saved position
// For streaming tracks, this is the amount we obtained from the user/client
// (not the number actually consumed at the server - those are already lost).
@@ -2301,7 +2314,10 @@
mFramesWrittenAtRestore = mFramesWrittenServerOffset;
}
if (result != NO_ERROR) {
- ALOGW("restoreTrack_l() failed status %d", result);
+ ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+ if (--retries > 0) {
+ goto retry;
+ }
mState = STATE_STOPPED;
mReleased = 0;
}
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index a20f1f2..77cfe4d 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -87,7 +87,7 @@
GET_AUDIO_HW_SYNC_FOR_SESSION,
SYSTEM_READY,
FRAME_COUNT_HAL,
- LIST_MICROPHONES,
+ GET_MICROPHONES,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -849,7 +849,7 @@
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- status_t status = remote()->transact(LIST_MICROPHONES, data, &reply);
+ status_t status = remote()->transact(GET_MICROPHONES, data, &reply);
if (status != NO_ERROR ||
(status = (status_t)reply.readInt32()) != NO_ERROR) {
return status;
@@ -1444,7 +1444,7 @@
reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
return NO_ERROR;
} break;
- case LIST_MICROPHONES: {
+ case GET_MICROPHONES: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
std::vector<media::MicrophoneInfo> microphones;
status_t status = getMicrophones(&microphones);
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
index 0d9c6c4..5b99d70 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/2.0/DeviceHalHidl.cpp
@@ -53,7 +53,7 @@
audio_devices_t device, const char* halAddress, DeviceAddress* address) {
address->device = AudioDevice(device);
- if (address == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
return OK;
}
const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
@@ -346,6 +346,12 @@
return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
}
+status_t DeviceHalHidl::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+ if (mDevice == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+
status_t DeviceHalHidl::dump(int fd) {
if (mDevice == 0) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/2.0/DeviceHalHidl.h
index 8651b51..3c1cb59 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.h
+++ b/media/libaudiohal/2.0/DeviceHalHidl.h
@@ -107,6 +107,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
virtual status_t dump(int fd);
private:
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
index fc098f5..ec3bf78 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/2.0/DeviceHalLocal.cpp
@@ -184,6 +184,11 @@
return INVALID_OPERATION;
}
+status_t DeviceHalLocal::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+
status_t DeviceHalLocal::dump(int fd) {
return mDev->dump(mDev, fd);
}
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/2.0/DeviceHalLocal.h
index 865f296..aec201a 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.h
+++ b/media/libaudiohal/2.0/DeviceHalLocal.h
@@ -100,6 +100,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
virtual status_t dump(int fd);
void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
index 0cafa36..9869cd2 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/2.0/StreamHalHidl.cpp
@@ -555,6 +555,11 @@
}
}
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+ // Audio HAL V2.0 does not support propagating source metadata
+ return INVALID_OPERATION;
+}
+
void StreamOutHalHidl::onWriteReady() {
sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
if (callback == 0) return;
@@ -749,4 +754,15 @@
}
}
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ if (mStream == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+ // Audio HAL V2.0 does not support propagating sink metadata
+ return INVALID_OPERATION;
+}
+
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/2.0/StreamHalHidl.h
index d4ab943..ebad8ae 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.h
+++ b/media/libaudiohal/2.0/StreamHalHidl.h
@@ -161,6 +161,9 @@
// Return a recent count of the number of audio frames presented to an external observer.
virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
// Methods used by StreamOutCallback (HIDL).
void onWriteReady();
void onDrainReady();
@@ -210,6 +213,12 @@
// the clock time associated with that frame count.
virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
private:
friend class DeviceHalHidl;
typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
index 8d61e24..98107e5 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/2.0/StreamHalLocal.cpp
@@ -231,6 +231,19 @@
return mStream->get_presentation_position(mStream, frames, timestamp);
}
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ if (mStream->update_source_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const source_metadata_t metadata {
+ .track_count = sourceMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+ };
+ mStream->update_source_metadata(mStream, &metadata);
+ return OK;
+}
+
status_t StreamOutHalLocal::start() {
if (mStream->start == NULL) return INVALID_OPERATION;
return mStream->start(mStream);
@@ -292,6 +305,19 @@
return mStream->get_capture_position(mStream, frames, time);
}
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ if (mStream->update_sink_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const sink_metadata_t metadata {
+ .track_count = sinkMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+ };
+ mStream->update_sink_metadata(mStream, &metadata);
+ return OK;
+}
+
status_t StreamInHalLocal::start() {
if (mStream->start == NULL) return INVALID_OPERATION;
return mStream->start(mStream);
@@ -313,4 +339,9 @@
return mStream->get_mmap_position(mStream, position);
}
+status_t StreamInHalLocal::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+
} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/2.0/StreamHalLocal.h
index c7136df..cda8d0c 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.h
+++ b/media/libaudiohal/2.0/StreamHalLocal.h
@@ -149,6 +149,9 @@
// Get current read/write position in the mmap buffer
virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
private:
audio_stream_out_t *mStream;
wp<StreamOutHalInterfaceCallback> mCallback;
@@ -194,6 +197,12 @@
// Get current read/write position in the mmap buffer
virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
private:
audio_stream_in_t *mStream;
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/4.0/Android.bp
index 3d104ab..833defa 100644
--- a/media/libaudiohal/4.0/Android.bp
+++ b/media/libaudiohal/4.0/Android.bp
@@ -26,6 +26,7 @@
shared_libs: [
"libaudiohal_deathhandler",
"libaudioutils",
+ "libbinder",
"libcutils",
"liblog",
"libutils",
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
index a3cc28f..fe27504 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
@@ -22,6 +22,11 @@
#include "ConversionHelperHidl.h"
+using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
+using ::android::hardware::audio::V4_0::DeviceAddress;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
using ::android::hardware::audio::V4_0::Result;
namespace android {
@@ -101,5 +106,132 @@
ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
}
+// TODO: Use the same implementation in the hal when it moves to a util library.
+std::string deviceAddressToHal(const DeviceAddress& address) {
+ // HAL assumes that the address is NUL-terminated.
+ char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+ memset(halAddress, 0, sizeof(halAddress));
+ audio_devices_t halDevice = static_cast<audio_devices_t>(address.device);
+ const bool isInput = (halDevice & AUDIO_DEVICE_BIT_IN) != 0;
+ if (isInput) halDevice &= ~AUDIO_DEVICE_BIT_IN;
+ if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%02X:%02X:%02X:%02X:%02X:%02X",
+ address.address.mac[0], address.address.mac[1], address.address.mac[2],
+ address.address.mac[3], address.address.mac[4], address.address.mac[5]);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_IP) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_IP) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%d.%d.%d.%d", address.address.ipv4[0],
+ address.address.ipv4[1], address.address.ipv4[2], address.address.ipv4[3]);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_USB) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_ALL_USB) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "card=%d;device=%d", address.address.alsa.card,
+ address.address.alsa.device);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_BUS) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_BUS) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_REMOTE_SUBMIX) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.rSubmixAddress.c_str());
+ } else {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+ }
+ return halAddress;
+}
+
+//local conversion helpers
+
+audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+ switch (mapping) {
+ case AudioMicrophoneChannelMapping::UNUSED:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+ case AudioMicrophoneChannelMapping::DIRECT:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT;
+ case AudioMicrophoneChannelMapping::PROCESSED:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED;
+ default:
+ LOG_ALWAYS_FATAL("Unknown channelMappingToHal conversion %d", mapping);
+ }
+}
+
+audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+ switch (location) {
+ case AudioMicrophoneLocation::UNKNOWN:
+ return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
+ case AudioMicrophoneLocation::MAINBODY:
+ return AUDIO_MICROPHONE_LOCATION_MAINBODY;
+ case AudioMicrophoneLocation::MAINBODY_MOVABLE:
+ return AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE;
+ case AudioMicrophoneLocation::PERIPHERAL:
+ return AUDIO_MICROPHONE_LOCATION_PERIPHERAL;
+ default:
+ LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
+ }
+}
+audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+ switch (dir) {
+ case AudioMicrophoneDirectionality::UNKNOWN:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
+ case AudioMicrophoneDirectionality::OMNI:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
+ case AudioMicrophoneDirectionality::BI_DIRECTIONAL:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL;
+ case AudioMicrophoneDirectionality::CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID;
+ case AudioMicrophoneDirectionality::HYPER_CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID;
+ case AudioMicrophoneDirectionality::SUPER_CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID;
+ default:
+ LOG_ALWAYS_FATAL("Unknown directionalityToHal conversion %d", dir);
+ }
+}
+
+// static
+void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst) {
+ if (pDst != NULL) {
+ snprintf(pDst->device_id, sizeof(pDst->device_id),
+ "%s", src.deviceId.c_str());
+ pDst->device = static_cast<audio_devices_t>(src.deviceAddress.device);
+ snprintf(pDst->address, sizeof(pDst->address),
+ "%s", deviceAddressToHal(src.deviceAddress).c_str());
+ if (src.channelMapping.size() > AUDIO_CHANNEL_COUNT_MAX) {
+ ALOGW("microphoneInfoToStruct found %zu channelMapping elements. Max expected is %d",
+ src.channelMapping.size(), AUDIO_CHANNEL_COUNT_MAX);
+ }
+ size_t ch;
+ for (ch = 0; ch < src.channelMapping.size() && ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+ pDst->channel_mapping[ch] = channelMappingToHal(src.channelMapping[ch]);
+ }
+ for (; ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+ pDst->channel_mapping[ch] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+ }
+ pDst->location = locationToHal(src.location);
+ pDst->group = (audio_microphone_group_t)src.group;
+ pDst->index_in_the_group = (unsigned int)src.indexInTheGroup;
+ pDst->sensitivity = src.sensitivity;
+ pDst->max_spl = src.maxSpl;
+ pDst->min_spl = src.minSpl;
+ pDst->directionality = directionalityToHal(src.directionality);
+ pDst->num_frequency_responses = (unsigned int)src.frequencyResponse.size();
+ if (pDst->num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGW("microphoneInfoToStruct found %d frequency responses. Max expected is %d",
+ pDst->num_frequency_responses, AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES);
+ pDst->num_frequency_responses = AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES;
+ }
+ for (size_t k = 0; k < pDst->num_frequency_responses; k++) {
+ pDst->frequency_responses[0][k] = src.frequencyResponse[k].frequency;
+ pDst->frequency_responses[1][k] = src.frequencyResponse[k].level;
+ }
+ pDst->geometric_location.x = src.position.x;
+ pDst->geometric_location.y = src.position.y;
+ pDst->geometric_location.z = src.position.z;
+ pDst->orientation.x = src.orientation.x;
+ pDst->orientation.y = src.orientation.y;
+ pDst->orientation.z = src.orientation.z;
+ }
+}
+
} // namespace V4_0
} // namespace android
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
index ddc8569..8823a8d 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.h
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.h
@@ -19,9 +19,11 @@
#include <android/hardware/audio/4.0/types.h>
#include <hidl/HidlSupport.h>
+#include <system/audio.h>
#include <utils/String8.h>
using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
using ::android::hardware::Return;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
@@ -34,6 +36,8 @@
static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+ static void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst);
ConversionHelperHidl(const char* className);
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/4.0/DeviceHalHidl.cpp
index 8da1051..6facca9 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/4.0/DeviceHalHidl.cpp
@@ -59,7 +59,7 @@
audio_devices_t device, const char* halAddress, DeviceAddress* address) {
address->device = AudioDevice(device);
- if (address == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
return OK;
}
const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
@@ -359,6 +359,23 @@
return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
}
+status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMicrophones(
+ [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+ retval = r;
+ for (size_t k = 0; k < micArrayHal.size(); k++) {
+ audio_microphone_characteristic_t dst;
+ //convert
+ microphoneInfoToHal(micArrayHal[k], &dst);
+ media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+ microphonesInfo->push_back(microphone);
+ }
+ });
+ return processReturn("getMicrophones", ret, retval);
+}
+
status_t DeviceHalHidl::dump(int fd) {
if (mDevice == 0) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
index f460add..0bd2175 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.h
+++ b/media/libaudiohal/4.0/DeviceHalHidl.h
@@ -108,6 +108,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
virtual status_t dump(int fd);
private:
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/4.0/DeviceHalLocal.cpp
index e64eee1..a245dd9 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/4.0/DeviceHalLocal.cpp
@@ -185,6 +185,18 @@
return INVALID_OPERATION;
}
+status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+ if (mDev->get_microphones == NULL) return INVALID_OPERATION;
+ size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+ audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ status_t status = mDev->get_microphones(mDev, &mic_array[0], &actual_mics);
+ for (size_t i = 0; i < actual_mics; i++) {
+ media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+ microphones->push_back(microphoneInfo);
+ }
+ return status;
+}
+
status_t DeviceHalLocal::dump(int fd) {
return mDev->dump(mDev, fd);
}
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
index daafdc7..08341a4 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.h
+++ b/media/libaudiohal/4.0/DeviceHalLocal.h
@@ -101,6 +101,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
virtual status_t dump(int fd);
void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/4.0/StreamHalHidl.cpp
index de16e98..1c2fdb0 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/4.0/StreamHalHidl.cpp
@@ -28,14 +28,20 @@
#include "VersionUtils.h"
using ::android::hardware::audio::common::V4_0::AudioChannelMask;
+using ::android::hardware::audio::common::V4_0::AudioContentType;
using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::common::V4_0::AudioSource;
+using ::android::hardware::audio::common::V4_0::AudioUsage;
using ::android::hardware::audio::common::V4_0::ThreadInfo;
using ::android::hardware::audio::V4_0::AudioDrain;
using ::android::hardware::audio::V4_0::IStreamOutCallback;
using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
using ::android::hardware::audio::V4_0::MmapBufferInfo;
using ::android::hardware::audio::V4_0::MmapPosition;
using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
+using ::android::hardware::audio::V4_0::RecordTrackMetadata;
using ::android::hardware::audio::V4_0::Result;
using ::android::hardware::audio::V4_0::TimeSpec;
using ::android::hardware::MQDescriptorSync;
@@ -560,6 +566,28 @@
}
}
+/** Transform a standard collection to an HIDL vector. */
+template <class Values, class ElementConverter>
+static auto transformToHidlVec(const Values& values, ElementConverter converter) {
+ hidl_vec<decltype(converter(*values.begin()))> result{values.size()};
+ using namespace std;
+ transform(begin(values), end(values), begin(result), converter);
+ return result;
+}
+
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ hardware::audio::V4_0::SourceMetadata halMetadata = {
+ .tracks = transformToHidlVec(sourceMetadata.tracks,
+ [](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
+ return {
+ .usage=static_cast<AudioUsage>(metadata.usage),
+ .contentType=static_cast<AudioContentType>(metadata.content_type),
+ .gain=metadata.gain,
+ };
+ })};
+ return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
+}
+
void StreamOutHalHidl::onWriteReady() {
sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
if (callback == 0) return;
@@ -754,5 +782,36 @@
}
}
+
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo) {
+ if (!mStream) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getActiveMicrophones(
+ [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+ retval = r;
+ for (size_t k = 0; k < micArrayHal.size(); k++) {
+ audio_microphone_characteristic_t dst;
+ // convert
+ microphoneInfoToHal(micArrayHal[k], &dst);
+ media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+ microphonesInfo->push_back(microphone);
+ }
+ });
+ return processReturn("getActiveMicrophones", ret, retval);
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ hardware::audio::V4_0::SinkMetadata halMetadata = {
+ .tracks = transformToHidlVec(sinkMetadata.tracks,
+ [](const record_track_metadata& metadata) -> RecordTrackMetadata {
+ return {
+ .source=static_cast<AudioSource>(metadata.source),
+ .gain=metadata.gain,
+ };
+ })};
+ return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
+}
+
} // namespace V4_0
} // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
index 8d4dc8c..2dda0f8 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.h
+++ b/media/libaudiohal/4.0/StreamHalHidl.h
@@ -162,6 +162,9 @@
// Return a recent count of the number of audio frames presented to an external observer.
virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
// Methods used by StreamOutCallback (HIDL).
void onWriteReady();
void onDrainReady();
@@ -211,6 +214,12 @@
// the clock time associated with that frame count.
virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
private:
friend class DeviceHalHidl;
typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/4.0/StreamHalLocal.cpp
index 592a931..e9d96bf 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/4.0/StreamHalLocal.cpp
@@ -233,6 +233,19 @@
return mStream->get_presentation_position(mStream, frames, timestamp);
}
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ if (mStream->update_source_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const source_metadata_t metadata {
+ .track_count = sourceMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+ };
+ mStream->update_source_metadata(mStream, &metadata);
+ return OK;
+}
+
status_t StreamOutHalLocal::start() {
if (mStream->start == NULL) return INVALID_OPERATION;
return mStream->start(mStream);
@@ -294,6 +307,19 @@
return mStream->get_capture_position(mStream, frames, time);
}
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ if (mStream->update_sink_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const sink_metadata_t metadata {
+ .track_count = sinkMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+ };
+ mStream->update_sink_metadata(mStream, &metadata);
+ return OK;
+}
+
status_t StreamInHalLocal::start() {
if (mStream->start == NULL) return INVALID_OPERATION;
return mStream->start(mStream);
@@ -315,5 +341,17 @@
return mStream->get_mmap_position(mStream, position);
}
+status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+ if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
+ size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+ audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ status_t status = mStream->get_active_microphones(mStream, &mic_array[0], &actual_mics);
+ for (size_t i = 0; i < actual_mics; i++) {
+ media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+ microphones->push_back(microphoneInfo);
+ }
+ return status;
+}
+
} // namespace V4_0
} // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
index 076bc4c..7237509 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.h
+++ b/media/libaudiohal/4.0/StreamHalLocal.h
@@ -150,6 +150,9 @@
// Get current read/write position in the mmap buffer
virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
private:
audio_stream_out_t *mStream;
wp<StreamOutHalInterfaceCallback> mCallback;
@@ -195,6 +198,12 @@
// Get current read/write position in the mmap buffer
virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
private:
audio_stream_in_t *mStream;
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index caf01be..7de8eb3 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -105,6 +106,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
virtual status_t dump(int fd) = 0;
protected:
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 7419c34..c969e28 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -17,7 +17,10 @@
#ifndef ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
#define ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+#include <vector>
+
#include <media/audiohal/EffectHalInterface.h>
+#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -142,6 +145,15 @@
// Return a recent count of the number of audio frames presented to an external observer.
virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) = 0;
+ struct SourceMetadata {
+ std::vector<playback_track_metadata_t> tracks;
+ };
+ /**
+ * Called when the metadata of the stream's source has been changed.
+ * @param sourceMetadata Description of the audio that is played by the clients.
+ */
+ virtual status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) = 0;
+
protected:
virtual ~StreamOutHalInterface() {}
};
@@ -161,6 +173,18 @@
// the clock time associated with that frame count.
virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
+ struct SinkMetadata {
+ std::vector<record_track_metadata_t> tracks;
+ };
+ /**
+ * Called when the metadata of the stream's sink has been changed.
+ * @param sinkMetadata Description of the audio that is suggested by the clients.
+ */
+ virtual status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) = 0;
+
protected:
virtual ~StreamInHalInterface() {}
};
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 4ed3ba8..d79501f 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -203,7 +203,7 @@
auto parseProxy = [&xmlEffect, &parseImpl](const char* tag, EffectImpl& proxyLib) {
auto* xmlProxyLib = xmlEffect.FirstChildElement(tag);
if (xmlProxyLib == nullptr) {
- ALOGE("effectProxy must contain a <%s>: %s", tag, dump(*xmlProxyLib));
+ ALOGE("effectProxy must contain a <%s>: %s", tag, dump(xmlEffect));
return false;
}
return parseImpl(*xmlProxyLib, proxyLib);
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index 14a171b..dd729c5 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -38,6 +38,9 @@
loudness_enhancer {
path /vendor/lib/soundfx/libldnhncr.so
}
+ dynamics_processing {
+ path /vendor/lib/soundfx/libdynproc.so
+ }
}
# Default pre-processing library. Add to audio_effect.conf "libraries" section if
@@ -129,6 +132,10 @@
library loudness_enhancer
uuid fa415329-2034-4bea-b5dc-5b381c8d1e2c
}
+ dynamics_processing {
+ library dynamics_processing
+ uuid e0e6539b-1781-7261-676f-6d7573696340
+ }
}
# Default pre-processing effects. Add to audio_effect.conf "effects" section if
diff --git a/media/libeffects/dynamicsproc/Android.mk b/media/libeffects/dynamicsproc/Android.mk
new file mode 100644
index 0000000..7be0c49
--- /dev/null
+++ b/media/libeffects/dynamicsproc/Android.mk
@@ -0,0 +1,43 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+# DynamicsProcessing library
+include $(CLEAR_VARS)
+
+LOCAL_VENDOR_MODULE := true
+
+EIGEN_PATH := external/eigen
+LOCAL_C_INCLUDES += $(EIGEN_PATH)
+
+LOCAL_SRC_FILES:= \
+ EffectDynamicsProcessing.cpp \
+ dsp/DPBase.cpp \
+ dsp/DPFrequency.cpp
+
+LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ liblog \
+
+LOCAL_MODULE_RELATIVE_PATH := soundfx
+LOCAL_MODULE:= libdynproc
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudioeffects
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
new file mode 100644
index 0000000..55383eb
--- /dev/null
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -0,0 +1,1259 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectDP"
+//#define LOG_NDEBUG 0
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <new>
+
+#include <log/log.h>
+
+#include <audio_effects/effect_dynamicsprocessing.h>
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while (false)
+#endif
+
+// union to hold command values
+using value_t = union {
+ int32_t i;
+ float f;
+};
+
+// effect_handle_t interface implementation for DP effect
+extern const struct effect_interface_s gDPInterface;
+
+// AOSP Dynamics Processing UUID: e0e6539b-1781-7261-676f-6d7573696340
+const effect_descriptor_t gDPDescriptor = {
+ {0x7261676f, 0x6d75, 0x7369, 0x6364, {0x28, 0xe2, 0xfd, 0x3a, 0xc3, 0x9e}}, // type
+ {0xe0e6539b, 0x1781, 0x7261, 0x676f, {0x6d, 0x75, 0x73, 0x69, 0x63, 0x40}}, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST),
+ 0, // TODO
+ 1,
+ "Dynamics Processing",
+ "The Android Open Source Project",
+};
+
+enum dp_state_e {
+ DYNAMICS_PROCESSING_STATE_UNINITIALIZED,
+ DYNAMICS_PROCESSING_STATE_INITIALIZED,
+ DYNAMICS_PROCESSING_STATE_ACTIVE,
+};
+
+struct DynamicsProcessingContext {
+ const struct effect_interface_s *mItfe;
+ effect_config_t mConfig;
+ uint8_t mState;
+
+ dp_fx::DPBase * mPDynamics; //the effect (or current effect)
+ int32_t mCurrentVariant;
+ float mPreferredFrameDuration;
+};
+
+// The value offset of an effect parameter is computed by rounding up
+// the parameter size to the next 32 bit alignment.
+static inline uint32_t computeParamVOffset(const effect_param_t *p) {
+ return ((p->psize + sizeof(int32_t) - 1) / sizeof(int32_t)) *
+ sizeof(int32_t);
+}
+
+//--- local function prototypes
+int DP_setParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue);
+int DP_getParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t *pValueSize,
+ void *pValue);
+int DP_getParameterCmdSize(uint32_t paramSize,
+ void *pParam);
+void DP_expectedParamValueSizes(uint32_t paramSize,
+ void *pParam,
+ bool isSet,
+ uint32_t *pCmdSize,
+ uint32_t *pValueSize);
+//
+//--- Local functions (not directly used by effect interface)
+//
+
+void DP_reset(DynamicsProcessingContext *pContext)
+{
+ ALOGV("> DP_reset(%p)", pContext);
+ if (pContext->mPDynamics != NULL) {
+ pContext->mPDynamics->reset();
+ } else {
+ ALOGE("DP_reset(%p): null DynamicsProcessing", pContext);
+ }
+}
+
+//----------------------------------------------------------------------------
+// DP_setConfig()
+//----------------------------------------------------------------------------
+// Purpose: Set input and output audio configuration.
+//
+// Inputs:
+// pContext: effect engine context
+// pConfig: pointer to effect_config_t structure holding input and output
+// configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_setConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+ ALOGV("DP_setConfig(%p)", pContext);
+
+ if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate) return -EINVAL;
+ if (pConfig->inputCfg.channels != pConfig->outputCfg.channels) return -EINVAL;
+ if (pConfig->inputCfg.format != pConfig->outputCfg.format) return -EINVAL;
+ if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
+ pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
+ if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_FLOAT) return -EINVAL;
+
+ pContext->mConfig = *pConfig;
+
+ DP_reset(pContext);
+
+ return 0;
+}
+
+//----------------------------------------------------------------------------
+// DP_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+// pContext: effect engine context
+// pConfig: pointer to effect_config_t structure holding input and output
+// configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void DP_getConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+ *pConfig = pContext->mConfig;
+}
+
+//----------------------------------------------------------------------------
+// DP_init()
+//----------------------------------------------------------------------------
+// Purpose: Initialize engine with default configuration.
+//
+// Inputs:
+// pContext: effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_init(DynamicsProcessingContext *pContext)
+{
+ ALOGV("DP_init(%p)", pContext);
+
+ pContext->mItfe = &gDPInterface;
+ pContext->mPDynamics = NULL;
+ pContext->mState = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
+
+ pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pContext->mConfig.inputCfg.samplingRate = 48000;
+ pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
+ pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
+ pContext->mConfig.inputCfg.bufferProvider.cookie = NULL;
+ pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
+ pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pContext->mConfig.outputCfg.samplingRate = 48000;
+ pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
+ pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
+ pContext->mConfig.outputCfg.bufferProvider.cookie = NULL;
+ pContext->mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+ pContext->mCurrentVariant = -1; //none
+ pContext->mPreferredFrameDuration = 0; //none
+
+ DP_setConfig(pContext, &pContext->mConfig);
+ pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ return 0;
+}
+
+void DP_changeVariant(DynamicsProcessingContext *pContext, int newVariant) {
+ ALOGV("DP_changeVariant from %d to %d", pContext->mCurrentVariant, newVariant);
+ switch(newVariant) {
+ case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+ pContext->mCurrentVariant = VARIANT_FAVOR_FREQUENCY_RESOLUTION;
+ delete pContext->mPDynamics;
+ pContext->mPDynamics = new dp_fx::DPFrequency();
+ break;
+ }
+ default: {
+ ALOGW("DynamicsProcessing variant %d not available for creation", newVariant);
+ break;
+ }
+ } //switch
+}
+
+static inline bool isPowerOf2(unsigned long n) {
+ return (n & (n - 1)) == 0;
+}
+
+void DP_configureVariant(DynamicsProcessingContext *pContext, int newVariant) {
+ ALOGV("DP_configureVariant %d", newVariant);
+ switch(newVariant) {
+ case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+ int32_t minBlockSize = (int32_t)dp_fx::DPFrequency::getMinBockSize();
+ int32_t desiredBlock = pContext->mPreferredFrameDuration *
+ pContext->mConfig.inputCfg.samplingRate / 1000.0f;
+ int32_t currentBlock = desiredBlock;
+ ALOGV(" sampling rate: %d, desiredBlock size %0.2f (%d) samples",
+ pContext->mConfig.inputCfg.samplingRate, pContext->mPreferredFrameDuration,
+ desiredBlock);
+ if (desiredBlock < minBlockSize) {
+ currentBlock = minBlockSize;
+ } else if (!isPowerOf2(desiredBlock)) {
+ //find next highest power of 2.
+ currentBlock = 1 << (32 - __builtin_clz(desiredBlock));
+ }
+ ((dp_fx::DPFrequency*)pContext->mPDynamics)->configure(currentBlock,
+ currentBlock/2,
+ pContext->mConfig.inputCfg.samplingRate);
+ break;
+ }
+ default: {
+ ALOGE("DynamicsProcessing variant %d not available to configure", newVariant);
+ break;
+ }
+ }
+}
+
+//
+//--- Effect Library Interface Implementation
+//
+
+int DPLib_Release(effect_handle_t handle) {
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)handle;
+
+ ALOGV("DPLib_Release %p", handle);
+ if (pContext == NULL) {
+ return -EINVAL;
+ }
+ delete pContext->mPDynamics;
+ delete pContext;
+
+ return 0;
+}
+
+int DPLib_Create(const effect_uuid_t *uuid,
+ int32_t sessionId __unused,
+ int32_t ioId __unused,
+ effect_handle_t *pHandle) {
+ ALOGV("DPLib_Create()");
+
+ if (pHandle == NULL || uuid == NULL) {
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) != 0) {
+ return -EINVAL;
+ }
+
+ DynamicsProcessingContext *pContext = new DynamicsProcessingContext;
+ *pHandle = (effect_handle_t)pContext;
+ int ret = DP_init(pContext);
+ if (ret < 0) {
+ ALOGW("DPLib_Create() init failed");
+ DPLib_Release(*pHandle);
+ return ret;
+ }
+
+ ALOGV("DPLib_Create context is %p", pContext);
+ return 0;
+}
+
+int DPLib_GetDescriptor(const effect_uuid_t *uuid,
+ effect_descriptor_t *pDescriptor) {
+
+ if (pDescriptor == NULL || uuid == NULL){
+ ALOGE("DPLib_GetDescriptor() called with NULL pointer");
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) == 0) {
+ *pDescriptor = gDPDescriptor;
+ return 0;
+ }
+
+ return -EINVAL;
+} /* end DPLib_GetDescriptor */
+
+//
+//--- Effect Control Interface Implementation
+//
+int DP_process(effect_handle_t self, audio_buffer_t *inBuffer,
+ audio_buffer_t *outBuffer) {
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+ if (pContext == NULL) {
+ ALOGE("DP_process() called with NULL context");
+ return -EINVAL;
+ }
+
+ if (inBuffer == NULL || inBuffer->raw == NULL ||
+ outBuffer == NULL || outBuffer->raw == NULL ||
+ inBuffer->frameCount != outBuffer->frameCount ||
+ inBuffer->frameCount == 0) {
+ ALOGE("inBuffer or outBuffer are NULL or have problems with frame count");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ ALOGE("mState is not DYNAMICS_PROCESSING_STATE_ACTIVE. Current mState %d",
+ pContext->mState);
+ return -ENODATA;
+ }
+ //if dynamics exist...
+ if (pContext->mPDynamics != NULL) {
+ int32_t channelCount = (int32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels);
+ pContext->mPDynamics->processSamples(inBuffer->f32, inBuffer->f32,
+ inBuffer->frameCount * channelCount);
+
+ if (inBuffer->raw != outBuffer->raw) {
+ if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < outBuffer->frameCount * channelCount; i++) {
+ outBuffer->f32[i] += inBuffer->f32[i];
+ }
+ } else {
+ memcpy(outBuffer->raw, inBuffer->raw,
+ outBuffer->frameCount * channelCount * sizeof(float));
+ }
+ }
+ } else {
+ //do nothing. no effect created yet. warning.
+ ALOGW("Warning: no DynamicsProcessing engine available");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int DP_command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+ void *pCmdData, uint32_t *replySize, void *pReplyData) {
+
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+ if (pContext == NULL || pContext->mState == DYNAMICS_PROCESSING_STATE_UNINITIALIZED) {
+ ALOGE("DP_command() called with NULL context or uninitialized state.");
+ return -EINVAL;
+ }
+
+ ALOGV("DP_command command %d cmdSize %d",cmdCode, cmdSize);
+ switch (cmdCode) {
+ case EFFECT_CMD_INIT:
+ if (pReplyData == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_INIT wrong replyData or repySize");
+ return -EINVAL;
+ }
+ *(int *) pReplyData = DP_init(pContext);
+ break;
+ case EFFECT_CMD_SET_CONFIG:
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
+ || pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_SET_CONFIG error with pCmdData, cmdSize, pReplyData or replySize");
+ return -EINVAL;
+ }
+ *(int *) pReplyData = DP_setConfig(pContext,
+ (effect_config_t *) pCmdData);
+ break;
+ case EFFECT_CMD_GET_CONFIG:
+ if (pReplyData == NULL ||
+ *replySize != sizeof(effect_config_t)) {
+ ALOGE("EFFECT_CMD_GET_CONFIG wrong replyData or repySize");
+ return -EINVAL;
+ }
+ DP_getConfig(pContext, (effect_config_t *)pReplyData);
+ break;
+ case EFFECT_CMD_RESET:
+ DP_reset(pContext);
+ break;
+ case EFFECT_CMD_ENABLE:
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_ENABLE wrong replyData or repySize");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_INITIALIZED) {
+ ALOGE("EFFECT_CMD_ENABLE state not initialized");
+ *(int *)pReplyData = -ENOSYS;
+ } else {
+ pContext->mState = DYNAMICS_PROCESSING_STATE_ACTIVE;
+ ALOGV("EFFECT_CMD_ENABLE() OK");
+ *(int *)pReplyData = 0;
+ }
+ break;
+ case EFFECT_CMD_DISABLE:
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_DISABLE wrong replyData or repySize");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ ALOGE("EFFECT_CMD_DISABLE state not active");
+ *(int *)pReplyData = -ENOSYS;
+ } else {
+ pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ ALOGV("EFFECT_CMD_DISABLE() OK");
+ *(int *)pReplyData = 0;
+ }
+ break;
+ case EFFECT_CMD_GET_PARAM: {
+ if (pCmdData == NULL || pReplyData == NULL || replySize == NULL) {
+ ALOGE("null pCmdData or pReplyData or replySize");
+ return -EINVAL;
+ }
+ effect_param_t *pEffectParam = (effect_param_t *) pCmdData;
+ uint32_t expectedCmdSize = DP_getParameterCmdSize(pEffectParam->psize,
+ pEffectParam->data);
+ if (cmdSize != expectedCmdSize || *replySize < expectedCmdSize) {
+ ALOGE("error cmdSize: %d, expetedCmdSize: %d, replySize: %d",
+ cmdSize, expectedCmdSize, *replySize);
+ return -EINVAL;
+ }
+
+ ALOGVV("DP_command expectedCmdSize: %d", expectedCmdSize);
+ memcpy(pReplyData, pCmdData, expectedCmdSize);
+ effect_param_t *p = (effect_param_t *)pReplyData;
+
+ uint32_t voffset = computeParamVOffset(p);
+
+ p->status = DP_getParameter(pContext,
+ p->psize,
+ p->data,
+ &p->vsize,
+ p->data + voffset);
+ *replySize = sizeof(effect_param_t) + voffset + p->vsize;
+
+ ALOGVV("DP_command replysize %u, status %d" , *replySize, p->status);
+ break;
+ }
+ case EFFECT_CMD_SET_PARAM: {
+ if (pCmdData == NULL ||
+ cmdSize < (sizeof(effect_param_t) + sizeof(int32_t) + sizeof(int32_t)) ||
+ pReplyData == NULL || replySize == NULL || *replySize != sizeof(int32_t)) {
+ ALOGE("\tLVM_ERROR : DynamicsProcessing cmdCode Case: "
+ "EFFECT_CMD_SET_PARAM: ERROR");
+ return -EINVAL;
+ }
+
+ effect_param_t * const p = (effect_param_t *) pCmdData;
+ const uint32_t voffset = computeParamVOffset(p);
+
+ *(int *)pReplyData = DP_setParameter(pContext,
+ p->psize,
+ (void *)p->data,
+ p->vsize,
+ p->data + voffset);
+ break;
+ }
+ case EFFECT_CMD_SET_DEVICE:
+ case EFFECT_CMD_SET_VOLUME:
+ case EFFECT_CMD_SET_AUDIO_MODE:
+ break;
+
+ default:
+ ALOGW("DP_command invalid command %d",cmdCode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+//returns the expected command size for the given parameter, or 0 if unknown
+int DP_getParameterCmdSize(uint32_t paramSize,
+ void *pParam) {
+ if (paramSize < sizeof(int32_t)) {
+ return 0;
+ }
+ int32_t param = *(int32_t*)pParam;
+ switch(param) {
+ case DP_PARAM_GET_CHANNEL_COUNT: //paramcmd
+ case DP_PARAM_ENGINE_ARCHITECTURE:
+ //effect + param
+ return (int)(sizeof(effect_param_t) + sizeof(uint32_t));
+ case DP_PARAM_INPUT_GAIN: //paramcmd + param
+ case DP_PARAM_LIMITER:
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ:
+ case DP_PARAM_MBC:
+            //effect + param + channel
+ return (int)(sizeof(effect_param_t) + 2 * sizeof(uint32_t));
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND:
+ case DP_PARAM_MBC_BAND:
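+            //effect + param + channel + band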
+ return (int)(sizeof(effect_param_t) + 3 * sizeof(uint32_t));
+ }
+ return 0;
+}
+
+//helper function
+bool DP_checkSizesInt(uint32_t paramSize, uint32_t valueSize, uint32_t expectedParams,
+ uint32_t expectedValues) {
+ if (paramSize < expectedParams * sizeof(int32_t)) {
+ ALOGE("Invalid paramSize: %u expected %u", paramSize,
+ (uint32_t) (expectedParams * sizeof(int32_t)));
+ return false;
+ }
+ if (valueSize < expectedValues * sizeof(int32_t)) {
+ ALOGE("Invalid valueSize %u expected %u", valueSize,
+ (uint32_t)(expectedValues * sizeof(int32_t)));
+ return false;
+ }
+ return true;
+}
+
+static dp_fx::DPChannel* DP_getChannel(DynamicsProcessingContext *pContext,
+ int32_t channel) {
+ if (pContext->mPDynamics == NULL) {
+ return NULL;
+ }
+ dp_fx::DPChannel *pChannel = pContext->mPDynamics->getChannel(channel);
+ ALOGE_IF(pChannel == NULL, "DPChannel NULL. invalid channel %d", channel);
+ return pChannel;
+}
+
+static dp_fx::DPEq* DP_getEq(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t eqType) {
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ return NULL;
+ }
+ dp_fx::DPEq *pEq = (eqType == DP_PARAM_PRE_EQ ? pChannel->getPreEq() :
+ (eqType == DP_PARAM_POST_EQ ? pChannel->getPostEq() : NULL));
+ ALOGE_IF(pEq == NULL,"DPEq NULL invalid eq");
+ return pEq;
+}
+
+static dp_fx::DPEqBand* DP_getEqBand(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t eqType, int32_t band) {
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqType);
+ if (pEq == NULL) {
+ return NULL;
+ }
+ dp_fx::DPEqBand *pEqBand = pEq->getBand(band);
+ ALOGE_IF(pEqBand == NULL, "DPEqBand NULL. invalid band %d", band);
+ return pEqBand;
+}
+
+static dp_fx::DPMbc* DP_getMbc(DynamicsProcessingContext *pContext, int32_t channel) {
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ return NULL;
+ }
+ dp_fx::DPMbc *pMbc = pChannel->getMbc();
+ ALOGE_IF(pMbc == NULL, "DPMbc NULL invalid MBC");
+ return pMbc;
+}
+
+static dp_fx::DPMbcBand* DP_getMbcBand(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t band) {
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ return NULL;
+ }
+ dp_fx::DPMbcBand *pMbcBand = pMbc->getBand(band);
+ ALOGE_IF(pMbcBand == NULL, "pMbcBand NULL. invalid band %d", band);
+ return pMbcBand;
+}
+
+int DP_getParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t *pValueSize,
+ void *pValue) {
+ int status = 0;
+ int32_t *params = (int32_t *)pParam;
+ static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+ alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+ "Size/alignment mismatch for float/int32_t/value_t");
+ value_t *values = reinterpret_cast<value_t*>(pValue);
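+    // Each reply value occupies one 32-bit slot, written as int or float through the value_t
+    // union depending on the field being returned.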
+
+ ALOGVV("%s start", __func__);
+#ifdef VERY_VERY_VERBOSE_LOGGING
+ for (size_t i = 0; i < paramSize/sizeof(int32_t); i++) {
+ ALOGVV("Param[%zu] %d", i, params[i]);
+ }
+#endif
+ if (paramSize < sizeof(int32_t)) {
+ ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+ return -EINVAL;
+ }
+ const int32_t command = params[0];
+ switch (command) {
+ case DP_PARAM_GET_CHANNEL_COUNT: {
+ if (!DP_checkSizesInt(paramSize,*pValueSize, 1 /*params*/, 1 /*values*/)) {
+ ALOGE("%s DP_PARAM_GET_CHANNEL_COUNT (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+ *pValueSize = sizeof(uint32_t);
+ *(uint32_t *)pValue = (uint32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels);
+ ALOGVV("%s DP_PARAM_GET_CHANNEL_COUNT channels %d", __func__, *(int32_t *)pValue);
+ break;
+ }
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 1 /*params*/, 9 /*values*/)) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+// Number[] values = { 0 /*0 variant */,
+// 0.0f /* 1 preferredFrameDuration */,
+// 0 /*2 preEqInUse */,
+// 0 /*3 preEqBandCount */,
+// 0 /*4 mbcInUse */,
+// 0 /*5 mbcBandCount*/,
+// 0 /*6 postEqInUse */,
+// 0 /*7 postEqBandCount */,
+// 0 /*8 limiterInUse */};
+ if (pContext->mPDynamics == NULL) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error mPDynamics is NULL", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pContext->mCurrentVariant;
+ values[1].f = pContext->mPreferredFrameDuration;
+ values[2].i = pContext->mPDynamics->isPreEQInUse();
+ values[3].i = pContext->mPDynamics->getPreEqBandCount();
+ values[4].i = pContext->mPDynamics->isMbcInUse();
+ values[5].i = pContext->mPDynamics->getMbcBandCount();
+ values[6].i = pContext->mPDynamics->isPostEqInUse();
+ values[7].i = pContext->mPDynamics->getPostEqBandCount();
+ values[8].i = pContext->mPDynamics->isLimiterInUse();
+
+ *pValueSize = sizeof(value_t) * 9;
+
+ ALOGVV(" variant %d, preferredFrameDuration: %f, preEqInuse %d, bands %d, mbcinuse %d,"
+ "mbcbands %d, posteqInUse %d, bands %d, limiterinuse %d",
+ values[0].i, values[1].f, values[2].i, values[3].i, values[4].i, values[5].i,
+ values[6].i, values[7].i, values[8].i);
+ break;
+ }
+ case DP_PARAM_INPUT_GAIN: {
+ ALOGVV("engine get PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 1 /*values*/)) {
+ ALOGE("%s get PARAM_INPUT_GAIN invalid sizes.", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ const int32_t channel = params[1];
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s get PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ values[0].f = pChannel->getInputGain();
+ *pValueSize = sizeof(value_t) * 1;
+
+ ALOGVV(" channel: %d, input gain %f\n", channel, values[0].f);
+ break;
+ }
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ: {
+ ALOGVV("engine get PARAM_*_EQ paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet == PARAM_PRE_EQ ? PARAM_PRE_EQ : PARAM_POST_EQ,
+// channelIndex};
+// Number[] values = {0 /*0 in use */,
+// 0 /*1 enabled*/,
+// 0 /*2 band count */};
+ const int32_t channel = params[1];
+
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+ if (pEq == NULL) {
+ ALOGE("%s get PARAM_*_EQ invalid eq", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pEq->isInUse();
+ values[1].i = pEq->isEnabled();
+ values[2].i = pEq->getBandCount();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+ (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel,
+ values[0].i, values[1].i, values[2].i);
+ break;
+ }
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND: {
+ ALOGVV("engine get PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet,
+// channelIndex,
+// bandIndex};
+// Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+// eqBand.getCutoffFrequency(),
+// eqBand.getGain()};
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+ int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+ (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+
+ dp_fx::DPEqBand *pEqBand = DP_getEqBand(pContext, channel, eqCommand, band);
+ if (pEqBand == NULL) {
+ ALOGE("%s get PARAM_*_EQ_BAND invalid channel %d or band %d", __func__, channel, band);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pEqBand->isEnabled();
+ values[1].f = pEqBand->getCutoffFrequency();
+ values[2].f = pEqBand->getGain();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV("%s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+ (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+ values[0].i, values[1].f, values[2].f);
+ break;
+ }
+ case DP_PARAM_MBC: {
+ ALOGVV("engine get PDP_PARAM_MBC paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PDP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+
+// Number[] params = {PARAM_MBC,
+// channelIndex};
+// Number[] values = {0 /*0 in use */,
+// 0 /*1 enabled*/,
+// 0 /*2 band count */};
+
+ const int32_t channel = params[1];
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s get PDP_PARAM_MBC invalid MBC", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pMbc->isInUse();
+ values[1].i = pMbc->isEnabled();
+ values[2].i = pMbc->getBandCount();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV("DP_PARAM_MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel,
+ values[0].i, values[1].i, values[2].i);
+ break;
+ }
+ case DP_PARAM_MBC_BAND: {
+ ALOGVV("engine get DP_PARAM_MBC_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 11 /*values*/)) {
+ ALOGE("%s get DP_PARAM_MBC_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {PARAM_MBC_BAND,
+// channelIndex,
+// bandIndex};
+// Number[] values = {0 /*0 enabled */,
+// 0.0f /*1 cutoffFrequency */,
+// 0.0f /*2 AttackTime */,
+// 0.0f /*3 ReleaseTime */,
+// 0.0f /*4 Ratio */,
+// 0.0f /*5 Threshold */,
+// 0.0f /*6 KneeWidth */,
+// 0.0f /*7 NoiseGateThreshold */,
+// 0.0f /*8 ExpanderRatio */,
+// 0.0f /*9 PreGain */,
+// 0.0f /*10 PostGain*/};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ dp_fx::DPMbcBand *pMbcBand = DP_getMbcBand(pContext, channel, band);
+ if (pMbcBand == NULL) {
+ ALOGE("%s get PARAM_MBC_BAND invalid channel %d or band %d", __func__, channel, band);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pMbcBand->isEnabled();
+ values[1].f = pMbcBand->getCutoffFrequency();
+ values[2].f = pMbcBand->getAttackTime();
+ values[3].f = pMbcBand->getReleaseTime();
+ values[4].f = pMbcBand->getRatio();
+ values[5].f = pMbcBand->getThreshold();
+ values[6].f = pMbcBand->getKneeWidth();
+ values[7].f = pMbcBand->getNoiseGateThreshold();
+ values[8].f = pMbcBand->getExpanderRatio();
+ values[9].f = pMbcBand->getPreGain();
+ values[10].f = pMbcBand->getPostGain();
+
+ *pValueSize = sizeof(value_t) * 11;
+ ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+ "expanderRatio:%f, preGain:%f, postGain:%f\n", channel, band, values[0].i,
+ values[1].f, values[2].f, values[3].f, values[4].f, values[5].f, values[6].f,
+ values[7].f, values[8].f, values[9].f, values[10].f);
+ break;
+ }
+ case DP_PARAM_LIMITER: {
+ ALOGVV("engine get DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 8 /*values*/)) {
+ ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+
+ int32_t channel = params[1];
+// Number[] values = {0 /*0 in use (int)*/,
+// 0 /*1 enabled (int)*/,
+// 0 /*2 link group (int)*/,
+// 0.0f /*3 attack time (float)*/,
+// 0.0f /*4 release time (float)*/,
+// 0.0f /*5 ratio (float)*/,
+// 0.0f /*6 threshold (float)*/,
+// 0.0f /*7 post gain(float)*/};
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ dp_fx::DPLimiter *pLimiter = pChannel->getLimiter();
+ if (pLimiter == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER null LIMITER", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pLimiter->isInUse();
+ values[1].i = pLimiter->isEnabled();
+ values[2].i = pLimiter->getLinkGroup();
+ values[3].f = pLimiter->getAttackTime();
+ values[4].f = pLimiter->getReleaseTime();
+ values[5].f = pLimiter->getRatio();
+ values[6].f = pLimiter->getThreshold();
+ values[7].f = pLimiter->getPostGain();
+
+ *pValueSize = sizeof(value_t) * 8;
+
+ ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n",
+ channel, values[0].i/*inUse*/, values[1].i/*enabled*/, values[2].i/*linkGroup*/,
+ values[3].f/*attackTime*/, values[4].f/*releaseTime*/,
+ values[5].f/*ratio*/, values[6].f/*threshold*/,
+ values[7].f/*postGain*/);
+ break;
+ }
+ default:
+ ALOGE("%s invalid param %d", __func__, params[0]);
+ status = -EINVAL;
+ break;
+ }
+
+ ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+ return status;
+} /* end DP_getParameter */
+
+int DP_setParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue) {
+ int status = 0;
+ int32_t *params = (int32_t *)pParam;
+ static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+ alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+ "Size/alignment mismatch for float/int32_t/value_t");
+ value_t *values = reinterpret_cast<value_t*>(pValue);
+
+ ALOGVV("%s start", __func__);
+ if (paramSize < sizeof(int32_t)) {
+ ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+ return -EINVAL;
+ }
+ const int32_t command = params[0];
+ switch (command) {
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 1 /*params*/, 9 /*values*/)) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+// Number[] values = { variant /* variant */,
+// preferredFrameDuration,
+// (preEqInUse ? 1 : 0),
+// preEqBandCount,
+// (mbcInUse ? 1 : 0),
+// mbcBandCount,
+// (postEqInUse ? 1 : 0),
+// postEqBandCount,
+// (limiterInUse ? 1 : 0)};
+ const int32_t variant = values[0].i;
+ const float preferredFrameDuration = values[1].f;
+ const int32_t preEqInUse = values[2].i;
+ const int32_t preEqBandCount = values[3].i;
+ const int32_t mbcInUse = values[4].i;
+ const int32_t mbcBandCount = values[5].i;
+ const int32_t postEqInUse = values[6].i;
+ const int32_t postEqBandCount = values[7].i;
+ const int32_t limiterInUse = values[8].i;
+ ALOGVV("variant %d, preEqInuse %d, bands %d, mbcinuse %d, mbcbands %d, posteqInUse %d,"
+ "bands %d, limiterinuse %d", variant, preEqInUse, preEqBandCount, mbcInUse,
+ mbcBandCount, postEqInUse, postEqBandCount, limiterInUse);
+
+ //set variant (instantiate effect)
+ //initArchitecture for effect
+ DP_changeVariant(pContext, variant);
+ if (pContext->mPDynamics == NULL) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error setting variant %d", __func__, variant);
+ status = -EINVAL;
+ break;
+ }
+ pContext->mPreferredFrameDuration = preferredFrameDuration;
+ pContext->mPDynamics->init((uint32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels),
+ preEqInUse != 0, (uint32_t)preEqBandCount,
+ mbcInUse != 0, (uint32_t)mbcBandCount,
+ postEqInUse != 0, (uint32_t)postEqBandCount,
+ limiterInUse != 0);
+
+ DP_configureVariant(pContext, variant);
+ break;
+ }
+ case DP_PARAM_INPUT_GAIN: {
+ ALOGVV("engine DP_PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 1 /*values*/)) {
+ ALOGE("%s DP_PARAM_INPUT_GAIN invalid sizes.", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ const int32_t channel = params[1];
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ const float gain = values[0].f;
+ ALOGVV("%s DP_PARAM_INPUT_GAIN channel %d, level %f", __func__, channel, gain);
+ pChannel->setInputGain(gain);
+ break;
+ }
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ: {
+ ALOGVV("engine DP_PARAM_*_EQ paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s DP_PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet,
+// channelIndex};
+// Number[] values = { (eq.isInUse() ? 1 : 0),
+// (eq.isEnabled() ? 1 : 0),
+// bandCount};
+ const int32_t channel = params[1];
+
+ const int32_t enabled = values[1].i;
+ const int32_t bandCount = values[2].i;
+ ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+ (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel, values[0].i,
+ values[2].i, bandCount);
+
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+ if (pEq == NULL) {
+ ALOGE("%s set PARAM_*_EQ invalid channel %d or command %d", __func__, channel,
+ command);
+ status = -EINVAL;
+ break;
+ }
+
+ pEq->setEnabled(enabled != 0);
+            //fail if band count is different? maybe.
+ if ((int32_t)pEq->getBandCount() != bandCount) {
+ ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+ pEq->getBandCount(), bandCount);
+ }
+ break;
+ }
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND: {
+ ALOGVV("engine set PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 3 /*values*/)) {
+ ALOGE("%s PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet,
+//                    channelIndex,
+//                    bandIndex};
+// Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+//                    eqBand.getCutoffFrequency(),
+//                    eqBand.getGain()};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ const int32_t enabled = values[0].i;
+ const float cutoffFrequency = values[1].f;
+ const float gain = values[2].f;
+
+ ALOGVV(" %s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+ (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+ enabled, cutoffFrequency, gain);
+
+ int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+ (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqCommand);
+ if (pEq == NULL) {
+ ALOGE("%s set PARAM_*_EQ_BAND invalid channel %d or command %d", __func__, channel,
+ command);
+ status = -EINVAL;
+ break;
+ }
+
+ dp_fx::DPEqBand eqBand;
+ eqBand.init(enabled != 0, cutoffFrequency, gain);
+ pEq->setBand(band, eqBand);
+ break;
+ }
+ case DP_PARAM_MBC: {
+ ALOGVV("engine DP_PARAM_MBC paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s DP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_MBC,
+// channelIndex};
+// Number[] values = {(mbc.isInUse() ? 1 : 0),
+// (mbc.isEnabled() ? 1 : 0),
+// bandCount};
+ const int32_t channel = params[1];
+
+ const int32_t enabled = values[1].i;
+ const int32_t bandCount = values[2].i;
+ ALOGVV("MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel, values[0].i,
+ enabled, bandCount);
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s set DP_PARAM_MBC invalid channel %d ", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+
+ pMbc->setEnabled(enabled != 0);
+            //fail if band count is different? maybe.
+ if ((int32_t)pMbc->getBandCount() != bandCount) {
+ ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+ pMbc->getBandCount(), bandCount);
+ }
+ break;
+ }
+ case DP_PARAM_MBC_BAND: {
+ ALOGVV("engine set DP_PARAM_MBC_BAND paramsize: %d valuesize %d ",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 11 /*values*/)) {
+ ALOGE("%s DP_PARAM_MBC_BAND: (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_MBC_BAND,
+// channelIndex,
+// bandIndex};
+// Number[] values = {(mbcBand.isEnabled() ? 1 : 0),
+// mbcBand.getCutoffFrequency(),
+// mbcBand.getAttackTime(),
+// mbcBand.getReleaseTime(),
+// mbcBand.getRatio(),
+// mbcBand.getThreshold(),
+// mbcBand.getKneeWidth(),
+// mbcBand.getNoiseGateThreshold(),
+// mbcBand.getExpanderRatio(),
+// mbcBand.getPreGain(),
+// mbcBand.getPostGain()};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ const int32_t enabled = values[0].i;
+ const float cutoffFrequency = values[1].f;
+ const float attackTime = values[2].f;
+ const float releaseTime = values[3].f;
+ const float ratio = values[4].f;
+ const float threshold = values[5].f;
+ const float kneeWidth = values[6].f;
+ const float noiseGateThreshold = values[7].f;
+ const float expanderRatio = values[8].f;
+ const float preGain = values[9].f;
+ const float postGain = values[10].f;
+
+ ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+ "expanderRatio:%f, preGain:%f, postGain:%f\n",
+ channel, band, enabled, cutoffFrequency, attackTime, releaseTime, ratio,
+ threshold, kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s set DP_PARAM_MBC_BAND invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+
+ dp_fx::DPMbcBand mbcBand;
+ mbcBand.init(enabled != 0, cutoffFrequency, attackTime, releaseTime, ratio, threshold,
+ kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+ pMbc->setBand(band, mbcBand);
+ break;
+ }
+ case DP_PARAM_LIMITER: {
+ ALOGVV("engine DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 8 /*values*/)) {
+ ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_LIMITER,
+// channelIndex};
+// Number[] values = {(limiter.isInUse() ? 1 : 0),
+// (limiter.isEnabled() ? 1 : 0),
+// limiter.getLinkGroup(),
+// limiter.getAttackTime(),
+// limiter.getReleaseTime(),
+// limiter.getRatio(),
+// limiter.getThreshold(),
+// limiter.getPostGain()};
+
+ const int32_t channel = params[1];
+
+ const int32_t inUse = values[0].i;
+ const int32_t enabled = values[1].i;
+ const int32_t linkGroup = values[2].i;
+ const float attackTime = values[3].f;
+ const float releaseTime = values[4].f;
+ const float ratio = values[5].f;
+ const float threshold = values[6].f;
+ const float postGain = values[7].f;
+
+ ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n", channel, inUse,
+ enabled, linkGroup, attackTime, releaseTime, ratio, threshold, postGain);
+
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ dp_fx::DPLimiter limiter;
+ limiter.init(inUse != 0, enabled != 0, linkGroup, attackTime, releaseTime, ratio,
+ threshold, postGain);
+ pChannel->setLimiter(limiter);
+ break;
+ }
+ default:
+ ALOGE("%s invalid param %d", __func__, params[0]);
+ status = -EINVAL;
+ break;
+ }
+
+ ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+ return status;
+} /* end DP_setParameter */
+
+/* Effect Control Interface Implementation: get_descriptor */
+int DP_getDescriptor(effect_handle_t self,
+ effect_descriptor_t *pDescriptor)
+{
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *) self;
+
+ if (pContext == NULL || pDescriptor == NULL) {
+ ALOGE("DP_getDescriptor() invalid param");
+ return -EINVAL;
+ }
+
+ *pDescriptor = gDPDescriptor;
+
+ return 0;
+} /* end DP_getDescriptor */
+
+
+// effect_handle_t interface implementation for Dynamics Processing effect
+const struct effect_interface_s gDPInterface = {
+ DP_process,
+ DP_command,
+ DP_getDescriptor,
+ NULL,
+};
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+ .tag = AUDIO_EFFECT_LIBRARY_TAG,
+ .version = EFFECT_LIBRARY_API_VERSION,
+ .name = "Dynamics Processing Library",
+ .implementor = "The Android Open Source Project",
+ .create_effect = DPLib_Create,
+ .release_effect = DPLib_Release,
+ .get_descriptor = DPLib_GetDescriptor,
+};
+
+}; // extern "C"
+
diff --git a/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2 b/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
diff --git a/media/libeffects/dynamicsproc/NOTICE b/media/libeffects/dynamicsproc/NOTICE
new file mode 100644
index 0000000..31cc6e9
--- /dev/null
+++ b/media/libeffects/dynamicsproc/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2005-2018, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.cpp b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
new file mode 100644
index 0000000..8b79991
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPBase"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPBase.h"
+#include "DPFrequency.h"
+
+namespace dp_fx {
+
+DPStage::DPStage() : mInUse(DP_DEFAULT_STAGE_INUSE),
+ mEnabled(DP_DEFAULT_STAGE_ENABLED) {
+}
+
+void DPStage::init(bool inUse, bool enabled) {
+ mInUse = inUse;
+ mEnabled = enabled;
+}
+
+//----
+DPBandStage::DPBandStage() : mBandCount(0) {
+}
+
+void DPBandStage::init(bool inUse, bool enabled, int bandCount) {
+ DPStage::init(inUse, enabled);
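+    // A stage that is not in use exposes zero bands.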
+ mBandCount = inUse ? bandCount : 0;
+}
+
+//---
+DPBandBase::DPBandBase() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ);
+}
+
+void DPBandBase::init(bool enabled, float cutoffFrequency){
+ mEnabled = enabled;
+    mCutoffFrequencyHz = cutoffFrequency;
+}
+
+//-----
+DPEqBand::DPEqBand() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPEqBand::init(bool enabled, float cutoffFrequency, float gain) {
+ DPBandBase::init(enabled, cutoffFrequency);
+ setGain(gain);
+}
+
+float DPEqBand::getGain() const{
+ return mGainDb;
+}
+
+void DPEqBand::setGain(float gain) {
+ mGainDb = gain;
+}
+
+//------
+DPMbcBand::DPMbcBand() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+ DP_DEFAULT_ATTACK_TIME_MS,
+ DP_DEFAULT_RELEASE_TIME_MS,
+ DP_DEFAULT_RATIO,
+ DP_DEFAULT_THRESHOLD_DB,
+ DP_DEFAULT_KNEE_WIDTH_DB,
+ DP_DEFAULT_NOISE_GATE_THRESHOLD_DB,
+ DP_DEFAULT_EXPANDER_RATIO,
+ DP_DEFAULT_GAIN_DB,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPMbcBand::init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+ float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+ float expanderRatio, float preGain, float postGain) {
+ DPBandBase::init(enabled, cutoffFrequency);
+ setAttackTime(attackTime);
+ setReleaseTime(releaseTime);
+ setRatio(ratio);
+ setThreshold(threshold);
+ setKneeWidth(kneeWidth);
+ setNoiseGateThreshold(noiseGateThreshold);
+ setExpanderRatio(expanderRatio);
+ setPreGain(preGain);
+ setPostGain(postGain);
+}
+
+//------
+DPEq::DPEq() {
+}
+
+void DPEq::init(bool inUse, bool enabled, uint32_t bandCount) {
+ DPBandStage::init(inUse, enabled, bandCount);
+ mBands.resize(getBandCount());
+}
+
+DPEqBand * DPEq::getBand(uint32_t band) {
+ if (band < getBandCount()) {
+ return &mBands[band];
+ }
+ return NULL;
+}
+
+void DPEq::setBand(uint32_t band, DPEqBand &src) {
+ if (band < getBandCount()) {
+ mBands[band] = src;
+ }
+}
+
+//------
+DPMbc::DPMbc() {
+}
+
+void DPMbc::init(bool inUse, bool enabled, uint32_t bandCount) {
+ DPBandStage::init(inUse, enabled, bandCount);
+ if (isInUse()) {
+ mBands.resize(bandCount);
+ } else {
+ mBands.resize(0);
+ }
+}
+
+DPMbcBand * DPMbc::getBand(uint32_t band) {
+ if (band < getBandCount()) {
+ return &mBands[band];
+ }
+ return NULL;
+}
+
+void DPMbc::setBand(uint32_t band, DPMbcBand &src) {
+ if (band < getBandCount()) {
+ mBands[band] = src;
+ }
+}
+
+//------
+DPLimiter::DPLimiter() {
+ init(DP_DEFAULT_STAGE_INUSE,
+ DP_DEFAULT_STAGE_ENABLED,
+ DP_DEFAULT_LINK_GROUP,
+ DP_DEFAULT_ATTACK_TIME_MS,
+ DP_DEFAULT_RELEASE_TIME_MS,
+ DP_DEFAULT_RATIO,
+ DP_DEFAULT_THRESHOLD_DB,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPLimiter::init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+ float ratio, float threshold, float postGain) {
+ DPStage::init(inUse, enabled);
+ setLinkGroup(linkGroup);
+ setAttackTime(attackTime);
+ setReleaseTime(releaseTime);
+ setRatio(ratio);
+ setThreshold(threshold);
+ setPostGain(postGain);
+}
+
+//----
+DPChannel::DPChannel() : mInitialized(false), mInputGainDb(0), mPreEqInUse(false), mMbcInUse(false),
+ mPostEqInUse(false), mLimiterInUse(false) {
+}
+
+void DPChannel::init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse) {
+ setInputGain(inputGain);
+ mPreEqInUse = preEqInUse;
+ mMbcInUse = mbcInUse;
+ mPostEqInUse = postEqInUse;
+ mLimiterInUse = limiterInUse;
+
+ mPreEq.init(mPreEqInUse, false, preEqBandCount);
+ mMbc.init(mMbcInUse, false, mbcBandCount);
+ mPostEq.init(mPostEqInUse, false, postEqBandCount);
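+    // Limiter defaults: link group 0, 50 ms attack, 120 ms release, ratio 2,
+    // threshold -30 dB, post gain 0 dB (matching the DP_DEFAULT_* values).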
+ mLimiter.init(mLimiterInUse, false, 0, 50, 120, 2, -30, 0);
+ mInitialized = true;
+}
+
+DPEq* DPChannel::getPreEq() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mPreEq;
+}
+
+DPMbc* DPChannel::getMbc() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mMbc;
+}
+
+DPEq* DPChannel::getPostEq() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mPostEq;
+}
+
+DPLimiter* DPChannel::getLimiter() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mLimiter;
+}
+
+void DPChannel::setLimiter(DPLimiter &limiter) {
+ if (!mInitialized) {
+ return;
+ }
+ mLimiter = limiter;
+}
+
+//----
+DPBase::DPBase() : mInitialized(false), mChannelCount(0), mPreEqInUse(false), mPreEqBandCount(0),
+ mMbcInUse(false), mMbcBandCount(0), mPostEqInUse(false), mPostEqBandCount(0),
+ mLimiterInUse(false) {
+}
+
+void DPBase::init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse) {
+ ALOGV("DPBase::init");
+ mChannelCount = channelCount;
+ mPreEqInUse = preEqInUse;
+ mPreEqBandCount = preEqBandCount;
+ mMbcInUse = mbcInUse;
+ mMbcBandCount = mbcBandCount;
+ mPostEqInUse = postEqInUse;
+ mPostEqBandCount = postEqBandCount;
+ mLimiterInUse = limiterInUse;
+ mChannel.resize(mChannelCount);
+ for (size_t ch = 0; ch < mChannelCount; ch++) {
+ mChannel[ch].init(0, preEqInUse, preEqBandCount, mbcInUse, mbcBandCount,
+ postEqInUse, postEqBandCount, limiterInUse);
+ }
+ mInitialized = true;
+}
+
+DPChannel* DPBase::getChannel(uint32_t channelIndex) {
+    if (!mInitialized || channelIndex >= mChannel.size()) {
+ return NULL;
+ }
+    return &mChannel[channelIndex];
+}
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.h b/media/libeffects/dynamicsproc/dsp/DPBase.h
new file mode 100644
index 0000000..355f64b
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DPBASE_H_
+#define DPBASE_H_
+
+
+#include <stdint.h>
+#include <cmath>
+#include <vector>
+#include <android/log.h>
+
+namespace dp_fx {
+
+#define DP_DEFAULT_BAND_ENABLED false
+#define DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ 1000
+#define DP_DEFAULT_ATTACK_TIME_MS 50
+#define DP_DEFAULT_RELEASE_TIME_MS 120
+#define DP_DEFAULT_RATIO 2
+#define DP_DEFAULT_THRESHOLD_DB -30
+#define DP_DEFAULT_KNEE_WIDTH_DB 0
+#define DP_DEFAULT_NOISE_GATE_THRESHOLD_DB -90
+#define DP_DEFAULT_EXPANDER_RATIO 1
+#define DP_DEFAULT_GAIN_DB 0
+#define DP_DEFAULT_STAGE_INUSE false
+#define DP_DEFAULT_STAGE_ENABLED false
+#define DP_DEFAULT_LINK_GROUP 0
+
+
+
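+// Common base for the processing stages (EQ, MBC, limiter): tracks whether the stage is
+// present in the processing chain (inUse) and whether it is currently enabled.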
+class DPStage {
+public:
+ DPStage();
+ ~DPStage() = default;
+ void init(bool inUse, bool enabled);
+ bool isInUse() const {
+ return mInUse;
+ }
+ bool isEnabled() const {
+ return mEnabled;
+ }
+ void setEnabled(bool enabled) {
+ mEnabled = enabled;
+ }
+private:
+ bool mInUse;
+ bool mEnabled;
+};
+
+class DPBandStage : public DPStage {
+public:
+ DPBandStage();
+ ~DPBandStage() = default;
+ void init(bool inUse, bool enabled, int bandCount);
+ uint32_t getBandCount() const {
+ return mBandCount;
+ }
+ void setBandCount(uint32_t bandCount) {
+ mBandCount = bandCount;
+ }
+private:
+ uint32_t mBandCount;
+};
+
+class DPBandBase {
+public:
+ DPBandBase();
+ ~DPBandBase() = default;
+ void init(bool enabled, float cutoffFrequency);
+ bool isEnabled() const {
+ return mEnabled;
+ }
+ void setEnabled(bool enabled) {
+ mEnabled = enabled;
+ }
+ float getCutoffFrequency() const {
+        return mCutoffFrequencyHz;
+ }
+ void setCutoffFrequency(float cutoffFrequency) {
+        mCutoffFrequencyHz = cutoffFrequency;
+ }
+private:
+ bool mEnabled;
+    float mCutoffFrequencyHz;
+};
+
+class DPEqBand : public DPBandBase {
+public:
+ DPEqBand();
+ ~DPEqBand() = default;
+ void init(bool enabled, float cutoffFrequency, float gain);
+ float getGain() const;
+ void setGain(float gain);
+private:
+ float mGainDb;
+};
+
+class DPMbcBand : public DPBandBase {
+public:
+ DPMbcBand();
+ ~DPMbcBand() = default;
+ void init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+ float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+ float expanderRatio, float preGain, float postGain);
+ float getAttackTime() const {
+ return mAttackTimeMs;
+ }
+ void setAttackTime(float attackTime) {
+ mAttackTimeMs = attackTime;
+ }
+ float getReleaseTime() const {
+ return mReleaseTimeMs;
+ }
+ void setReleaseTime(float releaseTime) {
+ mReleaseTimeMs = releaseTime;
+ }
+ float getRatio() const {
+ return mRatio;
+ }
+ void setRatio(float ratio) {
+ mRatio = ratio;
+ }
+ float getThreshold() const {
+ return mThresholdDb;
+ }
+ void setThreshold(float threshold) {
+ mThresholdDb = threshold;
+ }
+ float getKneeWidth() const {
+ return mKneeWidthDb;
+ }
+ void setKneeWidth(float kneeWidth) {
+ mKneeWidthDb = kneeWidth;
+ }
+ float getNoiseGateThreshold() const {
+ return mNoiseGateThresholdDb;
+ }
+ void setNoiseGateThreshold(float noiseGateThreshold) {
+ mNoiseGateThresholdDb = noiseGateThreshold;
+ }
+ float getExpanderRatio() const {
+ return mExpanderRatio;
+ }
+ void setExpanderRatio(float expanderRatio) {
+ mExpanderRatio = expanderRatio;
+ }
+ float getPreGain() const {
+ return mPreGainDb;
+ }
+ void setPreGain(float preGain) {
+ mPreGainDb = preGain;
+ }
+ float getPostGain() const {
+ return mPostGainDb;
+ }
+ void setPostGain(float postGain) {
+ mPostGainDb = postGain;
+ }
+private:
+ float mAttackTimeMs;
+ float mReleaseTimeMs;
+ float mRatio;
+ float mThresholdDb;
+ float mKneeWidthDb;
+ float mNoiseGateThresholdDb;
+ float mExpanderRatio;
+ float mPreGainDb;
+ float mPostGainDb;
+};
+
+class DPEq : public DPBandStage {
+public:
+ DPEq();
+ ~DPEq() = default;
+ void init(bool inUse, bool enabled, uint32_t bandCount);
+ DPEqBand * getBand(uint32_t band);
+ void setBand(uint32_t band, DPEqBand &src);
+private:
+ std::vector<DPEqBand> mBands;
+};
+
+class DPMbc : public DPBandStage {
+public:
+ DPMbc();
+ ~DPMbc() = default;
+ void init(bool inUse, bool enabled, uint32_t bandCount);
+ DPMbcBand * getBand(uint32_t band);
+ void setBand(uint32_t band, DPMbcBand &src);
+private:
+ std::vector<DPMbcBand> mBands;
+};
+
+class DPLimiter : public DPStage {
+public:
+ DPLimiter();
+ ~DPLimiter() = default;
+ void init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+ float ratio, float threshold, float postGain);
+ uint32_t getLinkGroup() const {
+ return mLinkGroup;
+ }
+ void setLinkGroup(uint32_t linkGroup) {
+ mLinkGroup = linkGroup;
+ }
+ float getAttackTime() const {
+ return mAttackTimeMs;
+ }
+ void setAttackTime(float attackTime) {
+ mAttackTimeMs = attackTime;
+ }
+ float getReleaseTime() const {
+ return mReleaseTimeMs;
+ }
+ void setReleaseTime(float releaseTime) {
+ mReleaseTimeMs = releaseTime;
+ }
+ float getRatio() const {
+ return mRatio;
+ }
+ void setRatio(float ratio) {
+ mRatio = ratio;
+ }
+ float getThreshold() const {
+ return mThresholdDb;
+ }
+ void setThreshold(float threshold) {
+ mThresholdDb = threshold;
+ }
+ float getPostGain() const {
+ return mPostGainDb;
+ }
+ void setPostGain(float postGain) {
+ mPostGainDb = postGain;
+ }
+private:
+ uint32_t mLinkGroup;
+ float mAttackTimeMs;
+ float mReleaseTimeMs;
+ float mRatio;
+ float mThresholdDb;
+ float mPostGainDb;
+};
+
+class DPChannel {
+public:
+ DPChannel();
+ ~DPChannel() = default;
+ void init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse);
+
+ float getInputGain() const {
+ if (!mInitialized) {
+ return 0;
+ }
+ return mInputGainDb;
+ }
+ void setInputGain(float gain) {
+ mInputGainDb = gain;
+ }
+
+ DPEq* getPreEq();
+ DPMbc* getMbc();
+ DPEq* getPostEq();
+ DPLimiter *getLimiter();
+ void setLimiter(DPLimiter &limiter);
+
+private:
+ bool mInitialized;
+ float mInputGainDb;
+
+ DPEq mPreEq;
+ DPMbc mMbc;
+ DPEq mPostEq;
+ DPLimiter mLimiter;
+
+ bool mPreEqInUse;
+ bool mMbcInUse;
+ bool mPostEqInUse;
+ bool mLimiterInUse;
+};
+
+class DPBase {
+public:
+ DPBase();
+ virtual ~DPBase() = default;
+
+ void init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse);
+ virtual size_t processSamples(const float *in, float *out, size_t samples) = 0;
+ virtual void reset() = 0;
+
+ DPChannel* getChannel(uint32_t channelIndex);
+ uint32_t getChannelCount() const {
+ return mChannelCount;
+ }
+ uint32_t getPreEqBandCount() const {
+ return mPreEqBandCount;
+ }
+ uint32_t getMbcBandCount() const {
+ return mMbcBandCount;
+ }
+ uint32_t getPostEqBandCount() const {
+ return mPostEqBandCount;
+ }
+ bool isPreEQInUse() const {
+ return mPreEqInUse;
+ }
+ bool isMbcInUse() const {
+ return mMbcInUse;
+ }
+ bool isPostEqInUse() const {
+ return mPostEqInUse;
+ }
+ bool isLimiterInUse() const {
+ return mLimiterInUse;
+ }
+
+private:
+ bool mInitialized;
+ //general
+ uint32_t mChannelCount;
+ bool mPreEqInUse;
+ uint32_t mPreEqBandCount;
+ bool mMbcInUse;
+ uint32_t mMbcBandCount;
+ bool mPostEqInUse;
+ uint32_t mPostEqBandCount;
+ bool mLimiterInUse;
+
+ std::vector<DPChannel> mChannel;
+};
+
+} //namespace dp_fx
+
+
+#endif // DPBASE_H_
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
new file mode 100644
index 0000000..59195fc
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPFrequency"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPFrequency.h"
+#include <algorithm>
+
+namespace dp_fx {
+
+using Eigen::MatrixXd;
+#define MAX_BLOCKSIZE 16384 //For this implementation
+#define MIN_BLOCKSIZE 8
+
+#define CIRCULAR_BUFFER_UPSAMPLE 4 //4 times buffer size
+
+static constexpr float MIN_ENVELOPE = 0.000001f;
+//helper functions
+static inline bool isPowerOf2(unsigned long n) {
+ return (n & (n - 1)) == 0;
+}
+static constexpr float EPSILON = 0.0000001f;
+
+static inline bool isZero(float f) {
+ return fabs(f) <= EPSILON;
+}
+
+template <class T>
+bool compareEquality(T a, T b) {
+ return (a == b);
+}
+
+template <> bool compareEquality<float>(float a, float b) {
+ return isZero(a - b);
+}
+
+//TODO: avoid using macro for estimating change and assignment.
+#define IS_CHANGED(c, a, b) { c |= !compareEquality(a,b); \
+ (a) = (b); }
+
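+//dB <-> linear conversion helpers: linear = 10^(dB/20)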
+float dBtoLinear(float valueDb) {
+ return pow (10, valueDb / 20.0);
+}
+
+float linearToDb(float value) {
+ return 20 * log10(value);
+}
+
+//ChannelBuffers helper
+void ChannelBuffer::initBuffers(unsigned int blockSize, unsigned int overlapSize,
+ unsigned int halfFftSize, unsigned int samplingRate, DPBase &dpBase) {
+ ALOGV("ChannelBuffer::initBuffers blockSize %d, overlap %d, halfFft %d",
+ blockSize, overlapSize, halfFftSize);
+
+ mSamplingRate = samplingRate;
+ mBlockSize = blockSize;
+
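+    //input/output circular buffers keep CIRCULAR_BUFFER_UPSAMPLE blocks of headroom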
+ cBInput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+ cBOutput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+
+ //fill input with half block size...
+ for (unsigned int k = 0; k < mBlockSize/2; k++) {
+ cBInput.write(0);
+ }
+
+ //temp vectors
+ input.resize(mBlockSize);
+ output.resize(mBlockSize);
+ outTail.resize(overlapSize);
+
+ //module vectors
+ mPreEqFactorVector.resize(halfFftSize, 1.0);
+ mPostEqFactorVector.resize(halfFftSize, 1.0);
+
+ mPreEqBands.resize(dpBase.getPreEqBandCount());
+ mMbcBands.resize(dpBase.getMbcBandCount());
+ mPostEqBands.resize(dpBase.getPostEqBandCount());
+ ALOGV("mPreEqBands %zu, mMbcBands %zu, mPostEqBands %zu",mPreEqBands.size(),
+ mMbcBands.size(), mPostEqBands.size());
+
+ DPChannel *pChannel = dpBase.getChannel(0);
+ if (pChannel != NULL) {
+ mPreEqInUse = pChannel->getPreEq()->isInUse();
+ mMbcInUse = pChannel->getMbc()->isInUse();
+ mPostEqInUse = pChannel->getPostEq()->isInUse();
+ mLimiterInUse = pChannel->getLimiter()->isInUse();
+ }
+}
+
+void ChannelBuffer::computeBinStartStop(BandParams &bp, size_t binStart) {
+
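+    //map this band's cutoff frequency to its (rounded) FFT bin index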
+ bp.binStart = binStart;
+ bp.binStop = (int)(0.5 + bp.freqCutoffHz * mBlockSize / mSamplingRate);
+}
+
+//== DPFrequency
+
+void DPFrequency::reset() {
+}
+
+size_t DPFrequency::getMinBockSize() {
+ return MIN_BLOCKSIZE;
+}
+
+size_t DPFrequency::getMaxBockSize() {
+ return MAX_BLOCKSIZE;
+}
+
+void DPFrequency::configure(size_t blockSize, size_t overlapSize,
+ size_t samplingRate) {
+ ALOGV("configure");
+ mBlockSize = blockSize;
+ if (mBlockSize > MAX_BLOCKSIZE) {
+ mBlockSize = MAX_BLOCKSIZE;
+ } else if (mBlockSize < MIN_BLOCKSIZE) {
+ mBlockSize = MIN_BLOCKSIZE;
+ } else {
+ if (!isPowerOf2(blockSize)) {
+ //find next highest power of 2.
+ mBlockSize = 1 << (32 - __builtin_clz(blockSize));
+ }
+ }
+
+ mHalfFFTSize = 1 + mBlockSize / 2; //including Nyquist bin
+ mOverlapSize = std::min(overlapSize, mBlockSize/2);
+
+ int channelcount = getChannelCount();
+ mSamplingRate = samplingRate;
+ mChannelBuffers.resize(channelcount);
+ for (int ch = 0; ch < channelcount; ch++) {
+ mChannelBuffers[ch].initBuffers(mBlockSize, mOverlapSize, mHalfFFTSize,
+ mSamplingRate, *this);
+ }
+
+ //dsp
+ fill_window(mVWindow, RDSP_WINDOW_HANNING_FLAT_TOP, mBlockSize, mOverlapSize);
+}
+
+void DPFrequency::updateParameters(ChannelBuffer &cb, int channelIndex) {
+ DPChannel *pChannel = getChannel(channelIndex);
+
+ if (pChannel == NULL) {
+ ALOGE("Error: updateParameters null DPChannel %d", channelIndex);
+ return;
+ }
+
+ //===Input Gain and preEq
+ {
+ bool changed = false;
+ IS_CHANGED(changed, cb.inputGainDb, pChannel->getInputGain());
+ //===EqPre
+ if (cb.mPreEqInUse) {
+ DPEq *pPreEq = pChannel->getPreEq();
+ if (pPreEq == NULL) {
+ ALOGE("Error: updateParameters null PreEq for channel: %d", channelIndex);
+ return;
+ }
+ IS_CHANGED(changed, cb.mPreEqEnabled, pPreEq->isEnabled());
+ if (cb.mPreEqEnabled) {
+ for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+ DPEqBand *pEqBand = pPreEq->getBand(b);
+ if (pEqBand == NULL) {
+ ALOGE("Error: updateParameters null PreEqBand for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+ IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+ IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+ pEqBand->getCutoffFrequency());
+ IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+ }
+ }
+ }
+
+ if (changed) {
+ float inputGainFactor = dBtoLinear(cb.inputGainDb);
+ if (cb.mPreEqInUse && cb.mPreEqEnabled) {
+ ALOGV("preEq changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+
+ //frequency translation
+ cb.computeBinStartStop(*pEqBandParams, binNext);
+ binNext = pEqBandParams->binStop + 1;
+ float factor = dBtoLinear(pEqBandParams->gainDb);
+ if (!pEqBandParams->enabled) {
+ factor = 1.0; //disabled band: only the input gain (applied below) should remain
+ }
+ for (size_t k = pEqBandParams->binStart;
+ k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+ cb.mPreEqFactorVector[k] = factor * inputGainFactor;
+ }
+ }
+ } else {
+ ALOGV("only input gain changed, recomputing!");
+ //populate PreEq factor with input gain factor.
+ for (size_t k = 0; k < mHalfFFTSize; k++) {
+ cb.mPreEqFactorVector[k] = inputGainFactor;
+ }
+ }
+ }
+ } //inputGain and preEq
+
+ //===EqPost
+ if (cb.mPostEqInUse) {
+ bool changed = false;
+
+ DPEq *pPostEq = pChannel->getPostEq();
+ if (pPostEq == NULL) {
+ ALOGE("Error: updateParameters null postEq for channel: %d", channelIndex);
+ return; //failed.
+ }
+ IS_CHANGED(changed, cb.mPostEqEnabled, pPostEq->isEnabled());
+ if (cb.mPostEqEnabled) {
+ for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+ DPEqBand *pEqBand = pPostEq->getBand(b);
+ if (pEqBand == NULL) {
+ ALOGE("Error: updateParameters PostEqBand NULL for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+ IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+ IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+ pEqBand->getCutoffFrequency());
+ IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+ }
+ if (changed) {
+ ALOGV("postEq changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+
+ //frequency translation
+ cb.computeBinStartStop(*pEqBandParams, binNext);
+ binNext = pEqBandParams->binStop + 1;
+ float factor = dBtoLinear(pEqBandParams->gainDb);
+ if (!pEqBandParams->enabled) {
+ factor = 1.0;
+ }
+ for (size_t k = pEqBandParams->binStart;
+ k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+ cb.mPostEqFactorVector[k] = factor;
+ }
+ }
+ }
+ } //enabled
+ }
+
+ //===MBC
+ if (cb.mMbcInUse) {
+ DPMbc *pMbc = pChannel->getMbc();
+ if (pMbc == NULL) {
+ ALOGE("Error: updateParameters Mbc NULL for channel: %d", channelIndex);
+ return;
+ }
+ cb.mMbcEnabled = pMbc->isEnabled();
+ if (cb.mMbcEnabled) {
+ bool changed = false;
+ for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+ DPMbcBand *pMbcBand = pMbc->getBand(b);
+ if (pMbcBand == NULL) {
+ ALOGE("Error: updateParameters MbcBand NULL for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+ pMbcBandParams->enabled = pMbcBand->isEnabled();
+ IS_CHANGED(changed, pMbcBandParams->freqCutoffHz,
+ pMbcBand->getCutoffFrequency());
+
+ pMbcBandParams->gainPreDb = pMbcBand->getPreGain();
+ pMbcBandParams->gainPostDb = pMbcBand->getPostGain();
+ pMbcBandParams->attackTimeMs = pMbcBand->getAttackTime();
+ pMbcBandParams->releaseTimeMs = pMbcBand->getReleaseTime();
+ pMbcBandParams->ratio = pMbcBand->getRatio();
+ pMbcBandParams->thresholdDb = pMbcBand->getThreshold();
+ pMbcBandParams->kneeWidthDb = pMbcBand->getKneeWidth();
+ pMbcBandParams->noiseGateThresholdDb = pMbcBand->getNoiseGateThreshold();
+ pMbcBandParams->expanderRatio = pMbcBand->getExpanderRatio();
+
+ }
+
+ if (changed) {
+ ALOGV("mbc changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+
+ pMbcBandParams->previousEnvelope = 0;
+
+ //frequency translation
+ cb.computeBinStartStop(*pMbcBandParams, binNext);
+ binNext = pMbcBandParams->binStop + 1;
+ }
+
+ }
+
+ }
+ }
+}
+
+size_t DPFrequency::processSamples(const float *in, float *out, size_t samples) {
+ const float *pIn = in;
+ float *pOut = out;
+
+ int channelCount = mChannelBuffers.size();
+ if (channelCount < 1) {
+ ALOGW("warning: no Channels ready for processing");
+ return 0;
+ }
+
+ //**Check if parameters have changed and update
+ for (int ch = 0; ch < channelCount; ch++) {
+ updateParameters(mChannelBuffers[ch], ch);
+ }
+
+ //**separate into channels
+ for (size_t k = 0; k < samples; k += channelCount) {
+ for (int ch = 0; ch < channelCount; ch++) {
+ mChannelBuffers[ch].cBInput.write(*pIn++);
+ }
+ }
+
+ //TODO: lookahead limiters
+ //TODO: apply linked limiters to all channels.
+ //**Process each Channel
+ for (int ch = 0; ch < channelCount; ch++) {
+ processMono(mChannelBuffers[ch]);
+ }
+
+ //** estimate how much data is available in ALL channels
+ size_t available = mChannelBuffers[0].cBOutput.availableToRead();
+ for (int ch = 1; ch < channelCount; ch++) {
+ available = std::min(available, mChannelBuffers[ch].cBOutput.availableToRead());
+ }
+
+ //** make sure to output just what the buffer can handle
+ if (available > samples/channelCount) {
+ available = samples/channelCount;
+ }
+
+ //**Prepend zeroes if necessary
+ size_t fill = samples - (channelCount * available);
+ for (size_t k = 0; k < fill; k++) {
+ *pOut++ = 0;
+ }
+
+ //**interleave channels
+ for (size_t k = 0; k < available; k++) {
+ for (int ch = 0; ch < channelCount; ch++) {
+ *pOut++ = mChannelBuffers[ch].cBOutput.read();
+ }
+ }
+
+ return samples;
+}
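+//Note: 'samples' counts interleaved samples across all channels (e.g. stereo with samples = 8
+//enqueues 4 frames per channel); until the overlap-add pipeline has produced enough output,
+//the shortfall is covered by the 'fill' leading zeros above.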
+
+size_t DPFrequency::processMono(ChannelBuffer &cb) {
+
+ size_t processedSamples = 0;
+
+ size_t available = cb.cBInput.availableToRead();
+ while (available >= mBlockSize - mOverlapSize) {
+
+ //move tail of previous
+ for (unsigned int k = 0; k < mOverlapSize; ++k) {
+ cb.input[k] = cb.input[mBlockSize - mOverlapSize + k];
+ }
+
+ //read new available data
+ for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
+ cb.input[mOverlapSize + k] = cb.cBInput.read();
+ }
+
+ //## Actual process
+ processOneVector(cb.output, cb.input, cb);
+ //##End of Process
+
+ //mix in the previous tail (and capture the new tail)
+ for (unsigned int k = 0; k < mOverlapSize; k++) {
+ cb.output[k] += cb.outTail[k];
+ cb.outTail[k] = cb.output[mBlockSize - mOverlapSize + k]; //new tail
+ }
+
+ //output data
+ for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
+ cb.cBOutput.write(cb.output[k]);
+ }
+
+ available = cb.cBInput.availableToRead();
+ }
+
+ return processedSamples;
+}
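+//Overlap-add bookkeeping above, assuming mBlockSize = 4096 and mOverlapSize = 2048: each pass
+//consumes 2048 fresh samples, reuses the previous 2048 as the head of the block, adds the saved
+//2048-sample tail into the new output, and writes 2048 samples downstream.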
+
+size_t DPFrequency::processOneVector(FloatVec &output, FloatVec &input,
+ ChannelBuffer &cb) {
+
+ //##apply window
+ Eigen::Map<Eigen::VectorXf> eWindow(&mVWindow[0], mVWindow.size());
+ Eigen::Map<Eigen::VectorXf> eInput(&input[0], input.size());
+
+ Eigen::VectorXf eWin = eInput.cwiseProduct(eWindow); //apply window
+
+ //##fft //TODO: refactor frequency transformations away from other stages.
+ mFftServer.fwd(mComplexTemp, eWin);
+
+ size_t cSize = mComplexTemp.size();
+ size_t maxBin = std::min(cSize/2, mHalfFFTSize);
+
+ //== EqPre (always runs)
+ for (size_t k = 0; k < maxBin; k++) {
+ mComplexTemp[k] *= cb.mPreEqFactorVector[k];
+ }
+
+ //== MBC
+ if (cb.mMbcInUse && cb.mMbcEnabled) {
+ for (size_t band = 0; band < cb.mMbcBands.size(); band++) {
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[band];
+ float fEnergySum = 0;
+
+ //apply pre gain.
+ float preGainFactor = dBtoLinear(pMbcBandParams->gainPreDb);
+ float preGainSquared = preGainFactor * preGainFactor;
+
+ for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
+ float fReal = mComplexTemp[k].real();
+ float fImag = mComplexTemp[k].imag();
+ float fSquare = (fReal * fReal + fImag * fImag) * preGainSquared;
+
+ fEnergySum += fSquare;
+ }
+
+ fEnergySum = sqrt(fEnergySum / 2.0);
+ float fTheta = 0.0;
+ float fFAtt = pMbcBandParams->attackTimeMs;
+ float fFRel = pMbcBandParams->releaseTimeMs;
+
+ float fUpdatesPerSecond = 10; //TODO: compute from framerate
+
+
+ if (fEnergySum > pMbcBandParams->previousEnvelope) {
+ fTheta = exp(-1.0 / (fFAtt * fUpdatesPerSecond));
+ } else {
+ fTheta = exp(-1.0 / (fFRel * fUpdatesPerSecond));
+ }
+
+ float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * pMbcBandParams->previousEnvelope;
+
+ //preserve for next iteration
+ pMbcBandParams->previousEnvelope = fEnv;
+
+ float fThreshold = dBtoLinear(pMbcBandParams->thresholdDb);
+ float fNoiseGateThreshold = dBtoLinear(pMbcBandParams->noiseGateThresholdDb);
+
+ float fNewFactor = 1.0;
+
+ if (fEnv > fThreshold) {
+ float fDbAbove = linearToDb(fThreshold / fEnv);
+ float fDbTarget = fDbAbove / pMbcBandParams->ratio;
+ float fDbChange = fDbAbove - fDbTarget;
+ fNewFactor = dBtoLinear(fDbChange);
+ } else if (fEnv < fNoiseGateThreshold) {
+ if (fEnv < MIN_ENVELOPE) {
+ fEnv = MIN_ENVELOPE;
+ }
+ float fDbBelow = linearToDb(fNoiseGateThreshold / fEnv);
+ float fDbTarget = fDbBelow / pMbcBandParams->expanderRatio;
+ float fDbChange = fDbBelow - fDbTarget;
+ fNewFactor = dBtoLinear(fDbChange);
+ }
+
+ //apply post gain.
+ fNewFactor *= dBtoLinear(pMbcBandParams->gainPostDb);
+
+ if (fNewFactor < 0) {
+ fNewFactor = 0;
+ }
+
+ //apply to this band
+ for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
+ mComplexTemp[k] *= fNewFactor;
+ }
+
+ } //end per band process
+
+ } //end MBC
+
+ //== EqPost
+ if (cb.mPostEqInUse && cb.mPostEqEnabled) {
+ for (size_t k = 0; k < maxBin; k++) {
+ mComplexTemp[k] *= cb.mPostEqFactorVector[k];
+ }
+ }
+
+ //##ifft directly to output.
+ Eigen::Map<Eigen::VectorXf> eOutput(&output[0], output.size());
+ mFftServer.inv(eOutput, mComplexTemp);
+
+ return mBlockSize;
+}
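+//The MBC envelope above is a one-pole smoother, env = (1 - theta) * energy + theta * prevEnv,
+//where theta comes from the attack time while the band energy is rising and from the release
+//time while it is falling; theta closer to 1 means slower tracking.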
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.h b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
new file mode 100644
index 0000000..9919142
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef DPFREQUENCY_H_
+#define DPFREQUENCY_H_
+
+#include <Eigen/Dense>
+#include <unsupported/Eigen/FFT>
+
+#include "RDsp.h"
+#include "SHCircularBuffer.h"
+
+#include "DPBase.h"
+
+
+namespace dp_fx {
+
+using FXBuffer = SHCircularBuffer<float>;
+
+class ChannelBuffer {
+public:
+ FXBuffer cBInput; // Circular Buffer input
+ FXBuffer cBOutput; // Circular Buffer output
+ FloatVec input; // time domain temp vector for input
+ FloatVec output; // time domain temp vector for output
+ FloatVec outTail; // time domain temp vector for output tail (for overlap-add method)
+
+ //Current parameters
+ float inputGainDb;
+ struct BandParams {
+ bool enabled;
+ float freqCutoffHz;
+ size_t binStart;
+ size_t binStop;
+ };
+ struct EqBandParams : public BandParams {
+ float gainDb;
+ };
+ struct MbcBandParams : public BandParams {
+ float gainPreDb;
+ float gainPostDb;
+ float attackTimeMs;
+ float releaseTimeMs;
+ float ratio;
+ float thresholdDb;
+ float kneeWidthDb;
+ float noiseGateThresholdDb;
+ float expanderRatio;
+
+ //Historic values
+ float previousEnvelope;
+ };
+
+ bool mPreEqInUse;
+ bool mPreEqEnabled;
+ std::vector<EqBandParams> mPreEqBands;
+
+ bool mMbcInUse;
+ bool mMbcEnabled;
+ std::vector<MbcBandParams> mMbcBands;
+
+ bool mPostEqInUse;
+ bool mPostEqEnabled;
+ std::vector<EqBandParams> mPostEqBands;
+
+ bool mLimiterInUse;
+ bool mLimiterEnabled;
+ FloatVec mPreEqFactorVector; // temp pre-computed vector to shape spectrum at preEQ stage
+ FloatVec mPostEqFactorVector; // temp pre-computed vector to shape spectrum at postEQ stage
+
+ void initBuffers(unsigned int blockSize, unsigned int overlapSize, unsigned int halfFftSize,
+ unsigned int samplingRate, DPBase &dpBase);
+ void computeBinStartStop(BandParams &bp, size_t binStart);
+private:
+ unsigned int mSamplingRate;
+ unsigned int mBlockSize;
+
+};
+
+class DPFrequency : public DPBase {
+public:
+ virtual size_t processSamples(const float *in, float *out, size_t samples);
+ virtual void reset();
+ void configure(size_t blockSize, size_t overlapSize, size_t samplingRate);
+ static size_t getMinBockSize();
+ static size_t getMaxBockSize();
+
+private:
+ void updateParameters(ChannelBuffer &cb, int channelIndex);
+ size_t processMono(ChannelBuffer &cb);
+ size_t processOneVector(FloatVec &output, FloatVec &input, ChannelBuffer &cb);
+
+ size_t mBlockSize;
+ size_t mHalfFFTSize;
+ size_t mOverlapSize;
+ size_t mSamplingRate;
+
+ std::vector<ChannelBuffer> mChannelBuffers;
+
+ //dsp
+ FloatVec mVWindow; //time-domain analysis window coefficients
+ Eigen::VectorXcf mComplexTemp;
+ Eigen::FFT<float> mFftServer;
+};
+
+} //namespace dp_fx
+
+#endif // DPFREQUENCY_H_
diff --git a/media/libeffects/dynamicsproc/dsp/RDsp.h b/media/libeffects/dynamicsproc/dsp/RDsp.h
new file mode 100644
index 0000000..1048442
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/RDsp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RDSP_H
+#define RDSP_H
+
+#include <complex>
+#include <log/log.h>
+#include <vector>
+using FloatVec = std::vector<float>;
+using ComplexVec = std::vector<std::complex<float>>;
+
+// =======
+// DSP window creation
+// =======
+
+#define TWOPI (M_PI * 2)
+
+enum rdsp_window_type {
+ RDSP_WINDOW_RECTANGULAR,
+ RDSP_WINDOW_TRIANGULAR,
+ RDSP_WINDOW_TRIANGULAR_FLAT_TOP,
+ RDSP_WINDOW_HAMMING,
+ RDSP_WINDOW_HAMMING_FLAT_TOP,
+ RDSP_WINDOW_HANNING,
+ RDSP_WINDOW_HANNING_FLAT_TOP,
+};
+
+template <typename T>
+static void fillRectangular(T &v) {
+ const size_t size = v.size();
+ for (size_t i = 0; i < size; i++) {
+ v[i] = 1.0;
+ }
+} //rectangular
+
+template <typename T>
+static void fillTriangular(T &v, size_t overlap) {
+ const size_t size = v.size();
+ //ramp up
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = (2.0 * i + 1) / (2 * overlap);
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ v[i] = (2.0 * (size - i) - 1) / (2 * overlap);
+ }
+ }
+} //triangular
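+// e.g. fillTriangular(v, 2) with v.size() == 8 produces {0.25, 0.75, 1, 1, 1, 1, 0.75, 0.25}.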
+
+template <typename T>
+static void fillHamming(T &v, size_t overlap) {
+ const size_t size = v.size();
+ const size_t twoOverlap = 2 * overlap;
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = 0.54 - 0.46 * cos(TWOPI * i / (twoOverlap - 1));
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ int k = i - ((int)size - 2 * overlap);
+ v[i] = 0.54 - 0.46 * cos(TWOPI * k / (twoOverlap - 1));
+ }
+ }
+} //hamming
+
+template <typename T>
+static void fillHanning(T &v, size_t overlap) {
+ const size_t size = v.size();
+ const size_t twoOverlap = 2 * overlap;
+ //ramp up
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = 0.5 * (1.0 - cos(TWOPI * i / (twoOverlap - 1)));
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ int k = i - ((int)size - 2 * overlap);
+ v[i] = 0.5 * (1.0 - cos(TWOPI * k / (twoOverlap - 1)));
+ }
+ }
+}
+
+template <typename T>
+static void fill_window(T &v, int type, size_t size, size_t overlap) {
+ if (overlap > size / 2) {
+ overlap = size / 2;
+ }
+ v.resize(size);
+
+ switch (type) {
+ case RDSP_WINDOW_RECTANGULAR:
+ fillRectangular(v);
+ break;
+ case RDSP_WINDOW_TRIANGULAR:
+ fillTriangular(v, size / 2);
+ break;
+ case RDSP_WINDOW_TRIANGULAR_FLAT_TOP:
+ fillTriangular(v, overlap);
+ break;
+ case RDSP_WINDOW_HAMMING:
+ fillHamming(v, size / 2);
+ break;
+ case RDSP_WINDOW_HAMMING_FLAT_TOP:
+ fillHamming(v, overlap);
+ break;
+ case RDSP_WINDOW_HANNING:
+ fillHanning(v, size / 2);
+ break;
+ case RDSP_WINDOW_HANNING_FLAT_TOP:
+ fillHanning(v, overlap);
+ break;
+ default:
+ ALOGE("Error: unknown window type %d", type);
+ }
+}
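+// Typical use, mirroring the call in DPFrequency::configure (sizes assumed): given FloatVec w;
+// fill_window(w, RDSP_WINDOW_HANNING_FLAT_TOP, 4096, 2048); resizes w to 4096 entries with a
+// 2048-sample raised-cosine fade-in and fade-out; the flat region shrinks to zero whenever
+// overlap == size / 2.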
+
+//};
+#endif //RDSP_H
diff --git a/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
new file mode 100644
index 0000000..c139cd8
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHCIRCULARBUFFER_H
+#define SHCIRCULARBUFFER_H
+
+#include <log/log.h>
+#include <vector>
+
+template <class T>
+class SHCircularBuffer {
+
+public:
+ SHCircularBuffer() : mReadIndex(0), mWriteIndex(0), mReadAvailable(0) {
+ }
+
+ explicit SHCircularBuffer(size_t maxSize) {
+ resize(maxSize);
+ }
+ void resize(size_t maxSize) {
+ mBuffer.resize(maxSize);
+ mReadIndex = 0;
+ mWriteIndex = 0;
+ mReadAvailable = 0;
+ }
+ inline void write(T value) {
+ if (availableToWrite()) {
+ mBuffer[mWriteIndex++] = value;
+ if (mWriteIndex >= getSize()) {
+ mWriteIndex = 0;
+ }
+ mReadAvailable++;
+ } else {
+ ALOGE("Error: SHCircularBuffer no space to write. allocated size %zu ", getSize());
+ }
+ }
+ inline T read() {
+ T value = T();
+ if (availableToRead()) {
+ value = mBuffer[mReadIndex++];
+ if (mReadIndex >= getSize()) {
+ mReadIndex = 0;
+ }
+ mReadAvailable--;
+ } else {
+ ALOGW("Warning: SHCircularBuffer no data available to read. Default value returned");
+ }
+ return value;
+ }
+ inline size_t availableToRead() const {
+ return mReadAvailable;
+ }
+ inline size_t availableToWrite() const {
+ return getSize() - mReadAvailable;
+ }
+ inline size_t getSize() const {
+ return mBuffer.size();
+ }
+
+private:
+ std::vector<T> mBuffer;
+ size_t mReadIndex;
+ size_t mWriteIndex;
+ size_t mReadAvailable;
+};
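+// Illustrative usage:
+//   SHCircularBuffer<float> buf(4);
+//   buf.write(1.0f); // availableToRead() == 1, availableToWrite() == 3
+//   float v = buf.read(); // v == 1.0f, buffer is empty again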
+
+
+#endif //SHCIRCULARBUFFER_H
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 03700bf..a3db754 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -333,6 +333,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANT),
TERMINATOR
};
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5ad4c01..b874df4 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <inttypes.h>
+#include <stdlib.h>
#include "include/SecureBuffer.h"
#include "include/SharedMemoryBuffer.h"
@@ -88,6 +89,22 @@
static const char *kCodecMaxHeight = "android.media.mediacodec.maxheight"; /* 0..n */
static const char *kCodecError = "android.media.mediacodec.errcode";
static const char *kCodecErrorState = "android.media.mediacodec.errstate";
+static const char *kCodecLatencyMax = "android.media.mediacodec.latency.max"; /* in us */
+static const char *kCodecLatencyMin = "android.media.mediacodec.latency.min"; /* in us */
+static const char *kCodecLatencyAvg = "android.media.mediacodec.latency.avg"; /* in us */
+static const char *kCodecLatencyCount = "android.media.mediacodec.latency.n";
+static const char *kCodecLatencyHist = "android.media.mediacodec.latency.hist"; /* in us */
+static const char *kCodecLatencyUnknown = "android.media.mediacodec.latency.unknown";
+
+// the kCodecRecent* fields appear only in getMetrics() results
+static const char *kCodecRecentLatencyMax = "android.media.mediacodec.recent.max"; /* in us */
+static const char *kCodecRecentLatencyMin = "android.media.mediacodec.recent.min"; /* in us */
+static const char *kCodecRecentLatencyAvg = "android.media.mediacodec.recent.avg"; /* in us */
+static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
+static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist"; /* in us */
+
+// XXX suppress until we get our representation right
+static bool kEmitHistogram = false;
static int64_t getId(const sp<IResourceManagerClient> &client) {
@@ -506,12 +523,14 @@
mDequeueOutputTimeoutGeneration(0),
mDequeueOutputReplyID(0),
mHaveInputSurface(false),
- mHavePendingInputBuffers(false) {
+ mHavePendingInputBuffers(false),
+ mLatencyUnknown(0) {
if (uid == kNoUid) {
mUid = IPCThreadState::self()->getCallingUid();
} else {
mUid = uid;
}
+
initAnalyticsItem();
}
@@ -523,16 +542,90 @@
}
void MediaCodec::initAnalyticsItem() {
- CHECK(mAnalyticsItem == NULL);
- // set up our new record, get a sessionID, put it into the in-progress list
- mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
- if (mAnalyticsItem != NULL) {
- // don't record it yet; only at the end, when we have decided that we have
- // data worth writing (e.g. .count() > 0)
+ if (mAnalyticsItem == NULL) {
+ mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
+ }
+
+ mLatencyHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
+
+ {
+ Mutex::Autolock al(mRecentLock);
+ for (int i = 0; i<kRecentLatencyFrames; i++) {
+ mRecentSamples[i] = kRecentSampleInvalid;
+ }
+ mRecentHead = 0;
+ }
+}
+
+void MediaCodec::updateAnalyticsItem() {
+ ALOGV("MediaCodec::updateAnalyticsItem");
+ if (mAnalyticsItem == NULL) {
+ return;
+ }
+
+ if (mLatencyHist.getCount() != 0) {
+ mAnalyticsItem->setInt64(kCodecLatencyMax, mLatencyHist.getMax());
+ mAnalyticsItem->setInt64(kCodecLatencyMin, mLatencyHist.getMin());
+ mAnalyticsItem->setInt64(kCodecLatencyAvg, mLatencyHist.getAvg());
+ mAnalyticsItem->setInt64(kCodecLatencyCount, mLatencyHist.getCount());
+
+ if (kEmitHistogram) {
+ // and the histogram itself
+ std::string hist = mLatencyHist.emit();
+ mAnalyticsItem->setCString(kCodecLatencyHist, hist.c_str());
+ }
+ }
+ if (mLatencyUnknown > 0) {
+ mAnalyticsItem->setInt64(kCodecLatencyUnknown, mLatencyUnknown);
+ }
+
+#if 0
+ // enable for short term, only while debugging
+ updateEphemeralAnalytics(mAnalyticsItem);
+#endif
+}
+
+void MediaCodec::updateEphemeralAnalytics(MediaAnalyticsItem *item) {
+ ALOGD("MediaCodec::updateEphemeralAnalytics()");
+
+ if (item == NULL) {
+ return;
+ }
+
+ Histogram recentHist;
+
+ // build an empty histogram
+ recentHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
+
+ // stuff it with the samples in the ring buffer
+ {
+ Mutex::Autolock al(mRecentLock);
+
+ for (int i=0; i<kRecentLatencyFrames; i++) {
+ if (mRecentSamples[i] != kRecentSampleInvalid) {
+ recentHist.insert(mRecentSamples[i]);
+ }
+ }
+ }
+
+
+ // spit the data (if any) into the supplied analytics record
+ if (recentHist.getCount() != 0) {
+ item->setInt64(kCodecRecentLatencyMax, recentHist.getMax());
+ item->setInt64(kCodecRecentLatencyMin, recentHist.getMin());
+ item->setInt64(kCodecRecentLatencyAvg, recentHist.getAvg());
+ item->setInt64(kCodecRecentLatencyCount, recentHist.getCount());
+
+ if (kEmitHistogram) {
+ // and the histogram itself
+ std::string hist = recentHist.emit();
+ item->setCString(kCodecRecentLatencyHist, hist.c_str());
+ }
}
}
void MediaCodec::flushAnalyticsItem() {
+ updateAnalyticsItem();
if (mAnalyticsItem != NULL) {
// don't log empty records
if (mAnalyticsItem->count() > 0) {
@@ -543,6 +636,190 @@
}
}
+bool MediaCodec::Histogram::setup(int nbuckets, int64_t width, int64_t floor)
+{
+ if (nbuckets <= 0 || width <= 0) {
+ return false;
+ }
+
+ // get histogram buckets
+ if (nbuckets == mBucketCount && mBuckets != NULL) {
+ // reuse our existing buffer
+ memset(mBuckets, 0, sizeof(*mBuckets) * mBucketCount);
+ } else {
+ // get a new pre-zeroed buffer
+ int64_t *newbuckets = (int64_t *)calloc(nbuckets, sizeof (*mBuckets));
+ if (newbuckets == NULL) {
+ goto bad;
+ }
+ if (mBuckets != NULL)
+ free(mBuckets);
+ mBuckets = newbuckets;
+ }
+
+ mWidth = width;
+ mFloor = floor;
+ mCeiling = floor + nbuckets * width;
+ mBucketCount = nbuckets;
+
+ mMin = INT64_MAX;
+ mMax = INT64_MIN;
+ mSum = 0;
+ mCount = 0;
+ mBelow = mAbove = 0;
+
+ return true;
+
+ bad:
+ if (mBuckets != NULL) {
+ free(mBuckets);
+ mBuckets = NULL;
+ }
+
+ return false;
+}
+
+void MediaCodec::Histogram::insert(int64_t sample)
+{
+ // histogram is not set up
+ if (mBuckets == NULL) {
+ return;
+ }
+
+ mCount++;
+ mSum += sample;
+ if (mMin > sample) mMin = sample;
+ if (mMax < sample) mMax = sample;
+
+ if (sample < mFloor) {
+ mBelow++;
+ } else if (sample >= mCeiling) {
+ mAbove++;
+ } else {
+ int64_t slot = (sample - mFloor) / mWidth;
+ CHECK(slot < mBucketCount);
+ mBuckets[slot]++;
+ }
+ return;
+}
+
+std::string MediaCodec::Histogram::emit()
+{
+ std::string value;
+ char buffer[64];
+
+ // emits: floor,width,below{bucket0,bucket1,...,bucketN}above
+ // an unconfigured histogram will emit: 0,0,0{}0
+ // XXX: is this best representation?
+ snprintf(buffer, sizeof(buffer), "%" PRId64 ",%" PRId64 ",%" PRId64 "{",
+ mFloor, mWidth, mBelow);
+ value = buffer;
+ for (int i = 0; i < mBucketCount; i++) {
+ if (i != 0) {
+ value = value + ",";
+ }
+ snprintf(buffer, sizeof(buffer), "%" PRId64, mBuckets[i]);
+ value = value + buffer;
+ }
+ snprintf(buffer, sizeof(buffer), "}%" PRId64 , mAbove);
+ value = value + buffer;
+ return value;
+}
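+// Example emit() output: floor=2000, width=2000, below=0, above=2 and bucket counts {1,0,3}
+// produce the string "2000,2000,0{1,0,3}2".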
+
+// when we send a buffer to the codec
+void MediaCodec::statsBufferSent(int64_t presentationUs) {
+
+ // only enqueue if we have a legitimate time
+ if (presentationUs <= 0) {
+ ALOGV("presentation time: %" PRId64, presentationUs);
+ return;
+ }
+
+ const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
+ BufferFlightTiming_t startdata = { presentationUs, nowNs };
+
+ {
+ // mutex access to mBuffersInFlight and other stats
+ Mutex::Autolock al(mLatencyLock);
+
+
+ // XXX: we *could* make sure that the time is later than the end of queue
+ // as part of a consistency check...
+ mBuffersInFlight.push_back(startdata);
+ }
+}
+
+// when we get a buffer back from the codec
+void MediaCodec::statsBufferReceived(int64_t presentationUs) {
+
+ CHECK_NE(mState, UNINITIALIZED);
+
+ // mutex access to mBuffersInFlight and other stats
+ Mutex::Autolock al(mLatencyLock);
+
+ // how long this buffer took for the round trip through the codec
+ // NB: pipelining can/will make these times larger. e.g., if each packet
+ // is always 2 msec and we have 3 in flight at any given time, we're going to
+ // see "6 msec" as an answer.
+
+ // ignore stuff with no presentation time
+ if (presentationUs <= 0) {
+ ALOGD("-- returned buffer has bad timestamp %" PRId64 ", ignore it", presentationUs);
+ mLatencyUnknown++;
+ return;
+ }
+
+ BufferFlightTiming_t startdata;
+ bool valid = false;
+ while (mBuffersInFlight.size() > 0) {
+ startdata = *mBuffersInFlight.begin();
+ ALOGV("-- Looking at startdata. presentation %" PRId64 ", start %" PRId64,
+ startdata.presentationUs, startdata.startedNs);
+ if (startdata.presentationUs == presentationUs) {
+ // a match
+ ALOGV("-- match entry for %" PRId64 ", hits our frame of %" PRId64,
+ startdata.presentationUs, presentationUs);
+ mBuffersInFlight.pop_front();
+ valid = true;
+ break;
+ } else if (startdata.presentationUs < presentationUs) {
+ // we must have missed the match for this, drop it and keep looking
+ ALOGV("-- drop entry for %" PRId64 ", before our frame of %" PRId64,
+ startdata.presentationUs, presentationUs);
+ mBuffersInFlight.pop_front();
+ continue;
+ } else {
+ // head is after, so we don't have a frame for ourselves
+ ALOGV("-- found entry for %" PRId64 ", AFTER our frame of %" PRId64
+ " we have nothing to pair with",
+ startdata.presentationUs, presentationUs);
+ mLatencyUnknown++;
+ return;
+ }
+ }
+ if (!valid) {
+ ALOGV("-- empty queue, so ignore that.");
+ mLatencyUnknown++;
+ return;
+ }
+
+ // compute the latency for this buffer, rounding from ns to us
+ const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
+ int64_t latencyUs = (nowNs - startdata.startedNs + 500) / 1000;
+
+ mLatencyHist.insert(latencyUs);
+
+ // push into the recent samples
+ {
+ Mutex::Autolock al(mRecentLock);
+
+ if (mRecentHead >= kRecentLatencyFrames) {
+ mRecentHead = 0;
+ }
+ mRecentSamples[mRecentHead++] = latencyUs;
+ }
+}
+
// static
status_t MediaCodec::PostAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response) {
@@ -778,7 +1055,6 @@
msg->setPointer("descrambler", descrambler.get());
}
if (mAnalyticsItem != NULL) {
- // XXX: save indication that it's crypto in some way...
mAnalyticsItem->setInt32(kCodecCrypto, 1);
}
} else if (mFlags & kFlagIsSecure) {
@@ -1245,11 +1521,14 @@
return UNKNOWN_ERROR;
}
- // XXX: go get current values for whatever in-flight data we want
+ // update any in-flight data that's not carried within the record
+ updateAnalyticsItem();
// send it back to the caller.
reply = mAnalyticsItem->dup();
+ updateEphemeralAnalytics(reply);
+
return OK;
}
@@ -1435,6 +1714,8 @@
int64_t timeUs;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ statsBufferReceived(timeUs);
+
response->setInt64("timeUs", timeUs);
int32_t flags;
@@ -2919,6 +3200,9 @@
Mutex::Autolock al(mBufferLock);
info->mOwnedByClient = false;
info->mData.clear();
+
+ statsBufferSent(timeUs);
+
if (mAnalyticsItem != NULL) {
mAnalyticsItem->addInt64(kCodecBytesIn, size);
}
@@ -3138,6 +3422,8 @@
msg->setInt64("timeUs", timeUs);
+ statsBufferReceived(timeUs);
+
int32_t flags;
CHECK(buffer->meta()->findInt32("flags", &flags));
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
index d0b72b7..13b6d05 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
@@ -45,9 +45,11 @@
OMX_COMPONENTTYPE **component)
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mFLACDecoder(NULL),
- mHasStreamInfo(false),
mInputBufferCount(0),
+ mHasStreamInfo(false),
mSignalledError(false),
+ mSawInputEOS(false),
+ mFinishedDecoder(false),
mOutputPortSettingsChange(NONE) {
ALOGV("ctor:");
memset(&mStreamInfo, 0, sizeof(mStreamInfo));
@@ -292,7 +294,6 @@
}
void SoftFlacDecoder::onQueueFilled(OMX_U32 /* portIndex */) {
- ALOGV("onQueueFilled:");
if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
@@ -300,96 +301,101 @@
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ ALOGV("onQueueFilled %d/%d:", inQueue.empty(), outQueue.empty());
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty()) {
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- uint8_t* inBuffer = inHeader->pBuffer + inHeader->nOffset;
- uint32_t inBufferLength = inHeader->nFilledLen;
- bool endOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+ short *outBuffer = reinterpret_cast<short *>(outHeader->pBuffer + outHeader->nOffset);
+ size_t outBufferSize = outHeader->nAllocLen - outHeader->nOffset;
+ int64_t timeStamp = 0;
- if (inHeader->nFilledLen == 0) {
- if (endOfInput) {
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- outInfo->mOwnedByUs = false;
- outQueue.erase(outQueue.begin());
- notifyFillBufferDone(outHeader);
- } else {
- ALOGE("onQueueFilled: emptyInputBuffer received");
+ if (!inQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ uint8_t* inBuffer = inHeader->pBuffer + inHeader->nOffset;
+ uint32_t inBufferLength = inHeader->nFilledLen;
+ ALOGV("input: %u bytes", inBufferLength);
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw EOS");
+ mSawInputEOS = true;
}
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- notifyEmptyBufferDone(inHeader);
- return;
- }
- if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
- ALOGE("onQueueFilled: first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
- inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
- }
- if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
- status_t decoderErr = mFLACDecoder->parseMetadata(inBuffer, inBufferLength);
- mInputBufferCount++;
- if (decoderErr != OK && decoderErr != WOULD_BLOCK) {
- ALOGE("onQueueFilled: FLACDecoder parseMetaData returns error %d", decoderErr);
+ if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+ ALOGE("onQueueFilled: first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+ inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+ }
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+ ALOGV("received config buffer of size %u", inBufferLength);
+ status_t decoderErr = mFLACDecoder->parseMetadata(inBuffer, inBufferLength);
+ mInputBufferCount++;
+
+ if (decoderErr != OK && decoderErr != WOULD_BLOCK) {
+ ALOGE("onQueueFilled: FLACDecoder parseMetaData returns error %d", decoderErr);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
+ return;
+ }
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+
+ if (decoderErr == WOULD_BLOCK) {
+ continue;
+ }
+ mStreamInfo = mFLACDecoder->getStreamInfo();
+ mHasStreamInfo = true;
+
+ // Only send out port settings changed event if both sample rate
+ // and numChannels are valid.
+ if (mStreamInfo.sample_rate && mStreamInfo.channels) {
+ ALOGD("onQueueFilled: initially configuring decoder: %d Hz, %d channels",
+ mStreamInfo.sample_rate, mStreamInfo.channels);
+
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ }
+ return;
+ }
+
+ status_t decoderErr = mFLACDecoder->decodeOneFrame(
+ inBuffer, inBufferLength, outBuffer, &outBufferSize);
+ if (decoderErr != OK) {
+ ALOGE("onQueueFilled: FLACDecoder decodeOneFrame returns error %d", decoderErr);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
return;
}
+ mInputBufferCount++;
+ timeStamp = inHeader->nTimeStamp;
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
notifyEmptyBufferDone(inHeader);
- if (decoderErr == WOULD_BLOCK) {
+ if (outBufferSize == 0) {
+ ALOGV("no output, trying again");
continue;
}
- mStreamInfo = mFLACDecoder->getStreamInfo();
- mHasStreamInfo = true;
-
- // Only send out port settings changed event if both sample rate
- // and numChannels are valid.
- if (mStreamInfo.sample_rate && mStreamInfo.channels) {
- ALOGD("onQueueFilled: initially configuring decoder: %d Hz, %d channels",
- mStreamInfo.sample_rate, mStreamInfo.channels);
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
+ } else if (mSawInputEOS && !mFinishedDecoder) {
+ status_t decoderErr = mFLACDecoder->decodeOneFrame(NULL, 0, outBuffer, &outBufferSize);
+ mFinishedDecoder = true;
+ if (decoderErr != OK) {
+ ALOGE("onQueueFilled: FLACDecoder finish returns error %d", decoderErr);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
+ return;
}
- return;
- }
-
- short *outBuffer =
- reinterpret_cast<short *>(outHeader->pBuffer + outHeader->nOffset);
- size_t outBufferSize = outHeader->nAllocLen - outHeader->nOffset;
-
- status_t decoderErr = mFLACDecoder->decodeOneFrame(
- inBuffer, inBufferLength, outBuffer, &outBufferSize);
- if (decoderErr != OK) {
- ALOGE("onQueueFilled: FLACDecoder decodeOneFrame returns error %d", decoderErr);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
- return;
- }
-
- mInputBufferCount++;
- int64_t ts = inHeader->nTimeStamp;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- notifyEmptyBufferDone(inHeader);
-
- if (endOfInput) {
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- } else if (outBufferSize == 0) {
- continue;
} else {
- outHeader->nFlags = 0;
+ ALOGE("no input buffer but did not get EOS");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
+ return;
}
outHeader->nFilledLen = outBufferSize;
- outHeader->nTimeStamp = ts;
+ outHeader->nTimeStamp = timeStamp;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
index 0f17ed8..b63f7ad 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
@@ -52,9 +52,11 @@
FLACDecoder *mFLACDecoder;
FLAC__StreamMetadata_StreamInfo mStreamInfo;
- bool mHasStreamInfo;
size_t mInputBufferCount;
+ bool mHasStreamInfo;
bool mSignalledError;
+ bool mSawInputEOS;
+ bool mFinishedDecoder;
enum {
NONE,
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index 56d2d69..a0e46c3 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -56,12 +56,13 @@
mCompressionLevel(FLAC_COMPRESSION_LEVEL_DEFAULT),
mEncoderWriteData(false),
mEncoderReturnedEncodedData(false),
+ mSawInputEOS(false),
+ mSentOutputEOS(false),
mEncoderReturnedNbBytes(0),
- mInputBufferPcm32(NULL)
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
- , mHeaderOffset(0)
- , mWroteHeader(false)
-#endif
+ mInputBufferPcm32(NULL),
+ mHeaderOffset(0),
+ mHeaderComplete(false),
+ mWroteHeader(false)
{
ALOGV("SoftFlacEncoder::SoftFlacEncoder(name=%s)", name);
initPorts();
@@ -354,55 +355,55 @@
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ FLAC__bool ok = true;
+
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty()) {
+ if (!inQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw EOS on buffer of size %u", inHeader->nFilledLen);
+ mSawInputEOS = true;
+ }
+
+ if (inHeader->nFilledLen > kMaxInputBufferSize) {
+ ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ assert(mNumChannels != 0);
+ mEncoderWriteData = true;
+ mEncoderReturnedEncodedData = false;
+ mEncoderReturnedNbBytes = 0;
+ mCurrentInputTimeStamp = inHeader->nTimeStamp;
+
+ const unsigned nbInputFrames = inHeader->nFilledLen / (2 * mNumChannels);
+ const unsigned nbInputSamples = inHeader->nFilledLen / 2;
+ const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer);
+
+ CHECK_LE(nbInputSamples, 2 * kMaxNumSamplesPerFrame);
+ for (unsigned i=0 ; i < nbInputSamples ; i++) {
+ mInputBufferPcm32[i] = (FLAC__int32) pcm16[i];
+ }
+ ALOGV(" about to encode %u samples per channel", nbInputFrames);
+ ok = FLAC__stream_encoder_process_interleaved(
+ mFlacStreamEncoder,
+ mInputBufferPcm32,
+ nbInputFrames /*samples per channel*/ );
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
-
- return;
- }
-
- if (inHeader->nFilledLen > kMaxInputBufferSize) {
- ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
- }
-
- assert(mNumChannels != 0);
- mEncoderWriteData = true;
- mEncoderReturnedEncodedData = false;
- mEncoderReturnedNbBytes = 0;
- mCurrentInputTimeStamp = inHeader->nTimeStamp;
-
- const unsigned nbInputFrames = inHeader->nFilledLen / (2 * mNumChannels);
- const unsigned nbInputSamples = inHeader->nFilledLen / 2;
- const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer);
-
- CHECK_LE(nbInputSamples, 2 * kMaxNumSamplesPerFrame);
- for (unsigned i=0 ; i < nbInputSamples ; i++) {
- mInputBufferPcm32[i] = (FLAC__int32) pcm16[i];
- }
- ALOGV(" about to encode %u samples per channel", nbInputFrames);
- FLAC__bool ok = FLAC__stream_encoder_process_interleaved(
- mFlacStreamEncoder,
- mInputBufferPcm32,
- nbInputFrames /*samples per channel*/ );
-
if (ok) {
if (mEncoderReturnedEncodedData && (mEncoderReturnedNbBytes != 0)) {
ALOGV(" dequeueing buffer on output port after writing data");
@@ -414,6 +415,21 @@
mEncoderReturnedEncodedData = false;
} else {
ALOGV(" encoder process_interleaved returned without data to write");
+ if (mSawInputEOS && !mSentOutputEOS) {
+ ALOGV("finishing encoder");
+ mSentOutputEOS = true;
+ FLAC__stream_encoder_finish(mFlacStreamEncoder);
+ if (mEncoderReturnedEncodedData && (mEncoderReturnedNbBytes != 0)) {
+ ALOGV(" dequeueing residual buffer on output port after writing data");
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ mEncoderReturnedEncodedData = false;
+ }
+ }
}
} else {
ALOGE(" error encountered during encoding");
@@ -422,11 +438,6 @@
return;
}
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
}
}
@@ -438,16 +449,22 @@
ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%zu, samples=%u, curr_frame=%u)",
bytes, samples, current_frame);
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
if (samples == 0) {
- ALOGI(" saving %zu bytes of header", bytes);
- memcpy(mHeader + mHeaderOffset, buffer, bytes);
- mHeaderOffset += bytes;// will contain header size when finished receiving header
+ ALOGV("saving %zu bytes of header", bytes);
+ if (mHeaderOffset + bytes > sizeof(mHeader) || mHeaderComplete) {
+ ALOGW("header is too big, or header already received");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ } else {
+ memcpy(mHeader + mHeaderOffset, buffer, bytes);
+ mHeaderOffset += bytes;// will contain header size when finished receiving header
+ if (buffer[0] & 0x80) {
+ mHeaderComplete = true;
+ }
+ }
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}
-#endif
-
if ((samples == 0) || !mEncoderWriteData) {
// called by the encoder because there's header data to save, but it's not the role
// of this component (unless WRITE_FLAC_HEADER_IN_FIRST_BUFFER is defined)
@@ -460,16 +477,23 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
- if (!mWroteHeader) {
- ALOGI(" writing %d bytes of header on output port", mHeaderOffset);
+ if (mHeaderComplete && !mWroteHeader) {
+ ALOGV(" writing %d bytes of header on output port", mHeaderOffset);
memcpy(outHeader->pBuffer + outHeader->nOffset + outHeader->nFilledLen,
mHeader, mHeaderOffset);
outHeader->nFilledLen += mHeaderOffset;
- outHeader->nOffset += mHeaderOffset;
mWroteHeader = true;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
+ notifyFillBufferDone(outHeader);
+ outInfo = NULL;
+ outHeader = NULL;
+ // get the next buffer for the rest of the data
+ CHECK(!outQueue.empty());
+ outInfo = *outQueue.begin();
+ outHeader = outInfo->mHeader;
}
-#endif
// write encoded data
ALOGV(" writing %zu bytes of encoded data on output port", bytes);
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
index f4f0655..64a6b1e 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
@@ -22,10 +22,6 @@
#include "FLAC/stream_encoder.h"
-// use this symbol to have the first output buffer start with FLAC frame header so a dump of
-// all the output buffers can be opened as a .flac file
-//#define WRITE_FLAC_HEADER_IN_FIRST_BUFFER
-
namespace android {
struct SoftFlacEncoder : public SimpleSoftOMXComponent {
@@ -62,6 +58,8 @@
// should the data received by the callback be written to the output port
bool mEncoderWriteData;
bool mEncoderReturnedEncodedData;
+ bool mSawInputEOS;
+ bool mSentOutputEOS;
size_t mEncoderReturnedNbBytes;
OMX_TICKS mCurrentInputTimeStamp;
@@ -85,11 +83,10 @@
// before passing the input data to the encoder
FLAC__int32* mInputBufferPcm32;
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
unsigned mHeaderOffset;
+ bool mHeaderComplete;
bool mWroteHeader;
char mHeader[128];
-#endif
DISALLOW_EVIL_CONSTRUCTORS(SoftFlacEncoder);
};
diff --git a/media/libstagefright/flac/dec/FLACDecoder.cpp b/media/libstagefright/flac/dec/FLACDecoder.cpp
index e0e9211..a2b6ab7 100644
--- a/media/libstagefright/flac/dec/FLACDecoder.cpp
+++ b/media/libstagefright/flac/dec/FLACDecoder.cpp
@@ -423,22 +423,16 @@
short *outBuffer, size_t *outBufferLen) {
ALOGV("decodeOneFrame: input size(%zu)", inBufferLen);
- if (inBufferLen == 0) {
- ALOGV("decodeOneFrame: no input data");
- if (outBufferLen) {
- *outBufferLen = 0;
- }
- return OK;
- }
-
if (!mStreamInfoValid) {
ALOGW("decodeOneFrame: no streaminfo metadata block");
}
- status_t err = addDataToBuffer(inBuffer, inBufferLen);
- if (err != OK) {
- ALOGW("decodeOneFrame: addDataToBuffer returns error %d", err);
- return err;
+ if (inBufferLen != 0) {
+ status_t err = addDataToBuffer(inBuffer, inBufferLen);
+ if (err != OK) {
+ ALOGW("decodeOneFrame: addDataToBuffer returns error %d", err);
+ return err;
+ }
}
mWriteRequested = true;
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index f55de64..3b84018 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -944,4 +944,47 @@
return mItems[index].mName;
}
+status_t AMessage::setEntryNameAt(size_t index, const char *name) {
+ if (index >= mNumItems) {
+ return BAD_INDEX;
+ }
+ if (name == nullptr) {
+ return BAD_VALUE;
+ }
+ if (!strcmp(name, mItems[index].mName)) {
+ return OK; // name has not changed
+ }
+ size_t len = strlen(name);
+ if (findItemIndex(name, len) < mNumItems) {
+ return ALREADY_EXISTS;
+ }
+ delete[] mItems[index].mName;
+ mItems[index].mName = nullptr;
+ mItems[index].setName(name, len);
+ return OK;
+}
+
+status_t AMessage::removeEntryAt(size_t index) {
+ if (index >= mNumItems) {
+ return BAD_INDEX;
+ }
+ // delete entry data and objects
+ --mNumItems;
+ delete[] mItems[index].mName;
+ mItems[index].mName = nullptr;
+ freeItemValue(&mItems[index]);
+
+ // swap entry with last entry and clear last entry's data
+ if (index < mNumItems) {
+ mItems[index] = mItems[mNumItems];
+ mItems[mNumItems].mName = nullptr;
+ mItems[mNumItems].mType = kTypeInt32;
+ }
+ return OK;
+}
+
+size_t AMessage::findEntryByName(const char *name) const {
+ return name == nullptr ? countEntries() : findItemIndex(name, strlen(name));
+}
+
} // namespace android
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index b343c16..f663542 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -24,10 +24,12 @@
header_libs: [
"libhardware_headers",
"libstagefright_foundation_headers",
+ "media_plugin_headers",
],
export_header_lib_headers: [
"libstagefright_foundation_headers",
+ "media_plugin_headers",
],
export_shared_lib_headers: [
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 8580eb5..d90a0de 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -183,6 +183,36 @@
size_t countEntries() const;
const char *getEntryNameAt(size_t index, Type *type) const;
+ /**
+ * Finds an entry by name and returns its index.
+ *
+ * \retval countEntries() if the entry is not found.
+ */
+ size_t findEntryByName(const char *name) const;
+
+ /**
+ * Sets the name of an entry based on index.
+ *
+ * \param index index of the entry
+ * \param name (new) name of the entry
+ *
+ * \retval OK the name was set successfully
+ * \retval BAD_INDEX invalid index
+ * \retval BAD_VALUE name is invalid (null)
+ * \retval ALREADY_EXISTS name is already used by another entry
+ */
+ status_t setEntryNameAt(size_t index, const char *name);
+
+ /**
+ * Removes an entry based on index. The last entry is moved into the freed slot,
+ * so entry order is not preserved.
+ *
+ * \param index index of the entry
+ *
+ * \retval OK the entry was removed successfully
+ * \retval BAD_INDEX invalid index
+ */
+ status_t removeEntryAt(size_t index);
+
protected:
virtual ~AMessage();
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index ef8de1f..48a1224 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -320,7 +320,9 @@
MediaAnalyticsItem *mAnalyticsItem;
void initAnalyticsItem();
+ void updateAnalyticsItem();
void flushAnalyticsItem();
+ void updateEphemeralAnalytics(MediaAnalyticsItem *item);
sp<AMessage> mOutputFormat;
sp<AMessage> mInputFormat;
@@ -441,6 +443,63 @@
void onReleaseCrypto(const sp<AMessage>& msg);
+ // managing time-of-flight aka latency
+ typedef struct {
+ int64_t presentationUs;
+ int64_t startedNs;
+ } BufferFlightTiming_t;
+ std::deque<BufferFlightTiming_t> mBuffersInFlight;
+ Mutex mLatencyLock;
+ int64_t mLatencyUnknown; // buffers for which we couldn't calculate latency
+
+ void statsBufferSent(int64_t presentationUs);
+ void statsBufferReceived(int64_t presentationUs);
+
+ enum {
+ // the default shape of our latency histogram buckets
+ // XXX: should these be configurable in some way?
+ kLatencyHistBuckets = 20,
+ kLatencyHistWidth = 2000,
+ kLatencyHistFloor = 2000,
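+ // with these defaults the buckets span 2000 us .. 42000 us in 2000 us steps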
+
+ // how many samples are in the 'recent latency' histogram
+ // 300 frames = 5 sec @ 60fps or ~12 sec @ 24fps
+ kRecentLatencyFrames = 300,
+
+ // how we initialize mRecentSamples
+ kRecentSampleInvalid = -1,
+ };
+
+ int64_t mRecentSamples[kRecentLatencyFrames];
+ int mRecentHead;
+ Mutex mRecentLock;
+
+ class Histogram {
+ public:
+ Histogram() : mFloor(0), mWidth(0), mBelow(0), mAbove(0),
+ mMin(INT64_MAX), mMax(INT64_MIN), mSum(0), mCount(0),
+ mBucketCount(0), mBuckets(NULL) {};
+ ~Histogram() { clear(); };
+ void clear() { if (mBuckets != NULL) free(mBuckets); mBuckets = NULL; };
+ bool setup(int nbuckets, int64_t width, int64_t floor = 0);
+ void insert(int64_t sample);
+ int64_t getMin() const { return mMin; }
+ int64_t getMax() const { return mMax; }
+ int64_t getCount() const { return mCount; }
+ int64_t getSum() const { return mSum; }
+ int64_t getAvg() const { return mSum / (mCount == 0 ? 1 : mCount); }
+ std::string emit();
+ private:
+ int64_t mFloor, mCeiling, mWidth;
+ int64_t mBelow, mAbove;
+ int64_t mMin, mMax, mSum, mCount;
+
+ int mBucketCount;
+ int64_t *mBuckets;
+ };
+
+ Histogram mLatencyHist;
+
DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
};
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d8c41d2..28524b0 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -49,10 +49,8 @@
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
- AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
- AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
- AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
@@ -79,6 +77,8 @@
AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
AMediaCodecActionCode_isRecoverable; # introduced=28
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
index e099d95..4ec6042 100644
--- a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
@@ -1242,7 +1242,8 @@
// Ask server whether the controller is trusted.
// App cannot know this because apps cannot query enabled notification listener for
// another package, but system server can do.
- mIsTrusted = manager.isTrusted(packageName, pid, uid);
+ mIsTrusted = manager.isTrustedForMediaControl(
+ new MediaSessionManager.RemoteUserInfo(packageName, pid, uid));
}
@Override
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
index b2acc26..97279d6 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -284,14 +284,8 @@
mSurfaceView.setLayoutParams(params);
mTextureView.setSurfaceListener(this);
mSurfaceView.setSurfaceListener(this);
-
- // TODO: Choose TextureView when SurfaceView cannot be created.
- // Choose surface view by default
- mTextureView.setVisibility(View.GONE);
- mSurfaceView.setVisibility(View.VISIBLE);
mInstance.addView(mTextureView);
mInstance.addView(mSurfaceView);
- mCurrentView = mSurfaceView;
mSubtitleView = new SubtitleView(mInstance.getContext());
mSubtitleView.setLayoutParams(params);
@@ -309,16 +303,22 @@
"http://schemas.android.com/apk/res/android",
"enableSubtitle", false);
+ // TODO: Choose TextureView when SurfaceView cannot be created.
+ // Choose surface view by default
int viewType = (attrs == null) ? VideoView2.VIEW_TYPE_SURFACEVIEW
: attrs.getAttributeIntValue(
"http://schemas.android.com/apk/res/android",
- "viewType", 0);
- if (viewType == 0) {
+ "viewType", VideoView2.VIEW_TYPE_SURFACEVIEW);
+ if (viewType == VideoView2.VIEW_TYPE_SURFACEVIEW) {
Log.d(TAG, "viewType attribute is surfaceView.");
- // TODO: implement
- } else if (viewType == 1) {
+ mTextureView.setVisibility(View.GONE);
+ mSurfaceView.setVisibility(View.VISIBLE);
+ mCurrentView = mSurfaceView;
+ } else if (viewType == VideoView2.VIEW_TYPE_TEXTUREVIEW) {
Log.d(TAG, "viewType attribute is textureView.");
- // TODO: implement
+ mTextureView.setVisibility(View.VISIBLE);
+ mSurfaceView.setVisibility(View.GONE);
+ mCurrentView = mTextureView;
}
MediaRouteSelector.Builder builder = new MediaRouteSelector.Builder();
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ea06b6c..b38d37f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -753,8 +753,8 @@
output.notificationFrameCount = input.notificationFrameCount;
output.flags = input.flags;
- track = thread->createTrack_l(client, streamType, &output.sampleRate, input.config.format,
- input.config.channel_mask,
+ track = thread->createTrack_l(client, streamType, input.attr, &output.sampleRate,
+ input.config.format, input.config.channel_mask,
&output.frameCount, &output.notificationFrameCount,
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
@@ -1673,7 +1673,7 @@
output.frameCount = input.frameCount;
output.notificationFrameCount = input.notificationFrameCount;
- recordTrack = thread->createRecordTrack_l(client, &output.sampleRate,
+ recordTrack = thread->createRecordTrack_l(client, input.attr, &output.sampleRate,
input.config.format, input.config.channel_mask,
&output.frameCount, sessionId,
&output.notificationFrameCount,
@@ -1962,39 +1962,10 @@
status_t AudioFlinger::getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
{
- // Fake data
- size_t fakeNum = 2;
- audio_devices_t fakeTypes[] = { AUDIO_DEVICE_IN_BUILTIN_MIC, AUDIO_DEVICE_IN_BACK_MIC };
- for (size_t i = 0; i < fakeNum; i++) {
- struct audio_microphone_characteristic_t characteristics;
- sprintf(characteristics.device_id, "microphone:%zu", i);
- characteristics.device = fakeTypes[i];
- sprintf(characteristics.address, "");
- characteristics.location = AUDIO_MICROPHONE_LOCATION_MAINBODY;
- characteristics.group = 0;
- characteristics.index_in_the_group = i;
- characteristics.sensitivity = 1.0f;
- characteristics.max_spl = 100.0f;
- characteristics.min_spl = 0.0f;
- characteristics.directionality = AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
- characteristics.num_frequency_responses = 5 - i;
- for (size_t j = 0; j < characteristics.num_frequency_responses; j++) {
- characteristics.frequency_responses[0][j] = 100.0f - j;
- characteristics.frequency_responses[1][j] = 100.0f + j;
- }
- for (size_t j = 0; j < AUDIO_CHANNEL_COUNT_MAX; j++) {
- characteristics.channel_mapping[j] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
- }
- characteristics.geometric_location.x = 0.1f;
- characteristics.geometric_location.y = 0.2f;
- characteristics.geometric_location.z = 0.3f;
- characteristics.orientation.x = 0.0f;
- characteristics.orientation.y = 1.0f;
- characteristics.orientation.z = 0.0f;
- media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(characteristics);
- microphones->push_back(microphoneInfo);
- }
- return NO_ERROR;
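+    // Forward the query to the primary audio HAL device instead of returning fake data.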
+ AutoMutex lock(mHardwareLock);
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
+ status_t status = dev->getMicrophones(microphones);
+ return status;
}
// setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index 366a164..a210a1b 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -23,6 +23,7 @@
class MmapTrack : public TrackBase {
public:
MmapTrack(ThreadBase *thread,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 6454be5..ea01a25 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -25,6 +25,7 @@
Track( PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 1733ef5..2b993ee 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -24,6 +24,7 @@
public:
RecordTrack(RecordThread *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 62e9fe7..b5b50f8 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -57,6 +57,7 @@
#include <powermanager/PowerManager.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
#include "AudioFlinger.h"
#include "FastMixer.h"
@@ -1554,6 +1555,7 @@
mActiveTracksGeneration++;
mLatestActiveTrack = track;
++mBatteryCounter[track->uid()].second;
+ mHasChanged = true;
return mActiveTracks.add(track);
}
@@ -1568,6 +1570,7 @@
mActiveTracksGeneration++;
--mBatteryCounter[track->uid()].second;
// mLatestActiveTrack is not cleared even if is the same as track.
+ mHasChanged = true;
return index;
}
@@ -1578,6 +1581,7 @@
logTrack("clear", track);
}
mLastActiveTracksGeneration = mActiveTracksGeneration;
+ if (!mActiveTracks.empty()) { mHasChanged = true; }
mActiveTracks.clear();
mLatestActiveTrack.clear();
mBatteryCounter.clear();
@@ -1615,6 +1619,13 @@
}
template <typename T>
+bool AudioFlinger::ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
+ const bool hasChanged = mHasChanged;
+ mHasChanged = false;
+ return hasChanged;
+}
+
+template <typename T>
void AudioFlinger::ThreadBase::ActiveTracks<T>::logTrack(
const char *funcName, const sp<T> &track) const {
if (mLocalLog != nullptr) {
@@ -1847,6 +1858,7 @@
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -2125,7 +2137,7 @@
}
}
- track = new Track(this, client, streamType, sampleRate, format,
+ track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
@@ -2609,6 +2621,24 @@
}
}
+void AudioFlinger::PlaybackThread::updateMetadata_l()
+{
+ // TODO: add volume support
+ if (mOutput == nullptr || mOutput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamOutHalInterface::SourceMetadata metadata;
+ for (const sp<Track> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .usage = track->attributes().usage,
+ .content_type = track->attributes().content_type,
+ .gain = 1,
+ });
+ }
+ mOutput->stream->updateSourceMetadata(metadata);
+}
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
{
@@ -3306,6 +3336,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
// or modified if an effect is created or deleted
@@ -6117,6 +6149,17 @@
return true;
}
+void AudioFlinger::DuplicatingThread::updateMetadata_l()
+{
+ // TODO: The duplicated track metadata are stored in other threads
+ // (accessible through mActiveTracks::OutputTrack::thread()::mActiveTracks::Track::attributes())
+ // but this information can be mutated at any time by the owning threads.
+    // Taking the lock of any of the other owning threads is not possible due to timing constraints.
+    // Similarly, the other threads cannot push their metadata into this thread, as a cross
+    // deadlock would be possible.
+    // A lock-free structure needs to be used to share the metadata (maybe an atomic shared_ptr?).
+}
+
uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
{
return (mWaitTimeMs * 1000) / 2;
@@ -6444,6 +6487,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
if (allStopped) {
standbyIfNotAlreadyInStandby();
}
@@ -6808,6 +6853,7 @@
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
+ const audio_attributes_t& attr,
uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -6941,7 +6987,7 @@
{ // scope for mLock
Mutex::Autolock _l(mLock);
- track = new RecordTrack(this, client, sampleRate,
+ track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, uid,
*flags, TrackBase::TYPE_DEFAULT, portId);
@@ -7129,42 +7175,25 @@
{
ALOGV("RecordThread::getActiveMicrophones");
AutoMutex _l(mLock);
- // Fake data
- struct audio_microphone_characteristic_t characteristic;
- sprintf(characteristic.device_id, "builtin_mic");
- characteristic.device = AUDIO_DEVICE_IN_BUILTIN_MIC;
- sprintf(characteristic.address, "");
- characteristic.location = AUDIO_MICROPHONE_LOCATION_MAINBODY;
- characteristic.group = 0;
- characteristic.index_in_the_group = 0;
- characteristic.sensitivity = 1.0f;
- characteristic.max_spl = 100.0f;
- characteristic.min_spl = 0.0f;
- characteristic.directionality = AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
- characteristic.num_frequency_responses = 5;
- for (size_t i = 0; i < characteristic.num_frequency_responses; i++) {
- characteristic.frequency_responses[0][i] = 100.0f - i;
- characteristic.frequency_responses[1][i] = 100.0f + i;
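+    // Query the input stream HAL for the active microphones instead of returning fake data.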
+ status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
+ return status;
+}
+
+void AudioFlinger::RecordThread::updateMetadata_l()
+{
+ if (mInput == nullptr || mInput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
}
- for (size_t i = 0; i < AUDIO_CHANNEL_COUNT_MAX; i++) {
- characteristic.channel_mapping[i] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+ StreamInHalInterface::SinkMetadata metadata;
+ for (const sp<RecordTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .source = track->attributes().source,
+ .gain = 1, // capture tracks do not have volumes
+ });
}
- audio_microphone_channel_mapping_t channel_mappings[] = {
- AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT,
- AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED,
- };
- for (size_t i = 0; i < mChannelCount; i++) {
- characteristic.channel_mapping[i] = channel_mappings[i % 2];
- }
- characteristic.geometric_location.x = 0.1f;
- characteristic.geometric_location.y = 0.2f;
- characteristic.geometric_location.z = 0.3f;
- characteristic.orientation.x = 0.0f;
- characteristic.orientation.y = 1.0f;
- characteristic.orientation.z = 0.0f;
- media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(characteristic);
- activeMicrophones->push_back(microphoneInfo);
- return NO_ERROR;
+ mInput->stream->updateSinkMetadata(metadata);
}
// destroyTrack_l() must be called with ThreadBase::mLock held
@@ -7994,7 +8023,8 @@
return PERMISSION_DENIED;
}
- sp<MmapTrack> track = new MmapTrack(this, mSampleRate, mFormat, mChannelMask, mSessionId,
+    // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes?
+ sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
client.clientUid, client.clientPid, portId);
mActiveTracks.add(track);
@@ -8130,6 +8160,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
lockEffectChains_l(effectChains);
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
@@ -8677,6 +8709,24 @@
}
}
+void AudioFlinger::MmapPlaybackThread::updateMetadata_l()
+{
+ if (mOutput == nullptr || mOutput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamOutHalInterface::SourceMetadata metadata;
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .usage = track->attributes().usage,
+ .content_type = track->attributes().content_type,
+ .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
+ });
+ }
+ mOutput->stream->updateSourceMetadata(metadata);
+}
+
void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
{
if (!mMasterMute) {
@@ -8721,4 +8771,22 @@
mInput = NULL;
return input;
}
+
+void AudioFlinger::MmapCaptureThread::updateMetadata_l()
+{
+ if (mInput == nullptr || mInput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamInHalInterface::SinkMetadata metadata;
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .source = track->attributes().source,
+ .gain = 1, // capture tracks do not have volumes
+ });
+ }
+ mInput->stream->updateSinkMetadata(metadata);
+}
+
} // namespace android
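
A note on the DuplicatingThread::updateMetadata_l stub above: one possible shape for the lock-free sharing its TODO hints at is an atomically swapped, immutable snapshot. The sketch below is purely illustrative and not part of this patch; TrackMetadataSnapshot and MetadataPublisher are hypothetical names and the per-track payload is simplified.

    #include <atomic>
    #include <memory>
    #include <vector>

    // Immutable once published; the real payload would carry usage/content_type/gain per track.
    struct TrackMetadataSnapshot {
        std::vector<int> trackUsages;
    };

    class MetadataPublisher {
    public:
        // Owning thread: publish a fresh snapshot whenever its active tracks change.
        void publish(std::shared_ptr<const TrackMetadataSnapshot> snapshot) {
            std::atomic_store(&mSnapshot, std::move(snapshot));
        }
        // Duplicating thread: read the latest snapshot without taking the owner's lock.
        std::shared_ptr<const TrackMetadataSnapshot> read() const {
            return std::atomic_load(&mSnapshot);
        }
    private:
        std::shared_ptr<const TrackMetadataSnapshot> mSnapshot;
    };
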
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7cd46a7..bb81224 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -425,6 +425,9 @@
// check if some effects must be suspended when an effect chain is added
void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
+ // sends the metadata of the active tracks to the HAL
+ virtual void updateMetadata_l() = 0;
+
String16 getWakeLockTag();
virtual void preExit() { }
@@ -563,6 +566,10 @@
// periodically called in the threadLoop() to update power state uids.
void updatePowerState(sp<ThreadBase> thread, bool force = false);
+ /** @return true if the active tracks have changed since the last time
+ * this function was called or the vector was created. */
+ bool readAndClearHasChanged();
+
private:
void logTrack(const char *funcName, const sp<T> &track) const;
@@ -581,6 +588,8 @@
int mLastActiveTracksGeneration;
wp<T> mLatestActiveTrack; // latest track added to ActiveTracks
SimpleLog * const mLocalLog;
+ // If the active tracks have changed since last call to readAndClearHasChanged
+ bool mHasChanged = false;
};
SimpleLog mLocalLog;
@@ -706,6 +715,7 @@
sp<Track> createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t *sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -917,6 +927,7 @@
void removeTrack_l(const sp<Track>& track);
void readOutputParameters_l();
+ void updateMetadata_l() override;
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -1275,6 +1286,8 @@
void addOutputTrack(MixerThread* thread);
void removeOutputTrack(MixerThread* thread);
uint32_t waitTimeMs() const { return mWaitTimeMs; }
+
+ void updateMetadata_l() override;
protected:
virtual uint32_t activeSleepTimeUs() const;
@@ -1387,6 +1400,7 @@
sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
+ const audio_attributes_t& attr,
uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -1461,6 +1475,8 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ void updateMetadata_l() override;
+
private:
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
@@ -1658,6 +1674,8 @@
virtual bool isOutput() const override { return true; }
+ void updateMetadata_l() override;
+
protected:
audio_stream_type_t mStreamType;
@@ -1684,6 +1702,8 @@
virtual bool isOutput() const override { return false; }
+ void updateMetadata_l() override;
+
protected:
AudioStreamIn* mInput;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index a7e966f..ccfb69f 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -61,6 +61,7 @@
TrackBase(ThreadBase *thread,
const sp<Client>& client,
+ const audio_attributes_t& mAttr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -97,6 +98,7 @@
virtual void invalidate() { mIsInvalid = true; }
bool isInvalid() const { return mIsInvalid; }
+ audio_attributes_t attributes() const { return mAttr; }
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
@@ -188,6 +190,7 @@
size_t mBufferSize; // size of mBuffer in bytes
// we don't really need a lock for these
track_state mState;
+ const audio_attributes_t mAttr;
const uint32_t mSampleRate; // initial sample rate only; for tracks which
// support dynamic rates, the current value is in control block
const audio_format_t mFormat;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9b93939..44ce3aa 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -63,6 +63,7 @@
AudioFlinger::ThreadBase::TrackBase::TrackBase(
ThreadBase *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -81,6 +82,7 @@
mCblk(NULL),
// mBuffer, mBufferSize
mState(IDLE),
+ mAttr(attr),
mSampleRate(sampleRate),
mFormat(format),
mChannelMask(channelMask),
@@ -372,6 +374,7 @@
PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -384,7 +387,7 @@
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId)
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+ : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
(sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, uid, true /*isOut*/,
@@ -1259,6 +1262,7 @@
size_t frameCount,
uid_t uid)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
+ audio_attributes_t{} /* currently unused for output track */,
sampleRate, format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
@@ -1461,6 +1465,7 @@
size_t bufferSize,
audio_output_flags_t flags)
: Track(playbackThread, NULL, streamType,
+ audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
@@ -1595,6 +1600,7 @@
AudioFlinger::RecordThread::RecordTrack::RecordTrack(
RecordThread *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -1606,7 +1612,7 @@
audio_input_flags_t flags,
track_type type,
audio_port_handle_t portId)
- : TrackBase(thread, client, sampleRate, format,
+ : TrackBase(thread, client, attr, sampleRate, format,
channelMask, frameCount, buffer, bufferSize, sessionId, uid, false /*isOut*/,
(type == TYPE_DEFAULT) ?
((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
@@ -1821,7 +1827,9 @@
void *buffer,
size_t bufferSize,
audio_input_flags_t flags)
- : RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
+ : RecordTrack(recordThread, NULL,
+ audio_attributes_t{} /* currently unused for patch track */,
+ sampleRate, format, channelMask, frameCount,
buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
@@ -1882,6 +1890,7 @@
AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -1889,7 +1898,7 @@
uid_t uid,
pid_t pid,
audio_port_handle_t portId)
- : TrackBase(thread, NULL, sampleRate, format,
+ : TrackBase(thread, NULL, attr, sampleRate, format,
channelMask, (size_t)0 /* frameCount */,
nullptr /* buffer */, (size_t)0 /* bufferSize */,
sessionId, uid, false /* isOut */,
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 4862684..fc012a2 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -38,6 +38,7 @@
DEVICE_CATEGORY_SPEAKER,
DEVICE_CATEGORY_EARPIECE,
DEVICE_CATEGORY_EXT_MEDIA,
+ DEVICE_CATEGORY_HEARING_AID,
DEVICE_CATEGORY_CNT
};
@@ -125,8 +126,9 @@
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
case AUDIO_DEVICE_OUT_USB_HEADSET:
- case AUDIO_DEVICE_OUT_HEARING_AID:
return DEVICE_CATEGORY_HEADSET;
+ case AUDIO_DEVICE_OUT_HEARING_AID:
+ return DEVICE_CATEGORY_HEARING_AID;
case AUDIO_DEVICE_OUT_LINE:
case AUDIO_DEVICE_OUT_AUX_DIGITAL:
case AUDIO_DEVICE_OUT_USB_DEVICE:
diff --git a/services/audiopolicy/common/managerdefinitions/include/Gains.h b/services/audiopolicy/common/managerdefinitions/include/Gains.h
index 8332af9..cb229a4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Gains.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Gains.h
@@ -52,6 +52,7 @@
static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
+ static const VolumeCurvePoint sHearingAidVolumeCurve[Volume::VOLCNT];
// default volume curves per stream and device category. See initializeVolumeCurves()
static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
index b2dafdd..6407a17 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
@@ -113,86 +113,104 @@
{0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
};
+const VolumeCurvePoint
+Gains::sHearingAidVolumeCurve[Volume::VOLCNT] = {
+ {1, -128.0f}, {20, -80.0f}, {60, -40.0f}, {100, 0.0f}
+};
+
const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
[DEVICE_CATEGORY_CNT] = {
{ // AUDIO_STREAM_VOICE_CALL
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_SYSTEM
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_RING
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_MUSIC
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ALARM
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_NOTIFICATION
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_BLUETOOTH_SCO
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ENFORCED_AUDIBLE
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_DTMF
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_TTS
// "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sSilentVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ACCESSIBILITY
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_REROUTING
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_PATCH
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
};
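
To make the new hearing aid curve concrete, the following self-contained sketch (not code from this patch) shows how a curve table of (index, dB) points maps a normalized volume index in [0, 100] to an attenuation, assuming piecewise-linear interpolation between adjacent points, which is effectively what the policy engine does:

    #include <cstddef>

    // Minimal stand-in for the VolumeCurvePoint entries used above.
    struct CurvePoint { int index; float dbAttenuation; };

    // Piecewise-linear interpolation between adjacent curve points.
    static float curveToDb(const CurvePoint* curve, size_t count, int idxPercent) {
        if (idxPercent <= curve[0].index) return curve[0].dbAttenuation;
        for (size_t i = 1; i < count; i++) {
            if (idxPercent <= curve[i].index) {
                const float span = static_cast<float>(curve[i].index - curve[i - 1].index);
                const float frac = (idxPercent - curve[i - 1].index) / span;
                return curve[i - 1].dbAttenuation
                        + frac * (curve[i].dbAttenuation - curve[i - 1].dbAttenuation);
            }
        }
        return curve[count - 1].dbAttenuation;
    }

With sHearingAidVolumeCurve ({1, -128}, {20, -80}, {60, -40}, {100, 0}), an index of 40 falls halfway between the second and third points and evaluates to roughly -60 dB.
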
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 7273d0d..6f48eae 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -29,6 +29,7 @@
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+ MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEARING_AID),
TERMINATOR
};
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index 43a47b0..ec64a7c 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -43,6 +43,8 @@
</volume>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -55,6 +57,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -67,6 +71,8 @@
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -75,18 +81,22 @@
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEADSET"
- ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_SPEAKER">
- <point>1,-2970</point>
+ <point>0,-2970</point>
<point>33,-2010</point>
<point>66,-1020</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
- ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
- ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -99,6 +109,8 @@
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>0,-4200</point>
<point>33,-2800</point>
@@ -119,6 +131,8 @@
</volume>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -131,6 +145,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -143,6 +159,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -151,14 +169,18 @@
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="SILENT_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEADSET"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_SPEAKER"
- ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EARPIECE"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -167,6 +189,8 @@
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -175,5 +199,7 @@
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="FULL_SCALE_VOLUME_CURVE"/>
</volumes>
diff --git a/services/audiopolicy/config/default_volume_tables.xml b/services/audiopolicy/config/default_volume_tables.xml
index 9a22b1d..207be41 100644
--- a/services/audiopolicy/config/default_volume_tables.xml
+++ b/services/audiopolicy/config/default_volume_tables.xml
@@ -67,4 +67,63 @@
<point>60,-2100</point>
<point>100,-1000</point>
</reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
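+    <!-- e.g. DEFAULT_NON_MUTABLE_VOLUME_CURVE starts at -5800 mB (-58 dB): quiet but audible -->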
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+    <!-- Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+    <!-- Default non-mutable Earpiece Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
</volumes>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 92a2030..8f6db46 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -3777,6 +3777,16 @@
ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
status = NO_INIT;
}
+    // If a microphone's address is empty, set it according to the device type
+ for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+ if (mAvailableInputDevices[i]->mAddress.isEmpty()) {
+ if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+ mAvailableInputDevices[i]->mAddress = String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
+ mAvailableInputDevices[i]->mAddress = String8(AUDIO_BACK_MICROPHONE_ADDRESS);
+ }
+ }
+ }
if (mPrimaryOutput == 0) {
ALOGE("Failed to open primary output");
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 5d90408..082923a 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -505,91 +505,129 @@
| ActivityManager::UID_OBSERVER_ACTIVE,
ActivityManager::PROCESS_STATE_UNKNOWN,
String16("audioserver"));
+ status_t res = am.linkToDeath(this);
+ if (!res) {
+ Mutex::Autolock _l(mLock);
+ mObserverRegistered = true;
+ } else {
+ ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
+ am.unregisterUidObserver(this);
+ }
}
void AudioPolicyService::UidPolicy::unregisterSelf() {
ActivityManager am;
+ am.unlinkToDeath(this);
am.unregisterUidObserver(this);
+ Mutex::Autolock _l(mLock);
+ mObserverRegistered = false;
}
-void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
- onUidIdle(uid, disabled);
-}
-
-void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
- {
- Mutex::Autolock _l(mUidLock);
- mActiveUids.insert(uid);
- }
- sp<AudioPolicyService> service = mService.promote();
- if (service != nullptr) {
- service->setRecordSilenced(uid, false);
- }
-}
-
-void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
- bool deleted = false;
- {
- Mutex::Autolock _l(mUidLock);
- if (mActiveUids.erase(uid) > 0) {
- deleted = true;
- }
- }
- if (deleted) {
- sp<AudioPolicyService> service = mService.promote();
- if (service != nullptr) {
- service->setRecordSilenced(uid, true);
- }
- }
-}
-
-void AudioPolicyService::UidPolicy::addOverrideUid(uid_t uid, bool active) {
- updateOverrideUid(uid, active, true);
-}
-
-void AudioPolicyService::UidPolicy::removeOverrideUid(uid_t uid) {
- updateOverrideUid(uid, false, false);
-}
-
-void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
- bool wasActive = false;
- bool isActive = false;
- {
- Mutex::Autolock _l(mUidLock);
- wasActive = isUidActiveLocked(uid);
- mOverrideUids.erase(uid);
- if (insert) {
- mOverrideUids.insert(std::pair<uid_t, bool>(uid, active));
- }
- isActive = isUidActiveLocked(uid);
- }
- if (wasActive != isActive) {
- sp<AudioPolicyService> service = mService.promote();
- if (service != nullptr) {
- service->setRecordSilenced(uid, !isActive);
- }
- }
+void AudioPolicyService::UidPolicy::binderDied(__unused const wp<IBinder> &who) {
+ Mutex::Autolock _l(mLock);
+ mCachedUids.clear();
+ mObserverRegistered = false;
}
bool AudioPolicyService::UidPolicy::isUidActive(uid_t uid) {
- // Non-app UIDs are considered always active
- if (uid < FIRST_APPLICATION_UID) {
- return true;
+ if (isServiceUid(uid)) return true;
+ bool needToReregister = false;
+ {
+ Mutex::Autolock _l(mLock);
+ needToReregister = !mObserverRegistered;
}
- Mutex::Autolock _l(mUidLock);
- return isUidActiveLocked(uid);
+ if (needToReregister) {
+        // Looks like ActivityManager has died previously; attempt to re-register.
+ registerSelf();
+ }
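+    // Lookup order: explicit override first, then the local cache, then a direct ActivityManager
+    // query whose result is cached.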
+ {
+ Mutex::Autolock _l(mLock);
+ auto overrideIter = mOverrideUids.find(uid);
+ if (overrideIter != mOverrideUids.end()) {
+ return overrideIter->second;
+ }
+        // In the absence of ActivityManager, assume everything is active.
+ if (!mObserverRegistered) return true;
+ auto cacheIter = mCachedUids.find(uid);
+ if (cacheIter != mCachedUids.end()) {
+ return cacheIter->second;
+ }
+ }
+ ActivityManager am;
+ bool active = am.isUidActive(uid, String16("audioserver"));
+ {
+ Mutex::Autolock _l(mLock);
+ mCachedUids.insert(std::pair<uid_t, bool>(uid, active));
+ }
+ return active;
}
-bool AudioPolicyService::UidPolicy::isUidActiveLocked(uid_t uid) {
- // Non-app UIDs are considered always active
- if (uid < FIRST_APPLICATION_UID) {
- return true;
+void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
+ updateUidCache(uid, true, true);
+}
+
+void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
+ updateUidCache(uid, false, false);
+}
+
+void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
+ updateUidCache(uid, false, true);
+}
+
+bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
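+    // Strip the user id (uid % AID_USER_OFFSET) and treat anything below the first application
+    // uid (AID_APP_START) as a service uid; service uids are always reported as active.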
+ return uid % AID_USER_OFFSET < AID_APP_START;
+}
+
+void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
+ sp<AudioPolicyService> service = mService.promote();
+ if (service != nullptr) {
+ service->setRecordSilenced(uid, !active);
}
- auto it = mOverrideUids.find(uid);
- if (it != mOverrideUids.end()) {
- return it->second;
+}
+
+void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
+ if (isServiceUid(uid)) return;
+ bool wasOverridden = false, wasActive = false;
+ {
+ Mutex::Autolock _l(mLock);
+ updateUidLocked(&mOverrideUids, uid, active, insert, &wasOverridden, &wasActive);
}
- return mActiveUids.find(uid) != mActiveUids.end();
+ if (!wasOverridden && insert) {
+ notifyService(uid, active); // Started to override.
+ } else if (wasOverridden && !insert) {
+ notifyService(uid, isUidActive(uid)); // Override ceased, notify with ground truth.
+ } else if (wasActive != active) {
+ notifyService(uid, active); // Override updated.
+ }
+}
+
+void AudioPolicyService::UidPolicy::updateUidCache(uid_t uid, bool active, bool insert) {
+ if (isServiceUid(uid)) return;
+ bool wasActive = false;
+ {
+ Mutex::Autolock _l(mLock);
+ updateUidLocked(&mCachedUids, uid, active, insert, nullptr, &wasActive);
+ // Do not notify service if currently overridden.
+ if (mOverrideUids.find(uid) != mOverrideUids.end()) return;
+ }
+ bool nowActive = active && insert;
+ if (wasActive != nowActive) notifyService(uid, nowActive);
+}
+
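+// Shared helper for mOverrideUids and mCachedUids: reports whether the uid was present and its
+// previous state, then updates, inserts, or erases the entry as requested.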
+void AudioPolicyService::UidPolicy::updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+ uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive) {
+ auto it = uids->find(uid);
+ if (it != uids->end()) {
+ if (wasThere != nullptr) *wasThere = true;
+ if (wasActive != nullptr) *wasActive = it->second;
+ if (insert) {
+ it->second = active;
+ } else {
+ uids->erase(it);
+ }
+ } else if (insert) {
+ uids->insert(std::pair<uid_t, bool>(uid, active));
+ }
}
// ----------- AudioPolicyService::AudioCommandThread implementation ----------
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index bfa3ef4..b3bc12b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -35,7 +35,6 @@
#include "managerdefault/AudioPolicyManager.h"
#include <unordered_map>
-#include <unordered_set>
namespace android {
@@ -264,31 +263,40 @@
// transparently handles recording while the UID transitions between idle/active state
// avoiding to get stuck in a state receiving non-empty buffers while idle or in a state
// receiving empty buffers while active.
- class UidPolicy : public BnUidObserver {
+ class UidPolicy : public BnUidObserver, public virtual IBinder::DeathRecipient {
public:
explicit UidPolicy(wp<AudioPolicyService> service)
- : mService(service) {}
+ : mService(service), mObserverRegistered(false) {}
void registerSelf();
void unregisterSelf();
+ // IBinder::DeathRecipient implementation
+ void binderDied(const wp<IBinder> &who) override;
+
bool isUidActive(uid_t uid);
- void onUidGone(uid_t uid, bool disabled);
- void onUidActive(uid_t uid);
- void onUidIdle(uid_t uid, bool disabled);
+ // BnUidObserver implementation
+ void onUidActive(uid_t uid) override;
+ void onUidGone(uid_t uid, bool disabled) override;
+ void onUidIdle(uid_t uid, bool disabled) override;
- void addOverrideUid(uid_t uid, bool active);
- void removeOverrideUid(uid_t uid);
+ void addOverrideUid(uid_t uid, bool active) { updateOverrideUid(uid, active, true); }
+ void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
private:
- bool isUidActiveLocked(uid_t uid);
+ bool isServiceUid(uid_t uid) const;
+ void notifyService(uid_t uid, bool active);
void updateOverrideUid(uid_t uid, bool active, bool insert);
+ void updateUidCache(uid_t uid, bool active, bool insert);
+ void updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+ uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive);
- Mutex mUidLock;
wp<AudioPolicyService> mService;
- std::unordered_set<uid_t> mActiveUids;
+ Mutex mLock;
+ bool mObserverRegistered;
std::unordered_map<uid_t, bool> mOverrideUids;
+ std::unordered_map<uid_t, bool> mCachedUids;
};
// Thread used for tone playback and to send audio config commands to audio flinger