Merge "Camera: Synchronize access to torch status map" into pi-dev
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index c7d2545..ee3e98e 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -7100,7 +7100,7 @@
/**
* <p>The camera device is a monochrome camera that doesn't contain a color filter array,
- * and the pixel values on U and Y planes are all 128.</p>
+ * and the pixel values on U and V planes are all 128.</p>
*/
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME = 12,
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index ed9534f..73ed8c3 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -55,7 +55,7 @@
status_t ClearKeyCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
@@ -83,7 +83,7 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
- uint64_t appData, CasPluginCallback callback)
+ void *appData, CasPluginCallback callback)
: mCallback(callback), mAppData(appData) {
ALOGV("CTOR");
}
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index b7134e4..42cfb8f 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -44,7 +44,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
@@ -62,7 +62,7 @@
class ClearKeyCasPlugin : public CasPlugin {
public:
- ClearKeyCasPlugin(uint64_t appData, CasPluginCallback callback);
+ ClearKeyCasPlugin(void *appData, CasPluginCallback callback);
virtual ~ClearKeyCasPlugin();
virtual status_t setPrivateData(
@@ -94,7 +94,7 @@
Mutex mKeyFetcherLock;
std::unique_ptr<KeyFetcher> mKeyFetcher;
CasPluginCallback mCallback;
- uint64_t mAppData;
+ void* mAppData;
};
class ClearKeyDescramblerPlugin : public DescramblerPlugin {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 06516b5..8404a83 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -49,7 +49,7 @@
status_t MockCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t /*appData*/,
+ void* /*appData*/,
CasPluginCallback /*callback*/,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 9632492..8106990 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -39,7 +39,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
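The change above switches the CAS appData parameter from uint64_t to void * across the factory and plugin interfaces. Below is a purely hypothetical caller sketch against the base CasFactory interface to show how an opaque context pointer is now passed through; the include path, the context struct, the callback body, and the system id are assumptions and not part of this patch.

// Illustrative only, not part of the patch. Assumes <media/cas/CasAPI.h>
// declares CasFactory, CasPlugin and CasPluginCallback as used by the
// plugin classes above.
#include <media/cas/CasAPI.h>

struct MyCasContext {          // hypothetical client state
    int sessionCount = 0;
};

// Matches the CasPluginCallback shape assumed here; appData round-trips the
// pointer handed to createPlugin() below.
static void myCasCallback(void *appData, int32_t event, int32_t arg,
                          uint8_t *data, size_t size) {
    MyCasContext *context = static_cast<MyCasContext *>(appData);
    (void)context; (void)event; (void)arg; (void)data; (void)size;
}

android::status_t openCasPlugin(android::CasFactory &factory, int32_t systemId,
                                MyCasContext *context, android::CasPlugin **plugin) {
    // With the old uint64_t signature the pointer had to be squeezed into an
    // integer; it is now passed directly as void *.
    return factory.createPlugin(systemId, context, myCasCallback, plugin);
}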
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
index 8098e79..31b8eb5 100644
--- a/include/media/MmapStreamCallback.h
+++ b/include/media/MmapStreamCallback.h
@@ -31,8 +31,9 @@
* The mmap stream should be torn down because conditions that permitted its creation with
* the requested parameters have changed and do not allow it to operate with the requested
* constraints any more.
+ * \param[in] handle handle for the client stream to tear down.
*/
- virtual void onTearDown() = 0;
+ virtual void onTearDown(audio_port_handle_t handle) = 0;
/**
* The volume to be applied to the use case specified when opening the stream has changed
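onTearDown() now identifies which client stream should be torn down. A minimal, hypothetical implementer sketch of the new signature follows; the class name and helper are illustrative, the remaining MmapStreamCallback methods are omitted, and the include path is assumed from this tree.

// Sketch only: assumes <media/MmapStreamCallback.h> from this tree.
#include <media/MmapStreamCallback.h>

class MyMmapClient : public android::MmapStreamCallback {
  public:
    void onTearDown(audio_port_handle_t handle) override {
        // Only the stream identified by 'handle' is torn down, instead of
        // every stream as with the old parameterless callback.
        tearDownStream(handle);   // hypothetical helper
    }
    // Other MmapStreamCallback overrides omitted from this sketch.
  private:
    void tearDownStream(audio_port_handle_t handle);
};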
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
index 92575f2..5f05b42 100644
--- a/media/extractors/aac/Android.bp
+++ b/media/extractors/aac/Android.bp
@@ -9,12 +9,12 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
- "libutils",
],
static_libs: [
+ "libstagefright_foundation",
"libstagefright_metadatautils",
+ "libutils",
],
name: "libaacextractor",
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
index bd8a00c..d962b93 100644
--- a/media/extractors/amr/Android.bp
+++ b/media/extractors/amr/Android.bp
@@ -9,6 +9,9 @@
shared_libs: [
"liblog",
"libmediaextractor",
+ ],
+
+ static_libs: [
"libstagefright_foundation",
],
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 0160ca4..6282793 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -10,11 +10,11 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
],
static_libs: [
"libFLAC",
+ "libstagefright_foundation",
],
name: "libflacextractor",
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 5412e99..fde09df18 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -9,12 +9,12 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation"
],
static_libs: [
"libmedia_midiiowrapper",
"libsonivox",
+ "libstagefright_foundation"
],
name: "libmidiextractor",
relative_install_path: "extractors",
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index c6cd753..681fd35 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -12,14 +12,14 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
- "libutils",
],
static_libs: [
"libstagefright_flacdec",
+ "libstagefright_foundation",
"libstagefright_metadatautils",
"libwebm",
+ "libutils",
],
name: "libmkvextractor",
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
index d93562c..a3aeaca 100644
--- a/media/extractors/mp3/Android.bp
+++ b/media/extractors/mp3/Android.bp
@@ -14,10 +14,10 @@
"liblog",
"libmediaextractor",
"libstagefright_foundation",
- "libutils",
],
static_libs: [
+ "libutils",
"libstagefright_id3",
],
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
index 3fe2336..fa739e8 100644
--- a/media/extractors/mp4/Android.bp
+++ b/media/extractors/mp4/Android.bp
@@ -15,13 +15,13 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
- "libutils",
],
static_libs: [
"libstagefright_esds",
+ "libstagefright_foundation",
"libstagefright_id3",
+ "libutils",
],
cflags: [
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index 26b8251..a1f6e9a 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -1591,10 +1591,9 @@
ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(masterImage.thumbnails[0]);
if (thumbItemIndex < 0) {
- ALOGW("%s: Thumbnail item id %d not found, use master instead",
- __FUNCTION__, masterImage.thumbnails[0]);
- *itemIndex = masterItemIndex;
- return OK;
+ // Do not return the master image in this case, fail it so that the
+ // thumbnail extraction code knows we really don't have it.
+ return INVALID_OPERATION;
}
*itemIndex = thumbItemIndex;
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index b012b5d..5e4a592 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -23,11 +23,11 @@
"liblog",
"libmediaextractor",
"libstagefright_foundation",
- "libutils",
],
static_libs: [
"libstagefright_mpeg2support",
+ "libutils",
],
name: "libmpeg2extractor",
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
index 2c09a5f..7c6fc75 100644
--- a/media/extractors/ogg/Android.bp
+++ b/media/extractors/ogg/Android.bp
@@ -10,11 +10,11 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
- "libutils",
],
static_libs: [
+ "libstagefright_foundation",
+ "libutils",
"libvorbisidec",
],
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index 17836bb..067933e 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -9,11 +9,11 @@
shared_libs: [
"liblog",
"libmediaextractor",
- "libstagefright_foundation",
],
static_libs: [
"libfifo",
+ "libstagefright_foundation",
],
name: "libwavextractor",
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 9dc5f05..67ec244 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -18,6 +18,7 @@
#include <inttypes.h>
+#include <algorithm>
#include <vector>
#include <math.h>
@@ -61,8 +62,8 @@
const float* lensShadingMap) {
uint32_t activeAreaWidth = activeAreaRight - activeAreaLeft;
uint32_t activeAreaHeight = activeAreaBottom - activeAreaTop;
- double spacingV = 1.0 / lsmHeight;
- double spacingH = 1.0 / lsmWidth;
+ double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
+ double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
std::vector<float> redMapVector(lsmWidth * lsmHeight);
float *redMap = redMapVector.data();
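The spacing fix above reflects that an N-sample lens shading axis spanning the active area inclusively has N-1 intervals, so the step is 1/(N-1), and the std::max(1u, ...) guard avoids a division by zero for a degenerate single-sample map. A small standalone arithmetic check with a hypothetical map width:

// Standalone arithmetic check (illustrative width, not from the patch).
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t lsmWidth = 17;                            // hypothetical
    double oldSpacingH = 1.0 / lsmWidth;                     // ~0.0588; last sample lands at ~0.941
    double newSpacingH = 1.0 / std::max(1u, lsmWidth - 1);   // 0.0625; last sample lands at 1.0
    printf("old last sample at %.3f, new last sample at %.3f\n",
           (lsmWidth - 1) * oldSpacingH, (lsmWidth - 1) * newSpacingH);
    return 0;
}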
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index e5ad2d9..c1ff34b 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -26,23 +26,22 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-// TODO support FLOAT
-#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
static const int FRAMES_PER_LINE = 20000;
int main(int argc, const char **argv)
{
- AAudioArgsParser argParser;
- aaudio_result_t result;
- AAudioSimpleRecorder recorder;
- int actualSamplesPerFrame;
- int actualSampleRate;
- aaudio_format_t actualDataFormat;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ AAudioStream *aaudioStream = nullptr;
- AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
+ aaudio_format_t actualDataFormat;
aaudio_stream_state_t state;
+
+ int32_t actualSamplesPerFrame;
+ int32_t actualSampleRate;
int32_t framesPerBurst = 0;
int32_t framesPerRead = 0;
int32_t framesToRecord = 0;
@@ -50,18 +49,18 @@
int32_t nextFrameCount = 0;
int32_t frameCount = 0;
int32_t xRunCount = 0;
- int64_t previousFramePosition = -1;
- int16_t *data = nullptr;
- float peakLevel = 0.0;
int32_t deviceId;
+ int16_t *shortData = nullptr;
+ float *floatData = nullptr;
+ float peakLevel = 0.0;
+
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Monitor input level using AAudio read, V0.1.2\n", argv[0]);
+ printf("%s - Monitor input level using AAudio read, V0.1.3\n", argv[0]);
- argParser.setFormat(REQUIRED_FORMAT);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
}
@@ -69,6 +68,7 @@
result = recorder.open(argParser);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto finish;
}
aaudioStream = recorder.getStream();
@@ -96,17 +96,18 @@
printf("DataFormat: framesPerRead = %d\n",framesPerRead);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n",
- REQUIRED_FORMAT, actualDataFormat);
- // TODO handle other data formats
- assert(actualDataFormat == REQUIRED_FORMAT);
// Allocate a buffer for the PCM_16 audio data.
- data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
- goto finish;
+ switch (actualDataFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ shortData = new int16_t[framesPerRead * actualSamplesPerFrame];
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ floatData = new float[framesPerRead * actualSamplesPerFrame];
+ break;
+ default:
+ fprintf(stderr, "UNEXPECTED FORMAT! %d", actualDataFormat);
+ goto finish;
}
// Start the stream.
@@ -126,7 +127,12 @@
// Read audio data from the stream.
const int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
- int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+ int actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_read(aaudioStream, shortData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_read(aaudioStream, floatData, minFrames, timeoutNanos);
+ }
if (actual < 0) {
fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
result = actual;
@@ -140,7 +146,12 @@
// Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
- float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ float sample = 0.0f;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ sample = shortData[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ sample = floatData[frameIndex * actualSamplesPerFrame];
+ }
if (sample > peakLevel) {
peakLevel = sample;
}
@@ -151,17 +162,15 @@
displayPeakLevel(peakLevel);
peakLevel = 0.0;
nextFrameCount += FRAMES_PER_LINE;
- }
- // Print timestamps.
- int64_t framePosition = 0;
- int64_t frameTime = 0;
- aaudio_result_t timeResult;
- timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
- &framePosition, &frameTime);
+ // Print timestamps.
+ int64_t framePosition = 0;
+ int64_t frameTime = 0;
+ aaudio_result_t timeResult;
+ timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+ &framePosition, &frameTime);
- if (timeResult == AAUDIO_OK) {
- if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+ if (timeResult == AAUDIO_OK) {
int64_t realTime = getNanoseconds();
int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
@@ -175,11 +184,15 @@
(long long) framePosition,
(long long) frameTime,
latencyMillis);
- previousFramePosition = framePosition;
+ } else {
+ printf("WARNING - AAudioStream_getTimestamp() returned %d\n", timeResult);
}
}
}
+ state = AAudioStream_getState(aaudioStream);
+ printf("after loop, state = %s\n", AAudio_convertStreamStateToText(state));
+
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
@@ -192,7 +205,8 @@
finish:
recorder.close();
- delete[] data;
+ delete[] shortData;
+ delete[] floatData;
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 893795b..d10f812 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -26,29 +26,39 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-#define NUM_SECONDS 5
-
-int main(int argc, char **argv)
+int main(int argc, const char **argv)
{
- (void)argc; // unused
- AAudioSimpleRecorder recorder;
- PeakTrackerData_t myData = {0.0};
- aaudio_result_t result;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ PeakTrackerData_t myData = {0.0};
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
aaudio_stream_state_t state;
+
+ int loopsNeeded = 0;
const int displayRateHz = 20; // arbitrary
- const int loopsNeeded = NUM_SECONDS * displayRateHz;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Display audio input using an AAudio callback, V0.1.2\n", argv[0]);
+ printf("%s - Display audio input using an AAudio callback, V0.1.3\n", argv[0]);
- result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
- SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
+ if (argParser.parseArgs(argc, argv)) {
+ return EXIT_FAILURE;
+ }
+
+ result = recorder.open(argParser,
+ SimpleRecorderDataCallbackProc,
+ SimpleRecorderErrorCallbackProc,
+ &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto error;
}
+ aaudioStream = recorder.getStream();
+ argParser.compareWithStream(aaudioStream);
+
printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
printf("recorder.getSamplesPerFrame() = %d\n", recorder.getSamplesPerFrame());
@@ -58,7 +68,9 @@
goto error;
}
- printf("Sleep for %d seconds while audio record in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio record in a callback thread.\n",
+ argParser.getDurationSeconds());
+ loopsNeeded = argParser.getDurationSeconds() * displayRateHz;
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -67,7 +79,7 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- result = AAudioStream_waitForStateChange(recorder.getStream(),
+ result = AAudioStream_waitForStateChange(aaudioStream,
AAUDIO_STREAM_STATE_CLOSED,
&state,
0);
@@ -93,7 +105,8 @@
goto error;
}
- printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio records in a callback thread.\n",
+ argParser.getDurationSeconds());
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -102,13 +115,14 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- state = AAudioStream_getState(recorder.getStream());
+ state = AAudioStream_getState(aaudioStream);
if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
break;
}
}
printf("Woke up now.\n");
+ argParser.compareWithStream(aaudioStream);
result = recorder.stop();
if (result != AAUDIO_OK) {
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 39d079e..026ff0f 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -151,8 +151,7 @@
static void MyErrorCallbackProc(
AAudioStream *stream __unused,
void *userData __unused,
- aaudio_result_t error)
-{
+ aaudio_result_t error) {
printf("Error Callback, error: %d\n",(int)error);
LoopbackData *myData = (LoopbackData *) userData;
myData->outputError = error;
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index eb6925a..88d7401 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -87,7 +87,6 @@
return;
}
-// TODO use this as a base class within AAudio
class AAudioParameters {
public:
@@ -262,6 +261,9 @@
case 'd':
setDeviceId(atoi(&arg[2]));
break;
+ case 'f':
+ setFormat(atoi(&arg[2]));
+ break;
case 'i':
setInputPreset(atoi(&arg[2]));
break;
@@ -326,6 +328,10 @@
printf(" -b{bufferCapacity} frames\n");
printf(" -c{channels} for example 2 for stereo\n");
printf(" -d{deviceId} default is %d\n", AAUDIO_UNSPECIFIED);
+ printf(" -f{0|1|2} set format\n");
+ printf(" 0 = UNSPECIFIED\n");
+ printf(" 1 = PCM_I16\n");
+ printf(" 2 = FLOAT\n");
printf(" -i{inputPreset} eg. 5 for AAUDIO_INPUT_PRESET_CAMCORDER\n");
printf(" -m{0|1|2|3} set MMAP policy\n");
printf(" 0 = _UNSPECIFIED, use aaudio.mmap_policy system property, default\n");
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 38e1e4c..8e33a31 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -57,7 +57,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using AAudio V0.1.2\n", argv[0]);
+ printf("%s - Play a sine wave using AAudio V0.1.3\n", argv[0]);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index e167773..e33e9f8 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -204,7 +204,7 @@
AAudioArgsParser::usage();
printf(" -l{count} loopCount start/stop, every other one is silent\n");
printf(" -t{msec} play a high pitched tone at the beginning\n");
- printf(" -f force periodic underruns by sleeping in callback\n");
+ printf(" -z force periodic underruns by sleeping in callback\n");
}
int main(int argc, const char **argv)
@@ -219,7 +219,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V0.1.3\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback V0.1.4\n", argv[0]);
for (int i = 1; i < argc; i++) {
const char *arg = argv[i];
@@ -234,8 +234,8 @@
case 't':
prefixToneMsec = atoi(&arg[2]);
break;
- case 'f':
- forceUnderruns = true;
+ case 'z':
+ forceUnderruns = true; // Zzzzzzz
break;
default:
usage();
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index e40a6cd..2207cb8c 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -146,6 +146,8 @@
* to make more refined volume or routing decisions.
*
* Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
/**
@@ -220,6 +222,8 @@
* enforce audio focus.
*
* Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
@@ -252,6 +256,8 @@
* configuration.
*
* Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
/**
@@ -288,6 +294,8 @@
* Do not allocate a session ID.
* Effects cannot be used with this stream.
* Default.
+ *
+ * Added in API level 28.
*/
AAUDIO_SESSION_ID_NONE = -1,
@@ -297,6 +305,8 @@
* Note that the use of this flag may result in higher latency.
*
* Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
+ *
+ * Added in API level 28.
*/
AAUDIO_SESSION_ID_ALLOCATE = 0,
};
@@ -481,6 +491,8 @@
*
* The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param usage the desired usage, eg. AAUDIO_USAGE_GAME
*/
@@ -496,6 +508,8 @@
*
* The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
*/
@@ -514,6 +528,8 @@
* That is because VOICE_RECOGNITION is the preset with the lowest latency
* on many platforms.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param inputPreset the desired configuration for recording
*/
@@ -540,6 +556,8 @@
*
* Allocated session IDs will always be positive and nonzero.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
*/
@@ -1059,6 +1077,8 @@
*
* The sessionID for a stream should not change once the stream has been opened.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return session ID or AAUDIO_SESSION_ID_NONE
*/
@@ -1094,6 +1114,8 @@
/**
* Return the use case for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames read
*/
@@ -1102,6 +1124,8 @@
/**
* Return the content type for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
*/
@@ -1110,6 +1134,8 @@
/**
* Return the input preset for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
*/
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 788833b..b9e28a0 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -57,6 +57,7 @@
shared_libs: [
"libaudioclient",
+ "libaudioutils",
"liblog",
"libcutils",
"libutils",
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 2a3e668..0bdfeac 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -156,7 +156,7 @@
setInputPreset(configurationOutput.getInputPreset());
// Save device format so we can do format conversion and volume scaling together.
- mDeviceFormat = configurationOutput.getFormat();
+ setDeviceFormat(configurationOutput.getFormat());
result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
if (result != AAUDIO_OK) {
@@ -501,9 +501,9 @@
ALOGW("%s - AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared", __func__);
break;
case AAUDIO_SERVICE_EVENT_VOLUME:
+ ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
mStreamVolume = (float)message->event.dataDouble;
doSetVolume();
- ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
break;
case AAUDIO_SERVICE_EVENT_XRUN:
mXRunCount = static_cast<int32_t>(message->event.dataLong);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 0e0724b..0425cd5 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -138,8 +138,6 @@
// Calculate timeout for an operation involving framesPerOperation.
int64_t calculateReasonableTimeout(int32_t framesPerOperation);
- aaudio_format_t getDeviceFormat() const { return mDeviceFormat; }
-
int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }
/**
@@ -195,9 +193,6 @@
int64_t mServiceLatencyNanos = 0;
- // Sometimes the hardware is operating with a different format or channel count from the app.
- // Then we require conversion in AAudio.
- aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
int32_t mDeviceChannelCount = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 0c3b1fa..795ba2c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -117,7 +117,7 @@
// Still haven't got any timestamps from server.
// Keep waiting until we get some valid timestamps then start writing to the
// current buffer position.
- ALOGD("%s() wait for valid timestamps", __func__);
+ ALOGV("%s() wait for valid timestamps", __func__);
// Sleep very briefly and hope we get a timestamp soon.
*wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
ATRACE_END();
@@ -310,6 +310,9 @@
//------------------------------------------------------------------------------
// Implementation of PlayerBase
status_t AudioStreamInternalPlay::doSetVolume() {
- mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
+ float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
+ ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
+ __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
+ mVolumeRamp.setTarget(combinedVolume);
return android::NO_ERROR;
}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 61e03db..358021b 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -367,7 +367,6 @@
return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
}
-
aaudio_data_callback_result_t AudioStream::maybeCallDataCallback(void *audioData,
int32_t numFrames) {
aaudio_data_callback_result_t result = AAUDIO_CALLBACK_RESULT_STOP;
@@ -429,6 +428,12 @@
}
#endif
+void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
+ ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+ mDuckAndMuteVolume = duckAndMuteVolume;
+ doSetVolume(); // apply this change
+}
+
AudioStream::MyPlayerBase::MyPlayerBase(AudioStream *parent) : mParent(parent) {
}
@@ -450,7 +455,6 @@
}
}
-
void AudioStream::MyPlayerBase::destroy() {
unregisterWithAudioManager();
}
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 5273e36..31b895c 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -252,6 +252,20 @@
return AAudioConvert_formatToSizeInBytes(mFormat);
}
+ /**
+ * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+ */
+ int32_t getBytesPerDeviceFrame() const {
+ return mSamplesPerFrame * getBytesPerDeviceSample();
+ }
+
+ /**
+ * This is only valid after setDeviceFormat() has been called.
+ */
+ int32_t getBytesPerDeviceSample() const {
+ return AAudioConvert_formatToSizeInBytes(getDeviceFormat());
+ }
+
virtual int64_t getFramesWritten() = 0;
virtual int64_t getFramesRead() = 0;
@@ -314,10 +328,7 @@
}
// This is used by the AudioManager to duck and mute the stream when changing audio focus.
- void setDuckAndMuteVolume(float duckAndMuteVolume) {
- mDuckAndMuteVolume = duckAndMuteVolume;
- doSetVolume(); // apply this change
- }
+ void setDuckAndMuteVolume(float duckAndMuteVolume);
float getDuckAndMuteVolume() const {
return mDuckAndMuteVolume;
@@ -471,6 +482,17 @@
mFormat = format;
}
+ /**
+ * This should not be called after the open() call.
+ */
+ void setDeviceFormat(aaudio_format_t format) {
+ mDeviceFormat = format;
+ }
+
+ aaudio_format_t getDeviceFormat() const {
+ return mDeviceFormat;
+ }
+
void setState(aaudio_stream_state_t state);
void setDeviceId(int32_t deviceId) {
@@ -485,9 +507,23 @@
float mDuckAndMuteVolume = 1.0f;
-
protected:
+ /**
+ * Either convert the data from device format to app format and return a pointer
+ * to the conversion buffer,
+ * OR just pass back the original pointer.
+ *
+ * Note that this is only used for the INPUT path.
+ *
+ * @param audioData
+ * @param numFrames
+ * @return original pointer or the conversion buffer
+ */
+ virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ return audioData;
+ }
+
void setPeriodNanoseconds(int64_t periodNanoseconds) {
mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
}
@@ -539,6 +575,10 @@
int32_t mSessionId = AAUDIO_UNSPECIFIED;
+ // Sometimes the hardware is operating with a different format from the app.
+ // Then we require conversion in AAudio.
+ aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
// callback ----------------------------------
AAudioStream_dataCallback mDataCallbackProc = nullptr; // external callback functions
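A hedged worked example of the new device-format accessors above, using hypothetical stream parameters:

// Hypothetical stereo stream where the app format and the device format differ:
//
//   mSamplesPerFrame = 2
//   app format    = AAUDIO_FORMAT_PCM_FLOAT -> getBytesPerFrame()       = 2 * 4 = 8 bytes
//   device format = AAUDIO_FORMAT_PCM_I16   -> getBytesPerDeviceFrame() = 2 * 2 = 4 bytes
//
// The legacy paths size AudioRecord/AudioTrack transfers with the device value
// and convert to the app format via maybeConvertDeviceData() on the input path.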
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 293a6a8..3a7a578 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -87,7 +87,7 @@
break;
default:
- ALOGE("bad direction = %d", direction);
+ ALOGE("%s() bad direction = %d", __func__, direction);
result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
return result;
@@ -99,7 +99,7 @@
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
AudioStream *audioStream = nullptr;
if (streamPtr == nullptr) {
- ALOGE("build() streamPtr is null");
+ ALOGE("%s() streamPtr is null", __func__);
return AAUDIO_ERROR_NULL;
}
*streamPtr = nullptr;
@@ -124,13 +124,11 @@
if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
}
- ALOGD("mmapPolicy = %d, mapExclusivePolicy = %d",
- mmapPolicy, mapExclusivePolicy);
aaudio_sharing_mode_t sharingMode = getSharingMode();
if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
&& (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
- ALOGW("EXCLUSIVE sharing mode not supported. Use SHARED.");
+ ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
sharingMode = AAUDIO_SHARING_MODE_SHARED;
setSharingMode(sharingMode);
}
@@ -141,13 +139,14 @@
// TODO Support other performance settings in MMAP mode.
// Disable MMAP if low latency not requested.
if (getPerformanceMode() != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
- ALOGD("build() MMAP not available because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not used.");
+ ALOGD("%s() MMAP not available because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not used.",
+ __func__);
allowMMap = false;
}
// SessionID and Effects are only supported in Legacy mode.
if (getSessionId() != AAUDIO_SESSION_ID_NONE) {
- ALOGD("build() MMAP not available because sessionId used.");
+ ALOGD("%s() MMAP not available because sessionId used.", __func__);
allowMMap = false;
}
@@ -163,7 +162,7 @@
audioStream = nullptr;
if (isMMap && allowLegacy) {
- ALOGD("build() MMAP stream did not open so try Legacy path");
+ ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
// If MMAP stream failed to open then TRY using a legacy stream.
result = builder_createStream(getDirection(), sharingMode,
false, &audioStream);
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index e6e7c8e..9b9744e 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -43,7 +43,7 @@
int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
mStorage = new uint8_t[bytesPerBuffer];
mStorageOwned = true;
- ALOGD("capacityInFrames = %d, bytesPerFrame = %d",
+ ALOGV("capacityInFrames = %d, bytesPerFrame = %d",
capacityInFrames, bytesPerFrame);
}
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index 3352b33..a6b9f5d 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -19,10 +19,12 @@
#include <utils/Log.h>
#include <stdint.h>
-#include <utils/String16.h>
+
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
#include <media/AudioTrack.h>
#include <media/AudioTimestamp.h>
-#include <aaudio/AAudio.h>
+#include <utils/String16.h>
#include "core/AudioStream.h"
#include "legacy/AudioStreamLegacy.h"
@@ -48,14 +50,17 @@
return AudioStreamLegacy_callback;
}
-aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer, int32_t numFrames) {
+aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
+ int32_t numFrames) {
+ void *finalAudioData = buffer;
if (getDirection() == AAUDIO_DIRECTION_INPUT) {
// Increment before because we already got the data from the device.
incrementFramesRead(numFrames);
+ finalAudioData = (void *) maybeConvertDeviceData(buffer, numFrames);
}
// Call using the AAudio callback interface.
- aaudio_data_callback_result_t callbackResult = maybeCallDataCallback(buffer, numFrames);
+ aaudio_data_callback_result_t callbackResult = maybeCallDataCallback(finalAudioData, numFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE
&& getDirection() == AAUDIO_DIRECTION_OUTPUT) {
@@ -67,15 +72,15 @@
// Implement FixedBlockProcessor
int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
- int32_t numFrames = numBytes / getBytesPerFrame();
+ int32_t numFrames = numBytes / getBytesPerDeviceFrame();
return (int32_t) callDataCallbackFrames(buffer, numFrames);
}
void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
aaudio_data_callback_result_t callbackResult;
- // This illegal size can be used to AudioFlinger to stop calling us.
+ // This illegal size can be used to tell AudioFlinger to stop calling us.
// This takes advantage of AudioFlinger killing the stream.
- // TODO need API change in AudioRecord and AudioTrack
+ // TODO add to API in AudioRecord and AudioTrack
const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
switch (opcode) {
@@ -100,7 +105,7 @@
// If the caller specified an exact size then use a block size adapter.
if (mBlockAdapter != nullptr) {
- int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+ int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
callbackResult = mBlockAdapter->processVariableBlock(
(uint8_t *) audioBuffer->raw, byteCount);
} else {
@@ -109,7 +114,7 @@
audioBuffer->frameCount);
}
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+ audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
} else { // STOP or invalid result
ALOGW("%s() callback requested stop, fake an error", __func__);
audioBuffer->size = SIZE_STOP_CALLBACKS;
@@ -179,19 +184,17 @@
int64_t localPosition;
status_t status = extendedTimestamp->getBestTimestamp(&localPosition, timeNanoseconds,
timebase, &location);
- // use MonotonicCounter to prevent retrograde motion.
- mTimestampPosition.update32((int32_t)localPosition);
- *framePosition = mTimestampPosition.get();
+ if (status == OK) {
+ // use MonotonicCounter to prevent retrograde motion.
+ mTimestampPosition.update32((int32_t) localPosition);
+ *framePosition = mTimestampPosition.get();
+ }
// ALOGD("getBestTimestamp() fposition: server = %6lld, kernel = %6lld, location = %d",
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_SERVER],
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_KERNEL],
// (int)location);
- if (status == WOULD_BLOCK) {
- return AAUDIO_ERROR_INVALID_STATE;
- } else {
- return AAudioConvert_androidToAAudioResult(status);
- }
+ return AAudioConvert_androidToAAudioResult(status);
}
void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 28158e2..1981ba3 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -19,13 +19,15 @@
#include <utils/Log.h>
#include <stdint.h>
-#include <utils/String16.h>
-#include <media/AudioRecord.h>
-#include <aaudio/AAudio.h>
-#include "AudioClock.h"
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
+#include <media/AudioRecord.h>
+#include <utils/String16.h>
+
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
+#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"
using namespace android;
@@ -63,10 +65,6 @@
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
: builder.getBufferCapacity();
- // TODO implement an unspecified Android format then use that.
- audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
- ? AUDIO_FORMAT_PCM_FLOAT
- : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -82,6 +80,35 @@
break;
}
+ // Preserve behavior of API 26
+ if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
+ setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+ }
+
+ // Maybe change device format to get a FAST path.
+ // AudioRecord does not support FAST mode for FLOAT data.
+ // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
+ // So IF the user asks for low latency FLOAT
+ // AND the sampleRate is likely to be compatible with FAST
+ // THEN request I16 and convert to FLOAT when passing to user.
+ // Note that hard coding 48000 Hz is not ideal because the sampleRate
+ // for a FAST path might not be 48000 Hz.
+ // It normally is but there is a chance that it is not.
+ // And there is no reliable way to know that in advance.
+ // Luckily the consequences of a wrong guess are minor.
+ // We just may not get a FAST track.
+ // But we wouldn't have anyway without this hack.
+ constexpr int32_t kMostLikelySampleRateForFast = 48000;
+ if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+ && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+ && (samplesPerFrame <= 2) // FAST only for mono and stereo
+ && (getSampleRate() == kMostLikelySampleRateForFast
+ || getSampleRate() == AAUDIO_UNSPECIFIED)) {
+ setDeviceFormat(AAUDIO_FORMAT_PCM_I16);
+ } else {
+ setDeviceFormat(getFormat());
+ }
+
uint32_t notificationFrames = 0;
// Setup the callback if there is one.
@@ -96,9 +123,6 @@
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
- ALOGD("open(), request notificationFrames = %u, frameCount = %u",
- notificationFrames, (uint)frameCount);
-
// Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
? AUDIO_PORT_HANDLE_NONE
@@ -120,39 +144,59 @@
aaudio_session_id_t requestedSessionId = builder.getSessionId();
audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
- mAudioRecord = new AudioRecord(
- mOpPackageName // const String16& opPackageName TODO does not compile
- );
- mAudioRecord->set(
- AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
- getSampleRate(),
- format,
- channelMask,
- frameCount,
- callback,
- callbackData,
- notificationFrames,
- false /*threadCanCallJava*/,
- sessionId,
- streamTransferType,
- flags,
- AUDIO_UID_INVALID, // DEFAULT uid
- -1, // DEFAULT pid
- &attributes,
- selectedDeviceId
- );
+ // ----------- open the AudioRecord ---------------------
+ // Might retry, but never more than once.
+ for (int i = 0; i < 2; i ++) {
+ audio_format_t requestedInternalFormat =
+ AAudioConvert_aaudioToAndroidDataFormat(getDeviceFormat());
- // Did we get a valid track?
- status_t status = mAudioRecord->initCheck();
- if (status != OK) {
- close();
- ALOGE("open(), initCheck() returned %d", status);
- return AAudioConvert_androidToAAudioResult(status);
+ mAudioRecord = new AudioRecord(
+ mOpPackageName // const String16& opPackageName TODO does not compile
+ );
+ mAudioRecord->set(
+ AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
+ getSampleRate(),
+ requestedInternalFormat,
+ channelMask,
+ frameCount,
+ callback,
+ callbackData,
+ notificationFrames,
+ false /*threadCanCallJava*/,
+ sessionId,
+ streamTransferType,
+ flags,
+ AUDIO_UID_INVALID, // DEFAULT uid
+ -1, // DEFAULT pid
+ &attributes,
+ selectedDeviceId
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioRecord->initCheck();
+ if (status != OK) {
+ close();
+ ALOGE("open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ // Check to see if it was worth hacking the deviceFormat.
+ bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
+ == AUDIO_INPUT_FLAG_FAST;
+ if (getFormat() != getDeviceFormat() && !gotFastPath) {
+ // We tried to get a FAST path by switching the device format.
+ // But it didn't work. So we might as well reopen using the same
+ // format for device and for app.
+ ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
+ mAudioRecord.clear();
+ setDeviceFormat(getFormat());
+ } else {
+ break; // Keep the one we just opened.
+ }
}
// Get the actual values from the AudioRecord.
setSamplesPerFrame(mAudioRecord->channelCount());
- setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
int32_t actualSampleRate = mAudioRecord->getSampleRate();
ALOGW_IF(actualSampleRate != getSampleRate(),
@@ -169,6 +213,29 @@
mBlockAdapter = nullptr;
}
+ // Allocate format conversion buffer if needed.
+ if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
+ && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+
+ if (builder.getDataCallbackProc() != nullptr) {
+ // If we have a callback then we need to convert the data into an internal float
+ // array and then pass that entire array to the app.
+ mFormatConversionBufferSizeInFrames =
+ (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
+ ? mCallbackBufferSize : getFramesPerBurst();
+ int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+ mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
+ } else {
+ // If we don't have a callback then we will read into an internal short array
+ // and then convert into the app float array in read().
+ mFormatConversionBufferSizeInFrames = getFramesPerBurst();
+ int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+ mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
+ }
+ ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
+ __func__, mFormatConversionBufferSizeInFrames);
+ }
+
// Update performance mode based on the actual stream.
// For example, if the sample rate does not match native then you won't get a FAST track.
audio_input_flags_t actualFlags = mAudioRecord->getFlags();
@@ -216,6 +283,24 @@
return AudioStream::close();
}
+const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ if (mFormatConversionBufferFloat.get() != nullptr) {
+ LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
+ "%s() conversion size %d too large for buffer %d",
+ __func__, numFrames, mFormatConversionBufferSizeInFrames);
+
+ int32_t numSamples = numFrames * getSamplesPerFrame();
+ // Only conversion supported is I16 to FLOAT
+ memcpy_to_float_from_i16(
+ mFormatConversionBufferFloat.get(),
+ (const int16_t *) audioData,
+ numSamples);
+ return mFormatConversionBufferFloat.get();
+ } else {
+ return audioData;
+ }
+}
+
void AudioStreamRecord::processCallback(int event, void *info) {
switch (event) {
case AudioRecord::EVENT_MORE_DATA:
@@ -302,9 +387,10 @@
int32_t numFrames,
int64_t timeoutNanoseconds)
{
- int32_t bytesPerFrame = getBytesPerFrame();
+ int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
int32_t numBytes;
- aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ // This will detect out of range values for numFrames.
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
if (result != AAUDIO_OK) {
return result;
}
@@ -315,19 +401,49 @@
// TODO add timeout to AudioRecord
bool blocking = (timeoutNanoseconds > 0);
- ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
- if (bytesRead == WOULD_BLOCK) {
+
+ ssize_t bytesActuallyRead = 0;
+ ssize_t totalBytesRead = 0;
+ if (mFormatConversionBufferI16.get() != nullptr) {
+ // Convert I16 data to float using an intermediate buffer.
+ float *floatBuffer = (float *) buffer;
+ int32_t framesLeft = numFrames;
+ // Perform conversion using multiple read()s if necessary.
+ while (framesLeft > 0) {
+ // Read into short internal buffer.
+ int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
+ size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
+ bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
+ if (bytesActuallyRead <= 0) {
+ break;
+ }
+ totalBytesRead += bytesActuallyRead;
+ int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
+ // Convert into app float buffer.
+ size_t numSamples = framesToConvert * getSamplesPerFrame();
+ memcpy_to_float_from_i16(
+ floatBuffer,
+ mFormatConversionBufferI16.get(),
+ numSamples);
+ floatBuffer += numSamples;
+ framesLeft -= framesToConvert;
+ }
+ } else {
+ bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
+ totalBytesRead = bytesActuallyRead;
+ }
+ if (bytesActuallyRead == WOULD_BLOCK) {
return 0;
- } else if (bytesRead < 0) {
- // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
- // AudioRecord invalidation
- if (bytesRead == DEAD_OBJECT) {
+ } else if (bytesActuallyRead < 0) {
+ // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioRecord invalidation.
+ if (bytesActuallyRead == DEAD_OBJECT) {
setState(AAUDIO_STREAM_STATE_DISCONNECTED);
return AAUDIO_ERROR_DISCONNECTED;
}
- return AAudioConvert_androidToAAudioResult(bytesRead);
+ return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
}
- int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
+ int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
incrementFramesRead(framesRead);
result = updateStateMachine();
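The read path above converts device-side I16 data into the app's FLOAT buffer with memcpy_to_float_from_i16() from audio_utils. A minimal standalone sketch of that conversion step; the function name, parameters, and frame counts here are taken from the patch, everything else is an assumption.

// Sketch only: mirrors the conversion used above. Assumes the audio_utils
// primitive void memcpy_to_float_from_i16(float *dst, const int16_t *src, size_t count).
#include <audio_utils/primitives.h>
#include <cstdint>

void convertBurstToFloat(const int16_t *deviceData, float *appBuffer,
                         int32_t numFrames, int32_t samplesPerFrame) {
    // One sample per channel per frame, matching numSamples in the patch.
    const size_t numSamples = static_cast<size_t>(numFrames) * samplesPerFrame;
    memcpy_to_float_from_i16(appBuffer, deviceData, numSamples);
}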
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index c1723ba..2f41d34 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -76,6 +76,8 @@
return incrementFramesRead(frames);
}
+ const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) override;
+
private:
android::sp<android::AudioRecord> mAudioRecord;
// adapts between variable sized blocks and fixed size blocks
@@ -83,6 +85,11 @@
// TODO add 64-bit position reporting to AudioRecord and use it.
android::String16 mOpPackageName;
+
+ // Only one type of conversion buffer is used.
+ std::unique_ptr<float[]> mFormatConversionBufferFloat;
+ std::unique_ptr<int16_t[]> mFormatConversionBufferI16;
+ int32_t mFormatConversionBufferSizeInFrames = 0;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 023e8af..9653601 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -181,6 +181,7 @@
aaudio_format_t aaudioFormat =
AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
setFormat(aaudioFormat);
+ setDeviceFormat(aaudioFormat);
int32_t actualSampleRate = mAudioTrack->getSampleRate();
ALOGW_IF(actualSampleRate != getSampleRate(),
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 21d3fa6..a8369c2 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -1231,6 +1231,14 @@
ALOGW("dead IAudioRecord, creating a new one from %s()", from);
++mSequence;
+ const int INITIAL_RETRIES = 3;
+ int retries = INITIAL_RETRIES;
+retry:
+ if (retries < INITIAL_RETRIES) {
+ // refresh the audio configuration cache in this process to make sure we get new
+ // input parameters and new IAudioRecord in createRecord_l()
+ AudioSystem::clearAudioConfigCache();
+ }
mFlags = mOrigFlags;
// if the new IAudioRecord is created, createRecord_l() will modify the
@@ -1239,7 +1247,11 @@
Modulo<uint32_t> position(mProxy->getPosition());
mNewPosition = position + mUpdatePeriod;
status_t result = createRecord_l(position, mOpPackageName);
- if (result == NO_ERROR) {
+
+ if (result != NO_ERROR) {
+ ALOGW("%s(): createRecord_l failed, do not retry", __func__);
+ retries = 0;
+ } else {
if (mActive) {
// callback thread or sync event hasn't changed
// FIXME this fails if we have a new AudioFlinger instance
@@ -1248,6 +1260,14 @@
}
mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
}
+
+ if (result != NO_ERROR) {
+ ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+ if (--retries > 0) {
+ goto retry;
+ }
+ }
+
if (result != NO_ERROR) {
ALOGW("restoreRecord_l() failed status %d", result);
mActive = false;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 77cfe4d..8c9d3c1 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,6 +24,7 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
+#include <cutils/multiuser.h>
#include <media/TimeCheck.h>
#include <private/android_filesystem_config.h>
@@ -904,8 +905,7 @@
case SET_MIC_MUTE:
case SET_LOW_RAM_DEVICE:
case SYSTEM_READY: {
- uid_t multiUserClientUid = IPCThreadState::self()->getCallingUid() % AID_USER_OFFSET;
- if (multiUserClientUid >= AID_APP_START) {
+ if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
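multiuser_get_app_id() replaces the hand-rolled modulo; the two are equivalent, but the helper states the intent. A worked example with a hypothetical secondary-user UID:

// Hypothetical UID arithmetic (AID_USER_OFFSET = 100000, AID_APP_START = 10000):
//
//   uid = 1010123                         // user 10, app id 10123
//   uid % AID_USER_OFFSET          == 10123
//   multiuser_get_app_id(1010123)  == 10123  -> >= AID_APP_START, so rejected
//
// Native system callers run with app ids below AID_APP_START and therefore
// still pass this check.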
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index a49b2cb..35f9727 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -24,7 +24,7 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-
+#include <cutils/multiuser.h>
#include <media/AudioEffect.h>
#include <media/IAudioPolicyService.h>
#include <media/TimeCheck.h>
@@ -872,9 +872,10 @@
case INIT_STREAM_VOLUME:
case SET_STREAM_VOLUME:
case REGISTER_POLICY_MIXES:
- case SET_MASTER_MONO: {
- uid_t multiUserClientUid = IPCThreadState::self()->getCallingUid() % AID_USER_OFFSET;
- if (multiUserClientUid >= AID_APP_START) {
+ case SET_MASTER_MONO:
+ case START_AUDIO_SOURCE:
+ case STOP_AUDIO_SOURCE: {
+ if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
@@ -989,7 +990,7 @@
case GET_OUTPUT_FOR_ATTR: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_attributes_t attr;
+ audio_attributes_t attr = {};
bool hasAttributes = data.readInt32() != 0;
if (hasAttributes) {
data.read(&attr, sizeof(audio_attributes_t));
@@ -1058,7 +1059,7 @@
case GET_INPUT_FOR_ATTR: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_attributes_t attr;
+ audio_attributes_t attr = {};
data.read(&attr, sizeof(audio_attributes_t));
sanetizeAudioAttributes(&attr);
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
@@ -1160,8 +1161,11 @@
case GET_OUTPUT_FOR_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- effect_descriptor_t desc;
- data.read(&desc, sizeof(effect_descriptor_t));
+ effect_descriptor_t desc = {};
+ if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ android_errorWriteLog(0x534e4554, "73126106");
+ }
+ (void)sanitizeEffectDescriptor(&desc);
audio_io_handle_t output = getOutputForEffect(&desc);
reply->writeInt32(static_cast <int>(output));
return NO_ERROR;
@@ -1169,8 +1173,11 @@
case REGISTER_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- effect_descriptor_t desc;
- data.read(&desc, sizeof(effect_descriptor_t));
+ effect_descriptor_t desc = {};
+ if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ android_errorWriteLog(0x534e4554, "73126106");
+ }
+ (void)sanitizeEffectDescriptor(&desc);
audio_io_handle_t io = data.readInt32();
uint32_t strategy = data.readInt32();
audio_session_t session = (audio_session_t) data.readInt32();
@@ -1229,7 +1236,7 @@
count = AudioEffect::kMaxPreProcessing;
}
uint32_t retCount = count;
- effect_descriptor_t *descriptors = new effect_descriptor_t[count];
+ effect_descriptor_t *descriptors = new effect_descriptor_t[count]{};
status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
reply->writeInt32(status);
if (status != NO_ERROR && status != NO_MEMORY) {
@@ -1248,7 +1255,7 @@
case IS_OFFLOAD_SUPPORTED: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_offload_info_t info;
+ audio_offload_info_t info = {};
data.read(&info, sizeof(audio_offload_info_t));
bool isSupported = isOffloadSupported(info);
reply->writeInt32(isSupported);
@@ -1303,7 +1310,7 @@
case CREATE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_patch patch;
+ struct audio_patch patch = {};
data.read(&patch, sizeof(struct audio_patch));
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
@@ -1319,7 +1326,7 @@
case RELEASE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_patch_handle_t handle;
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
data.read(&handle, sizeof(audio_patch_handle_t));
status_t status = releaseAudioPatch(handle);
reply->writeInt32(status);
@@ -1358,8 +1365,9 @@
case SET_AUDIO_PORT_CONFIG: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_port_config config;
+ struct audio_port_config config = {};
data.read(&config, sizeof(struct audio_port_config));
+ (void)sanitizeAudioPortConfig(&config);
status_t status = setAudioPortConfig(&config);
reply->writeInt32(status);
return NO_ERROR;
@@ -1433,9 +1441,10 @@
case START_AUDIO_SOURCE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_port_config source;
+ struct audio_port_config source = {};
data.read(&source, sizeof(struct audio_port_config));
- audio_attributes_t attributes;
+ (void)sanitizeAudioPortConfig(&source);
+ audio_attributes_t attributes = {};
data.read(&attributes, sizeof(audio_attributes_t));
sanetizeAudioAttributes(&attributes);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
@@ -1488,6 +1497,14 @@
}
}
+/** returns true if string overflow was prevented by zero termination */
+template <size_t size>
+static bool preventStringOverflow(char (&s)[size]) {
+ if (strnlen(s, size) < size) return false;
+ s[size - 1] = '\0';
+ return true;
+}
+
void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
{
const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
@@ -1497,6 +1514,27 @@
attr->tags[tagsMaxSize - 1] = '\0';
}
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeEffectDescriptor(effect_descriptor_t* desc)
+{
+ if (preventStringOverflow(desc->name)
+ | /* always */ preventStringOverflow(desc->implementor)) {
+ android_errorWriteLog(0x534e4554, "73126106"); // SafetyNet logging
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeAudioPortConfig(struct audio_port_config* config)
+{
+ if (config->type == AUDIO_PORT_TYPE_DEVICE &&
+ preventStringOverflow(config->ext.device.address)) {
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
// ----------------------------------------------------------------------------
} // namespace android
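The helper above is small enough to exercise on its own. This standalone sketch mirrors preventStringOverflow() and shows it clamping a char array that arrived with no NUL terminator; the buffer and sizes here are illustrative, not the real effect_descriptor_t fields:

#include <cstdio>
#include <string.h>     // strnlen

template <size_t size>
static bool preventStringOverflow(char (&s)[size]) {
    if (strnlen(s, size) < size) return false;   // already NUL terminated
    s[size - 1] = '\0';                          // clamp, and report that we had to
    return true;
}

int main() {
    char name[8];
    memset(name, 'A', sizeof(name));             // no terminator anywhere in the array
    if (preventStringOverflow(name)) {
        std::printf("sanitized: '%s'\n", name);  // prints AAAAAAA, safely terminated
    }
    return 0;
}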
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 949d593..1114eeb 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -180,6 +180,8 @@
uint32_t flags = 0);
private:
void sanetizeAudioAttributes(audio_attributes_t* attr);
+ status_t sanitizeEffectDescriptor(effect_descriptor_t* desc);
+ status_t sanitizeAudioPortConfig(struct audio_port_config* config);
};
// ----------------------------------------------------------------------------
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
index 77df6b5..1c4be74 100644
--- a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
@@ -32,7 +32,8 @@
}
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0) {
+ if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
+ strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
return mHidlFactory->openDevice(name, device);
}
return mLocalFactory->openDevice(name, device);
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 0630285..e1c03f9 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -1966,7 +1966,8 @@
if (pContext->bEnabled == LVM_FALSE) {
if (pContext->SamplesToExitCount > 0) {
- pContext->SamplesToExitCount -= outBuffer->frameCount;
+ // signed - unsigned will trigger integer overflow if result becomes negative.
+ pContext->SamplesToExitCount -= (ssize_t)outBuffer->frameCount;
} else {
status = -ENODATA;
}
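The one-character cast above matters because of the usual arithmetic conversions: subtracting an unsigned frame count from a signed counter promotes the expression to unsigned, so a result that should go negative wraps to a huge positive value and a later "> 0" test keeps passing. A standalone illustration:

#include <cstdio>
#include <sys/types.h>   // ssize_t

int main() {
    ssize_t samplesToExit = 100;
    size_t  frameCount    = 150;   // more frames than samples left in this pass

    // Mixed signed/unsigned arithmetic: the signed operand converts to unsigned.
    if (samplesToExit - frameCount > 0) {
        std::printf("without cast: result wraps to %zu and still looks positive\n",
                    samplesToExit - frameCount);
    }
    if (samplesToExit - (ssize_t)frameCount <= 0) {   // the patch: force signed math
        std::printf("with cast: result is %zd as expected\n",
                    samplesToExit - (ssize_t)frameCount);
    }
    return 0;
}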
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3990e69..9d9ac8c 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -3,10 +3,12 @@
vendor_available: true,
export_include_dirs: ["include"],
header_libs:[
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
export_header_lib_headers: [
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
@@ -192,6 +194,14 @@
export_aidl_headers: true,
},
+ header_libs: [
+ "libstagefright_headers",
+ ],
+
+ export_header_lib_headers: [
+ "libstagefright_headers",
+ ],
+
shared_libs: [
"liblog",
"libcutils",
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index f725c97..214117b 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -166,15 +166,16 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
- sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly)
+ sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly, bool thumbnail)
{
- ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d)",
- index, colorFormat, metaOnly);
+ ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
Parcel data, reply;
data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt32(colorFormat);
data.writeInt32(metaOnly);
+ data.writeInt32(thumbnail);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
sendSchedPolicy(data);
#endif
@@ -356,12 +357,13 @@
int index = data.readInt32();
int colorFormat = data.readInt32();
bool metaOnly = (data.readInt32() != 0);
- ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
- index, colorFormat, metaOnly);
+ bool thumbnail = (data.readInt32() != 0);
+ ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d), thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
setSchedPolicy(data);
#endif
- sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly);
+ sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
if (bitmap != 0) { // Don't send NULL across the binder interface
reply->writeInt32(NO_ERROR);
reply->writeStrongBinder(IInterface::asBinder(bitmap));
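The proxy and stub above must agree on field order: the new thumbnail flag is written after metaOnly on the Bp side and read back in the same position on the Bn side. A standalone sketch of that contract, with the parcel reduced to an int32 queue and a placeholder pixel-format value:

#include <cstdint>
#include <cstdio>
#include <vector>

struct MiniParcel {                        // stand-in for android::Parcel
    std::vector<int32_t> ints;
    size_t pos = 0;
    void writeInt32(int32_t v) { ints.push_back(v); }
    int32_t readInt32() { return ints.at(pos++); }
};

int main() {
    // proxy side (getImageAtIndex caller)
    MiniParcel data;
    data.writeInt32(0);    // index
    data.writeInt32(4);    // colorFormat (placeholder value)
    data.writeInt32(0);    // metaOnly
    data.writeInt32(1);    // thumbnail -- the new field, appended last

    // stub side (onTransact) must mirror the write order exactly
    const int index = data.readInt32();
    const int colorFormat = data.readInt32();
    const bool metaOnly = data.readInt32() != 0;
    const bool thumbnail = data.readInt32() != 0;
    std::printf("index=%d colorFormat=%d metaOnly=%d thumbnail=%d\n",
                index, colorFormat, metaOnly, thumbnail);
    return 0;
}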
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index 5491535..1a04552 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -45,7 +45,7 @@
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> getImageAtIndex(
- int index, int colorFormat, bool metaOnly) = 0;
+ int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 116b548..c45a964 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -46,7 +46,7 @@
virtual VideoFrame* getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
virtual VideoFrame* getImageAtIndex(
- int index, int colorFormat, bool metaOnly) = 0;
+ int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
virtual status_t getFrameAtIndex(
std::vector<VideoFrame*>* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
@@ -65,7 +65,7 @@
int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
{ return NULL; }
virtual VideoFrame* getImageAtIndex(
- int /*index*/, int /*colorFormat*/, bool /*metaOnly*/)
+ int /*index*/, int /*colorFormat*/, bool /*metaOnly*/, bool /*thumbnail*/)
{ return NULL; }
virtual status_t getFrameAtIndex(
std::vector<VideoFrame*>* /*frames*/,
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index b41da80..4cdeeb7 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -90,7 +90,7 @@
sp<IMemory> getFrameAtTime(int64_t timeUs, int option,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> getImageAtIndex(int index,
- int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+ int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
diff --git a/media/libmedia/include/media/omx/1.0/Conversion.h b/media/libmedia/include/media/omx/1.0/Conversion.h
index 94f2e8d..3700a23 100644
--- a/media/libmedia/include/media/omx/1.0/Conversion.h
+++ b/media/libmedia/include/media/omx/1.0/Conversion.h
@@ -20,6 +20,7 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
@@ -34,6 +35,8 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <gui/IGraphicBufferProducer.h>
#include <android/hardware/media/omx/1.0/types.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
@@ -197,26 +200,6 @@
}
/**
- * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
- * calls.
- *
- * \param[in] t The source `Return<Status>`.
- * \return The corresponding `status_t`.
- *
- * This function first check if \p t has a transport error. If it does, then the
- * return value is the transport error code. Otherwise, the return value is
- * converted from `Status` contained inside \p t.
- *
- * Note:
- * - This `Status` is omx-specific. It is defined in `types.hal`.
- * - The name of this function is not `convert`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
* \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
*
* \param[in] t The source `Return<void>`.
@@ -235,7 +218,47 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first check if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -246,7 +269,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
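The switch statements above replace a blind static_cast because the HIDL Status enum and status_t only partially overlap: most codes share numeric values, but BUFFER_NEEDS_REALLOCATION has to be remapped to NOT_ENOUGH_DATA (and back), and anything unrecognized is now logged. A standalone sketch of that idea; the enum values below are invented for illustration and do not match the real headers:

#include <cstdint>
#include <cstdio>

using status_t = int32_t;
static const status_t NOT_ENOUGH_DATA = -61;   // invented value for the sketch

enum class Status : int32_t {
    OK = 0,
    BAD_VALUE = -22,
    BUFFER_NEEDS_REALLOCATION = 1,             // no matching value on the status_t side
};

static status_t toStatusT(Status s) {
    switch (s) {
        case Status::OK:
        case Status::BAD_VALUE:
            return static_cast<status_t>(s);   // shared values: a cast is fine
        case Status::BUFFER_NEEDS_REALLOCATION:
            return NOT_ENOUGH_DATA;            // divergent value: remap explicitly
        default:
            std::printf("unrecognized status %d\n", static_cast<int32_t>(s));
            return static_cast<status_t>(s);
    }
}

int main() {
    std::printf("BUFFER_NEEDS_REALLOCATION -> %d (not the raw %d)\n",
                toStatusT(Status::BUFFER_NEEDS_REALLOCATION),
                static_cast<int32_t>(Status::BUFFER_NEEDS_REALLOCATION));
    return 0;
}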
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 6a4204b..c10a907 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -155,15 +155,15 @@
}
sp<IMemory> MediaMetadataRetriever::getImageAtIndex(
- int index, int colorFormat, bool metaOnly) {
- ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d)",
- index, colorFormat, metaOnly);
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
Mutex::Autolock _l(mLock);
if (mRetriever == 0) {
ALOGE("retriever is not initialized");
return NULL;
}
- return mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+ return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
}
status_t MediaMetadataRetriever::getFrameAtIndex(
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 16ed530..3b3ac29 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -234,9 +234,9 @@
}
sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
- int index, int colorFormat, bool metaOnly) {
- ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d)",
- index, colorFormat, metaOnly);
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
+ ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
mThumbnail.clear();
@@ -244,7 +244,7 @@
ALOGE("retriever is not initialized");
return NULL;
}
- VideoFrame *frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+ VideoFrame *frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
if (frame == NULL) {
ALOGE("failed to extract image");
return NULL;
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index f71891a..e774c8f 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -53,7 +53,7 @@
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual sp<IMemory> getImageAtIndex(
- int index, int colorFormat, bool metaOnly);
+ int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual status_t getFrameAtIndex(
std::vector<sp<IMemory> > *frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 14ffb1d..0a1bdfe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1289,7 +1289,8 @@
ALOGV("Tear down audio with reason %d.", reason);
if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
// TimeoutWhenPaused is only for offload mode.
- ALOGW("Receive a stale message for teardown.");
+ ALOGW("Received a stale message for teardown, mPaused(%d), mOffloadAudio(%d)",
+ mPaused, mOffloadAudio);
break;
}
int64_t positionUs;
@@ -1789,6 +1790,8 @@
void NuPlayer::restartAudio(
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
+ ALOGD("restartAudio timeUs(%lld), dontOffload(%d), createDecoder(%d)",
+ (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
mAudioDecoder.clear();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 63c887b..3e5bdd6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -669,6 +669,11 @@
notifyListener_l(MEDIA_STOPPED);
}
+ if (property_get_bool("persist.debug.sf.stats", false)) {
+ Vector<String16> args;
+ dump(-1, args);
+ }
+
mState = STATE_RESET_IN_PROGRESS;
mPlayer->resetAsync();
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 3d0aad1..a00d13a 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -42,30 +42,30 @@
static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
static const size_t kRetryCount = 20; // must be >0
-VideoFrame *FrameDecoder::allocVideoFrame(
- int32_t width, int32_t height, bool metaOnly) {
+//static
+VideoFrame *allocVideoFrame(const sp<MetaData> &trackMeta,
+ int32_t width, int32_t height, int32_t dstBpp, bool metaOnly = false) {
int32_t rotationAngle;
- if (!mTrackMeta->findInt32(kKeyRotation, &rotationAngle)) {
+ if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
}
-
uint32_t type;
const void *iccData;
size_t iccSize;
- if (!mTrackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
+ if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
iccData = NULL;
iccSize = 0;
}
int32_t sarWidth, sarHeight;
int32_t displayWidth, displayHeight;
- if (mTrackMeta->findInt32(kKeySARWidth, &sarWidth)
- && mTrackMeta->findInt32(kKeySARHeight, &sarHeight)
+ if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
+ && trackMeta->findInt32(kKeySARHeight, &sarHeight)
&& sarHeight != 0) {
displayWidth = (width * sarWidth) / sarHeight;
displayHeight = height;
- } else if (mTrackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
- && mTrackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
+ } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
+ && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
&& displayWidth > 0 && displayHeight > 0
&& width > 0 && height > 0) {
ALOGV("found display size %dx%d", displayWidth, displayHeight);
@@ -75,27 +75,66 @@
}
return new VideoFrame(width, height, displayWidth, displayHeight,
- rotationAngle, mDstBpp, !metaOnly, iccData, iccSize);
+ rotationAngle, dstBpp, !metaOnly, iccData, iccSize);
}
-bool FrameDecoder::setDstColorFormat(android_pixel_format_t colorFormat) {
+//static
+bool findThumbnailInfo(
+ const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
+ uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
+ uint32_t dummyType;
+ const void *dummyData;
+ size_t dummySize;
+ return trackMeta->findInt32(kKeyThumbnailWidth, width)
+ && trackMeta->findInt32(kKeyThumbnailHeight, height)
+ && trackMeta->findData(kKeyThumbnailHVCC,
+ type ?: &dummyType, data ?: &dummyData, size ?: &dummySize);
+}
+
+//static
+VideoFrame* FrameDecoder::getMetadataOnly(
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+ OMX_COLOR_FORMATTYPE dstFormat;
+ int32_t dstBpp;
+ if (!getDstColorFormat(
+ (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+ return NULL;
+ }
+
+ int32_t width, height;
+ if (thumbnail) {
+ if (!findThumbnailInfo(trackMeta, &width, &height)) {
+ return NULL;
+ }
+ } else {
+ CHECK(trackMeta->findInt32(kKeyWidth, &width));
+ CHECK(trackMeta->findInt32(kKeyHeight, &height));
+ }
+ return allocVideoFrame(trackMeta, width, height, dstBpp, true /*metaOnly*/);
+}
+
+//static
+bool FrameDecoder::getDstColorFormat(
+ android_pixel_format_t colorFormat,
+ OMX_COLOR_FORMATTYPE *dstFormat,
+ int32_t *dstBpp) {
switch (colorFormat) {
case HAL_PIXEL_FORMAT_RGB_565:
{
- mDstFormat = OMX_COLOR_Format16bitRGB565;
- mDstBpp = 2;
+ *dstFormat = OMX_COLOR_Format16bitRGB565;
+ *dstBpp = 2;
return true;
}
case HAL_PIXEL_FORMAT_RGBA_8888:
{
- mDstFormat = OMX_COLOR_Format32BitRGBA8888;
- mDstBpp = 4;
+ *dstFormat = OMX_COLOR_Format32BitRGBA8888;
+ *dstBpp = 4;
return true;
}
case HAL_PIXEL_FORMAT_BGRA_8888:
{
- mDstFormat = OMX_COLOR_Format32bitBGRA8888;
- mDstBpp = 4;
+ *dstFormat = OMX_COLOR_Format32bitBGRA8888;
+ *dstBpp = 4;
return true;
}
default:
@@ -108,18 +147,12 @@
}
VideoFrame* FrameDecoder::extractFrame(
- int64_t frameTimeUs, int option, int colorFormat, bool metaOnly) {
- if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+ int64_t frameTimeUs, int option, int colorFormat) {
+ if (!getDstColorFormat(
+ (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
return NULL;
}
- if (metaOnly) {
- int32_t width, height;
- CHECK(trackMeta()->findInt32(kKeyWidth, &width));
- CHECK(trackMeta()->findInt32(kKeyHeight, &height));
- return allocVideoFrame(width, height, true);
- }
-
status_t err = extractInternal(frameTimeUs, 1, option);
if (err != OK) {
return NULL;
@@ -131,7 +164,8 @@
status_t FrameDecoder::extractFrames(
int64_t frameTimeUs, size_t numFrames, int option, int colorFormat,
std::vector<VideoFrame*>* frames) {
- if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+ if (!getDstColorFormat(
+ (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
return ERROR_UNSUPPORTED;
}
@@ -435,9 +469,10 @@
}
VideoFrame *frame = allocVideoFrame(
+ trackMeta(),
(crop_right - crop_left + 1),
(crop_bottom - crop_top + 1),
- false /*metaOnly*/);
+ dstBpp());
addFrame(frame);
int32_t srcFormat;
@@ -466,28 +501,28 @@
int64_t frameTimeUs, size_t /*numFrames*/,
int /*seekMode*/, MediaSource::ReadOptions *options) {
sp<MetaData> overrideMeta;
+ mThumbnail = false;
if (frameTimeUs < 0) {
uint32_t type;
const void *data;
size_t size;
- int64_t thumbNailTime = 0;
- int32_t thumbnailWidth, thumbnailHeight;
+ int32_t thumbWidth, thumbHeight;
// if we have a stand-alone thumbnail, set up the override meta,
// and set seekTo time to -1.
- if (trackMeta()->findInt32(kKeyThumbnailWidth, &thumbnailWidth)
- && trackMeta()->findInt32(kKeyThumbnailHeight, &thumbnailHeight)
- && trackMeta()->findData(kKeyThumbnailHVCC, &type, &data, &size)){
- overrideMeta = new MetaData(*(trackMeta()));
- overrideMeta->remove(kKeyDisplayWidth);
- overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, thumbnailWidth);
- overrideMeta->setInt32(kKeyHeight, thumbnailHeight);
- overrideMeta->setData(kKeyHVCC, type, data, size);
- thumbNailTime = -1ll;
- ALOGV("thumbnail: %dx%d", thumbnailWidth, thumbnailHeight);
+ if (!findThumbnailInfo(trackMeta(),
+ &thumbWidth, &thumbHeight, &type, &data, &size)) {
+ ALOGE("Thumbnail not available");
+ return NULL;
}
- options->setSeekTo(thumbNailTime);
+ overrideMeta = new MetaData(*(trackMeta()));
+ overrideMeta->remove(kKeyDisplayWidth);
+ overrideMeta->remove(kKeyDisplayHeight);
+ overrideMeta->setInt32(kKeyWidth, thumbWidth);
+ overrideMeta->setInt32(kKeyHeight, thumbHeight);
+ overrideMeta->setData(kKeyHVCC, type, data, size);
+ options->setSeekTo(-1);
+ mThumbnail = true;
} else {
options->setSeekTo(frameTimeUs);
}
@@ -552,11 +587,16 @@
CHECK(outputFormat->findInt32("height", &height));
int32_t imageWidth, imageHeight;
- CHECK(trackMeta()->findInt32(kKeyWidth, &imageWidth));
- CHECK(trackMeta()->findInt32(kKeyHeight, &imageHeight));
+ if (mThumbnail) {
+ CHECK(trackMeta()->findInt32(kKeyThumbnailWidth, &imageWidth));
+ CHECK(trackMeta()->findInt32(kKeyThumbnailHeight, &imageHeight));
+ } else {
+ CHECK(trackMeta()->findInt32(kKeyWidth, &imageWidth));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &imageHeight));
+ }
if (mFrame == NULL) {
- mFrame = allocVideoFrame(imageWidth, imageHeight, false /*metaOnly*/);
+ mFrame = allocVideoFrame(trackMeta(), imageWidth, imageHeight, dstBpp());
addFrame(mFrame);
}
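One detail worth calling out in the FrameDecoder changes: findThumbnailInfo() takes optional out-parameters and substitutes local dummies when the caller passes NULL (the patch uses the GCC/Clang `a ?: b` shorthand). A standalone sketch of that pattern with plain ternaries and made-up metadata values:

#include <cstdio>

static bool findSize(int* width, int* height, int* depth = nullptr) {
    int dummyDepth;
    int* d = depth ? depth : &dummyDepth;   // never dereference a null out-param
    *width = 640;                           // pretend these came from track metadata
    *height = 480;
    *d = 1;
    return true;
}

int main() {
    int w = 0, h = 0;
    if (findSize(&w, &h)) {                 // third output intentionally ignored
        std::printf("thumbnail-style lookup: %dx%d\n", w, h);
    }
    return 0;
}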
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 5ae5644..6ad6004 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -125,10 +125,10 @@
}
VideoFrame* StagefrightMetadataRetriever::getImageAtIndex(
- int index, int colorFormat, bool metaOnly) {
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
- ALOGV("getImageAtIndex: index: %d colorFormat: %d, metaOnly: %d",
- index, colorFormat, metaOnly);
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
if (mExtractor.get() == NULL) {
ALOGE("no extractor.");
@@ -163,6 +163,10 @@
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (metaOnly) {
+ return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
+ }
+
sp<IMediaSource> source = mExtractor->getTrack(i);
if (source.get() == NULL) {
@@ -190,7 +194,7 @@
const AString &componentName = matchingCodecs[i];
ImageDecoder decoder(componentName, trackMeta, source);
VideoFrame* frame = decoder.extractFrame(
- 0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat, metaOnly);
+ thumbnail ? -1 : 0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat);
if (frame != NULL) {
return frame;
@@ -265,6 +269,16 @@
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
i, MediaExtractor::kIncludeExtensiveMetaData);
+ if (metaOnly) {
+ if (outFrame != NULL) {
+ *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
+ if (*outFrame != NULL) {
+ return OK;
+ }
+ }
+ return UNKNOWN_ERROR;
+ }
+
sp<IMediaSource> source = mExtractor->getTrack(i);
if (source.get() == NULL) {
@@ -294,8 +308,7 @@
const AString &componentName = matchingCodecs[i];
VideoFrameDecoder decoder(componentName, trackMeta, source);
if (outFrame != NULL) {
- *outFrame = decoder.extractFrame(
- timeUs, option, colorFormat, metaOnly);
+ *outFrame = decoder.extractFrame(timeUs, option, colorFormat);
if (*outFrame != NULL) {
return OK;
}
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 03226c7..6819bba 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -129,6 +129,11 @@
numSamplesToUse = mNumSamples;
}
+ if ((period >> kPrecision) == 0 ) {
+ ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
+ return false;
+ }
+
int64_t sumX = 0;
int64_t sumXX = 0;
int64_t sumXY = 0;
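The guard above exists because the period is kept in fixed point: a small but nonzero period can shift down to zero once the precision bits are dropped, and that shifted value is later used as a divisor. A standalone sketch; kPrecision is assumed here and is not taken from the patch:

#include <cstdint>
#include <cstdio>

static const int kPrecision = 12;           // assumed fixed-point shift

int main() {
    int64_t period = 1000;                  // small/bogus period reported upstream
    if ((period >> kPrecision) == 0) {
        std::printf("period %lld shifts to 0 with >>%d, bail out instead of dividing\n",
                    (long long)period, kPrecision);
        return 0;
    }
    const int64_t perFrame = 1000000 / (period >> kPrecision);   // safe past the guard
    std::printf("%lld\n", (long long)perFrame);
    return 0;
}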
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index 30008d9..37f9d50 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -16,8 +16,6 @@
cfi: true,
},
},
-
- shared_libs: ["libmedia"],
}
//###############################################################################
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index dfbe2cd..b67e928 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -44,11 +44,7 @@
mDstFormat(OMX_COLOR_Format16bitRGB565),
mDstBpp(2) {}
- VideoFrame* extractFrame(
- int64_t frameTimeUs,
- int option,
- int colorFormat,
- bool metaOnly);
+ VideoFrame* extractFrame(int64_t frameTimeUs, int option, int colorFormat);
status_t extractFrames(
int64_t frameTimeUs,
@@ -57,6 +53,9 @@
int colorFormat,
std::vector<VideoFrame*>* frames);
+ static VideoFrame* getMetadataOnly(
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
+
protected:
virtual ~FrameDecoder() {}
@@ -78,8 +77,6 @@
int64_t timeUs,
bool *done) = 0;
- VideoFrame *allocVideoFrame(int32_t width, int32_t height, bool metaOnly);
-
sp<MetaData> trackMeta() const { return mTrackMeta; }
OMX_COLOR_FORMATTYPE dstFormat() const { return mDstFormat; }
int32_t dstBpp() const { return mDstBpp; }
@@ -96,7 +93,11 @@
int32_t mDstBpp;
std::vector<std::unique_ptr<VideoFrame> > mFrames;
- bool setDstColorFormat(android_pixel_format_t colorFormat);
+ static bool getDstColorFormat(
+ android_pixel_format_t colorFormat,
+ OMX_COLOR_FORMATTYPE *dstFormat,
+ int32_t *dstBpp);
+
status_t extractInternal(int64_t frameTimeUs, size_t numFrames, int option);
DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
@@ -147,7 +148,8 @@
const sp<MetaData> &trackMeta,
const sp<IMediaSource> &source) :
FrameDecoder(componentName, trackMeta, source),
- mFrame(NULL), mGridRows(1), mGridCols(1), mTilesDecoded(0) {}
+ mFrame(NULL), mGridRows(1), mGridCols(1),
+ mTilesDecoded(0), mThumbnail(false) {}
protected:
virtual sp<AMessage> onGetFormatAndSeekOptions(
@@ -173,6 +175,7 @@
int32_t mGridRows;
int32_t mGridCols;
int32_t mTilesDecoded;
+ bool mThumbnail;
};
} // namespace android
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 58442fe..8443fbe 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -43,7 +43,7 @@
virtual VideoFrame* getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly);
virtual VideoFrame* getImageAtIndex(
- int index, int colorFormat, bool metaOnly);
+ int index, int colorFormat, bool metaOnly, bool thumbnail);
virtual status_t getFrameAtIndex(
std::vector<VideoFrame*>* frames,
int frameIndex, int numFrames, int colorFormat, bool metaOnly);
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
index 903a2b6..a9fce55 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
@@ -20,6 +20,7 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
@@ -35,6 +36,7 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/bqhelper/Conversion.h>
#include <android/hidl/memory/1.0/IMemory.h>
@@ -141,6 +143,37 @@
*/
/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
+}
+
+/**
* \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
* calls.
*
@@ -157,18 +190,7 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Status`.
- * \return the corresponding `status_t`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -179,7 +201,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 28524b0..fb56694 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -108,6 +108,7 @@
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
AMediaCodec_releaseCrypto; # introduced=28
+ AMediaCodec_releaseName; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
AMediaCodec_setAsyncNotifyCallback; # introduced=28
diff --git a/packages/MediaComponents/Android.mk b/packages/MediaComponents/Android.mk
index b0d8e7d..def9dc5 100644
--- a/packages/MediaComponents/Android.mk
+++ b/packages/MediaComponents/Android.mk
@@ -14,59 +14,59 @@
# limitations under the License.
#
-LOCAL_PATH := $(call my-dir)
-
-ifneq ($(TARGET_BUILD_PDK),true)
-# Build MediaComponents only if this is not a PDK build. MediaComponents won't
-# build in PDK builds because frameworks/base/core/java is not available but
-# IMediaSession2.aidl and IMediaController2.aidl are using classes from
-# frameworks/base/core/java.
-
-include $(CLEAR_VARS)
-
-LOCAL_PACKAGE_NAME := MediaComponents
-LOCAL_MODULE_OWNER := google
-
-# TODO: create a separate key for this package.
-LOCAL_CERTIFICATE := platform
-
-# TODO: Use System SDK once public APIs are approved
-# LOCAL_SDK_VERSION := system_current
-LOCAL_PRIVATE_PLATFORM_APIS := true
-
-LOCAL_SRC_FILES := \
- $(call all-java-files-under, src) \
- $(call all-Iaidl-files-under, src)
-
-LOCAL_PROGUARD_FLAG_FILES := proguard.cfg
-
-LOCAL_MULTILIB := first
-
-LOCAL_JAVA_LIBRARIES += android-support-annotations
-
-# To embed native libraries in package, uncomment the lines below.
-#LOCAL_MODULE_TAGS := samples
-#LOCAL_JNI_SHARED_LIBRARIES := \
-# libaacextractor \
-# libamrextractor \
-# libflacextractor \
-# libmidiextractor \
-# libmkvextractor \
-# libmp3extractor \
-# libmp4extractor \
-# libmpeg2extractor \
-# liboggextractor \
-# libwavextractor \
-
-# TODO: Remove dependency with other support libraries.
-LOCAL_STATIC_ANDROID_LIBRARIES += \
- android-support-v4 \
- android-support-v7-appcompat \
- android-support-v7-palette
-LOCAL_USE_AAPT2 := true
-
-include $(BUILD_PACKAGE)
-
-endif # ifneq ($(TARGET_BUILD_PDK),true)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
+# This package is excluded from build for now since APIs using this apk became hidden.
+#
+#LOCAL_PATH := $(call my-dir)
+#ifneq ($(TARGET_BUILD_PDK),true) # Build MediaComponents only if this is not a PDK build. MediaComponents won't
+## build in PDK builds because frameworks/base/core/java is not available but
+## IMediaSession2.aidl and IMediaController2.aidl are using classes from
+## frameworks/base/core/java.
+#
+#include $(CLEAR_VARS)
+#
+#LOCAL_PACKAGE_NAME := MediaComponents
+#LOCAL_MODULE_OWNER := google
+#
+## TODO: create a separate key for this package.
+#LOCAL_CERTIFICATE := platform
+#
+## TODO: Use System SDK once public APIs are approved
+## LOCAL_SDK_VERSION := system_current
+#LOCAL_PRIVATE_PLATFORM_APIS := true
+#
+#LOCAL_SRC_FILES := \
+# $(call all-java-files-under, src) \
+# $(call all-Iaidl-files-under, src)
+#
+#LOCAL_PROGUARD_FLAG_FILES := proguard.cfg
+#
+#LOCAL_MULTILIB := first
+#
+#LOCAL_JAVA_LIBRARIES += android-support-annotations
+#
+## To embed native libraries in package, uncomment the lines below.
+##LOCAL_MODULE_TAGS := samples
+##LOCAL_JNI_SHARED_LIBRARIES := \
+## libaacextractor \
+## libamrextractor \
+## libflacextractor \
+## libmidiextractor \
+## libmkvextractor \
+## libmp3extractor \
+## libmp4extractor \
+## libmpeg2extractor \
+## liboggextractor \
+## libwavextractor \
+#
+## TODO: Remove dependency with other support libraries.
+#LOCAL_STATIC_ANDROID_LIBRARIES += \
+# android-support-v4 \
+# android-support-v7-appcompat \
+# android-support-v7-palette
+#LOCAL_USE_AAPT2 := true
+#
+#include $(BUILD_PACKAGE)
+#
+#endif # ifneq ($(TARGET_BUILD_PDK),true)
+#
+#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/packages/MediaComponents/AndroidManifest.xml b/packages/MediaComponents/AndroidManifest.xml
index 061ae44..50fdca1 100644
--- a/packages/MediaComponents/AndroidManifest.xml
+++ b/packages/MediaComponents/AndroidManifest.xml
@@ -8,6 +8,7 @@
android:label="Media Components Update"
android:multiArch="true"
android:allowBackup="false"
+ android:hasCode="false"
android:extractNativeLibs="false">
</application>
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b38d37f..949b570 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -28,6 +28,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
+#include <cutils/multiuser.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
@@ -1000,14 +1001,12 @@
{
ALOGV("AudioFlinger::setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
- // TODO: Notify MmapThreads
-
AutoMutex lock(mLock);
for (size_t i = 0; i < mRecordThreads.size(); i++) {
- sp<RecordThread> thread = mRecordThreads.valueAt(i);
- if (thread != 0) {
- thread->setRecordSilenced(uid, silenced);
- }
+ mRecordThreads[i]->setRecordSilenced(uid, silenced);
+ }
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads[i]->setRecordSilenced(uid, silenced);
}
}
@@ -1180,16 +1179,59 @@
}
}
+// Filter reserved keys from setParameters() before forwarding to audio HAL or acting upon.
+// Some keys are used for audio routing and audio path configuration and should be reserved for use
+// by audio policy and audio flinger for functional, privacy and security reasons.
+void AudioFlinger::filterReservedParameters(String8& keyValuePairs, uid_t callingUid)
+{
+ static const String8 kReservedParameters[] = {
+ String8(AudioParameter::keyRouting),
+ String8(AudioParameter::keySamplingRate),
+ String8(AudioParameter::keyFormat),
+ String8(AudioParameter::keyChannels),
+ String8(AudioParameter::keyFrameCount),
+ String8(AudioParameter::keyInputSource),
+ String8(AudioParameter::keyMonoOutput),
+ String8(AudioParameter::keyStreamConnect),
+ String8(AudioParameter::keyStreamDisconnect),
+ String8(AudioParameter::keyStreamSupportedFormats),
+ String8(AudioParameter::keyStreamSupportedChannels),
+ String8(AudioParameter::keyStreamSupportedSamplingRates),
+ };
+
+ // multiuser friendly app ID check for requests coming from audioserver
+ if (multiuser_get_app_id(callingUid) == AID_AUDIOSERVER) {
+ return;
+ }
+
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 value;
+ for (auto& key : kReservedParameters) {
+ if (param.get(key, value) == NO_ERROR) {
+ ALOGW("%s: filtering key %s value %s from uid %d",
+ __func__, key.string(), value.string(), callingUid);
+ param.remove(key);
+ }
+ }
+ keyValuePairs = param.toString();
+}
+
status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
- ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
- ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());
+ ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d calling uid %d",
+ ioHandle, keyValuePairs.string(),
+ IPCThreadState::self()->getCallingPid(), IPCThreadState::self()->getCallingUid());
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
+ String8 filteredKeyValuePairs = keyValuePairs;
+ filterReservedParameters(filteredKeyValuePairs, IPCThreadState::self()->getCallingUid());
+
+ ALOGV("%s: filtered keyvalue %s", __func__, filteredKeyValuePairs.string());
+
// AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
Mutex::Autolock _l(mLock);
@@ -1200,7 +1242,7 @@
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
- status_t result = dev->setParameters(keyValuePairs);
+ status_t result = dev->setParameters(filteredKeyValuePairs);
// return success if at least one audio device accepts the parameters as not all
// HALs are requested to support all parameters. If no audio device supports the
// requested parameters, the last error is reported.
@@ -1211,7 +1253,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
- AudioParameter param = AudioParameter(keyValuePairs);
+ AudioParameter param = AudioParameter(filteredKeyValuePairs);
String8 value;
if (param.get(String8(AudioParameter::keyBtNrec), value) == NO_ERROR) {
bool btNrecIsOff = (value == AudioParameter::valueOff);
@@ -1244,16 +1286,16 @@
}
} else if (thread == primaryPlaybackThread_l()) {
// indicate output device change to all input threads for pre processing
- AudioParameter param = AudioParameter(keyValuePairs);
+ AudioParameter param = AudioParameter(filteredKeyValuePairs);
int value;
if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
(value != 0)) {
- broacastParametersToRecordThreads_l(keyValuePairs);
+ broacastParametersToRecordThreads_l(filteredKeyValuePairs);
}
}
}
if (thread != 0) {
- return thread->setParameters(keyValuePairs);
+ return thread->setParameters(filteredKeyValuePairs);
}
return BAD_VALUE;
}
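filterReservedParameters() above strips routing and configuration keys from setParameters() calls unless the caller is audioserver, before the string reaches the HAL. A standalone sketch of the filtering idea on a key=value;key=value string; the key names are illustrative and the real code goes through AudioParameter rather than manual string splitting:

#include <cstdio>
#include <set>
#include <sstream>
#include <string>

static std::string filterReserved(const std::string& kvPairs, bool fromAudioserver) {
    if (fromAudioserver) return kvPairs;             // trusted caller: pass through
    static const std::set<std::string> reserved = {"routing", "input_source", "format"};
    std::istringstream in(kvPairs);
    std::string pair, result;
    while (std::getline(in, pair, ';')) {
        const std::string key = pair.substr(0, pair.find('='));
        if (reserved.count(key)) {
            std::printf("filtering reserved key '%s'\n", key.c_str());
            continue;                                // drop it, like param.remove(key)
        }
        if (!result.empty()) result += ';';
        result += pair;
    }
    return result;
}

int main() {
    std::printf("-> %s\n",
                filterReserved("routing=2;bt_headset_nrec=on;format=1", false).c_str());
    return 0;
}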
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0e2da4e..963a87d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,8 +19,11 @@
#define ANDROID_AUDIO_FLINGER_H
#include "Configuration.h"
+#include <atomic>
+#include <mutex>
#include <deque>
#include <map>
+#include <vector>
#include <stdint.h>
#include <sys/types.h>
#include <limits.h>
@@ -795,6 +798,8 @@
status_t checkStreamType(audio_stream_type_t stream) const;
+ void filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
+
#ifdef TEE_SINK
// all record threads serially share a common tee sink, which is re-created on format change
sp<NBAIO_Sink> mRecordTeeSink;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 979290f..dcf223c 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -591,6 +591,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
+ thread->isOutput() &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
// Older effects may require exact STEREO position mask.
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index a210a1b..6f546c3 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -43,6 +43,15 @@
static void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
+ // protected by MMapThread::mLock
+ void setSilenced_l(bool silenced) { mSilenced = silenced;
+ mSilencedNotified = false;}
+ // protected by MMapThread::mLock
+ bool isSilenced_l() const { return mSilenced; }
+ // protected by MMapThread::mLock
+ bool getAndSetSilencedNotified_l() { bool silencedNotified = mSilencedNotified;
+ mSilencedNotified = true;
+ return silencedNotified; }
private:
friend class MmapThread;
@@ -58,5 +67,7 @@
virtual void onTimestamp(const ExtendedTimestamp &timestamp);
pid_t mPid;
+ bool mSilenced; // protected by MMapThread::mLock
+ bool mSilencedNotified; // protected by MMapThread::mLock
}; // end of Track
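The pair of flags added to MmapTrack above implements a notify-once scheme: setSilenced_l() re-arms the notification, and getAndSetSilencedNotified_l() hands the change to exactly one pass of the capture thread. A standalone sketch of that behaviour (the real members are guarded by MmapThread::mLock, which is omitted here):

#include <cstdio>

struct TrackState {
    bool silenced = false;
    bool silencedNotified = false;
    void setSilenced(bool s) { silenced = s; silencedNotified = false; }
    bool getAndSetSilencedNotified() {       // returns the previous value
        const bool was = silencedNotified;
        silencedNotified = true;
        return was;
    }
};

int main() {
    TrackState t;
    t.setSilenced(true);
    for (int pass = 0; pass < 3; ++pass) {
        if (!t.getAndSetSilencedNotified()) {
            std::printf("pass %d: apply new gain (silenced=%d)\n", pass, t.silenced);
        } else {
            std::printf("pass %d: already handled, nothing to do\n", pass);
        }
    }
    return 0;
}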
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ea01a25..a78be99 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -93,6 +93,23 @@
const sp<media::VolumeShaper::Operation>& operation);
sp<media::VolumeShaper::State> getVolumeShaperState(int id);
sp<media::VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
+ /** Set the computed normalized final volume of the track.
+ * !masterMute * masterVolume * streamVolume * averageLRVolume */
+ void setFinalVolume(float volume);
+ float getFinalVolume() const { return mFinalVolume; }
+
+ /** @return true if the track has changed (metadata or volume) since
+ * the last time this function was called,
+ * true if this function was never called since the track creation,
+ * false otherwise.
+ * Thread safe.
+ */
+ bool readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }
+
+ using SourceMetadatas = std::vector<playback_track_metadata_t>;
+ using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
+ /** Copy the track metadata in the provided iterator. Thread safe. */
+ virtual void copyMetadataTo(MetadataInserter& backInserter) const;
protected:
// for numerous
@@ -133,6 +150,8 @@
bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
void signalClientFlag(int32_t flag);
+ /** Set that a metadata has changed and needs to be notified to backend. Thread safe. */
+ void setMetadataHasChanged() { mChangeNotified.clear(); }
public:
void triggerEvents(AudioSystem::sync_event_t type);
virtual void invalidate();
@@ -182,10 +201,13 @@
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
+ float mFinalVolume; // combine master volume, stream type volume and track volume
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
audio_output_flags_t mFlags;
+ // If the last track change was notified to the client with readAndClearHasChanged
+ std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
}; // end of Track
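mChangeNotified above is a lock-free change latch: clearing the flag marks the track as changed, and test_and_set() both reads and re-arms it, so each change is observed exactly once without taking a lock. A standalone sketch of the same latch:

#include <atomic>
#include <cstdio>

struct ChangeLatch {
    std::atomic_flag notified = ATOMIC_FLAG_INIT;     // set == "already notified"
    void markChanged() { notified.clear(); }          // cf. setMetadataHasChanged()
    bool readAndClearHasChanged() { return !notified.test_and_set(); }
};

int main() {
    ChangeLatch latch;
    latch.markChanged();
    std::printf("first read:   %d\n", latch.readAndClearHasChanged());  // 1: change seen
    std::printf("second read:  %d\n", latch.readAndClearHasChanged());  // 0: nothing new
    latch.markChanged();
    std::printf("after change: %d\n", latch.readAndClearHasChanged());  // 1 again
    return 0;
}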
@@ -216,8 +238,11 @@
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
-private:
+ void copyMetadataTo(MetadataInserter& backInserter) const override;
+ /** Set the metadatas of the upstream tracks. Thread safe. */
+ void setMetadatas(const SourceMetadatas& metadatas);
+private:
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
uint32_t waitTimeMs);
void clearBufferQueue();
@@ -232,6 +257,20 @@
bool mActive;
DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
sp<AudioTrackClientProxy> mClientProxy;
+ /** Attributes of the source tracks.
+ *
+ * This member must be accessed with mTrackMetadatasMutex taken.
+ * There is one writer (duplicating thread) and one reader (downstream mixer).
+ *
+ * That means that the duplicating thread can block the downstream mixer
+ * thread and vice versa for the time of the copy.
+ * If this becomes an issue, the metadata could be stored in an atomic raw pointer,
+ * and a exchange with nullptr and delete can be used.
+ * Alternatively a read-copy-update might be implemented.
+ */
+ SourceMetadatas mTrackMetadatas;
+ /** Protects mTrackMetadatas against concurrent access. */
+ mutable std::mutex mTrackMetadatasMutex;
}; // end of OutputTrack
// playback track, used by PatchPanel
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 160aa45..1fa9e37 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2623,27 +2623,33 @@
void AudioFlinger::PlaybackThread::updateMetadata_l()
{
- // TODO: add volume support
- if (mOutput == nullptr || mOutput->stream == nullptr ||
- !mActiveTracks.readAndClearHasChanged()) {
- return;
+ if (mOutput == nullptr || mOutput->stream == nullptr ) {
+ return; // That should not happen
+ }
+ bool hasChanged = mActiveTracks.readAndClearHasChanged();
+ for (const sp<Track> &track : mActiveTracks) {
+ // Do not short-circuit as all hasChanged states must be reset
+ // as all the metadata are going to be sent
+ hasChanged |= track->readAndClearHasChanged();
+ }
+ if (!hasChanged) {
+ return; // nothing to do
}
StreamOutHalInterface::SourceMetadata metadata;
+ auto backInserter = std::back_inserter(metadata.tracks);
for (const sp<Track> &track : mActiveTracks) {
// No track is invalid as this is called after prepareTrack_l in the same critical section
- if (track->isOutputTrack()) {
- // TODO: OutputTrack (used for duplication) are currently not supported
- continue;
- }
- metadata.tracks.push_back({
- .usage = track->attributes().usage,
- .content_type = track->attributes().content_type,
- .gain = 1,
- });
+ track->copyMetadataTo(backInserter);
}
- mOutput->stream->updateSourceMetadata(metadata);
+ sendMetadataToBackend_l(metadata);
}
+void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
+{
+ mOutput->stream->updateSourceMetadata(metadata);
+};
+
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
{
if (halFrames == NULL || dspFrames == NULL) {
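The loop in updateMetadata_l() above deliberately uses |= rather than a short-circuiting ||: every track's per-track latch has to be consumed on this pass because the full metadata set is about to be sent, even if an earlier track already reported a change. A standalone illustration of the difference:

#include <cstdio>
#include <vector>

struct Track {
    bool changed = true;                     // pretend metadata changed since last push
    bool readAndClearHasChanged() { const bool c = changed; changed = false; return c; }
};

int main() {
    std::vector<Track> tracks(3);
    bool hasChanged = false;
    for (auto& t : tracks) {
        // |= always evaluates the right-hand side, so every latch is cleared.
        hasChanged |= t.readAndClearHasChanged();
        // hasChanged = hasChanged || t.readAndClearHasChanged();  // would skip tracks 1..n
    }
    std::printf("hasChanged=%d leftover flags=%d%d%d\n",
                hasChanged, tracks[0].changed, tracks[1].changed, tracks[2].changed);
    return 0;
}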
@@ -4381,13 +4387,19 @@
didModify = true;
// no acknowledgement required for newly active tracks
}
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
- track->mAudioTrackServerProxy->framesReleased()).first;
- track->mCachedVolume = masterVolume
+ proxy->framesReleased()).first;
+ float volume = masterVolume
* mStreamTypes[track->streamType()].volume
* vh;
+ track->mCachedVolume = volume;
+ gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+ float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
+ float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+ track->setFinalVolume((vlf + vrf) / 2.f);
++fastTracks;
} else {
// was it previously active?
@@ -4564,6 +4576,8 @@
vaf = v * sendLevel * (1. / MAX_GAIN_INT);
}
+ track->setFinalVolume((vrf + vlf) / 2.f);
+
// Delegate volume control to effect in track effect chain if needed
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
// Do not ramp volume if volume is controlled by effect
@@ -4793,6 +4807,18 @@
track->reset();
}
+ // Track destruction may occur outside of threadLoop once it is removed from active tracks.
+ // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
+ // it ceases to be active, to allow safe removal from the AudioMixer at the start
+ // of prepareTracks_l(); this releases any outstanding buffer back to the track.
+ // See also the implementation of destroyTrack_l().
+ for (const auto &track : *tracksToRemove) {
+ const int name = track->name();
+ if (mAudioMixer->exists(name)) { // Normal tracks here, fast tracks in FastMixer.
+ mAudioMixer->setBufferProvider(name, nullptr /* bufferProvider */);
+ }
+ }
+
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
@@ -5096,6 +5122,7 @@
}
if (lastTrack) {
+ track->setFinalVolume((left + right) / 2.f);
if (left != mLeftVolFloat || right != mRightVolFloat) {
mLeftVolFloat = left;
mRightVolFloat = right;
@@ -6153,14 +6180,12 @@
return true;
}
-void AudioFlinger::DuplicatingThread::updateMetadata_l()
+void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
{
- // TODO: The duplicated track metadata needs to be pushed to downstream
- // but this information can be read at any time by the downstream threads.
- // Taking the lock of any downstream threads is no possible due to cross deadlock risks
- // (eg: during effect move).
- // A lock-free structure needs to be used to shared the metadata, probably an atomic
- // pointer to a metadata vector in each output tracks.
+ for (auto& outputTrack : outputTracks) { // not mOutputTracks
+ outputTrack->setMetadatas(metadata.tracks);
+ }
}
uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
@@ -7873,7 +7898,7 @@
mSessionId(AUDIO_SESSION_NONE),
mDeviceId(AUDIO_PORT_HANDLE_NONE), mPortId(AUDIO_PORT_HANDLE_NONE),
mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
- mActiveTracks(&this->mLocalLog)
+ mActiveTracks(&this->mLocalLog), mNoCallbackWarningCount(0)
{
mStandby = true;
readHalParameters_l();
@@ -7891,7 +7916,14 @@
void AudioFlinger::MmapThread::disconnect()
{
- for (const sp<MmapTrack> &t : mActiveTracks) {
+ ActiveTracks<MmapTrack> activeTracks;
+ {
+ Mutex::Autolock _l(mLock);
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ activeTracks.add(t);
+ }
+ }
+ for (const sp<MmapTrack> &t : activeTracks) {
stop(t->portId());
}
// This will decrement references and may cause the destruction of this thread.
@@ -7936,6 +7968,17 @@
return mHalStream->getMmapPosition(position);
}
+status_t AudioFlinger::MmapThread::exitStandby()
+{
+ status_t ret = mHalStream->start();
+ if (ret != NO_ERROR) {
+ ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+ return ret;
+ }
+ mStandby = false;
+ return NO_ERROR;
+}
+
status_t AudioFlinger::MmapThread::start(const AudioClient& client,
audio_port_handle_t *handle)
{
@@ -7949,13 +7992,7 @@
if (*handle == mPortId) {
// for the first track, reuse portId and session allocated when the stream was opened
- ret = mHalStream->start();
- if (ret != NO_ERROR) {
- ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
- return ret;
- }
- mStandby = false;
- return NO_ERROR;
+ return exitStandby();
}
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -8003,33 +8040,43 @@
return BAD_VALUE;
}
+ bool silenced = false;
if (isOutput()) {
ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
} else {
- // TODO: Block recording for idle UIDs (b/72134552)
- bool silenced;
ret = AudioSystem::startInput(portId, &silenced);
}
+ Mutex::Autolock _l(mLock);
// abort if start is rejected by audio policy manager
if (ret != NO_ERROR) {
ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
if (mActiveTracks.size() != 0) {
+ mLock.unlock();
if (isOutput()) {
AudioSystem::releaseOutput(mId, streamType(), mSessionId);
} else {
AudioSystem::releaseInput(portId);
}
+ mLock.lock();
} else {
mHalStream->stop();
}
return PERMISSION_DENIED;
}
+ if (!isOutput() && !silenced) {
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (track->isSilenced_l() && track->uid() != client.clientUid)
+ track->invalidate();
+ }
+ }
+
// Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
client.clientUid, client.clientPid, portId);
+ track->setSilenced_l(silenced);
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(mSessionId);
if (chain != 0) {
@@ -8059,6 +8106,8 @@
return NO_ERROR;
}
+ Mutex::Autolock _l(mLock);
+
sp<MmapTrack> track;
for (const sp<MmapTrack> &t : mActiveTracks) {
if (handle == t->portId()) {
@@ -8072,6 +8121,7 @@
mActiveTracks.remove(track);
+ mLock.unlock();
if (isOutput()) {
AudioSystem::stopOutput(mId, streamType(), track->sessionId());
AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
@@ -8079,6 +8129,7 @@
AudioSystem::stopInput(track->portId());
AudioSystem::releaseInput(track->portId());
}
+ mLock.lock();
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
@@ -8330,7 +8381,9 @@
sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
sp<MmapStreamCallback> callback = mCallback.promote();
if (mDeviceId != deviceId && callback != 0) {
+ mLock.unlock();
callback->onRoutingChanged(deviceId);
+ mLock.lock();
}
mDeviceId = deviceId;
}
@@ -8339,7 +8392,9 @@
sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
sp<MmapStreamCallback> callback = mCallback.promote();
if (mDeviceId != deviceId && callback != 0) {
+ mLock.unlock();
callback->onRoutingChanged(deviceId);
+ mLock.lock();
}
mDeviceId = deviceId;
}
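Several hunks in this thread code now drop mLock before invoking an MmapStreamCallback (onRoutingChanged, onTearDown, onVolumeChanged) and re-take it afterwards, because the callback can call back into audioserver and try to take locks of its own. A standalone sketch of that unlock/relock pattern, with std::mutex standing in for the framework Mutex:

#include <cstdio>
#include <functional>
#include <mutex>

struct MmapLikeThread {
    std::mutex lock;                            // stands in for mLock
    int deviceId = 1;
    std::function<void(int)> onRoutingChanged;  // stands in for MmapStreamCallback

    void updateDevice(int newDevice) {
        std::unique_lock<std::mutex> l(lock);
        if (newDevice != deviceId && onRoutingChanged) {
            l.unlock();                         // never hold the lock across the callback
            onRoutingChanged(newDevice);
            l.lock();
        }
        deviceId = newDevice;
    }
};

int main() {
    MmapLikeThread t;
    t.onRoutingChanged = [&t](int dev) {
        // A re-entrant callback like this would deadlock if the lock were still held.
        std::lock_guard<std::mutex> g(t.lock);
        std::printf("callback observed routing change to device %d\n", dev);
    };
    t.updateDevice(2);
    std::printf("deviceId is now %d\n", t.deviceId);
    return 0;
}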
@@ -8505,9 +8560,13 @@
if (track->isInvalid()) {
sp<MmapStreamCallback> callback = mCallback.promote();
if (callback != 0) {
- callback->onTearDown();
+ mLock.unlock();
+ callback->onTearDown(track->portId());
+ mLock.lock();
+ } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+ ALOGW("Could not notify MMAP stream tear down: no onTearDown callback!");
+ mNoCallbackWarningCount++;
}
- break;
}
}
}
@@ -8562,7 +8621,6 @@
mStreamVolume(1.0),
mStreamMute(false),
mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
- mNoCallbackWarningCount(0),
mOutput(output)
{
snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
@@ -8699,9 +8757,11 @@
for (int i = 0; i < channelCount; i++) {
values.add(volume);
}
- callback->onVolumeChanged(mChannelMask, values);
mHalVolFloat = volume; // SW volume control worked, so update value.
mNoCallbackWarningCount = 0;
+ mLock.unlock();
+ callback->onVolumeChanged(mChannelMask, values);
+ mLock.lock();
} else {
if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
ALOGW("Could not set MMAP stream volume: no volume callback!");
@@ -8767,6 +8827,12 @@
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
}
+status_t AudioFlinger::MmapCaptureThread::exitStandby()
+{
+ mInput->stream->setGain(1.0f);
+ return MmapThread::exitStandby();
+}
+
AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
{
Mutex::Autolock _l(mLock);
@@ -8775,6 +8841,34 @@
return input;
}
+
+void AudioFlinger::MmapCaptureThread::processVolume_l()
+{
+ bool changed = false;
+ bool silenced = false;
+
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback == 0) {
+ if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+ ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
+ mNoCallbackWarningCount++;
+ }
+ }
+
+ // If the silenced state of any track has changed, mute capture in the audio DSP if at least
+ // one track is silenced, and unmute it otherwise
+ for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
+ if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
+ changed = true;
+ silenced = mActiveTracks[i]->isSilenced_l();
+ }
+ }
+
+ if (changed) {
+ mInput->stream->setGain(silenced ? 0.0f: 1.0f);
+ }
+}
+
void AudioFlinger::MmapCaptureThread::updateMetadata_l()
{
if (mInput == nullptr || mInput->stream == nullptr ||
@@ -8792,4 +8886,15 @@
mInput->stream->updateSinkMetadata(metadata);
}
+void AudioFlinger::MmapCaptureThread::setRecordSilenced(uid_t uid, bool silenced)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mActiveTracks.size() ; i++) {
+ if (mActiveTracks[i]->uid() == uid) {
+ mActiveTracks[i]->setSilenced_l(silenced);
+ broadcast_l();
+ }
+ }
+}
+
} // namespace android
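Several hunks above drop mLock around outbound callback invocations (onRoutingChanged, onTearDown, onVolumeChanged) and re-acquire it afterwards. The following is a minimal standalone sketch of that pattern; the class and callback names are hypothetical and only illustrate the idea that holding a thread's own lock while calling into another component risks lock-order inversion, and that any cached state must be re-validated after re-locking.

// Hypothetical illustration of the "unlock around callback" pattern used above.
// Not AOSP code: names (Observer, Worker) are made up for this sketch.
#include <functional>
#include <mutex>

struct Observer {
    std::function<void(int)> onRoutingChanged; // may take the observer's own lock
};

class Worker {
public:
    void setObserver(Observer obs) {
        std::lock_guard<std::mutex> lg(mLock);
        mObserver = std::move(obs);
    }

    void updateDevice(int deviceId) {
        std::unique_lock<std::mutex> ul(mLock);
        if (deviceId != mDeviceId && mObserver.onRoutingChanged) {
            auto cb = mObserver.onRoutingChanged;  // copy while locked
            ul.unlock();          // do not hold mLock while calling out
            cb(deviceId);         // callee may acquire its own locks safely
            ul.lock();            // re-validate cached state after re-locking if needed
        }
        mDeviceId = deviceId;
    }

private:
    std::mutex mLock;
    Observer mObserver;
    int mDeviceId = -1;
};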
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index bb81224..bc4a534 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -566,8 +566,8 @@
// periodically called in the threadLoop() to update power state uids.
void updatePowerState(sp<ThreadBase> thread, bool force = false);
- /** @return true if the active tracks have changed since the last time
- * this function was called or the vector was created. */
+ /** @return true if one or more active tracks were added or removed since the
+ * last time this function was called or the vector was created. */
bool readAndClearHasChanged();
private:
@@ -588,7 +588,7 @@
int mLastActiveTracksGeneration;
wp<T> mLatestActiveTrack; // latest track added to ActiveTracks
SimpleLog * const mLocalLog;
- // If the active tracks have changed since last call to readAndClearHasChanged
+ // If the vector has changed since last call to readAndClearHasChanged
bool mHasChanged = false;
};
@@ -927,7 +927,8 @@
void removeTrack_l(const sp<Track>& track);
void readOutputParameters_l();
- void updateMetadata_l() override;
+ void updateMetadata_l() final;
+ virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -1287,7 +1288,8 @@
void removeOutputTrack(MixerThread* thread);
uint32_t waitTimeMs() const { return mWaitTimeMs; }
- void updateMetadata_l() override;
+ void sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata) override;
protected:
virtual uint32_t activeSleepTimeUs() const;
@@ -1587,6 +1589,7 @@
virtual void threadLoop_exit();
virtual void threadLoop_standby();
virtual bool shouldStandby_l() { return false; }
+ virtual status_t exitStandby();
virtual status_t initCheck() const { return (mHalStream == 0) ? NO_INIT : NO_ERROR; }
virtual size_t frameCount() const { return mFrameCount; }
@@ -1619,6 +1622,9 @@
virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
+ // Sets whether record tracks owned by the given UID are silenced
+ virtual void setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
+
void dump(int fd, const Vector<String16>& args);
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -1635,6 +1641,9 @@
sp<DeviceHalInterface> mHalDevice;
AudioHwDevice* const mAudioHwDev;
ActiveTracks<MmapTrack> mActiveTracks;
+
+ int32_t mNoCallbackWarningCount;
+ static constexpr int32_t kMaxNoCallbackWarnings = 5;
};
class MmapPlaybackThread : public MmapThread, public VolumeInterface
@@ -1668,7 +1677,7 @@
virtual audio_stream_type_t streamType() { return mStreamType; }
virtual void checkSilentMode_l();
- virtual void processVolume_l();
+ void processVolume_l() override;
virtual void dumpInternals(int fd, const Vector<String16>& args);
@@ -1684,8 +1693,6 @@
bool mMasterMute;
bool mStreamMute;
float mHalVolFloat;
- int32_t mNoCallbackWarningCount;
- static constexpr int32_t kMaxNoCallbackWarnings = 5;
AudioStreamOut* mOutput;
};
@@ -1700,9 +1707,12 @@
AudioStreamIn* clearInput();
+ status_t exitStandby() override;
virtual bool isOutput() const override { return false; }
void updateMetadata_l() override;
+ void processVolume_l() override;
+ void setRecordSilenced(uid_t uid, bool silenced) override;
protected:
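The updated comments for ActiveTracks::readAndClearHasChanged() and mHasChanged above clarify that the flag records additions and removals of tracks, not arbitrary changes. A rough standalone sketch of that read-and-clear idiom, with hypothetical names, is:

// Hypothetical sketch of a "read and clear has-changed" container flag.
#include <vector>

template <typename T>
class TrackedVector {
public:
    void add(const T& v)  { mItems.push_back(v); mHasChanged = true; }
    void remove(size_t i) { mItems.erase(mItems.begin() + i); mHasChanged = true; }

    // Returns true if one or more items were added or removed since the last
    // call (or since construction), then clears the flag.
    bool readAndClearHasChanged() {
        bool changed = mHasChanged;
        mHasChanged = false;
        return changed;
    }

private:
    std::vector<T> mItems;
    bool mHasChanged = false;
};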
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 236412b..aff1239 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -407,6 +407,9 @@
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
+ /* The track might not play immediately after becoming active, as if its volume were 0.
+ * When the track starts playing, its volume will be computed. */
+ mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -997,6 +1000,23 @@
return mVolumeHandler->getVolumeShaperState(id);
}
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+{
+ if (mFinalVolume != volume) { // Compare against an epsilon if this causes too many meaningless updates
+ mFinalVolume = volume;
+ setMetadataHasChanged();
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ *backInserter++ = {
+ .usage = mAttr.usage,
+ .content_type = mAttr.content_type,
+ .gain = mFinalVolume,
+ };
+}
+
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
if (!isOffloaded() && !isDirect()) {
@@ -1427,6 +1447,21 @@
return outputBufferFull;
}
+void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
+ {
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ mTrackMetadatas = metadatas;
+ }
+ // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
+ setMetadataHasChanged();
+}
+
status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
@@ -1747,14 +1782,14 @@
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt\n");
+ result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt Sil\n");
}
void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
"%08X %08X %6u "
- "%08X %6zu\n",
+ "%08X %6zu %3c\n",
isFastTrack() ? 'F' : ' ',
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
@@ -1767,7 +1802,8 @@
mSampleRate,
mCblk->mServer,
- mFrameCount
+ mFrameCount,
+ isSilenced() ? 's' : 'n'
);
}
@@ -1910,7 +1946,7 @@
sessionId, uid, false /* isOut */,
ALLOC_NONE,
TYPE_DEFAULT, portId),
- mPid(pid)
+ mPid(pid), mSilenced(false), mSilencedNotified(false)
{
}
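Track::copyMetadataTo() and OutputTrack::copyMetadataTo() above write through an output iterator (MetadataInserter) so the thread can gather metadata from all of its tracks into one container before sending it to the backend. A simplified sketch of that collection pattern, with hypothetical types standing in for the real metadata structures, is:

// Hypothetical sketch of collecting per-track metadata through a back inserter.
#include <iterator>
#include <vector>

struct TrackMetadata {
    int   usage;
    int   contentType;
    float gain;
};

using MetadataVector   = std::vector<TrackMetadata>;
using MetadataInserter = std::back_insert_iterator<MetadataVector>;

struct SimpleTrack {
    int   usage;
    int   contentType;
    float finalVolume;

    void copyMetadataTo(MetadataInserter& backInserter) const {
        *backInserter++ = { usage, contentType, finalVolume };
    }
};

MetadataVector collectMetadata(const std::vector<SimpleTrack>& tracks) {
    MetadataVector all;
    auto inserter = std::back_inserter(all);
    for (const auto& t : tracks) {
        t.copyMetadataTo(inserter);  // each track appends its own entry
    }
    return all;  // in the real code this would then be handed to the stream backend
}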
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index caf3c02..09a86dd 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -52,7 +52,7 @@
void setGains(const AudioGainCollection &gains) { mGains = gains; }
const AudioGainCollection &getGains() const { return mGains; }
- void setFlags(uint32_t flags)
+ virtual void setFlags(uint32_t flags)
{
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 24fe7cb..67ac9bc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -43,6 +43,20 @@
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
+ // FIXME: this is needed because shared MMAP stream clients use the same audio session.
+ // Once capture clients are tracked individually and not per session, this can be removed.
+ // MMAP no-IRQ input streams do not have the default limitation of at most one active
+ // client, as they can be used in shared mode by the same application.
+ // NOTE: this works for explicit values set in audio_policy_configuration.xml because
+ // flags are parsed before maxActiveCount by the serializer.
+ void setFlags(uint32_t flags) override
+ {
+ AudioPort::setFlags(flags);
+ if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+ maxActiveCount = 0;
+ }
+ }
+
// This method is used for input and direct output, and is not used for other output.
// If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
// For input, flags is interpreted as audio_input_flags_t.
@@ -55,7 +69,9 @@
audio_format_t *updatedFormat,
audio_channel_mask_t channelMask,
audio_channel_mask_t *updatedChannelMask,
- uint32_t flags) const;
+ // FIXME parameter type
+ uint32_t flags,
+ bool exactMatchRequiredForInputFlags = false) const;
void dump(int fd);
void log();
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 69dd06b..fbc2384 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -35,7 +35,9 @@
audio_format_t *updatedFormat,
audio_channel_mask_t channelMask,
audio_channel_mask_t *updatedChannelMask,
- uint32_t flags) const
+ // FIXME type punning here
+ uint32_t flags,
+ bool exactMatchRequiredForInputFlags) const
{
const bool isPlaybackThread =
getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
@@ -90,7 +92,7 @@
// An existing normal stream is compatible with a fast track request,
// but the fast request will be denied by AudioFlinger and converted to normal track.
if (isRecordThread && ((getFlags() ^ flags) &
- ~AUDIO_INPUT_FLAG_FAST)) {
+ ~(exactMatchRequiredForInputFlags ? AUDIO_INPUT_FLAG_NONE : AUDIO_INPUT_FLAG_FAST))) {
return false;
}
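The record-thread flag check above compares profile flags and requested flags with an XOR and masks out AUDIO_INPUT_FLAG_FAST only when an exact match is not required. A small worked sketch of that mask logic, using plain integer stand-ins rather than the real flag enums, is:

// Hypothetical sketch of the XOR flag-compatibility test shown above.
#include <cstdint>
#include <cstdio>

constexpr uint32_t FLAG_NONE = 0x0;
constexpr uint32_t FLAG_FAST = 0x1;   // stand-in for AUDIO_INPUT_FLAG_FAST
constexpr uint32_t FLAG_MMAP = 0x4;   // stand-in for AUDIO_INPUT_FLAG_MMAP_NOIRQ

bool flagsCompatible(uint32_t profileFlags, uint32_t requestedFlags, bool exactMatchRequired) {
    // Bits that differ between the profile and the request.
    const uint32_t diff = profileFlags ^ requestedFlags;
    // Unless an exact match is required, a FAST mismatch is tolerated
    // (the fast request would be downgraded to a normal track elsewhere).
    const uint32_t ignored = exactMatchRequired ? FLAG_NONE : FLAG_FAST;
    return (diff & ~ignored) == 0;
}

int main() {
    // Normal profile vs. fast request: compatible only in the inexact pass.
    printf("%d\n", flagsCompatible(FLAG_NONE, FLAG_FAST, /*exactMatchRequired*/ false)); // 1
    printf("%d\n", flagsCompatible(FLAG_NONE, FLAG_FAST, /*exactMatchRequired*/ true));  // 0
    // An MMAP mismatch is never tolerated.
    printf("%d\n", flagsCompatible(FLAG_NONE, FLAG_MMAP, /*exactMatchRequired*/ false)); // 0
    return 0;
}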
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index ec861c1..ac3f1bc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -29,6 +29,13 @@
size_t nbCurvePoints = mCurvePoints.size();
// the volume index in the UI is relative to the min and max volume indices for this stream
int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+ if (indexInUi < volIndexMin) {
+ ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
+ indexInUi = volIndexMin;
+ } else if (indexInUi > volIndexMax) {
+ ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
+ indexInUi = volIndexMax;
+ }
int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
// Where would this volume index been inserted in the curve point
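The new clamping in VolumeCurve keeps indexInUi within [volIndexMin, volIndexMax] before the linear remapping onto the curve's step range; without it, an out-of-range UI index could produce a volIdx outside the curve points. A minimal sketch of the clamp-then-remap arithmetic (a hypothetical helper, with the formula copied from the hunk above) is:

// Hypothetical helper mirroring the clamp + linear remap from the hunk above.
#include <algorithm>
#include <cstdio>

int uiIndexToCurveIndex(int indexInUi, int volIndexMin, int volIndexMax, int nbSteps) {
    // Clamp, as the patch now does, so the remap below cannot overshoot the curve.
    indexInUi = std::max(volIndexMin, std::min(indexInUi, volIndexMax));
    return (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
}

int main() {
    // e.g. a UI range [1, 15] mapped onto a curve spanning 100 steps
    printf("%d\n", uiIndexToCurveIndex(15, 1, 15, 100)); // 100 (top of the curve)
    printf("%d\n", uiIndexToCurveIndex(20, 1, 15, 100)); // 100, clamped instead of 135
    printf("%d\n", uiIndexToCurveIndex(0, 1, 15, 100));  // 0, clamped instead of negative
    return 0;
}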
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index be7f7ec..83aec3b 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -266,7 +266,7 @@
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -409,7 +409,7 @@
// If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
// handleIncallSonification().
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -613,6 +613,23 @@
uint32_t device = AUDIO_DEVICE_NONE;
+ // when a call is active, force device selection to match source VOICE_COMMUNICATION
+ // for most other input sources to avoid rerouting call TX audio
+ if (isInCall()) {
+ switch (inputSource) {
+ case AUDIO_SOURCE_DEFAULT:
+ case AUDIO_SOURCE_MIC:
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ case AUDIO_SOURCE_UNPROCESSED:
+ case AUDIO_SOURCE_HOTWORD:
+ case AUDIO_SOURCE_CAMCORDER:
+ inputSource = AUDIO_SOURCE_VOICE_COMMUNICATION;
+ break;
+ default:
+ break;
+ }
+ }
+
switch (inputSource) {
case AUDIO_SOURCE_VOICE_UPLINK:
if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
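The condition isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL) now appears in several places (the sonification strategies above, and the earpiece volume cap in AudioPolicyManager further below) so that locally active VoIP playback is treated like a telephony call. Purely as a sketch, a hypothetical helper that the repeated predicate could be factored into might look like this; the types are simplified stand-ins, not the AOSP classes:

// Hypothetical sketch only: a predicate combining the two "voice call is audible"
// conditions used repeatedly above.
struct OutputCollectionView {
    bool voiceCallActiveLocally;  // stand-in for isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)
};

bool isVoiceCallRoutingActive(bool inCall, const OutputCollectionView& outputs) {
    // True both for a telephony call (phone state IN_CALL) and for local VoIP playback
    // on the VOICE_CALL stream, which should be routed and volume-capped the same way.
    return inCall || outputs.voiceCallActiveLocally;
}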
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ee68900..a54a71f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1468,14 +1468,19 @@
}
// For MMAP mode, the first call to getInputForAttr() is made on behalf of audioflinger.
// The second call is for the first active client and sets the UID. Any further call
- // corresponds to a new client and is only permitted from the same UId.
+ // corresponds to a new client and is only permitted from the same UID.
+ // If the first UID is silenced, allow a new UID connection and replace the existing UID with the new one
if (audioSession->openCount() == 1) {
audioSession->setUid(uid);
} else if (audioSession->uid() != uid) {
- ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
- uid, session, audioSession->uid());
- status = INVALID_OPERATION;
- goto error;
+ if (!audioSession->isSilenced()) {
+ ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
+ uid, session, audioSession->uid());
+ status = INVALID_OPERATION;
+ goto error;
+ }
+ audioSession->setUid(uid);
+ audioSession->setSilenced(false);
}
audioSession->changeOpenCount(1);
*inputType = API_INPUT_LEGACY;
@@ -1597,10 +1602,11 @@
// sampling rate and flags may be updated by getInputProfile
uint32_t profileSamplingRate = (config->sample_rate == 0) ?
SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
- audio_format_t profileFormat = config->format;
+ audio_format_t profileFormat;
audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
+ profileFormat = config->format; // reset each time through loop, in case it is updated
profile = getInputProfile(device, address,
profileSamplingRate, profileFormat, profileChannelMask,
profileFlags);
@@ -4530,10 +4536,13 @@
}
}
+ // If we are not in a call and no client is active on this input, this method returns
+ // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
- if (isInCall()) {
- device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
- } else if (source != AUDIO_SOURCE_DEFAULT) {
+ if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
+ source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+ }
+ if (source != AUDIO_SOURCE_DEFAULT) {
device = getDeviceAndMixForInputSource(source);
}
@@ -5049,21 +5058,46 @@
// TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
// the best matching profile, not the first one.
+ sp<IOProfile> firstInexact;
+ uint32_t updatedSamplingRate = 0;
+ audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_INVALID;
for (const auto& hwModule : mHwModules) {
for (const auto& profile : hwModule->getInputProfiles()) {
// profile->log();
+ //updatedFormat = format;
if (profile->isCompatibleProfile(device, address, samplingRate,
- &samplingRate /*updatedSamplingRate*/,
+ &samplingRate /*updatedSamplingRate*/,
format,
- &format /*updatedFormat*/,
+ &format, /*updatedFormat*/
channelMask,
- &channelMask /*updatedChannelMask*/,
- (audio_output_flags_t) flags)) {
-
+ &channelMask /*updatedChannelMask*/,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ true /*exactMatchRequiredForInputFlags*/)) {
return profile;
}
+ if (firstInexact == nullptr && profile->isCompatibleProfile(device, address,
+ samplingRate,
+ &updatedSamplingRate,
+ format,
+ &updatedFormat,
+ channelMask,
+ &updatedChannelMask,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ false /*exactMatchRequiredForInputFlags*/)) {
+ firstInexact = profile;
+ }
+
}
}
+ if (firstInexact != nullptr) {
+ samplingRate = updatedSamplingRate;
+ format = updatedFormat;
+ channelMask = updatedChannelMask;
+ return firstInexact;
+ }
return NULL;
}
@@ -5117,7 +5151,8 @@
}
// in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) && isInCall()) {
+ if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
@@ -5127,8 +5162,11 @@
case AUDIO_STREAM_ENFORCED_AUDIBLE:
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
- const float maxVoiceVolDb = computeVolume(AUDIO_STREAM_VOICE_CALL, index, device)
- + IN_CALL_EARPIECE_HEADROOM_DB;
+ int voiceVolumeIndex =
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ const float maxVoiceVolDb =
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ + IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
stream, volumeDB, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
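getInputProfile() above now runs two compatibility checks per profile: an exact-flag match that wins immediately, and an inexact match that is only remembered as a fallback (the first one found) and returned, together with the updated rate, format and channel mask, when no exact match exists. A compact, hypothetical sketch of that selection pattern is:

// Hypothetical sketch of the "exact first, else first inexact" profile selection.
#include <vector>

struct Profile {
    bool exact;    // stand-in for isCompatibleProfile(..., exactMatchRequiredForInputFlags = true)
    bool inexact;  // stand-in for isCompatibleProfile(..., exactMatchRequiredForInputFlags = false)
};

const Profile* pickInputProfile(const std::vector<Profile>& profiles) {
    const Profile* firstInexact = nullptr;
    for (const auto& p : profiles) {
        if (p.exact) {
            return &p;                       // an exact match always wins
        }
        if (firstInexact == nullptr && p.inexact) {
            firstInexact = &p;               // remember the fallback, keep scanning for an exact match
        }
    }
    return firstInexact;                     // may be nullptr: no compatible profile at all
}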
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 8f0c846..cf24c13 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -44,6 +44,7 @@
ALOGV("setDeviceConnectionState()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setDeviceConnectionState(device, state,
device_address, device_name);
}
@@ -55,6 +56,7 @@
if (mAudioPolicyManager == NULL) {
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getDeviceConnectionState(device,
device_address);
}
@@ -72,6 +74,7 @@
ALOGV("handleDeviceConfigChange()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
device_name);
}
@@ -94,10 +97,10 @@
// operation from policy manager standpoint (no other operation (e.g track start or stop)
// can be interleaved).
Mutex::Autolock _l(mLock);
-
// TODO: check if it is more appropriate to do it in platform specific policy manager
AudioSystem::setMode(state);
+ AutoCallerClear acc;
mAudioPolicyManager->setPhoneState(state);
mPhoneState = state;
return NO_ERROR;
@@ -126,6 +129,7 @@
}
ALOGV("setForceUse()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
mAudioPolicyManager->setForceUse(usage, config);
return NO_ERROR;
}
@@ -138,6 +142,7 @@
if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
return AUDIO_POLICY_FORCE_NONE;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getForceUse(usage);
}
@@ -151,6 +156,7 @@
}
ALOGV("getOutput()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getOutput(stream);
}
@@ -178,6 +184,7 @@
uid = callingUid;
}
audio_output_flags_t originalFlags = flags;
+ AutoCallerClear acc;
status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
config,
&flags, selectedDeviceId, portId);
@@ -223,6 +230,7 @@
}
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->startOutput(output, stream, session);
}
@@ -259,6 +267,7 @@
}
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->stopOutput(output, stream, session);
}
@@ -279,6 +288,7 @@
{
ALOGV("doReleaseOutput from tid %d", gettid());
Mutex::Autolock _l(mLock);
+ // called from internal thread: no need to clear caller identity
mAudioPolicyManager->releaseOutput(output, stream, session);
}
@@ -337,11 +347,14 @@
AudioPolicyInterface::input_type_t inputType;
Mutex::Autolock _l(mLock);
- // the audio_in_acoustics_t parameter is ignored by get_input()
- status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
- config,
- flags, selectedDeviceId,
- &inputType, portId);
+ {
+ AutoCallerClear acc;
+ // the audio_in_acoustics_t parameter is ignored by get_input()
+ status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
+ config,
+ flags, selectedDeviceId,
+ &inputType, portId);
+ }
audioPolicyEffects = mAudioPolicyEffects;
if (status == NO_ERROR) {
@@ -372,6 +385,7 @@
if (status != NO_ERROR) {
if (status == PERMISSION_DENIED) {
+ AutoCallerClear acc;
mAudioPolicyManager->releaseInput(*input, session);
}
return status;
@@ -425,8 +439,12 @@
AudioPolicyInterface::concurrency_type__mask_t concurrency =
AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
- status_t status = mAudioPolicyManager->startInput(
- client->input, client->session, *silenced, &concurrency);
+ status_t status;
+ {
+ AutoCallerClear acc;
+ status = mAudioPolicyManager->startInput(
+ client->input, client->session, *silenced, &concurrency);
+ }
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
@@ -461,7 +479,7 @@
// finish the recording app op
finishRecording(client->opPackageName, client->uid);
-
+ AutoCallerClear acc;
return mAudioPolicyManager->stopInput(client->input, client->session);
}
@@ -494,6 +512,7 @@
}
{
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
mAudioPolicyManager->releaseInput(client->input, client->session);
}
}
@@ -512,6 +531,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
return NO_ERROR;
}
@@ -530,6 +550,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setStreamVolumeIndex(stream,
index,
device);
@@ -546,6 +567,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getStreamVolumeIndex(stream,
index,
device);
@@ -559,6 +581,7 @@
if (mAudioPolicyManager == NULL) {
return 0;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getStrategyForStream(stream);
}
@@ -573,6 +596,7 @@
return AUDIO_DEVICE_NONE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getDevicesForStream(stream);
}
@@ -583,6 +607,7 @@
return 0;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getOutputForEffect(desc);
}
@@ -596,6 +621,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
}
@@ -605,6 +631,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->unregisterEffect(id);
}
@@ -614,6 +641,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setEffectEnabled(id, enabled);
}
@@ -626,6 +654,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isStreamActive(stream, inPastMs);
}
@@ -638,6 +667,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
}
@@ -647,6 +677,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isSourceActive(source);
}
@@ -680,6 +711,7 @@
Mutex::Autolock _l(mLock);
Mutex::Autolock _le(mEffectsLock); // isOffloadSupported queries for
// non-offloadable effects
+ AutoCallerClear acc;
return mAudioPolicyManager->isOffloadSupported(info);
}
@@ -693,7 +725,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->listAudioPorts(role, type, num_ports, ports, generation);
}
@@ -703,7 +735,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->getAudioPort(port);
}
@@ -717,6 +749,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->createAudioPatch(patch, handle,
IPCThreadState::self()->getCallingUid());
}
@@ -730,7 +763,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->releaseAudioPatch(handle,
IPCThreadState::self()->getCallingUid());
}
@@ -743,7 +776,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->listAudioPatches(num_patches, patches, generation);
}
@@ -756,7 +789,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->setAudioPortConfig(config);
}
@@ -768,7 +801,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->acquireSoundTriggerSession(session, ioHandle, device);
}
@@ -778,7 +811,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->releaseSoundTriggerSession(session);
}
@@ -791,6 +824,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ AutoCallerClear acc;
if (registration) {
return mAudioPolicyManager->registerPolicyMixes(mixes);
} else {
@@ -806,7 +840,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->startAudioSource(source, attributes, handle,
IPCThreadState::self()->getCallingUid());
}
@@ -817,7 +851,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->stopAudioSource(handle);
}
@@ -830,6 +864,7 @@
return PERMISSION_DENIED;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setMasterMono(mono);
}
@@ -839,6 +874,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getMasterMono(mono);
}
@@ -850,6 +886,7 @@
return NAN;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
}
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 082923a..f3cddc3 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -26,6 +26,7 @@
#include <sys/time.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
+#include <cutils/multiuser.h>
#include <cutils/properties.h>
#include <binder/IPCThreadState.h>
#include <binder/ActivityManager.h>
@@ -151,6 +152,7 @@
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
+ // called from binder death notification: no need to clear caller identity
mAudioPolicyManager->releaseResourcesForUid(uid);
}
}
@@ -273,7 +275,7 @@
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
const String8& regId, int32_t state)
{
- if (mAudioPolicyServiceClient != 0 && (mUid % AID_USER_OFFSET) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
}
}
@@ -283,7 +285,7 @@
const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
audio_patch_handle_t patchHandle)
{
- if (mAudioPolicyServiceClient != 0 && (mUid % AID_USER_OFFSET) < AID_APP_START) {
+ if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
clientConfig, deviceConfig, patchHandle);
}
@@ -335,6 +337,7 @@
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
+ AutoCallerClear acc;
mAudioPolicyManager->setRecordSilenced(uid, silenced);
}
}
@@ -575,7 +578,7 @@
}
bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
- return uid % AID_USER_OFFSET < AID_APP_START;
+ return multiuser_get_app_id(uid) < AID_APP_START;
}
void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
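The uid checks above switch from the manual uid % AID_USER_OFFSET computation to multiuser_get_app_id() from <cutils/multiuser.h> (hence the new include), which extracts the per-user app-id component of an Android uid. A small self-contained sketch of the check; the constant values and the example uids are assumptions for illustration only:

// Sketch of the service-uid check after the switch to multiuser_get_app_id().
// The constants below are illustrative stand-ins for the real AID_* values.
#include <cstdint>

constexpr uint32_t AID_USER_OFFSET = 100000;  // uid range reserved per Android user (assumed value)
constexpr uint32_t AID_APP_START   = 10000;   // first uid assigned to applications (assumed value)

// What multiuser_get_app_id() computes: strip the user part, keep the app id.
constexpr uint32_t app_id_of(uint32_t uid) { return uid % AID_USER_OFFSET; }

constexpr bool isServiceUid(uint32_t uid) {
    // System services live below AID_APP_START in every user's uid range,
    // so the check has to be made on the app id, not on the raw uid.
    return app_id_of(uid) < AID_APP_START;
}

static_assert(isServiceUid(1041), "system-service-like uid in user 0");
static_assert(!isServiceUid(1010123), "app uid in user 10 is not a service");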
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index b3bc12b..d8dd797 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -640,6 +640,22 @@
bool isVirtualDevice; // uses vitual device: updated by APM::getInputForAttr()
};
+ // A class that automatically clears and restores the binder caller identity inside
+ // a code block (scoped variable).
+ // Declare one systematically before calling AudioPolicyManager methods so that they are
+ // executed with the same level of privilege as the audioserver process.
+ class AutoCallerClear {
+ public:
+ AutoCallerClear() :
+ mToken(IPCThreadState::self()->clearCallingIdentity()) {}
+ ~AutoCallerClear() {
+ IPCThreadState::self()->restoreCallingIdentity(mToken);
+ }
+
+ private:
+ const int64_t mToken;
+ };
+
// Internal dump utilities.
status_t dumpPermissionDenial(int fd);
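AutoCallerClear above is a standard RAII guard around IPCThreadState::clearCallingIdentity()/restoreCallingIdentity(). The interface-impl hunks also show it being scoped deliberately, for example inside an extra { } block so that only the AudioPolicyManager call runs with the cleared (audioserver) identity. A generic, hypothetical sketch of the same guard for any save/clear/restore pair:

// Hypothetical generic form of the AutoCallerClear guard: obtain a token on entry,
// restore it on scope exit, so early returns cannot leak the cleared identity.
#include <cstdint>
#include <functional>

class ScopedIdentityClear {
public:
    ScopedIdentityClear(std::function<int64_t()> clear,
                        std::function<void(int64_t)> restore)
        : mRestore(std::move(restore)), mToken(clear()) {}
    ~ScopedIdentityClear() { mRestore(mToken); }

    ScopedIdentityClear(const ScopedIdentityClear&) = delete;
    ScopedIdentityClear& operator=(const ScopedIdentityClear&) = delete;

private:
    std::function<void(int64_t)> mRestore;
    const int64_t mToken;
};

// Usage mirroring the hunks above: limit the cleared-identity scope to one call.
//
//   {
//       ScopedIdentityClear acc(
//           []          { return IPCThreadState::self()->clearCallingIdentity(); },
//           [](int64_t t) { IPCThreadState::self()->restoreCallingIdentity(t); });
//       mAudioPolicyManager->getInputForAttr(...);  // runs with audioserver privileges
//   }
//   // the caller's identity is restored here, before any permission-checked follow-up work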
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 5b7571c..d505cfe 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -32,7 +32,6 @@
libmpeg2extractor \
liboggextractor \
libwavextractor \
- MediaComponents \
LOCAL_SRC_FILES := main_extractorservice.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 549a4e9..7264a9b 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -21,6 +21,8 @@
#include <assert.h>
#include <binder/IPCThreadState.h>
+#include <iomanip>
+#include <iostream>
#include <map>
#include <mutex>
#include <utils/Singleton.h>
@@ -39,7 +41,6 @@
: Singleton<AAudioClientTracker>() {
}
-
std::string AAudioClientTracker::dump() const {
std::stringstream result;
const bool isLocked = AAudio_tryUntilTrue(
@@ -198,7 +199,9 @@
result << " client: pid = " << mProcessId << " has " << mStreams.size() << " streams\n";
for (const auto& serviceStream : mStreams) {
- result << " stream: 0x" << std::hex << serviceStream->getHandle() << std::dec << "\n";
+ result << " stream: 0x" << std::setfill('0') << std::setw(8) << std::hex
+ << serviceStream->getHandle()
+ << std::dec << std::setfill(' ') << "\n";
}
if (isLocked) {
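The dump() change above pads the stream handle to eight hex digits and then restores std::dec and the space fill character, because std::hex and std::setfill are sticky on the stream while std::setw only applies to the next insertion. A tiny standalone sketch of that formatting, with a made-up handle value:

// Hypothetical sketch of the zero-padded hex formatting used in dump() above.
#include <iomanip>
#include <iostream>
#include <sstream>

int main() {
    const int handle = 0x1A2B;   // made-up stream handle for illustration
    std::stringstream result;

    result << "stream: 0x" << std::setfill('0') << std::setw(8) << std::hex
           << handle
           << std::dec << std::setfill(' ') << "\n";   // undo the sticky manipulators

    result << "count: " << 42 << "\n";                 // prints in decimal again

    std::cout << result.str();
    // Output:
    // stream: 0x00001a2b
    // count: 42
    return 0;
}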
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 11fd9f6..ab8f4ed 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -67,11 +67,17 @@
result << "Exclusive MMAP Endpoints: " << mExclusiveStreams.size() << "\n";
index = 0;
- for (const auto &output : mExclusiveStreams) {
+ for (const auto &stream : mExclusiveStreams) {
result << " #" << index++ << ":";
- result << output->dump() << "\n";
+ result << stream->dump() << "\n";
}
+ result << " ExclusiveSearchCount: " << mExclusiveSearchCount << "\n";
+ result << " ExclusiveFoundCount: " << mExclusiveFoundCount << "\n";
+ result << " ExclusiveOpenCount: " << mExclusiveOpenCount << "\n";
+ result << " ExclusiveCloseCount: " << mExclusiveCloseCount << "\n";
+ result << "\n";
+
if (isExclusiveLocked) {
mExclusiveLock.unlock();
}
@@ -79,11 +85,17 @@
result << "Shared Endpoints: " << mSharedStreams.size() << "\n";
index = 0;
- for (const auto &input : mSharedStreams) {
+ for (const auto &stream : mSharedStreams) {
result << " #" << index++ << ":";
- result << input->dump() << "\n";
+ result << stream->dump() << "\n";
}
+ result << " SharedSearchCount: " << mSharedSearchCount << "\n";
+ result << " SharedFoundCount: " << mSharedFoundCount << "\n";
+ result << " SharedOpenCount: " << mSharedOpenCount << "\n";
+ result << " SharedCloseCount: " << mSharedCloseCount << "\n";
+ result << "\n";
+
if (isSharedLocked) {
mSharedLock.unlock();
}
@@ -95,8 +107,10 @@
sp<AAudioServiceEndpoint> AAudioEndpointManager::findExclusiveEndpoint_l(
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpoint> endpoint;
+ mExclusiveSearchCount++;
for (const auto ep : mExclusiveStreams) {
if (ep->matches(configuration)) {
+ mExclusiveFoundCount++;
endpoint = ep;
break;
}
@@ -111,8 +125,10 @@
sp<AAudioServiceEndpointShared> AAudioEndpointManager::findSharedEndpoint_l(
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpointShared> endpoint;
+ mSharedSearchCount++;
for (const auto ep : mSharedStreams) {
if (ep->matches(configuration)) {
+ mSharedFoundCount++;
endpoint = ep;
break;
}
@@ -146,21 +162,21 @@
// If we find an existing one then this one cannot be exclusive.
if (endpoint.get() != nullptr) {
- ALOGE("openExclusiveEndpoint() already in use");
+ ALOGW("openExclusiveEndpoint() already in use");
// Already open so do not allow a second stream.
return nullptr;
} else {
sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP();
- ALOGD("openExclusiveEndpoint(), no match so try to open MMAP %p for dev %d",
+ ALOGV("openExclusiveEndpoint(), no match so try to open MMAP %p for dev %d",
endpointMMap.get(), configuration.getDeviceId());
endpoint = endpointMMap;
aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
- ALOGE("openExclusiveEndpoint(), open failed");
endpoint.clear();
} else {
mExclusiveStreams.push_back(endpointMMap);
+ mExclusiveOpenCount++;
}
}
@@ -201,13 +217,13 @@
if (endpoint.get() != nullptr) {
aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
- ALOGE("%s(), open failed", __func__);
endpoint.clear();
} else {
mSharedStreams.push_back(endpoint);
+ mSharedOpenCount++;
}
}
- ALOGD("%s(), created endpoint %p, requested device = %d, dir = %d",
+ ALOGV("%s(), created endpoint %p, requested device = %d, dir = %d",
__func__, endpoint.get(), configuration.getDeviceId(), (int)direction);
IPCThreadState::self()->restoreCallingIdentity(token);
}
@@ -244,7 +260,8 @@
mExclusiveStreams.end());
serviceEndpoint->close();
- ALOGD("%s() %p for device %d",
+ mExclusiveCloseCount++;
+ ALOGV("%s() %p for device %d",
__func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
}
}
@@ -266,7 +283,8 @@
mSharedStreams.end());
serviceEndpoint->close();
- ALOGD("%s() %p for device %d",
+ mSharedCloseCount++;
+ ALOGV("%s() %p for device %d",
__func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
}
}
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index f6aeb5a..193bdee 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -87,8 +87,17 @@
mutable std::mutex mExclusiveLock;
std::vector<android::sp<AAudioServiceEndpointMMAP>> mExclusiveStreams;
+ // Modified under a lock.
+ int32_t mExclusiveSearchCount = 0; // number of times we SEARCHED for an exclusive endpoint
+ int32_t mExclusiveFoundCount = 0; // number of times we FOUND an exclusive endpoint
+ int32_t mExclusiveOpenCount = 0; // number of times we OPENED an exclusive endpoint
+ int32_t mExclusiveCloseCount = 0; // number of times we CLOSED an exclusive endpoint
+ // Same as above but for SHARED endpoints.
+ int32_t mSharedSearchCount = 0;
+ int32_t mSharedFoundCount = 0;
+ int32_t mSharedOpenCount = 0;
+ int32_t mSharedCloseCount = 0;
};
-
} /* namespace aaudio */
#endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 52990da..5f1de76 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -84,6 +84,7 @@
const audio_content_type_t contentType =
AAudioConvert_contentTypeToInternal(getContentType());
+ // The usage attribute is only relevant for OUTPUT streams
const audio_usage_t usage = (direction == AAUDIO_DIRECTION_OUTPUT)
? AAudioConvert_usageToInternal(getUsage())
: AUDIO_USAGE_UNKNOWN;
@@ -343,8 +344,9 @@
}
-void AAudioServiceEndpointMMAP::onTearDown() {
+void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t handle __unused) {
ALOGD("%s(%p) called", __func__, this);
+ // TODO: disconnect only the stream corresponding to the received handle
disconnectRegisteredStreams();
};
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 16b6269..c4c943d 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -68,7 +68,7 @@
aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
// -------------- Callback functions for MmapStreamCallback ---------------------
- void onTearDown() override;
+ void onTearDown(audio_port_handle_t handle) override;
void onVolumeChanged(audio_channel_mask_t channels,
android::Vector<float> values) override;
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index f08a52f..63b9983 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,6 @@
setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
setDeviceId(mStreamInternal->getDeviceId());
setSessionId(mStreamInternal->getSessionId());
- ALOGD("open() deviceId = %d, sessionId = %d", getDeviceId(), getSessionId());
mFramesPerBurst = mStreamInternal->getFramesPerBurst();
return result;
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 75d88cf..864a008 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -122,19 +122,18 @@
aaudio_result_t result = AAudioServiceStreamBase::open(request, AAUDIO_SHARING_MODE_SHARED);
if (result != AAUDIO_OK) {
- ALOGE("open() returned %d", result);
+ ALOGE("%s() returned %d", __func__, result);
return result;
}
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
-
// Is the request compatible with the shared endpoint?
setFormat(configurationInput.getFormat());
if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
setFormat(AAUDIO_FORMAT_PCM_FLOAT);
} else if (getFormat() != AAUDIO_FORMAT_PCM_FLOAT) {
- ALOGE("open() mAudioFormat = %d, need FLOAT", getFormat());
+ ALOGD("%s() mAudioFormat = %d, need FLOAT", __func__, getFormat());
result = AAUDIO_ERROR_INVALID_FORMAT;
goto error;
}
@@ -143,8 +142,8 @@
if (getSampleRate() == AAUDIO_UNSPECIFIED) {
setSampleRate(mServiceEndpoint->getSampleRate());
} else if (getSampleRate() != mServiceEndpoint->getSampleRate()) {
- ALOGE("open() mSampleRate = %d, need %d",
- getSampleRate(), mServiceEndpoint->getSampleRate());
+ ALOGD("%s() mSampleRate = %d, need %d",
+ __func__, getSampleRate(), mServiceEndpoint->getSampleRate());
result = AAUDIO_ERROR_INVALID_RATE;
goto error;
}
@@ -153,8 +152,8 @@
if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
setSamplesPerFrame(mServiceEndpoint->getSamplesPerFrame());
} else if (getSamplesPerFrame() != mServiceEndpoint->getSamplesPerFrame()) {
- ALOGE("open() mSamplesPerFrame = %d, need %d",
- getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
+ ALOGD("%s() mSamplesPerFrame = %d, need %d",
+ __func__, getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
}
@@ -173,16 +172,13 @@
mAudioDataQueue = new SharedRingBuffer();
result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
if (result != AAUDIO_OK) {
- ALOGE("open() could not allocate FIFO with %d frames",
- getBufferCapacity());
+ ALOGE("%s() could not allocate FIFO with %d frames",
+ __func__, getBufferCapacity());
result = AAUDIO_ERROR_NO_MEMORY;
goto error;
}
}
- ALOGD("open() actual rate = %d, channels = %d, deviceId = %d",
- getSampleRate(), getSamplesPerFrame(), mServiceEndpoint->getDeviceId());
-
result = mServiceEndpoint->registerStream(keep);
if (result != AAUDIO_OK) {
goto error;
@@ -217,7 +213,7 @@
{
std::lock_guard<std::mutex> lock(mAudioDataQueueLock);
if (mAudioDataQueue == nullptr) {
- ALOGE("getAudioDataDescription(): mUpMessageQueue null! - stream not open");
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
return AAUDIO_ERROR_NULL;
}
// Gather information on the data queue.
@@ -255,8 +251,8 @@
int64_t offset = mTimestampPositionOffset.load();
// TODO, do not go below starting value
position -= offset; // Offset from shared MMAP stream
- ALOGV("getHardwareTimestamp() %8lld = %8lld - %8lld",
- (long long) position, (long long) (position + offset), (long long) offset);
+ ALOGV("%s() %8lld = %8lld - %8lld",
+ __func__, (long long) position, (long long) (position + offset), (long long) offset);
}
*positionFrames = position;
return result;