Merge "Exclude MediaComponents from the build." into pi-dev
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index ed9534f..73ed8c3 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -55,7 +55,7 @@
status_t ClearKeyCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
@@ -83,7 +83,7 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
- uint64_t appData, CasPluginCallback callback)
+ void *appData, CasPluginCallback callback)
: mCallback(callback), mAppData(appData) {
ALOGV("CTOR");
}
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index b7134e4..42cfb8f 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -44,7 +44,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
@@ -62,7 +62,7 @@
class ClearKeyCasPlugin : public CasPlugin {
public:
- ClearKeyCasPlugin(uint64_t appData, CasPluginCallback callback);
+ ClearKeyCasPlugin(void *appData, CasPluginCallback callback);
virtual ~ClearKeyCasPlugin();
virtual status_t setPrivateData(
@@ -94,7 +94,7 @@
Mutex mKeyFetcherLock;
std::unique_ptr<KeyFetcher> mKeyFetcher;
CasPluginCallback mCallback;
- uint64_t mAppData;
+ void* mAppData;
};
class ClearKeyDescramblerPlugin : public DescramblerPlugin {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 06516b5..8404a83 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -49,7 +49,7 @@
status_t MockCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t /*appData*/,
+ void* /*appData*/,
CasPluginCallback /*callback*/,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 9632492..8106990 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -39,7 +39,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index b4fa3c5..ca119d5 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -60,6 +60,8 @@
volatile int32_t mRear; // written by producer (output: client, input: server)
volatile int32_t mFlush; // incremented by client to indicate a request to flush;
// server notices and discards all data between mFront and mRear
+ volatile int32_t mStop; // set by client to indicate a stop frame position; server
+ // will not read beyond this position until start is called.
volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
volatile uint32_t mUnderrunCount; // server increments for each underrun occurrence
};
@@ -335,6 +337,8 @@
mTimestamp.clear();
}
+ virtual void stop() { }; // called by client in AudioTrack::stop()
+
private:
// This is a copy of mCblk->mBufferSizeInFrames
uint32_t mBufferSizeInFrames; // effective size of the buffer
@@ -383,8 +387,14 @@
mPlaybackRateMutator.push(playbackRate);
}
+ // Sends flush and stop position information from the client to the server,
+ // used by streaming AudioTrack flush() or stop().
+ void sendStreamingFlushStop(bool flush);
+
virtual void flush();
+ void stop() override;
+
virtual uint32_t getUnderrunFrames() const {
return mCblk->u.mStreaming.mUnderrunFrames;
}
@@ -410,6 +420,8 @@
virtual void flush();
+ void stop() override;
+
#define MIN_LOOP 16 // minimum length of each loop iteration in frames
// setLoop(), setBufferPosition(), and setBufferPositionAndLoop() set the
@@ -532,6 +544,10 @@
// client will be notified via Futex
virtual void flushBufferIfNeeded();
+ // Returns the rear position of the AudioTrack shared ring buffer, limited by
+ // the stop frame position, if one has been set.
+ virtual int32_t getRear() const = 0;
+
// Total count of the number of flushed frames since creation (never reset).
virtual int64_t framesFlushed() const { return mFlushed; }
@@ -607,10 +623,18 @@
return mDrained.load();
}
+ int32_t getRear() const override;
+
+ // Called on server side track start().
+ virtual void start();
+
private:
AudioPlaybackRate mPlaybackRate; // last observed playback rate
PlaybackRateQueue::Observer mPlaybackRateObserver;
+ // Last client stop-at position when start() was called. Used for streaming AudioTracks.
+ std::atomic<int32_t> mStopLast{0};
+
// The server keeps a copy here where it is safe from the client.
uint32_t mUnderrunCount; // echoed to mCblk
bool mUnderrunning; // used to detect edge of underrun
@@ -634,6 +658,10 @@
virtual void tallyUnderrunFrames(uint32_t frameCount);
virtual uint32_t getUnderrunFrames() const { return 0; }
+ int32_t getRear() const override;
+
+ void start() override { } // ignore for static tracks
+
private:
status_t updateStateWithLoop(StaticAudioTrackState *localState,
const StaticAudioTrackState &update) const;
@@ -661,6 +689,10 @@
size_t frameSize, bool clientInServer)
: ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
+ int32_t getRear() const override {
+ return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
+ }
+
protected:
virtual ~AudioRecordServerProxy() { }
};
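The header changes above introduce a client-written mStop position that the server must honor until the track is started again. The following standalone sketch (not the AOSP implementation; all names are illustrative) shows the contract in its simplest form: the client publishes a stop position, and the server clamps the rear it is willing to read up to until start() re-arms it.

    #include <algorithm>
    #include <cstdint>

    struct StopSketch {
        int32_t rear = 0;      // client write position, in frames
        int32_t stop = 0;      // client stop position, valid while stopped == true
        bool stopped = false;

        void clientWrite(int32_t frames) { rear += frames; }
        void clientStop()                { stop = rear; stopped = true; }
        void serverStart()               { stopped = false; }  // old stop no longer limits reads

        // What the server treats as "rear" when pulling data.
        int32_t serverVisibleRear() const { return stopped ? std::min(rear, stop) : rear; }
    };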
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 9dc5f05..67ec244 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -18,6 +18,7 @@
#include <inttypes.h>
+#include <algorithm>
#include <vector>
#include <math.h>
@@ -61,8 +62,8 @@
const float* lensShadingMap) {
uint32_t activeAreaWidth = activeAreaRight - activeAreaLeft;
uint32_t activeAreaHeight = activeAreaBottom - activeAreaTop;
- double spacingV = 1.0 / lsmHeight;
- double spacingH = 1.0 / lsmWidth;
+ double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
+ double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
std::vector<float> redMapVector(lsmWidth * lsmHeight);
float *redMap = redMapVector.data();
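The DngUtils change divides by lsmWidth - 1 and lsmHeight - 1 (clamped to at least 1) because N map samples spanning the active area define N - 1 intervals, and the clamp keeps a degenerate 1-sample map from dividing by zero. A minimal sketch of that spacing rule, for illustration only:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    std::vector<double> samplePositions(uint32_t n) {
        const double spacing = 1.0 / std::max(1u, n - 1);   // n == 1 would otherwise divide by zero
        std::vector<double> pos(n);
        for (uint32_t i = 0; i < n; ++i) {
            pos[i] = i * spacing;                            // last sample lands at 1.0 when n > 1
        }
        return pos;
    }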
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index e5ad2d9..c1ff34b 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -26,23 +26,22 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-// TODO support FLOAT
-#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
static const int FRAMES_PER_LINE = 20000;
int main(int argc, const char **argv)
{
- AAudioArgsParser argParser;
- aaudio_result_t result;
- AAudioSimpleRecorder recorder;
- int actualSamplesPerFrame;
- int actualSampleRate;
- aaudio_format_t actualDataFormat;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ AAudioStream *aaudioStream = nullptr;
- AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
+ aaudio_format_t actualDataFormat;
aaudio_stream_state_t state;
+
+ int32_t actualSamplesPerFrame;
+ int32_t actualSampleRate;
int32_t framesPerBurst = 0;
int32_t framesPerRead = 0;
int32_t framesToRecord = 0;
@@ -50,18 +49,18 @@
int32_t nextFrameCount = 0;
int32_t frameCount = 0;
int32_t xRunCount = 0;
- int64_t previousFramePosition = -1;
- int16_t *data = nullptr;
- float peakLevel = 0.0;
int32_t deviceId;
+ int16_t *shortData = nullptr;
+ float *floatData = nullptr;
+ float peakLevel = 0.0;
+
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Monitor input level using AAudio read, V0.1.2\n", argv[0]);
+ printf("%s - Monitor input level using AAudio read, V0.1.3\n", argv[0]);
- argParser.setFormat(REQUIRED_FORMAT);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
}
@@ -69,6 +68,7 @@
result = recorder.open(argParser);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto finish;
}
aaudioStream = recorder.getStream();
@@ -96,17 +96,18 @@
printf("DataFormat: framesPerRead = %d\n",framesPerRead);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n",
- REQUIRED_FORMAT, actualDataFormat);
- // TODO handle other data formats
- assert(actualDataFormat == REQUIRED_FORMAT);
// Allocate a buffer for the PCM_16 audio data.
- data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
- goto finish;
+ switch (actualDataFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ shortData = new int16_t[framesPerRead * actualSamplesPerFrame];
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ floatData = new float[framesPerRead * actualSamplesPerFrame];
+ break;
+ default:
+ fprintf(stderr, "UNEXPECTED FORMAT! %d", actualDataFormat);
+ goto finish;
}
// Start the stream.
@@ -126,7 +127,12 @@
// Read audio data from the stream.
const int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
- int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+ int actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_read(aaudioStream, shortData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_read(aaudioStream, floatData, minFrames, timeoutNanos);
+ }
if (actual < 0) {
fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
result = actual;
@@ -140,7 +146,12 @@
// Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
- float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ float sample = 0.0f;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ sample = shortData[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ sample = floatData[frameIndex * actualSamplesPerFrame];
+ }
if (sample > peakLevel) {
peakLevel = sample;
}
@@ -151,17 +162,15 @@
displayPeakLevel(peakLevel);
peakLevel = 0.0;
nextFrameCount += FRAMES_PER_LINE;
- }
- // Print timestamps.
- int64_t framePosition = 0;
- int64_t frameTime = 0;
- aaudio_result_t timeResult;
- timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
- &framePosition, &frameTime);
+ // Print timestamps.
+ int64_t framePosition = 0;
+ int64_t frameTime = 0;
+ aaudio_result_t timeResult;
+ timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+ &framePosition, &frameTime);
- if (timeResult == AAUDIO_OK) {
- if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+ if (timeResult == AAUDIO_OK) {
int64_t realTime = getNanoseconds();
int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
@@ -175,11 +184,15 @@
(long long) framePosition,
(long long) frameTime,
latencyMillis);
- previousFramePosition = framePosition;
+ } else {
+ printf("WARNING - AAudioStream_getTimestamp() returned %d\n", timeResult);
}
}
}
+ state = AAudioStream_getState(aaudioStream);
+ printf("after loop, state = %s\n", AAudio_convertStreamStateToText(state));
+
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
@@ -192,7 +205,8 @@
finish:
recorder.close();
- delete[] data;
+ delete[] shortData;
+ delete[] floatData;
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
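The rewritten example now dispatches on the stream's actual data format instead of forcing PCM_I16. A hypothetical helper condensing the per-format peak sampling used in the loop above could look like this (names and structure are illustrative, not part of the example source):

    #include <aaudio/AAudio.h>
    #include <cstdint>

    static float firstChannelSample(aaudio_format_t format, const void *data,
                                    int32_t frameIndex, int32_t samplesPerFrame) {
        switch (format) {
            case AAUDIO_FORMAT_PCM_I16:
                return static_cast<const int16_t *>(data)[frameIndex * samplesPerFrame]
                        * (1.0f / 32768);
            case AAUDIO_FORMAT_PCM_FLOAT:
                return static_cast<const float *>(data)[frameIndex * samplesPerFrame];
            default:
                return 0.0f;   // unexpected format; caller should have rejected it earlier
        }
    }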
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 893795b..d10f812 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -26,29 +26,39 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-#define NUM_SECONDS 5
-
-int main(int argc, char **argv)
+int main(int argc, const char **argv)
{
- (void)argc; // unused
- AAudioSimpleRecorder recorder;
- PeakTrackerData_t myData = {0.0};
- aaudio_result_t result;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ PeakTrackerData_t myData = {0.0};
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
aaudio_stream_state_t state;
+
+ int loopsNeeded = 0;
const int displayRateHz = 20; // arbitrary
- const int loopsNeeded = NUM_SECONDS * displayRateHz;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Display audio input using an AAudio callback, V0.1.2\n", argv[0]);
+ printf("%s - Display audio input using an AAudio callback, V0.1.3\n", argv[0]);
- result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
- SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
+ if (argParser.parseArgs(argc, argv)) {
+ return EXIT_FAILURE;
+ }
+
+ result = recorder.open(argParser,
+ SimpleRecorderDataCallbackProc,
+ SimpleRecorderErrorCallbackProc,
+ &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto error;
}
+ aaudioStream = recorder.getStream();
+ argParser.compareWithStream(aaudioStream);
+
printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
printf("recorder.getSamplesPerFrame() = %d\n", recorder.getSamplesPerFrame());
@@ -58,7 +68,9 @@
goto error;
}
- printf("Sleep for %d seconds while audio record in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio record in a callback thread.\n",
+ argParser.getDurationSeconds());
+ loopsNeeded = argParser.getDurationSeconds() * displayRateHz;
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -67,7 +79,7 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- result = AAudioStream_waitForStateChange(recorder.getStream(),
+ result = AAudioStream_waitForStateChange(aaudioStream,
AAUDIO_STREAM_STATE_CLOSED,
&state,
0);
@@ -93,7 +105,8 @@
goto error;
}
- printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio records in a callback thread.\n",
+ argParser.getDurationSeconds());
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -102,13 +115,14 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- state = AAudioStream_getState(recorder.getStream());
+ state = AAudioStream_getState(aaudioStream);
if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
break;
}
}
printf("Woke up now.\n");
+ argParser.compareWithStream(aaudioStream);
result = recorder.stop();
if (result != AAUDIO_OK) {
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 39d079e..026ff0f 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -151,8 +151,7 @@
static void MyErrorCallbackProc(
AAudioStream *stream __unused,
void *userData __unused,
- aaudio_result_t error)
-{
+ aaudio_result_t error) {
printf("Error Callback, error: %d\n",(int)error);
LoopbackData *myData = (LoopbackData *) userData;
myData->outputError = error;
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index eb6925a..88d7401 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -87,7 +87,6 @@
return;
}
-// TODO use this as a base class within AAudio
class AAudioParameters {
public:
@@ -262,6 +261,9 @@
case 'd':
setDeviceId(atoi(&arg[2]));
break;
+ case 'f':
+ setFormat(atoi(&arg[2]));
+ break;
case 'i':
setInputPreset(atoi(&arg[2]));
break;
@@ -326,6 +328,10 @@
printf(" -b{bufferCapacity} frames\n");
printf(" -c{channels} for example 2 for stereo\n");
printf(" -d{deviceId} default is %d\n", AAUDIO_UNSPECIFIED);
+ printf(" -f{0|1|2} set format\n");
+ printf(" 0 = UNSPECIFIED\n");
+ printf(" 1 = PCM_I16\n");
+ printf(" 2 = FLOAT\n");
printf(" -i{inputPreset} eg. 5 for AAUDIO_INPUT_PRESET_CAMCORDER\n");
printf(" -m{0|1|2|3} set MMAP policy\n");
printf(" 0 = _UNSPECIFIED, use aaudio.mmap_policy system property, default\n");
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 38e1e4c..8e33a31 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -57,7 +57,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using AAudio V0.1.2\n", argv[0]);
+ printf("%s - Play a sine wave using AAudio V0.1.3\n", argv[0]);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index e167773..e33e9f8 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -204,7 +204,7 @@
AAudioArgsParser::usage();
printf(" -l{count} loopCount start/stop, every other one is silent\n");
printf(" -t{msec} play a high pitched tone at the beginning\n");
- printf(" -f force periodic underruns by sleeping in callback\n");
+ printf(" -z force periodic underruns by sleeping in callback\n");
}
int main(int argc, const char **argv)
@@ -219,7 +219,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V0.1.3\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback V0.1.4\n", argv[0]);
for (int i = 1; i < argc; i++) {
const char *arg = argv[i];
@@ -234,8 +234,8 @@
case 't':
prefixToneMsec = atoi(&arg[2]);
break;
- case 'f':
- forceUnderruns = true;
+ case 'z':
+ forceUnderruns = true; // Zzzzzzz
break;
default:
usage();
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index e40a6cd..2207cb8c 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -146,6 +146,8 @@
* to make more refined volume or routing decisions.
*
* Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
/**
@@ -220,6 +222,8 @@
* enforce audio focus.
*
* Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
@@ -252,6 +256,8 @@
* configuration.
*
* Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
+ *
+ * Added in API level 28.
*/
enum {
/**
@@ -288,6 +294,8 @@
* Do not allocate a session ID.
* Effects cannot be used with this stream.
* Default.
+ *
+ * Added in API level 28.
*/
AAUDIO_SESSION_ID_NONE = -1,
@@ -297,6 +305,8 @@
* Note that the use of this flag may result in higher latency.
*
* Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
+ *
+ * Added in API level 28.
*/
AAUDIO_SESSION_ID_ALLOCATE = 0,
};
@@ -481,6 +491,8 @@
*
* The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param usage the desired usage, eg. AAUDIO_USAGE_GAME
*/
@@ -496,6 +508,8 @@
*
* The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
*/
@@ -514,6 +528,8 @@
* That is because VOICE_RECOGNITION is the preset with the lowest latency
* on many platforms.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param inputPreset the desired configuration for recording
*/
@@ -540,6 +556,8 @@
*
* Allocated session IDs will always be positive and nonzero.
*
+ * Added in API level 28.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
*/
@@ -1059,6 +1077,8 @@
*
* The sessionID for a stream should not change once the stream has been opened.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return session ID or AAUDIO_SESSION_ID_NONE
*/
@@ -1094,6 +1114,8 @@
/**
* Return the use case for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames read
*/
@@ -1102,6 +1124,8 @@
/**
* Return the content type for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
*/
@@ -1110,6 +1134,8 @@
/**
* Return the input preset for the stream.
*
+ * Added in API level 28.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
*/
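The builder setters documented above as API level 28 additions (usage, content type, input preset, session ID) are applied before openStream(). A minimal usage sketch, with error handling trimmed and the choice of constants purely illustrative:

    #include <aaudio/AAudio.h>

    aaudio_result_t openGameOutputStream(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        AAudioStreamBuilder_setUsage(builder, AAUDIO_USAGE_GAME);
        AAudioStreamBuilder_setContentType(builder, AAUDIO_CONTENT_TYPE_SONIFICATION);
        AAudioStreamBuilder_setSessionId(builder, AAUDIO_SESSION_ID_ALLOCATE);

        result = AAudioStreamBuilder_openStream(builder, streamOut);
        AAudioStreamBuilder_delete(builder);
        return result;
    }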
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index 3352b33..8bbb9d9 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -179,19 +179,17 @@
int64_t localPosition;
status_t status = extendedTimestamp->getBestTimestamp(&localPosition, timeNanoseconds,
timebase, &location);
- // use MonotonicCounter to prevent retrograde motion.
- mTimestampPosition.update32((int32_t)localPosition);
- *framePosition = mTimestampPosition.get();
+ if (status == OK) {
+ // use MonotonicCounter to prevent retrograde motion.
+ mTimestampPosition.update32((int32_t) localPosition);
+ *framePosition = mTimestampPosition.get();
+ }
// ALOGD("getBestTimestamp() fposition: server = %6lld, kernel = %6lld, location = %d",
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_SERVER],
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_KERNEL],
// (int)location);
- if (status == WOULD_BLOCK) {
- return AAUDIO_ERROR_INVALID_STATE;
- } else {
- return AAudioConvert_androidToAAudioResult(status);
- }
+ return AAudioConvert_androidToAAudioResult(status);
}
void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index ac2e46e..86791c2 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -770,6 +770,7 @@
mReleased = 0;
}
+ mProxy->stop(); // notify server not to read beyond current client position until start().
mProxy->interrupt();
mAudioTrack->stop();
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 7bf4f99..b4c179d 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -393,19 +393,50 @@
// ---------------------------------------------------------------------------
-__attribute__((no_sanitize("integer")))
void AudioTrackClientProxy::flush()
{
+ sendStreamingFlushStop(true /* flush */);
+}
+
+void AudioTrackClientProxy::stop()
+{
+ sendStreamingFlushStop(false /* flush */);
+}
+
+// Sets the client-written mFlush and mStop positions, which control server behavior.
+//
+// @param flush indicates whether the operation is a flush or stop.
+// A client stop sets mStop to the current write position;
+// the server will not read past this point until start() or subsequent flush().
+// A client flush sets both mStop and mFlush to the current write position.
+// This advances the server read limit (if previously set) and on the next
+// server read advances the server read position to this limit.
+//
+void AudioTrackClientProxy::sendStreamingFlushStop(bool flush)
+{
+ // TODO: Replace this with 64-bit counters to avoid wrap complications.
// This works for mFrameCountP2 <= 2^30
- size_t increment = mFrameCountP2 << 1;
- size_t mask = increment - 1;
- audio_track_cblk_t* cblk = mCblk;
// mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
// Should newFlush = cblk->u.mStreaming.mRear? Only problem is
// if you want to flush twice to the same rear location after a 32 bit wrap.
- int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
- ((cblk->u.mStreaming.mFlush & ~mask) + increment);
- android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
+
+ const size_t increment = mFrameCountP2 << 1;
+ const size_t mask = increment - 1;
+ // No need for client atomic synchronization on mRear, mStop, mFlush
+ // as the AudioTrack client only reads/writes them under the client lock. The server only reads.
+ const int32_t rearMasked = mCblk->u.mStreaming.mRear & mask;
+
+ // update stop before flush so that the server front
+ // never advances beyond a (potential) previous stop's rear limit.
+ int32_t stopBits; // the following add can overflow
+ __builtin_add_overflow(mCblk->u.mStreaming.mStop & ~mask, increment, &stopBits);
+ android_atomic_release_store(rearMasked | stopBits, &mCblk->u.mStreaming.mStop);
+
+ if (flush) {
+ int32_t flushBits; // the following add can overflow
+ __builtin_add_overflow(mCblk->u.mStreaming.mFlush & ~mask, increment, &flushBits);
+ android_atomic_release_store(rearMasked | flushBits, &mCblk->u.mStreaming.mFlush);
+ }
}
bool AudioTrackClientProxy::clearStreamEndDone() {
@@ -540,6 +571,11 @@
LOG_ALWAYS_FATAL("static flush");
}
+void StaticAudioTrackClientProxy::stop()
+{
+ ; // no special handling required for static tracks.
+}
+
void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
{
// This can only happen on a 64-bit client
@@ -638,6 +674,7 @@
if (flush != mFlush) {
ALOGV("ServerProxy::flushBufferIfNeeded() mStreaming.mFlush = 0x%x, mFlush = 0x%0x",
flush, mFlush);
+ // shouldn't matter, but for range safety use mRear instead of getRear().
int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
int32_t front = cblk->u.mStreaming.mFront;
@@ -677,6 +714,45 @@
}
__attribute__((no_sanitize("integer")))
+int32_t AudioTrackServerProxy::getRear() const
+{
+ const int32_t stop = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+ const int32_t rear = android_atomic_acquire_load(&mCblk->u.mStreaming.mRear);
+ const int32_t stopLast = mStopLast.load(std::memory_order_acquire);
+ if (stop != stopLast) {
+ const int32_t front = mCblk->u.mStreaming.mFront;
+ const size_t overflowBit = mFrameCountP2 << 1;
+ const size_t mask = overflowBit - 1;
+ int32_t newRear = (rear & ~mask) | (stop & mask);
+ ssize_t filled = newRear - front;
+ if (filled < 0) {
+ // front and rear offsets span the overflow bit of the p2 mask,
+ // so rebase newRear.
+ ALOGV("stop wrap: filled %zd < 0, rebasing newRear by overflowBit %zx", filled, overflowBit);
+ newRear += overflowBit;
+ filled += overflowBit;
+ }
+ if (0 <= filled && (size_t) filled <= mFrameCount) {
+ // we're stopped, return the stop level as newRear
+ return newRear;
+ }
+
+ // A corrupt stop. Log error and ignore.
+ ALOGE("mStopLast %#x -> stop %#x, front %#x, rear %#x, mask %#x, newRear %#x, "
+ "filled %zd=%#x",
+ stopLast, stop, front, rear,
+ (unsigned)mask, newRear, filled, (unsigned)filled);
+ // Don't reset mStopLast as this is const.
+ }
+ return rear;
+}
+
+void AudioTrackServerProxy::start()
+{
+ mStopLast = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+}
+
+__attribute__((no_sanitize("integer")))
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0,
@@ -693,7 +769,7 @@
// See notes on barriers at ClientProxy::obtainBuffer()
if (mIsOut) {
flushBufferIfNeeded(); // might modify mFront
- rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ rear = getRear();
front = cblk->u.mStreaming.mFront;
} else {
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -825,8 +901,7 @@
// FIXME should return an accurate value, but over-estimate is better than under-estimate
return mFrameCount;
}
- // the acquire might not be necessary since not doing a subsequent read
- int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ const int32_t rear = getRear();
ssize_t filled = rear - cblk->u.mStreaming.mFront;
// pipe should not already be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
@@ -852,7 +927,7 @@
if (flush != mFlush) {
return mFrameCount;
}
- const int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ const int32_t rear = getRear();
const ssize_t filled = rear - cblk->u.mStreaming.mFront;
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
return 0; // error condition, silently return 0.
@@ -1149,6 +1224,12 @@
}
}
+int32_t StaticAudioTrackServerProxy::getRear() const
+{
+ LOG_ALWAYS_FATAL("getRear() not permitted for static tracks");
+ return 0;
+}
+
// ---------------------------------------------------------------------------
} // namespace android
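Both mFlush and mStop above pack a frame position into the low bits (masked by twice the power-of-two frame count) and a request counter into the high bits, so the server can distinguish a new stop or flush request from an old one even when the masked position repeats. A standalone sketch of that packing, for illustration only; counter overflow is intentional and benign:

    #include <cstdint>

    static int32_t packFlushOrStop(int32_t previous, int32_t rear, uint32_t frameCountP2) {
        const uint32_t increment = frameCountP2 << 1;    // one counter step
        const uint32_t mask = increment - 1;             // low bits hold the masked position
        int32_t counterBits;                             // high bits may wrap; that is fine
        __builtin_add_overflow(previous & ~(int32_t)mask, (int32_t)increment, &counterBits);
        return (rear & (int32_t)mask) | counterBits;
    }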
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 0630285..e1c03f9 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -1966,7 +1966,8 @@
if (pContext->bEnabled == LVM_FALSE) {
if (pContext->SamplesToExitCount > 0) {
- pContext->SamplesToExitCount -= outBuffer->frameCount;
+ // signed -= unsigned promotes to unsigned and overflows when the result would be negative, so cast to signed first.
+ pContext->SamplesToExitCount -= (ssize_t)outBuffer->frameCount;
} else {
status = -ENODATA;
}
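The cast above matters because of the usual arithmetic conversions: with a signed left operand and an unsigned right operand the subtraction is performed in unsigned arithmetic, so a "negative" intermediate wraps around, which the integer sanitizer reports. A minimal illustration with arbitrary values:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t samplesToExit = 100;
        uint32_t frameCount = 300;

        // Performed as 100u - 300u: wraps to 4294967096 before any conversion back.
        uint32_t asComputed = samplesToExit - frameCount;

        // Casting first keeps the arithmetic signed and yields -200 directly.
        int32_t asIntended = samplesToExit - (int32_t)frameCount;

        printf("unsigned intermediate = %u, signed result = %d\n", asComputed, asIntended);
        return 0;
    }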
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3990e69..9d9ac8c 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -3,10 +3,12 @@
vendor_available: true,
export_include_dirs: ["include"],
header_libs:[
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
export_header_lib_headers: [
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
@@ -192,6 +194,14 @@
export_aidl_headers: true,
},
+ header_libs: [
+ "libstagefright_headers",
+ ],
+
+ export_header_lib_headers: [
+ "libstagefright_headers",
+ ],
+
shared_libs: [
"liblog",
"libcutils",
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 3511253..b41da80 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -66,6 +66,8 @@
METADATA_KEY_IMAGE_HEIGHT = 30,
METADATA_KEY_IMAGE_ROTATION = 31,
METADATA_KEY_VIDEO_FRAME_COUNT = 32,
+ METADATA_KEY_EXIF_OFFSET = 33,
+ METADATA_KEY_EXIF_LENGTH = 34,
// Add more here...
};
diff --git a/media/libmedia/include/media/omx/1.0/Conversion.h b/media/libmedia/include/media/omx/1.0/Conversion.h
index 94f2e8d..3700a23 100644
--- a/media/libmedia/include/media/omx/1.0/Conversion.h
+++ b/media/libmedia/include/media/omx/1.0/Conversion.h
@@ -20,6 +20,7 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
@@ -34,6 +35,8 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <gui/IGraphicBufferProducer.h>
#include <android/hardware/media/omx/1.0/types.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
@@ -197,26 +200,6 @@
}
/**
- * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
- * calls.
- *
- * \param[in] t The source `Return<Status>`.
- * \return The corresponding `status_t`.
- *
- * This function first check if \p t has a transport error. If it does, then the
- * return value is the transport error code. Otherwise, the return value is
- * converted from `Status` contained inside \p t.
- *
- * Note:
- * - This `Status` is omx-specific. It is defined in `types.hal`.
- * - The name of this function is not `convert`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
* \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
*
* \param[in] t The source `Return<void>`.
@@ -235,7 +218,47 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first check if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -246,7 +269,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
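The explicit switch statements added above make the mapping exact for the one value whose numeric encoding differs between the two domains: framework NOT_ENOUGH_DATA versus omx Status::BUFFER_NEEDS_REALLOCATION. A quick sketch of the round trip the new mappings are meant to guarantee, assuming the toStatus()/toStatusT() helpers from this header are in scope:

    #include <cassert>

    static void checkStatusRoundTrip() {
        assert(toStatus(NOT_ENOUGH_DATA) == Status::BUFFER_NEEDS_REALLOCATION);
        assert(toStatusT(Status::BUFFER_NEEDS_REALLOCATION) == NOT_ENOUGH_DATA);
    }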
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 14ffb1d..0a1bdfe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1289,7 +1289,8 @@
ALOGV("Tear down audio with reason %d.", reason);
if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
// TimeoutWhenPaused is only for offload mode.
- ALOGW("Receive a stale message for teardown.");
+ ALOGW("Received a stale message for teardown, mPaused(%d), mOffloadAudio(%d)",
+ mPaused, mOffloadAudio);
break;
}
int64_t positionUs;
@@ -1789,6 +1790,8 @@
void NuPlayer::restartAudio(
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
+ ALOGD("restartAudio timeUs(%lld), dontOffload(%d), createDecoder(%d)",
+ (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
mAudioDecoder.clear();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 63c887b..3e5bdd6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -669,6 +669,11 @@
notifyListener_l(MEDIA_STOPPED);
}
+ if (property_get_bool("persist.debug.sf.stats", false)) {
+ Vector<String16> args;
+ dump(-1, args);
+ }
+
mState = STATE_RESET_IN_PROGRESS;
mPlayer->resetAsync();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index cc7f688..a762e76 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1617,14 +1617,7 @@
// internal buffer before resuming playback.
// FIXME: this is ignored after flush().
mAudioSink->stop();
- if (mPaused) {
- // Race condition: if renderer is paused and audio sink is stopped,
- // we need to make sure that the audio track buffer fully drains
- // before delivering data.
- // FIXME: remove this if we can detect if stop() is complete.
- const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
- mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
- } else {
+ if (!mPaused) {
mAudioSink->start();
}
mNumFramesWritten = 0;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a8c6d15..3bbba49 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3291,6 +3291,22 @@
return err;
}
+ if (compressionFormat == OMX_VIDEO_CodingHEVC) {
+ int32_t profile;
+ if (msg->findInt32("profile", &profile)) {
+ // verify if Main10 profile is supported at all, and fail
+ // immediately if it's not supported.
+ if (profile == OMX_VIDEO_HEVCProfileMain10 ||
+ profile == OMX_VIDEO_HEVCProfileMain10HDR10) {
+ err = verifySupportForProfileAndLevel(
+ kPortIndexInput, profile, 0);
+ if (err != OK) {
+ return err;
+ }
+ }
+ }
+ }
+
if (compressionFormat == OMX_VIDEO_CodingVP9) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
InitOMXParams(¶ms);
@@ -4059,7 +4075,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4131,7 +4147,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4266,7 +4282,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4280,7 +4296,7 @@
// Use largest supported profile for AVC recording if profile is not specified.
for (OMX_VIDEO_AVCPROFILETYPE profile : {
OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCProfileMain }) {
- if (verifySupportForProfileAndLevel(profile, 0) == OK) {
+ if (verifySupportForProfileAndLevel(kPortIndexOutput, profile, 0) == OK) {
h264type.eProfile = profile;
break;
}
@@ -4457,7 +4473,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
}
@@ -4602,10 +4618,10 @@
}
status_t ACodec::verifySupportForProfileAndLevel(
- int32_t profile, int32_t level) {
+ OMX_U32 portIndex, int32_t profile, int32_t level) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
InitOMXParams(¶ms);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
params.nProfileIndex = index;
@@ -4906,8 +4922,8 @@
rect.nHeight = videoDef->nFrameHeight;
}
- if (rect.nLeft < 0 ||
- rect.nTop < 0 ||
+ if (rect.nLeft < 0 || rect.nTop < 0 ||
+ rect.nWidth == 0 || rect.nHeight == 0 ||
rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
ALOGE("Wrong cropped rect (%d, %d, %u, %u) vs. frame (%u, %u)",
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 91deca5..f152a38 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -162,6 +162,8 @@
reader.skipBits(1);
// Skip vps_max_layers_minus_1
reader.skipBits(6);
+ // Skip vps_max_sub_layers_minus1
+ reader.skipBits(3);
// Skip vps_temporal_id_nesting_flags
reader.skipBits(1);
// Skip reserved
@@ -422,7 +424,7 @@
uint8_t *header = hvcc;
header[0] = 1;
- header[1] = (kGeneralProfileSpace << 6) | (kGeneralTierFlag << 5) | kGeneralProfileIdc;
+ header[1] = (generalProfileSpace << 6) | (generalTierFlag << 5) | generalProfileIdc;
header[2] = (compatibilityFlags >> 24) & 0xff;
header[3] = (compatibilityFlags >> 16) & 0xff;
header[4] = (compatibilityFlags >> 8) & 0xff;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index b874df4..f25d1f1 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -82,7 +82,6 @@
// NB: These are not yet exposed as public Java API constants.
static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
-static const char *kCodecBytesIn = "android.media.mediacodec.bytesin"; /* 0..n */
static const char *kCodecProfile = "android.media.mediacodec.profile"; /* 0..n */
static const char *kCodecLevel = "android.media.mediacodec.level"; /* 0..n */
static const char *kCodecMaxWidth = "android.media.mediacodec.maxwidth"; /* 0..n */
@@ -3202,10 +3201,6 @@
info->mData.clear();
statsBufferSent(timeUs);
-
- if (mAnalyticsItem != NULL) {
- mAnalyticsItem->addInt64(kCodecBytesIn, size);
- }
}
return err;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 179e0e6..5ae5644 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -438,6 +438,15 @@
mMetaData.add(METADATA_KEY_CAPTURE_FRAMERATE, String8(tmp));
}
+ int64_t exifOffset, exifSize;
+ if (meta->findInt64(kKeyExifOffset, &exifOffset)
+ && meta->findInt64(kKeyExifSize, &exifSize)) {
+ sprintf(tmp, "%lld", (long long)exifOffset);
+ mMetaData.add(METADATA_KEY_EXIF_OFFSET, String8(tmp));
+ sprintf(tmp, "%lld", (long long)exifSize);
+ mMetaData.add(METADATA_KEY_EXIF_LENGTH, String8(tmp));
+ }
+
bool hasAudio = false;
bool hasVideo = false;
int32_t videoWidth = -1;
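Once populated, the new EXIF offset and length surface through the same string-keyed metadata path as the existing keys. A minimal client-side sketch, assuming the libmedia MediaMetadataRetriever API and that a data source carrying EXIF has already been set on the retriever:

    #include <media/mediametadataretriever.h>
    #include <cstdlib>

    static void readExifRange(android::MediaMetadataRetriever &retriever,
                              long long *offset, long long *length) {
        const char *off = retriever.extractMetadata(android::METADATA_KEY_EXIF_OFFSET);
        const char *len = retriever.extractMetadata(android::METADATA_KEY_EXIF_LENGTH);
        *offset = off != nullptr ? atoll(off) : -1;   // keys are decimal strings, like the others
        *length = len != nullptr ? atoll(len) : -1;
    }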
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 0c6e988..c61f4b5 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -303,6 +303,8 @@
const static ALookup<uint8_t, OMX_VIDEO_HEVCPROFILETYPE> profiles {
{ 1, OMX_VIDEO_HEVCProfileMain },
{ 2, OMX_VIDEO_HEVCProfileMain10 },
+ // use Main for Main Still Picture decoding
+ { 3, OMX_VIDEO_HEVCProfileMain },
};
// set profile & level if they are recognized
@@ -310,6 +312,7 @@
OMX_VIDEO_HEVCLEVELTYPE codecLevel;
if (!profiles.map(profile, &codecProfile)) {
if (ptr[2] & 0x40 /* general compatibility flag 1 */) {
+ // Note that this case covers Main Still Picture too
codecProfile = OMX_VIDEO_HEVCProfileMain;
} else if (ptr[2] & 0x20 /* general compatibility flag 2 */) {
codecProfile = OMX_VIDEO_HEVCProfileMain10;
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 03226c7..6819bba 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -129,6 +129,11 @@
numSamplesToUse = mNumSamples;
}
+ if ((period >> kPrecision) == 0 ) {
+ ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
+ return false;
+ }
+
int64_t sumX = 0;
int64_t sumXX = 0;
int64_t sumXY = 0;
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
index 13b6d05..2c0f224 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
@@ -302,7 +302,7 @@
List<BufferInfo *> &outQueue = getPortQueue(1);
ALOGV("onQueueFilled %d/%d:", inQueue.empty(), outQueue.empty());
- while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty()) {
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty() && !mFinishedDecoder) {
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
short *outBuffer = reinterpret_cast<short *>(outHeader->pBuffer + outHeader->nOffset);
@@ -318,6 +318,21 @@
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
ALOGV("saw EOS");
mSawInputEOS = true;
+ if (mInputBufferCount == 0 && inHeader->nFilledLen == 0) {
+ // first buffer was empty and EOS: signal EOS on output and return
+ ALOGV("empty first EOS");
+ outHeader->nFilledLen = 0;
+ outHeader->nTimeStamp = inHeader->nTimeStamp;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ notifyFillBufferDone(outHeader);
+ mFinishedDecoder = true;
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+ return;
+ }
}
if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
@@ -377,7 +392,7 @@
ALOGV("no output, trying again");
continue;
}
- } else if (mSawInputEOS && !mFinishedDecoder) {
+ } else if (mSawInputEOS) {
status_t decoderErr = mFLACDecoder->decodeOneFrame(NULL, 0, outBuffer, &outBufferSize);
mFinishedDecoder = true;
if (decoderErr != OK) {
@@ -388,10 +403,8 @@
}
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
} else {
- ALOGE("no input buffer but did not get EOS");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
- return;
+ // no more input buffers at this time, loop and see if there is more output
+ continue;
}
outHeader->nFilledLen = outBufferSize;
@@ -412,9 +425,12 @@
void SoftFlacDecoder::drainDecoder() {
mFLACDecoder->flush();
+ mSawInputEOS = false;
+ mFinishedDecoder = false;
}
void SoftFlacDecoder::onReset() {
+ ALOGV("onReset");
drainDecoder();
memset(&mStreamInfo, 0, sizeof(mStreamInfo));
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index a0e46c3..fdc8975 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -357,7 +357,7 @@
FLAC__bool ok = true;
- while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty()) {
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty() && !mSentOutputEOS) {
if (!inQueue.empty()) {
BufferInfo *inInfo = *inQueue.begin();
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
@@ -415,7 +415,7 @@
mEncoderReturnedEncodedData = false;
} else {
ALOGV(" encoder process_interleaved returned without data to write");
- if (mSawInputEOS && !mSentOutputEOS) {
+ if (mSawInputEOS) {
ALOGV("finishing encoder");
mSentOutputEOS = true;
FLAC__stream_encoder_finish(mFlacStreamEncoder);
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 103fc22..bb7d361 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -48,7 +48,8 @@
(IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES
static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
+ { OMX_VIDEO_HEVCProfileMainStill, OMX_VIDEO_HEVCMainTierLevel51 },
};
SoftHEVC::SoftHEVC(
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index eae73fc..1b38852 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -818,7 +818,8 @@
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
- const uint8_t *src_y = (const uint8_t *)src.mBits;
+ const uint8_t *src_y =
+ (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
const uint8_t *src_u =
(const uint8_t *)src_y + src.mWidth * (src.mHeight - src.mCropTop / 2);
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 1a5304b..64caeed 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -493,7 +493,8 @@
status_t setupHEVCEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
status_t setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
- status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
+ status_t verifySupportForProfileAndLevel(
+ OMX_U32 portIndex, int32_t profile, int32_t level);
status_t configureImageGrid(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
status_t configureBitrate(
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
index 903a2b6..a9fce55 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
@@ -20,6 +20,7 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
@@ -35,6 +36,7 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/bqhelper/Conversion.h>
#include <android/hidl/memory/1.0/IMemory.h>
@@ -141,6 +143,37 @@
*/
/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
+}
+
+/**
* \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
* calls.
*
@@ -157,18 +190,7 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Status`.
- * \return the corresponding `status_t`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -179,7 +201,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 28524b0..fb56694 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -108,6 +108,7 @@
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
AMediaCodec_releaseCrypto; # introduced=28
+ AMediaCodec_releaseName; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
AMediaCodec_setAsyncNotifyCallback; # introduced=28
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0e2da4e..3302868 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,8 +19,11 @@
#define ANDROID_AUDIO_FLINGER_H
#include "Configuration.h"
+#include <atomic>
+#include <mutex>
#include <deque>
#include <map>
+#include <vector>
#include <stdint.h>
#include <sys/types.h>
#include <limits.h>
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 979290f..dcf223c 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -591,6 +591,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
+ thread->isOutput() &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
// Older effects may require exact STEREO position mask.
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ea01a25..a78be99 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -93,6 +93,23 @@
const sp<media::VolumeShaper::Operation>& operation);
sp<media::VolumeShaper::State> getVolumeShaperState(int id);
sp<media::VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
+ /** Set the computed normalized final volume of the track.
+ * !masterMute * masterVolume * streamVolume * averageLRVolume */
+ void setFinalVolume(float volume);
+ float getFinalVolume() const { return mFinalVolume; }
+
+ /** @return true if the track has changed (metadata or volume) since
+ * the last time this function was called,
+ * true if this function was never called since the track creation,
+ * false otherwise.
+ * Thread safe.
+ */
+ bool readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }
+
+ using SourceMetadatas = std::vector<playback_track_metadata_t>;
+ using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
+ /** Copy the track metadata in the provided iterator. Thread safe. */
+ virtual void copyMetadataTo(MetadataInserter& backInserter) const;
protected:
// for numerous
@@ -133,6 +150,8 @@
bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
void signalClientFlag(int32_t flag);
+ /** Set that a metadata has changed and needs to be notified to backend. Thread safe. */
+ void setMetadataHasChanged() { mChangeNotified.clear(); }
public:
void triggerEvents(AudioSystem::sync_event_t type);
virtual void invalidate();
@@ -182,10 +201,13 @@
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
+ float mFinalVolume; // combine master volume, stream type volume and track volume
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
audio_output_flags_t mFlags;
+ // If the last track change was notified to the client with readAndClearHasChanged
+ std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
}; // end of Track
@@ -216,8 +238,11 @@
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
-private:
+ void copyMetadataTo(MetadataInserter& backInserter) const override;
+ /** Set the metadatas of the upstream tracks. Thread safe. */
+ void setMetadatas(const SourceMetadatas& metadatas);
+private:
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
uint32_t waitTimeMs);
void clearBufferQueue();
@@ -232,6 +257,20 @@
bool mActive;
DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
sp<AudioTrackClientProxy> mClientProxy;
+ /** Attributes of the source tracks.
+ *
+ * This member must be accessed with mTrackMetadatasMutex taken.
+ * There is one writer (duplicating thread) and one reader (downstream mixer).
+ *
+ * That means that the duplicating thread can block the downstream mixer
+ * thread and vice versa for the time of the copy.
+ * If this becomes an issue, the metadata could be stored in an atomic raw pointer,
+ * and an exchange with nullptr and delete can be used.
+ * Alternatively a read-copy-update might be implemented.
+ */
+ SourceMetadatas mTrackMetadatas;
+ /** Protects mTrackMetadatas against concurrent access. */
+ mutable std::mutex mTrackMetadatasMutex;
}; // end of OutputTrack
// playback track, used by PatchPanel
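The change notification added to Track above relies on an inverted std::atomic_flag: the flag is set once the latest change has been reported, and cleared by setMetadataHasChanged() to mark a pending change, so readAndClearHasChanged() returns true exactly once per change, and once right after construction because ATOMIC_FLAG_INIT leaves the flag cleared. A minimal standalone sketch of that pattern, with hypothetical names rather than the AudioFlinger classes:

    // Standalone sketch of the inverted change flag (hypothetical ChangeNotifier,
    // not an AudioFlinger class). The flag is set when the last change has already
    // been reported and cleared when a new change is pending.
    #include <atomic>
    #include <cstdio>

    class ChangeNotifier {
    public:
        // Record that something changed and still needs to be reported. Thread safe.
        void markChanged() { mChangeNotified.clear(); }

        // Returns true exactly once per change (and once after construction,
        // because ATOMIC_FLAG_INIT leaves the flag cleared). Thread safe.
        bool readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }

    private:
        std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
    };

    int main() {
        ChangeNotifier n;
        std::printf("%d\n", n.readAndClearHasChanged()); // 1: never reported yet
        std::printf("%d\n", n.readAndClearHasChanged()); // 0: nothing new
        n.markChanged();
        std::printf("%d\n", n.readAndClearHasChanged()); // 1: change pending
        return 0;
    }

Both clear() and test_and_set() are atomic, so the notification flag itself needs no lock; only the OutputTrack metadata vector is guarded, by mTrackMetadatasMutex.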
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b5b50f8..ab65601 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2623,23 +2623,33 @@
void AudioFlinger::PlaybackThread::updateMetadata_l()
{
- // TODO: add volume support
- if (mOutput == nullptr || mOutput->stream == nullptr ||
- !mActiveTracks.readAndClearHasChanged()) {
- return;
+ if (mOutput == nullptr || mOutput->stream == nullptr) {
+ return; // That should not happen
+ }
+ bool hasChanged = mActiveTracks.readAndClearHasChanged();
+ for (const sp<Track> &track : mActiveTracks) {
+ // Do not short-circuit as all hasChanged states must be reset
+ // as all the metadata are going to be sent
+ hasChanged |= track->readAndClearHasChanged();
+ }
+ if (!hasChanged) {
+ return; // nothing to do
}
StreamOutHalInterface::SourceMetadata metadata;
+ auto backInserter = std::back_inserter(metadata.tracks);
for (const sp<Track> &track : mActiveTracks) {
// No track is invalid as this is called after prepareTrack_l in the same critical section
- metadata.tracks.push_back({
- .usage = track->attributes().usage,
- .content_type = track->attributes().content_type,
- .gain = 1,
- });
+ track->copyMetadataTo(backInserter);
}
- mOutput->stream->updateSourceMetadata(metadata);
+ sendMetadataToBackend_l(metadata);
}
+void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
+{
+ mOutput->stream->updateSourceMetadata(metadata);
+}
+
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
{
if (halFrames == NULL || dspFrames == NULL) {
@@ -4377,13 +4387,19 @@
didModify = true;
// no acknowledgement required for newly active tracks
}
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
- track->mAudioTrackServerProxy->framesReleased()).first;
- track->mCachedVolume = masterVolume
+ proxy->framesReleased()).first;
+ float volume = masterVolume
* mStreamTypes[track->streamType()].volume
* vh;
+ track->mCachedVolume = volume;
+ gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+ float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
+ float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+ track->setFinalVolume((vlf + vrf) / 2.f);
++fastTracks;
} else {
// was it previously active?
@@ -4560,6 +4576,8 @@
vaf = v * sendLevel * (1. / MAX_GAIN_INT);
}
+ track->setFinalVolume((vrf + vlf) / 2.f);
+
// Delegate volume control to effect in track effect chain if needed
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
// Do not ramp volume if volume is controlled by effect
@@ -4789,6 +4807,18 @@
track->reset();
}
+ // Track destruction may occur outside of threadLoop once it is removed from active tracks.
+ // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
+ // it ceases to be active, to allow safe removal from the AudioMixer at the start
+ // of prepareTracks_l(); this releases any outstanding buffer back to the track.
+ // See also the implementation of destroyTrack_l().
+ for (const auto &track : *tracksToRemove) {
+ const int name = track->name();
+ if (mAudioMixer->exists(name)) { // Normal tracks here, fast tracks in FastMixer.
+ mAudioMixer->setBufferProvider(name, nullptr /* bufferProvider */);
+ }
+ }
+
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
@@ -5092,6 +5122,7 @@
}
if (lastTrack) {
+ track->setFinalVolume((left + right) / 2.f);
if (left != mLeftVolFloat || right != mRightVolFloat) {
mLeftVolFloat = left;
mRightVolFloat = right;
@@ -6149,15 +6180,12 @@
return true;
}
-void AudioFlinger::DuplicatingThread::updateMetadata_l()
+void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
{
- // TODO: The duplicated track metadata are stored in other threads
- // (accessible through mActiveTracks::OutputTrack::thread()::mActiveTracks::Track::attributes())
- // but this information can be mutated at any time by the owning threads.
- // Taking the lock of any other owning threads is no possible due to timing constrains.
- // Similarly, the other threads can not push the metadatas in this thread as cross deadlock
- // would be possible.
- // A lock-free structure needs to be used to shared the metadata (maybe an atomic shared_ptr ?).
+ for (auto& outputTrack : outputTracks) { // not mOutputTracks
+ outputTrack->setMetadatas(metadata.tracks);
+ }
}
uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
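The rewritten updateMetadata_l() above collects per-track metadata through a std::back_insert_iterator instead of pushing entries directly, so a regular Track can append exactly one entry while an OutputTrack can forward the whole vector it received from the duplicating thread. A standalone sketch of that aggregation, using simplified stand-in types (Metadata, SimpleTrack and ForwardingTrack are illustrative, not the real structs):

    // Sketch of the back_insert_iterator aggregation; each "track" decides how
    // many entries it contributes to the shared output vector.
    #include <algorithm>
    #include <cstdio>
    #include <iterator>
    #include <vector>

    struct Metadata { int usage; float gain; };
    using MetadataInserter = std::back_insert_iterator<std::vector<Metadata>>;

    struct SimpleTrack {                 // contributes a single entry
        Metadata md;
        void copyMetadataTo(MetadataInserter& out) const { *out++ = md; }
    };

    struct ForwardingTrack {             // stands in for OutputTrack
        std::vector<Metadata> upstream;  // metadata received from another thread
        void copyMetadataTo(MetadataInserter& out) const {
            out = std::copy(upstream.begin(), upstream.end(), out);
        }
    };

    int main() {
        std::vector<Metadata> all;
        auto backInserter = std::back_inserter(all);

        SimpleTrack t{{1, 0.5f}};
        ForwardingTrack d{{{2, 1.0f}, {3, 0.25f}}};
        t.copyMetadataTo(backInserter);
        d.copyMetadataTo(backInserter);

        std::printf("%zu entries\n", all.size()); // prints "3 entries"
        return 0;
    }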
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index bb81224..5a5961a 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -566,8 +566,8 @@
// periodically called in the threadLoop() to update power state uids.
void updatePowerState(sp<ThreadBase> thread, bool force = false);
- /** @return true if the active tracks have changed since the last time
- * this function was called or the vector was created. */
+ /** @return true if one or more active tracks were added or removed since the
+ * last time this function was called or the vector was created. */
bool readAndClearHasChanged();
private:
@@ -588,7 +588,7 @@
int mLastActiveTracksGeneration;
wp<T> mLatestActiveTrack; // latest track added to ActiveTracks
SimpleLog * const mLocalLog;
- // If the active tracks have changed since last call to readAndClearHasChanged
+ // If the vector has changed since last call to readAndClearHasChanged
bool mHasChanged = false;
};
@@ -927,7 +927,8 @@
void removeTrack_l(const sp<Track>& track);
void readOutputParameters_l();
- void updateMetadata_l() override;
+ void updateMetadata_l() final;
+ virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -1287,7 +1288,8 @@
void removeOutputTrack(MixerThread* thread);
uint32_t waitTimeMs() const { return mWaitTimeMs; }
- void updateMetadata_l() override;
+ void sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata) override;
protected:
virtual uint32_t activeSleepTimeUs() const;
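The header change above also clarifies the split of responsibilities: updateMetadata_l() becomes final and owns the change detection and aggregation, while the new virtual sendMetadataToBackend_l() only decides where the result goes (the HAL stream for a regular PlaybackThread, the output tracks for a DuplicatingThread). A rough sketch of that shape with hypothetical class names:

    // Sketch of the "final gather + virtual delivery" split (hypothetical classes).
    #include <cstdio>
    #include <string>
    #include <vector>

    class PlaybackLikeThread {
    public:
        virtual ~PlaybackLikeThread() = default;

        // Gathering is identical for every thread type, so it is not overridable
        // (declared final in the real code).
        void updateMetadata() {
            std::vector<std::string> metadata{"track0", "track1"};
            sendMetadataToBackend(metadata);
        }

    protected:
        // Only the delivery differs between thread types.
        virtual void sendMetadataToBackend(const std::vector<std::string>& m) {
            std::printf("HAL stream gets %zu entries\n", m.size());
        }
    };

    class DuplicatingLikeThread : public PlaybackLikeThread {
    protected:
        void sendMetadataToBackend(const std::vector<std::string>& m) override {
            std::printf("forwarding %zu entries to output tracks\n", m.size());
        }
    };

    int main() {
        PlaybackLikeThread playback;
        DuplicatingLikeThread duplicating;
        playback.updateMetadata();     // goes to the "HAL"
        duplicating.updateMetadata();  // goes to the output tracks
        return 0;
    }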
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 44ce3aa..3fe41d8 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -407,6 +407,9 @@
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
+ /* The track might not play immediately after becoming active, just as if its volume were 0.
+ * When the track starts playing, its volume will be computed. */
+ mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -764,6 +767,12 @@
mState = state;
}
}
+
+ if (status == NO_ERROR || status == ALREADY_EXISTS) {
+ // for streaming tracks, remove the buffer read stop limit.
+ mAudioTrackServerProxy->start();
+ }
+
// track was already in the active list, not a problem
if (status == ALREADY_EXISTS) {
status = NO_ERROR;
@@ -991,6 +1000,23 @@
return mVolumeHandler->getVolumeShaperState(id);
}
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+{
+ if (mFinalVolume != volume) { // Compare against an epsilon if this causes too many spurious updates
+ mFinalVolume = volume;
+ setMetadataHasChanged();
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ *backInserter++ = {
+ .usage = mAttr.usage,
+ .content_type = mAttr.content_type,
+ .gain = mFinalVolume,
+ };
+}
+
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
if (!isOffloaded() && !isDirect()) {
@@ -1421,6 +1447,21 @@
return outputBufferFull;
}
+void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
+ {
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ mTrackMetadatas = metadatas;
+ }
+ // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
+ setMetadataHasChanged();
+}
+
status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
@@ -1741,14 +1782,14 @@
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt\n");
+ result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt Sil\n");
}
void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
"%08X %08X %6u "
- "%08X %6zu\n",
+ "%08X %6zu %3c\n",
isFastTrack() ? 'F' : ' ',
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
@@ -1761,7 +1802,8 @@
mSampleRate,
mCblk->mServer,
- mFrameCount
+ mFrameCount,
+ isSilenced() ? 's' : 'n'
);
}
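In Tracks.cpp above, setFinalVolume() stores the average of the final left and right volumes computed by the mixer, and copyMetadataTo() reports that value as the metadata gain. A simplified sketch of the computation with plain floats in place of the packed minifloat L/R gains (computeMetadataGain is a hypothetical helper, not part of the patch):

    // Sketch: master * stream * shaper volume, scaled per channel, then the two
    // channels are averaged into the single gain carried by the track metadata.
    #include <cstdio>

    struct TrackMetadata { float gain; };

    TrackMetadata computeMetadataGain(float masterVolume, float streamVolume,
                                      float shaperVolume, float leftGain,
                                      float rightGain) {
        const float volume = masterVolume * streamVolume * shaperVolume;
        const float vlf = volume * leftGain;   // final left volume
        const float vrf = volume * rightGain;  // final right volume
        return TrackMetadata{(vlf + vrf) / 2.f};
    }

    int main() {
        // e.g. master 0.8, stream 0.5, no volume shaper, panned toward the left
        TrackMetadata md = computeMetadataGain(0.8f, 0.5f, 1.f, 1.f, 0.5f);
        std::printf("metadata gain = %.3f\n", md.gain); // prints 0.300
        return 0;
    }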
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 094ff65..d85562e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -391,6 +391,7 @@
mSamplingRate = 0;
mChannelMask = AUDIO_CHANNEL_NONE;
mFormat = AUDIO_FORMAT_INVALID;
+ memset(&mGain, 0, sizeof(struct audio_gain_config));
mGain.index = -1;
}
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index be7f7ec..08bcf4d 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -266,7 +266,7 @@
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -409,7 +409,7 @@
// If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
// handleIncallSonification().
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 8f6db46..29ec961 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2141,6 +2141,7 @@
}
inputDesc->close();
}
+ mInputRoutes.clear();
mInputs.clear();
SoundTrigger::setCaptureState(false);
nextAudioPortGeneration();
@@ -5116,7 +5117,8 @@
}
// in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) && isInCall()) {
+ if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
@@ -5126,8 +5128,11 @@
case AUDIO_STREAM_ENFORCED_AUDIBLE:
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
- const float maxVoiceVolDb = computeVolume(AUDIO_STREAM_VOICE_CALL, index, device)
- + IN_CALL_EARPIECE_HEADROOM_DB;
+ int voiceVolumeIndex =
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ const float maxVoiceVolDb =
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ + IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
stream, volumeDB, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
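The last hunk recomputes the in-call cap from the voice-call volume curve at its own earpiece index instead of reusing the current stream's index, then limits any non-voice stream on the earpiece to that level plus IN_CALL_EARPIECE_HEADROOM_DB. A toy sketch of the clamping step in dB (the headroom constant below is an assumed value for illustration, not necessarily the one defined in AudioPolicyManager):

    // Sketch of capping a non-voice stream to the voice-call volume plus headroom.
    #include <algorithm>
    #include <cstdio>

    constexpr float kInCallEarpieceHeadroomDb = 3.f;  // assumed for this sketch

    float capToVoiceVolume(float streamVolDb, float voiceVolDb) {
        const float maxVoiceVolDb = voiceVolDb + kInCallEarpieceHeadroomDb;
        return std::min(streamVolDb, maxVoiceVolDb);
    }

    int main() {
        // a loud notification (-5 dB) against a quiet voice call (-20 dB)
        std::printf("capped to %.1f dB\n", capToVoiceVolume(-5.f, -20.f)); // -17.0
        return 0;
    }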