Merge "AudioFinger: Release buffers in AudioMixer when track becomes inactive" into pi-dev
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index e5ad2d9..c1ff34b 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -26,23 +26,22 @@
 #include "AAudioExampleUtils.h"
 #include "AAudioSimpleRecorder.h"
 
-// TODO support FLOAT
-#define REQUIRED_FORMAT    AAUDIO_FORMAT_PCM_I16
 #define MIN_FRAMES_TO_READ 48  /* arbitrary, 1 msec at 48000 Hz */
 
 static const int FRAMES_PER_LINE = 20000;
 
 int main(int argc, const char **argv)
 {
-    AAudioArgsParser   argParser;
-    aaudio_result_t result;
-    AAudioSimpleRecorder recorder;
-    int actualSamplesPerFrame;
-    int actualSampleRate;
-    aaudio_format_t       actualDataFormat;
+    AAudioArgsParser      argParser;
+    AAudioSimpleRecorder  recorder;
+    AAudioStream         *aaudioStream = nullptr;
 
-    AAudioStream *aaudioStream = nullptr;
+    aaudio_result_t       result;
+    aaudio_format_t       actualDataFormat;
     aaudio_stream_state_t state;
+
+    int32_t actualSamplesPerFrame;
+    int32_t actualSampleRate;
     int32_t framesPerBurst = 0;
     int32_t framesPerRead = 0;
     int32_t framesToRecord = 0;
@@ -50,18 +49,18 @@
     int32_t nextFrameCount = 0;
     int32_t frameCount = 0;
     int32_t xRunCount = 0;
-    int64_t previousFramePosition = -1;
-    int16_t *data = nullptr;
-    float peakLevel = 0.0;
     int32_t deviceId;
 
+    int16_t *shortData = nullptr;
+    float   *floatData = nullptr;
+    float    peakLevel = 0.0;
+
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("%s - Monitor input level using AAudio read, V0.1.2\n", argv[0]);
+    printf("%s - Monitor input level using AAudio read, V0.1.3\n", argv[0]);
 
-    argParser.setFormat(REQUIRED_FORMAT);
     if (argParser.parseArgs(argc, argv)) {
         return EXIT_FAILURE;
     }
@@ -69,6 +68,7 @@
     result = recorder.open(argParser);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  recorder.open() returned %d\n", result);
+        printf("IMPORTANT - Did you remember to enter:   adb root\n");
         goto finish;
     }
     aaudioStream = recorder.getStream();
@@ -96,17 +96,18 @@
     printf("DataFormat: framesPerRead  = %d\n",framesPerRead);
 
     actualDataFormat = AAudioStream_getFormat(aaudioStream);
-    printf("DataFormat: requested      = %d, actual = %d\n",
-           REQUIRED_FORMAT, actualDataFormat);
-    // TODO handle other data formats
-    assert(actualDataFormat == REQUIRED_FORMAT);
 
     // Allocate a buffer for the audio data (PCM_I16 or FLOAT).
-    data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
-    if (data == nullptr) {
-        fprintf(stderr, "ERROR - could not allocate data buffer\n");
-        result = AAUDIO_ERROR_NO_MEMORY;
-        goto finish;
+    switch (actualDataFormat) {
+        case AAUDIO_FORMAT_PCM_I16:
+            shortData = new int16_t[framesPerRead * actualSamplesPerFrame];
+            break;
+        case AAUDIO_FORMAT_PCM_FLOAT:
+            floatData = new float[framesPerRead * actualSamplesPerFrame];
+            break;
+        default:
+            fprintf(stderr, "UNEXPECTED FORMAT! %d", actualDataFormat);
+            goto finish;
     }
 
     // Start the stream.
@@ -126,7 +127,12 @@
         // Read audio data from the stream.
         const int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
         int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
-        int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+        int actual = 0;
+        if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+            actual = AAudioStream_read(aaudioStream, shortData, minFrames, timeoutNanos);
+        } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+            actual = AAudioStream_read(aaudioStream, floatData, minFrames, timeoutNanos);
+        }
         if (actual < 0) {
             fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
             result = actual;
@@ -140,7 +146,12 @@
 
         // Peak finder.
         for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
-            float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+            float sample = 0.0f;
+            if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+                sample = shortData[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+            } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+                sample = floatData[frameIndex * actualSamplesPerFrame];
+            }
             if (sample > peakLevel) {
                 peakLevel = sample;
             }
@@ -151,17 +162,15 @@
             displayPeakLevel(peakLevel);
             peakLevel = 0.0;
             nextFrameCount += FRAMES_PER_LINE;
-        }
 
-        // Print timestamps.
-        int64_t framePosition = 0;
-        int64_t frameTime = 0;
-        aaudio_result_t timeResult;
-        timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
-                                               &framePosition, &frameTime);
+            // Print timestamps.
+            int64_t framePosition = 0;
+            int64_t frameTime = 0;
+            aaudio_result_t timeResult;
+            timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+                                                   &framePosition, &frameTime);
 
-        if (timeResult == AAUDIO_OK) {
-            if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+            if (timeResult == AAUDIO_OK) {
                 int64_t realTime = getNanoseconds();
                 int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
 
@@ -175,11 +184,15 @@
                        (long long) framePosition,
                        (long long) frameTime,
                        latencyMillis);
-                previousFramePosition = framePosition;
+            } else {
+                printf("WARNING - AAudioStream_getTimestamp() returned %d\n", timeResult);
             }
         }
     }
 
+    state = AAudioStream_getState(aaudioStream);
+    printf("after loop, state = %s\n", AAudio_convertStreamStateToText(state));
+
     xRunCount = AAudioStream_getXRunCount(aaudioStream);
     printf("AAudioStream_getXRunCount %d\n", xRunCount);
 
@@ -192,7 +205,8 @@
 
 finish:
     recorder.close();
-    delete[] data;
+    delete[] shortData;
+    delete[] floatData;
     printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
     return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
 }
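
For reference, a minimal standalone sketch of the dual-buffer peak scan introduced in the hunks above: the stream's actual format selects either an int16_t or a float buffer, and I16 samples are normalized by 1.0/32768 before peak detection. Helper names are hypothetical and the sketch does not call AAudio.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Peak of the first channel of interleaved 16-bit PCM, normalized to [-1.0, 1.0).
static float peakOfI16(const int16_t *data, int32_t frames, int32_t samplesPerFrame) {
    float peak = 0.0f;
    for (int32_t i = 0; i < frames; i++) {
        peak = std::max(peak, data[i * samplesPerFrame] * (1.0f / 32768.0f));
    }
    return peak;
}

// Peak of the first channel of interleaved float PCM, already in [-1.0, 1.0].
static float peakOfFloat(const float *data, int32_t frames, int32_t samplesPerFrame) {
    float peak = 0.0f;
    for (int32_t i = 0; i < frames; i++) {
        peak = std::max(peak, data[i * samplesPerFrame]);
    }
    return peak;
}

int main() {
    std::vector<int16_t> shortData = {0, 8192, -16384, 32767};    // mono I16 frames
    std::vector<float>   floatData = {0.0f, 0.25f, -0.5f, 0.99f}; // mono float frames
    printf("I16 peak   = %f\n", peakOfI16(shortData.data(), 4, 1));
    printf("float peak = %f\n", peakOfFloat(floatData.data(), 4, 1));
    return 0;
}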
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 893795b..986158f 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -47,6 +47,7 @@
                        SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  recorder.open() returned %d\n", result);
+        printf("IMPORTANT - Did you remember to enter:   adb root\n");
         goto error;
     }
     printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 39d079e..026ff0f 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -151,8 +151,7 @@
 static void MyErrorCallbackProc(
         AAudioStream *stream __unused,
         void *userData __unused,
-        aaudio_result_t error)
-{
+        aaudio_result_t error) {
     printf("Error Callback, error: %d\n",(int)error);
     LoopbackData *myData = (LoopbackData *) userData;
     myData->outputError = error;
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index eb6925a..88d7401 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -87,7 +87,6 @@
     return;
 }
 
-// TODO use this as a base class within AAudio
 class AAudioParameters {
 public:
 
@@ -262,6 +261,9 @@
                 case 'd':
                     setDeviceId(atoi(&arg[2]));
                     break;
+                case 'f':
+                    setFormat(atoi(&arg[2]));
+                    break;
                 case 'i':
                     setInputPreset(atoi(&arg[2]));
                     break;
@@ -326,6 +328,10 @@
         printf("      -b{bufferCapacity} frames\n");
         printf("      -c{channels} for example 2 for stereo\n");
         printf("      -d{deviceId} default is %d\n", AAUDIO_UNSPECIFIED);
+        printf("      -f{0|1|2} set format\n");
+        printf("          0 = UNSPECIFIED\n");
+        printf("          1 = PCM_I16\n");
+        printf("          2 = FLOAT\n");
         printf("      -i{inputPreset} eg. 5 for AAUDIO_INPUT_PRESET_CAMCORDER\n");
         printf("      -m{0|1|2|3} set MMAP policy\n");
         printf("          0 = _UNSPECIFIED, use aaudio.mmap_policy system property, default\n");
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 38e1e4c..8e33a31 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -57,7 +57,7 @@
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("%s - Play a sine wave using AAudio V0.1.2\n", argv[0]);
+    printf("%s - Play a sine wave using AAudio V0.1.3\n", argv[0]);
 
     if (argParser.parseArgs(argc, argv)) {
         return EXIT_FAILURE;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index e167773..e33e9f8 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -204,7 +204,7 @@
     AAudioArgsParser::usage();
     printf("      -l{count} loopCount start/stop, every other one is silent\n");
     printf("      -t{msec}  play a high pitched tone at the beginning\n");
-    printf("      -f        force periodic underruns by sleeping in callback\n");
+    printf("      -z        force periodic underruns by sleeping in callback\n");
 }
 
 int main(int argc, const char **argv)
@@ -219,7 +219,7 @@
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("%s - Play a sine sweep using an AAudio callback V0.1.3\n", argv[0]);
+    printf("%s - Play a sine sweep using an AAudio callback V0.1.4\n", argv[0]);
 
     for (int i = 1; i < argc; i++) {
         const char *arg = argv[i];
@@ -234,8 +234,8 @@
                     case 't':
                         prefixToneMsec = atoi(&arg[2]);
                         break;
-                    case 'f':
-                        forceUnderruns = true;
+                    case 'z':
+                        forceUnderruns = true;  // Zzzzzzz
                         break;
                     default:
                         usage();
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index e40a6cd..2207cb8c 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -146,6 +146,8 @@
  * to make more refined volume or routing decisions.
  *
  * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
  */
 enum {
     /**
@@ -220,6 +222,8 @@
  * enforce audio focus.
  *
  * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
  */
 enum {
 
@@ -252,6 +256,8 @@
  * configuration.
  *
  * Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
+ *
+ * Added in API level 28.
  */
 enum {
     /**
@@ -288,6 +294,8 @@
      * Do not allocate a session ID.
      * Effects cannot be used with this stream.
      * Default.
+     *
+     * Added in API level 28.
      */
     AAUDIO_SESSION_ID_NONE = -1,
 
@@ -297,6 +305,8 @@
      * Note that the use of this flag may result in higher latency.
      *
      * Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
+     *
+     * Added in API level 28.
      */
     AAUDIO_SESSION_ID_ALLOCATE = 0,
 };
@@ -481,6 +491,8 @@
  *
  * The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
  *
+ * Added in API level 28.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param usage the desired usage, eg. AAUDIO_USAGE_GAME
  */
@@ -496,6 +508,8 @@
  *
  * The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
  *
+ * Added in API level 28.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
  */
@@ -514,6 +528,8 @@
  * That is because VOICE_RECOGNITION is the preset with the lowest latency
  * on many platforms.
  *
+ * Added in API level 28.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param inputPreset the desired configuration for recording
  */
@@ -540,6 +556,8 @@
  *
  * Allocated session IDs will always be positive and nonzero.
  *
+ * Added in API level 28.
+ *
  * @param builder reference provided by AAudio_createStreamBuilder()
  * @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
  */
@@ -1059,6 +1077,8 @@
  *
  * The sessionID for a stream should not change once the stream has been opened.
  *
+ * Added in API level 28.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return session ID or AAUDIO_SESSION_ID_NONE
  */
@@ -1094,6 +1114,8 @@
 /**
  * Return the use case for the stream.
  *
+ * Added in API level 28.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return use case, for example AAUDIO_USAGE_MEDIA
  */
@@ -1102,6 +1124,8 @@
 /**
  * Return the content type for the stream.
  *
+ * Added in API level 28.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
  */
@@ -1110,6 +1134,8 @@
 /**
  * Return the input preset for the stream.
  *
+ * Added in API level 28.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
  */
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 14ffb1d..0a1bdfe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1289,7 +1289,8 @@
                 ALOGV("Tear down audio with reason %d.", reason);
                 if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
                     // TimeoutWhenPaused is only for offload mode.
-                    ALOGW("Receive a stale message for teardown.");
+                    ALOGW("Received a stale message for teardown, mPaused(%d), mOffloadAudio(%d)",
+                          mPaused, mOffloadAudio);
                     break;
                 }
                 int64_t positionUs;
@@ -1789,6 +1790,8 @@
 
 void NuPlayer::restartAudio(
         int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
+    ALOGD("restartAudio timeUs(%lld), dontOffload(%d), createDecoder(%d)",
+          (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
     if (mAudioDecoder != NULL) {
         mAudioDecoder->pause();
         mAudioDecoder.clear();
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 0e2da4e..3302868 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,8 +19,11 @@
 #define ANDROID_AUDIO_FLINGER_H
 
 #include "Configuration.h"
+#include <atomic>
+#include <mutex>
 #include <deque>
 #include <map>
+#include <vector>
 #include <stdint.h>
 #include <sys/types.h>
 #include <limits.h>
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ea01a25..a78be99 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -93,6 +93,23 @@
                                 const sp<media::VolumeShaper::Operation>& operation);
     sp<media::VolumeShaper::State> getVolumeShaperState(int id);
     sp<media::VolumeHandler>   getVolumeHandler() { return mVolumeHandler; }
+    /** Set the computed normalized final volume of the track.
+     * !masterMute * masterVolume * streamVolume * averageLRVolume */
+    void                setFinalVolume(float volume);
+    float               getFinalVolume() const { return mFinalVolume; }
+
+    /** @return true if the track has changed (metadata or volume) since
+     *          the last time this function was called,
+     *          true if this function was never called since the track creation,
+     *          false otherwise.
+     *  Thread safe.
+     */
+    bool            readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }
+
+    using SourceMetadatas = std::vector<playback_track_metadata_t>;
+    using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
+    /** Copy the track metadata in the provided iterator. Thread safe. */
+    virtual void    copyMetadataTo(MetadataInserter& backInserter) const;
 
 protected:
     // for numerous
@@ -133,6 +150,8 @@
     bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
     void signalClientFlag(int32_t flag);
 
+    /** Set that the metadata has changed and needs to be notified to the backend. Thread safe. */
+    void setMetadataHasChanged() { mChangeNotified.clear(); }
 public:
     void triggerEvents(AudioSystem::sync_event_t type);
     virtual void invalidate();
@@ -182,10 +201,13 @@
     volatile float      mCachedVolume;  // combined master volume and stream type volume;
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
+    float               mFinalVolume; // combined master volume, stream type volume and track volume
     sp<AudioTrackServerProxy>  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
     audio_output_flags_t mFlags;
+    // If the last track change was notified to the client with readAndClearHasChanged
+    std::atomic_flag     mChangeNotified = ATOMIC_FLAG_INIT;
 };  // end of Track
 
 
@@ -216,8 +238,11 @@
             bool        isActive() const { return mActive; }
     const wp<ThreadBase>& thread() const { return mThread; }
 
-private:
+            void        copyMetadataTo(MetadataInserter& backInserter) const override;
+    /** Set the metadatas of the upstream tracks. Thread safe. */
+            void        setMetadatas(const SourceMetadatas& metadatas);
 
+private:
     status_t            obtainBuffer(AudioBufferProvider::Buffer* buffer,
                                      uint32_t waitTimeMs);
     void                clearBufferQueue();
@@ -232,6 +257,20 @@
     bool                        mActive;
     DuplicatingThread* const    mSourceThread; // for waitTimeMs() in write()
     sp<AudioTrackClientProxy>   mClientProxy;
+    /** Attributes of the source tracks.
+     *
+     * This member must be accessed with mTrackMetadatasMutex taken.
+     * There is one writer (duplicating thread) and one reader (downstream mixer).
+     *
+     * That means that the duplicating thread can block the downstream mixer
+     * thread and vice versa for the time of the copy.
+     * If this becomes an issue, the metadata could be stored in an atomic raw pointer,
+     * and an exchange with nullptr and delete can be used.
+     * Alternatively a read-copy-update might be implemented.
+     */
+    SourceMetadatas mTrackMetadatas;
+    /** Protects mTrackMetadatas against concurrent access. */
+    mutable std::mutex mTrackMetadatasMutex;
 };  // end of OutputTrack
 
 // playback track, used by PatchPanel
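
The change notification added to Track above relies on a std::atomic_flag that starts in the clear ("changed") state, so readAndClearHasChanged() reports true once per change and once after construction. A standalone sketch of that pattern, separate from the AudioFlinger classes:

#include <atomic>
#include <cstdio>

// Illustration of the mChangeNotified pattern: the flag starts clear ("changed"),
// test_and_set() marks it notified, clear() re-arms it.
class ChangeTracker {
public:
    // Mark that something changed and must be sent to the backend again.
    void setHasChanged() { mChangeNotified.clear(); }

    // Returns true exactly once per change, and once after construction.
    bool readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }

private:
    std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;  // initially clear => "changed"
};

int main() {
    ChangeTracker tracker;
    printf("%d\n", tracker.readAndClearHasChanged());  // 1: never read since creation
    printf("%d\n", tracker.readAndClearHasChanged());  // 0: nothing new to notify
    tracker.setHasChanged();
    printf("%d\n", tracker.readAndClearHasChanged());  // 1: a change was flagged
    return 0;
}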
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index adeef31..9efa6da 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2623,27 +2623,33 @@
 
 void AudioFlinger::PlaybackThread::updateMetadata_l()
 {
-    // TODO: add volume support
-    if (mOutput == nullptr || mOutput->stream == nullptr ||
-            !mActiveTracks.readAndClearHasChanged()) {
-        return;
+    if (mOutput == nullptr || mOutput->stream == nullptr) {
+        return; // That should not happen
+    }
+    bool hasChanged = mActiveTracks.readAndClearHasChanged();
+    for (const sp<Track> &track : mActiveTracks) {
+        // Do not short-circuit as all hasChanged states must be reset
+        // as all the metadata are going to be sent
+        hasChanged |= track->readAndClearHasChanged();
+    }
+    if (!hasChanged) {
+        return; // nothing to do
     }
     StreamOutHalInterface::SourceMetadata metadata;
+    auto backInserter = std::back_inserter(metadata.tracks);
     for (const sp<Track> &track : mActiveTracks) {
         // No track is invalid as this is called after prepareTrack_l in the same critical section
-        if (track->isOutputTrack()) {
-            // TODO: OutputTrack (used for duplication) are currently not supported
-            continue;
-        }
-        metadata.tracks.push_back({
-                .usage = track->attributes().usage,
-                .content_type = track->attributes().content_type,
-                .gain = 1,
-        });
+        track->copyMetadataTo(backInserter);
     }
-    mOutput->stream->updateSourceMetadata(metadata);
+    sendMetadataToBackend_l(metadata);
 }
 
+void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
+        const StreamOutHalInterface::SourceMetadata& metadata)
+{
+    mOutput->stream->updateSourceMetadata(metadata);
+};
+
 status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
 {
     if (halFrames == NULL || dspFrames == NULL) {
@@ -4381,13 +4387,19 @@
                     didModify = true;
                     // no acknowledgement required for newly active tracks
                 }
+                sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
                 // cache the combined master volume and stream type volume for fast mixer; this
                 // lacks any synchronization or barrier so VolumeProvider may read a stale value
                 const float vh = track->getVolumeHandler()->getVolume(
-                        track->mAudioTrackServerProxy->framesReleased()).first;
-                track->mCachedVolume = masterVolume
+                        proxy->framesReleased()).first;
+                float volume = masterVolume
                         * mStreamTypes[track->streamType()].volume
                         * vh;
+                track->mCachedVolume = volume;
+                gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+                float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
+                float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+                track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
             } else {
                 // was it previously active?
@@ -4564,6 +4576,8 @@
                 vaf = v * sendLevel * (1. / MAX_GAIN_INT);
             }
 
+            track->setFinalVolume((vrf + vlf) / 2.f);
+
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
@@ -5108,6 +5122,7 @@
     }
 
     if (lastTrack) {
+        track->setFinalVolume((left + right) / 2.f);
         if (left != mLeftVolFloat || right != mRightVolFloat) {
             mLeftVolFloat = left;
             mRightVolFloat = right;
@@ -6165,14 +6180,12 @@
     return true;
 }
 
-void AudioFlinger::DuplicatingThread::updateMetadata_l()
+void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
+        const StreamOutHalInterface::SourceMetadata& metadata)
 {
-    // TODO: The duplicated track metadata needs to be pushed to downstream
-    // but this information can be read at any time by the downstream threads.
-    // Taking the lock of any downstream threads is no possible due to cross deadlock risks
-    // (eg: during effect move).
-    // A lock-free structure needs to be used to shared the metadata, probably an atomic
-    // pointer to a metadata vector in each output tracks.
+    for (auto& outputTrack : outputTracks) { // not mOutputTracks
+        outputTrack->setMetadatas(metadata.tracks);
+    }
 }
 
 uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
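
The setFinalVolume() calls added in the mixer paths above all feed the same quantity: master volume times stream-type volume times the volume-shaper value, scaled by the track's left/right gains and averaged. A small sketch of that arithmetic, using plain floats in place of the minifloat-packed gains AudioFlinger actually unpacks:

#include <cstdio>

// "Final volume" as combined in prepareTracks_l(): master * stream-type * shaper,
// then scaled per channel and averaged into a single normalized value.
static float finalVolume(float masterVolume, float streamTypeVolume, float shaperVolume,
                         float trackGainLeft, float trackGainRight) {
    const float volume = masterVolume * streamTypeVolume * shaperVolume;
    const float vlf = volume * trackGainLeft;   // left channel, linear gain
    const float vrf = volume * trackGainRight;  // right channel, linear gain
    return (vlf + vrf) / 2.f;                   // averageLRVolume, as passed to setFinalVolume()
}

int main() {
    // Example: full master volume, media stream at 0.8, no shaper attenuation, centered pan.
    printf("final volume = %f\n", finalVolume(1.0f, 0.8f, 1.0f, 1.0f, 1.0f));
    return 0;
}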
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index bb81224..5a5961a 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -566,8 +566,8 @@
                     // periodically called in the threadLoop() to update power state uids.
                     void            updatePowerState(sp<ThreadBase> thread, bool force = false);
 
-                    /** @return true if the active tracks have changed since the last time
-                     *          this function was called or the vector was created. */
+                    /** @return true if one or more active tracks were added or removed since the
+                     *          last time this function was called or the vector was created. */
                     bool            readAndClearHasChanged();
 
                 private:
@@ -588,7 +588,7 @@
                     int                 mLastActiveTracksGeneration;
                     wp<T>               mLatestActiveTrack; // latest track added to ActiveTracks
                     SimpleLog * const   mLocalLog;
-                    // If the active tracks have changed since last call to readAndClearHasChanged
+                    // If the vector has changed since last call to readAndClearHasChanged
                     bool                mHasChanged = false;
                 };
 
@@ -927,7 +927,8 @@
     void        removeTrack_l(const sp<Track>& track);
 
     void        readOutputParameters_l();
-    void        updateMetadata_l() override;
+    void        updateMetadata_l() final;
+    virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
 
     virtual void dumpInternals(int fd, const Vector<String16>& args);
     void        dumpTracks(int fd, const Vector<String16>& args);
@@ -1287,7 +1288,8 @@
                 void        removeOutputTrack(MixerThread* thread);
                 uint32_t    waitTimeMs() const { return mWaitTimeMs; }
 
-                void        updateMetadata_l() override;
+                void        sendMetadataToBackend_l(
+                        const StreamOutHalInterface::SourceMetadata& metadata) override;
 protected:
     virtual     uint32_t    activeSleepTimeUs() const;
 
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 236412b..ee9ce84 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -407,6 +407,9 @@
     // mSinkTimestamp
     mFastIndex(-1),
     mCachedVolume(1.0),
+    /* The track might not play immediately after becoming active, as if its volume were 0.
+     * When the track starts playing, its volume will be computed. */
+    mFinalVolume(0.f),
     mResumeToStopping(false),
     mFlushHwPending(false),
     mFlags(flags)
@@ -997,6 +1000,23 @@
     return mVolumeHandler->getVolumeShaperState(id);
 }
 
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+{
+    if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
+        mFinalVolume = volume;
+        setMetadataHasChanged();
+    }
+}
+
+void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
+{
+    *backInserter++ = {
+            .usage = mAttr.usage,
+            .content_type = mAttr.content_type,
+            .gain = mFinalVolume,
+    };
+}
+
 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
 {
     if (!isOffloaded() && !isDirect()) {
@@ -1427,6 +1447,21 @@
     return outputBufferFull;
 }
 
+void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+    std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+    backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
+    {
+        std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+        mTrackMetadatas = metadatas;
+    }
+    // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
+    setMetadataHasChanged();
+}
+
 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
 {
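
For clarity, a self-contained sketch of the OutputTrack metadata hand-off implemented above: the duplicating thread replaces the stored vector under a mutex, and the downstream mixer copies a snapshot through a back_insert_iterator. Generic types stand in for playback_track_metadata_t.

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <mutex>
#include <vector>

struct TrackMetadata { int usage; float gain; };

class MetadataHolder {
public:
    using Metadatas = std::vector<TrackMetadata>;
    using Inserter  = std::back_insert_iterator<Metadatas>;

    // Writer side (duplicating thread): replace the whole set under the lock.
    void setMetadatas(const Metadatas& metadatas) {
        std::lock_guard<std::mutex> lock(mMutex);
        mMetadatas = metadatas;
    }

    // Reader side (downstream mixer): append a copy into the caller's vector.
    void copyMetadataTo(Inserter& backInserter) const {
        std::lock_guard<std::mutex> lock(mMutex);
        backInserter = std::copy(mMetadatas.begin(), mMetadatas.end(), backInserter);
    }

private:
    Metadatas mMetadatas;
    mutable std::mutex mMutex;
};

int main() {
    MetadataHolder holder;
    holder.setMetadatas({{1, 0.5f}, {2, 1.0f}});

    std::vector<TrackMetadata> collected;
    auto inserter = std::back_inserter(collected);
    holder.copyMetadataTo(inserter);
    printf("collected %zu metadata entries\n", collected.size());
    return 0;
}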