Merge "Force DIRECT flag on direct output thread." into rvc-dev
diff --git a/include/drm/TEST_MAPPING b/include/drm/TEST_MAPPING
new file mode 100644
index 0000000..28e432e
--- /dev/null
+++ b/include/drm/TEST_MAPPING
@@ -0,0 +1,26 @@
+{
+  "presubmit": [
+    {
+      "name": "GtsMediaTestCases",
+      "options" : [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
+        }
+      ]
+    },
+    {
+      "name": "GtsExoPlayerTestCases",
+      "options" : [
+        {
+          "include-annotation": "android.platform.test.annotations.SocPresubmit"
+        },
+        {
+          "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+        }
+      ]
+    }
+  ]
+}
diff --git a/include/media/TEST_MAPPING b/include/media/TEST_MAPPING
new file mode 100644
index 0000000..ac697da
--- /dev/null
+++ b/include/media/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "imports": [
+    {
+      "path": "frameworks/av/include/drm"
+    }
+  ]
+}
diff --git a/include/private/media/TEST_MAPPING b/include/private/media/TEST_MAPPING
new file mode 100644
index 0000000..ac697da
--- /dev/null
+++ b/include/private/media/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "imports": [
+    {
+      "path": "frameworks/av/include/drm"
+    }
+  ]
+}
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 6cb0c73..f05c2d2 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -20,6 +20,14 @@
     # Keep the original service names for backward compatibility
     stop vendor.audio-hal-2-0
     stop audio-hal-2-0
+    # See b/155364397. Need to have HAL service running for VTS.
+    # Can't use 'restart' because then the HAL service would restart
+    # audioserver, bringing it back into the running state.
+    start vendor.audio-hal
+    start vendor.audio-hal-4-0-msd
+    # Keep the original service names for backward compatibility
+    start vendor.audio-hal-2-0
+    start audio-hal-2-0
 
 on property:init.svc.audioserver=running
     start vendor.audio-hal
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index ecc56f5..823e11b 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -41,7 +41,7 @@
         : C2Buffer({block->share(C2Rect(block->width(), block->height()), ::C2Fence())}) {}
 };
 
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
+static std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>>
         kEncodeTestParameters;
 static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
         kEncodeResolutionTestParameters;
@@ -100,6 +100,7 @@
         }
         mEos = false;
         mCsd = false;
+        mConfigBPictures = false;
         mFramesReceived = 0;
         mFailedWorkReceived = 0;
         mTimestampUs = 0u;
@@ -120,7 +121,7 @@
     // Get the test parameters from GetParam call.
     virtual void getParams() {}
 
-    bool setupConfigParam(int32_t nWidth, int32_t nHeight);
+    bool setupConfigParam(int32_t nWidth, int32_t nHeight, int32_t nBFrame = 0);
 
     // callback function to process onWorkDone received by Listener
     void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
@@ -130,8 +131,10 @@
                 // previous timestamp
                 typedef std::unique_lock<std::mutex> ULock;
                 if (!mTimestampUslist.empty()) {
-                    EXPECT_GE((work->worklets.front()->output.ordinal.timestamp.peeku()),
-                              mTimestampUs);
+                    if (!mConfigBPictures) {
+                        EXPECT_GE((work->worklets.front()->output.ordinal.timestamp.peeku()),
+                                  mTimestampUs);
+                    }
                     mTimestampUs = work->worklets.front()->output.ordinal.timestamp.peeku();
                     // Currently this lock is redundant as no mTimestampUslist is only initialized
                     // before queuing any work to component. Once AdaptiveTest is added similar to
@@ -192,7 +195,7 @@
     bool mEos;
     bool mCsd;
     bool mDisableTest;
-    bool mConfig;
+    bool mConfigBPictures;
     bool mTimestampDevTest;
     standardComp mCompName;
     uint32_t mFramesReceived;
@@ -269,13 +272,27 @@
 }
 
 // Set Default config param.
-bool Codec2VideoEncHidlTestBase::setupConfigParam(int32_t nWidth, int32_t nHeight) {
+bool Codec2VideoEncHidlTestBase::setupConfigParam(int32_t nWidth, int32_t nHeight,
+                                                  int32_t nBFrame) {
+    c2_status_t status = C2_OK;
+    std::vector<std::unique_ptr<C2Param>> configParam;
     std::vector<std::unique_ptr<C2SettingResult>> failures;
-    C2StreamPictureSizeInfo::input inputSize(0u, nWidth, nHeight);
-    std::vector<C2Param*> configParam{&inputSize};
-    c2_status_t status = mComponent->config(configParam, C2_DONT_BLOCK, &failures);
-    if (status == C2_OK && failures.size() == 0u) return true;
-    return false;
+
+    configParam.push_back(std::make_unique<C2StreamPictureSizeInfo::input>(0u, nWidth, nHeight));
+
+    if (nBFrame > 0) {
+        std::unique_ptr<C2StreamGopTuning::output> gop =
+                C2StreamGopTuning::output::AllocUnique(2 /* flexCount */, 0u /* stream */);
+        gop->m.values[0] = {P_FRAME, UINT32_MAX};
+        gop->m.values[1] = {C2Config::picture_type_t(P_FRAME | B_FRAME), uint32_t(nBFrame)};
+        configParam.push_back(std::move(gop));
+    }
+
+    for (const std::unique_ptr<C2Param>& param : configParam) {
+        status = mComponent->config({param.get()}, C2_DONT_BLOCK, &failures);
+        if (status != C2_OK || failures.size() != 0u) return false;
+    }
+    return true;
 }
 
 // LookUpTable of clips for component testing
@@ -388,7 +405,7 @@
 class Codec2VideoEncEncodeTest
     : public Codec2VideoEncHidlTestBase,
       public ::testing::WithParamInterface<
-              std::tuple<std::string, std::string, std::string, std::string>> {
+              std::tuple<std::string, std::string, std::string, std::string, std::string>> {
     void getParams() {
         mInstanceName = std::get<0>(GetParam());
         mComponentName = std::get<1>(GetParam());
@@ -405,6 +422,7 @@
     bool signalEOS = !std::get<2>(GetParam()).compare("true");
     // Send an empty frame to receive CSD data from encoder.
     bool sendEmptyFirstFrame = !std::get<3>(GetParam()).compare("true");
+    mConfigBPictures = !std::get<4>(GetParam()).compare("true");
 
     strcpy(mURL, sResourceDir.c_str());
     GetURLForComponent(mURL);
@@ -428,10 +446,30 @@
         inputFrames--;
     }
 
-    if (!setupConfigParam(nWidth, nHeight)) {
+    if (!setupConfigParam(nWidth, nHeight, mConfigBPictures ? 1 : 0)) {
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
+    std::vector<std::unique_ptr<C2Param>> inParams;
+    c2_status_t c2_status = mComponent->query({}, {C2StreamGopTuning::output::PARAM_TYPE},
+                                              C2_DONT_BLOCK, &inParams);
+
+    if (c2_status != C2_OK || inParams.size() == 0) {
+        std::cout << "[   WARN   ] Bframe not supported for " << mComponentName
+                  << " resetting num BFrames to 0\n";
+        mConfigBPictures = false;
+    } else {
+        size_t offset = sizeof(C2Param);
+        C2Param* param = inParams[0].get();
+        int32_t numBFrames = *(int32_t*)((uint8_t*)param + offset);
+
+        if (!numBFrames) {
+            std::cout << "[   WARN   ] Bframe not supported for " << mComponentName
+                      << " resetting num BFrames to 0\n";
+            mConfigBPictures = false;
+        }
+    }
+
     ASSERT_EQ(mComponent->start(), C2_OK);
 
     if (sendEmptyFirstFrame) {
@@ -816,14 +854,14 @@
 int main(int argc, char** argv) {
     kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER);
     for (auto params : kTestParameters) {
-        kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "true"));
-        kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "false"));
-        kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "true"));
-        kEncodeTestParameters.push_back(
-                std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "false"));
+        constexpr char const* kBoolString[] = { "false", "true" };
+        for (size_t i = 0; i < 1 << 3; ++i) {
+            kEncodeTestParameters.push_back(std::make_tuple(
+                    std::get<0>(params), std::get<1>(params),
+                    kBoolString[i & 1],
+                    kBoolString[(i >> 1) & 1],
+                    kBoolString[(i >> 2) & 1]));
+        }
 
         kEncodeResolutionTestParameters.push_back(
                 std::make_tuple(std::get<0>(params), std::get<1>(params), "52", "18"));
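Note on the parameter expansion above: the 1 << 3 loop enumerates every combination of the three boolean test dimensions, with bit 0 driving signalEOS (tuple index 2), bit 1 sendEmptyFirstFrame (index 3), and bit 2 configBPictures (index 4). A standalone sketch of that expansion, for illustration only and not part of the patch:

    #include <cstdio>

    int main() {
        constexpr const char* kBoolString[] = {"false", "true"};
        // 2^3 = 8 combinations, same bit-to-parameter mapping as the test above.
        for (unsigned i = 0; i < (1u << 3); ++i) {
            std::printf("signalEOS=%-5s sendEmptyFirstFrame=%-5s configBPictures=%s\n",
                        kBoolString[i & 1], kBoolString[(i >> 1) & 1], kBoolString[(i >> 2) & 1]);
        }
        return 0;
    }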
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3773528..553c013 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -868,14 +868,19 @@
                 .minLuminance = hdrStaticInfo->mastering.minLuminance,
             };
 
-            struct android_cta861_3_metadata cta861_meta = {
-                .maxContentLightLevel = hdrStaticInfo->maxCll,
-                .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
-            };
-
-            hdr.validTypes = HdrMetadata::SMPTE2086 | HdrMetadata::CTA861_3;
+            hdr.validTypes = HdrMetadata::SMPTE2086;
             hdr.smpte2086 = smpte2086_meta;
-            hdr.cta8613 = cta861_meta;
+
+            // If the content light level fields are 0, do not use them; a zero
+            // value indicates the field may not be present in the stream.
+            if (hdrStaticInfo->maxCll > 0.0f && hdrStaticInfo->maxFall > 0.0f) {
+                struct android_cta861_3_metadata cta861_meta = {
+                    .maxContentLightLevel = hdrStaticInfo->maxCll,
+                    .maxFrameAverageLightLevel = hdrStaticInfo->maxFall,
+                };
+                hdr.validTypes |= HdrMetadata::CTA861_3;
+                hdr.cta8613 = cta861_meta;
+            }
         }
         if (hdr10PlusInfo) {
             hdr.validTypes |= HdrMetadata::HDR10PLUS;
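With this change, CTA861_3 is only advertised when the stream actually carried non-zero light-level values, so consumers can trust the validTypes bits. A minimal consumer-side sketch, assuming the android::HdrMetadata layout used above (the header path and helper name are illustrative, not part of this change):

    #define LOG_TAG "HdrSketch"
    #include <ui/HdrMetadata.h>   // assumed location of android::HdrMetadata
    #include <utils/Log.h>

    // Hypothetical helper: only forward CTA-861.3 light levels when present.
    static void logHdrStaticInfo(const android::HdrMetadata& hdr) {
        if (hdr.validTypes & android::HdrMetadata::SMPTE2086) {
            ALOGV("mastering luminance min/max: %f / %f",
                  hdr.smpte2086.minLuminance, hdr.smpte2086.maxLuminance);
        }
        if (hdr.validTypes & android::HdrMetadata::CTA861_3) {
            ALOGV("maxCLL=%f maxFALL=%f",
                  hdr.cta8613.maxContentLightLevel, hdr.cta8613.maxFrameAverageLightLevel);
        }
        // If CTA861_3 is not set, maxCll/maxFall were zero (possibly absent) and
        // should not be forwarded to the display pipeline.
    }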
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
index bb42580..7c4e62f 100644
--- a/media/extractors/Android.bp
+++ b/media/extractors/Android.bp
@@ -24,6 +24,9 @@
         "libmediandk#29",
     ],
 
+    // extractors are supposed to work on Q(29)
+    min_sdk_version: "29",
+
     relative_install_path: "extractors",
 
     compile_multilib: "first",
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index a051648..b88e4e8 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -814,11 +814,13 @@
             int32_t sampleRate;
             if (!AMediaFormat_getInt32(trackInfo->mMeta, AMEDIAFORMAT_KEY_SAMPLE_RATE,
                                        &sampleRate)) {
+                mbuf->release();
                 return AMEDIA_ERROR_MALFORMED;
             }
             int64_t durationUs;
             if (!AMediaFormat_getInt64(trackInfo->mMeta, AMEDIAFORMAT_KEY_DURATION,
                                        &durationUs)) {
+                mbuf->release();
                 return AMEDIA_ERROR_MALFORMED;
             }
             // TODO: Explore if this can be handled similar to MPEG4 extractor where padding is
@@ -981,6 +983,7 @@
         while (mPendingFrames.empty()) {
             media_status_t err = readBlock();
             if (err != OK) {
+                buffer->release();
                 clearPendingFrames();
                 return err;
             }
@@ -1000,6 +1003,7 @@
             while (mPendingFrames.empty()) {
                 media_status_t err = readBlock();
                 if (err != OK) {
+                    buffer->release();
                     clearPendingFrames();
                     return err;
                 }
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 3981454..62927a0 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -44,6 +44,8 @@
 struct AAudioMessageEvent {
     aaudio_service_event_t event;
     union {
+        // Align so that 32 and 64-bit code can exchange messages through shared memory.
+        alignas(8)
         double  dataDouble;
         int64_t dataLong;
     };
@@ -57,8 +59,10 @@
         EVENT,
     };
 
-    code what;
+    code      what;
     union {
+        // Align so that 32 and 64-bit code can exchange messages through shared memory.
+        alignas(8)
         AAudioMessageTimestamp timestamp; // what == TIMESTAMP
         AAudioMessageEvent event;         // what == EVENT
     };
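The alignas(8) matters because int64_t and double are 8-byte aligned in 64-bit builds but only 4-byte aligned in some 32-bit ABIs (for example x86), so without it the union, and everything after it, could land at different offsets on the two sides of the shared-memory queue. A compile-time sketch of the invariant, using simplified stand-in types rather than the real AAudio messages:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for an AAudio service message payload.
    struct EventPayload {
        int32_t what;
        union {
            alignas(8) double  dataDouble;  // forces an 8-byte slot on 32- and 64-bit builds
            int64_t dataLong;
        };
    };

    // If both a 32-bit client and a 64-bit server compile these, the layouts agree.
    static_assert(alignof(EventPayload) == 8, "union must be 8-byte aligned");
    static_assert(offsetof(EventPayload, dataLong) == 8, "payload must start at offset 8");
    static_assert(sizeof(EventPayload) == 16, "message size must match across bitness");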
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index ec0e133..586a1a3 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -92,6 +92,7 @@
 #define AMEDIAMETRICS_PROP_SUFFIX_CHAR_DUPLICATES_ALLOWED '#'
 
 #define AMEDIAMETRICS_PROP_ALLOWUID       "_allowUid"      // int32_t, allow client uid to post
+#define AMEDIAMETRICS_PROP_AUDIOMODE      "audioMode"      // string (audio.flinger)
 #define AMEDIAMETRICS_PROP_AUXEFFECTID    "auxEffectId"    // int32 (AudioTrack)
 #define AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES "bufferSizeFrames" // int32
 #define AMEDIAMETRICS_PROP_BUFFERCAPACITYFRAMES "bufferCapacityFrames" // int32
@@ -146,6 +147,7 @@
 #define AMEDIAMETRICS_PROP_UNDERRUN       "underrun"       // int32
 #define AMEDIAMETRICS_PROP_UNDERRUNFRAMES "underrunFrames" // int64_t from Thread
 #define AMEDIAMETRICS_PROP_USAGE          "usage"          // string attributes (ATrack)
+#define AMEDIAMETRICS_PROP_VOICEVOLUME    "voiceVolume"    // double (audio.flinger)
 #define AMEDIAMETRICS_PROP_VOLUME_LEFT    "volume.left"    // double (AudioTrack)
 #define AMEDIAMETRICS_PROP_VOLUME_RIGHT   "volume.right"   // double (AudioTrack)
 #define AMEDIAMETRICS_PROP_WHERE          "where"          // string value
@@ -170,7 +172,9 @@
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_PAUSE      "pause"  // AudioTrack
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS "readParameters" // Thread
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_RESTORE    "restore"
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETMODE    "setMode" // AudioFlinger
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_SETPLAYBACKPARAM "setPlaybackParam" // AudioTrack
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME "setVoiceVolume" // AudioFlinger
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOLUME  "setVolume"  // AudioTrack
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_START      "start"  // AudioTrack, AudioRecord
 #define AMEDIAMETRICS_PROP_EVENT_VALUE_STOP       "stop"   // AudioTrack, AudioRecord
diff --git a/media/libstagefright/codecs/amrnb/common/Android.bp b/media/libstagefright/codecs/amrnb/common/Android.bp
index bcf63d5..59a791d 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.bp
+++ b/media/libstagefright/codecs/amrnb/common/Android.bp
@@ -2,6 +2,7 @@
     name: "libstagefright_amrnb_common",
     vendor_available: true,
     host_supported: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/add.cpp",
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.bp b/media/libstagefright/codecs/amrnb/dec/Android.bp
index 3381d2e..b8e00b3 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.bp
+++ b/media/libstagefright/codecs/amrnb/dec/Android.bp
@@ -2,6 +2,7 @@
     name: "libstagefright_amrnbdec",
     vendor_available: true,
     host_supported: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/a_refl.cpp",
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.bp b/media/libstagefright/codecs/amrnb/enc/Android.bp
index 438ed04..73a1d4b 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.bp
+++ b/media/libstagefright/codecs/amrnb/enc/Android.bp
@@ -1,6 +1,7 @@
 cc_library_static {
     name: "libstagefright_amrnbenc",
     vendor_available: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/amrencode.cpp",
diff --git a/media/libstagefright/codecs/amrwb/Android.bp b/media/libstagefright/codecs/amrwb/Android.bp
index d8cb568..204cbe3 100644
--- a/media/libstagefright/codecs/amrwb/Android.bp
+++ b/media/libstagefright/codecs/amrwb/Android.bp
@@ -2,6 +2,7 @@
     name: "libstagefright_amrwbdec",
     vendor_available: true,
     host_supported: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/agc2_amr_wb.cpp",
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index 084be0a..64f302c 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -1,6 +1,7 @@
 cc_library_static {
     name: "libstagefright_amrwbenc",
     vendor_available: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/autocorr.c",
diff --git a/media/libstagefright/codecs/common/Android.bp b/media/libstagefright/codecs/common/Android.bp
index c5a076a..260a60a 100644
--- a/media/libstagefright/codecs/common/Android.bp
+++ b/media/libstagefright/codecs/common/Android.bp
@@ -1,6 +1,7 @@
 cc_library {
     name: "libstagefright_enc_common",
     vendor_available: true,
+    min_sdk_version: "29",
 
     srcs: ["cmnMemory.c"],
 
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index b630524..96106f1 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -1,6 +1,7 @@
 cc_library_static {
     name: "libstagefright_mp3dec",
     vendor_available: true,
+    min_sdk_version: "29",
 
     srcs: [
         "src/pvmp3_normalize.cpp",
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index d65a663..32b2075 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -1,6 +1,7 @@
 cc_library {
     name: "libstagefright_flacdec",
     vendor_available: true,
+    min_sdk_version: "29",
 
     srcs: [
         "FLACDecoder.cpp",
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index e0324e3..4324c45 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -615,7 +615,7 @@
                 if (mIsVariantPlaylist) {
                     return ERROR_MALFORMED;
                 }
-                err = parseCipherInfo(line, &itemMeta, mBaseURI);
+                err = parseCipherInfo(line, &itemMeta);
             } else if (line.startsWith("#EXT-X-ENDLIST")) {
                 mIsComplete = true;
             } else if (line.startsWith("#EXT-X-PLAYLIST-TYPE:EVENT")) {
@@ -936,7 +936,7 @@
 
 // static
 status_t M3UParser::parseCipherInfo(
-        const AString &line, sp<AMessage> *meta, const AString &baseURI) {
+        const AString &line, sp<AMessage> *meta) {
     ssize_t colonPos = line.find(":");
 
     if (colonPos < 0) {
@@ -985,13 +985,9 @@
                     val = tmp;
                 }
 
-                AString absURI;
-                if (MakeURL(baseURI.c_str(), val.c_str(), &absURI)) {
-                    val = absURI;
-                } else {
-                    ALOGE("failed to make absolute url for %s.",
-                            uriDebugString(baseURI).c_str());
-                }
+                // To save space, we only store the partial URI here.
+                // The full URI is constructed from this plus the base
+                // URI by PlaylistFetcher as needed.
             }
 
             key.insert(AString("cipher-"), 0);
@@ -1003,6 +999,14 @@
     return OK;
 }
 
+AString M3UParser::getFullCipherUri(const AString &partial) {
+    AString full;
+    if (MakeURL(mBaseURI.c_str(), partial.c_str(), &full)) {
+        return full;
+    }
+    return AString("");
+}
+
 // static
 status_t M3UParser::parseByteRange(
         const AString &line, uint64_t curOffset,
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index c85335a..9f7a66a 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -55,6 +55,8 @@
     bool getTypeURI(size_t index, const char *key, AString *uri) const;
     bool hasType(size_t index, const char *key) const;
 
+    AString getFullCipherUri(const AString &partial);
+
 protected:
     virtual ~M3UParser();
 
@@ -99,7 +101,7 @@
             const AString &line, sp<AMessage> *meta) const;
 
     static status_t parseCipherInfo(
-            const AString &line, sp<AMessage> *meta, const AString &baseURI);
+            const AString &line, sp<AMessage> *meta);
 
     static status_t parseByteRange(
             const AString &line, uint64_t curOffset,
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index fdcde29..b23aa8a 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -345,6 +345,7 @@
         ALOGE("Missing key uri");
         return ERROR_MALFORMED;
     }
+    keyURI = mPlaylist->getFullCipherUri(keyURI);
 
     ssize_t index = mAESKeyForURI.indexOfKey(keyURI);
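The cipher key URI is now stored in relative form and resolved against mBaseURI only when PlaylistFetcher is about to fetch the key. A generic sketch of that deferred resolution (the function name and the naive join below are illustrative, not the HLS implementation):

    #include <string>

    // Resolve a possibly-relative URI against a base URI, the way
    // getFullCipherUri() combines mBaseURI with the stored cipher-URI value.
    static std::string resolveAgainstBase(const std::string& base,
                                          const std::string& maybeRelative) {
        if (maybeRelative.rfind("http://", 0) == 0 ||
            maybeRelative.rfind("https://", 0) == 0) {
            return maybeRelative;                       // already absolute
        }
        const size_t lastSlash = base.rfind('/');
        if (lastSlash == std::string::npos) {
            return maybeRelative;                       // no usable base
        }
        return base.substr(0, lastSlash + 1) + maybeRelative;  // naive join, no ".." handling
    }

    // Parse time: keep only "key.bin".  Fetch time:
    // resolveAgainstBase("https://example.com/hls/live.m3u8", "key.bin")
    //   -> "https://example.com/hls/key.bin"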
 
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index 42afea3..fbb2d0c 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -46,4 +46,6 @@
     whole_static_libs: [
         "libstagefright_metadatautils",
     ],
+
+    min_sdk_version: "29",
 }
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index de7ae40..f014209 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1176,6 +1176,10 @@
             mPlaybackThreads.valueAt(i)->setMode(mode);
     }
 
+    mediametrics::LogItem(mMetricsId)
+        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETMODE)
+        .set(AMEDIAMETRICS_PROP_AUDIOMODE, toString(mode))
+        .record();
     return ret;
 }
 
@@ -1741,6 +1745,10 @@
     ret = dev->setVoiceVolume(value);
     mHardwareStatus = AUDIO_HW_IDLE;
 
+    mediametrics::LogItem(mMetricsId)
+        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME)
+        .set(AMEDIAMETRICS_PROP_VOICEVOLUME, (double)value)
+        .record();
     return ret;
 }
 
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 8e5bc79..c87fbd9 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -36,6 +36,7 @@
 
     srcs: [
         "AudioAnalytics.cpp",
+        "AudioPowerUsage.cpp",
         "iface_statsd.cpp",
         "MediaMetricsService.cpp",
         "statsd_audiopolicy.cpp",
@@ -55,6 +56,7 @@
 
     shared_libs: [
         "libbinder",
+        "libcutils",
         "liblog",
         "libmediametrics",
         "libmediautils",
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index fe3a34d..e50fbe8 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -132,6 +132,49 @@
             [this](const std::shared_ptr<const android::mediametrics::Item> &item){
                 mDeviceConnection.createPatch(item);
             }));
+
+    // Handle power usage
+    mActions.addAction(
+        AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "*." AMEDIAMETRICS_PROP_EVENT,
+        std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAUDIOINTERVALGROUP),
+        std::make_shared<AnalyticsActions::Function>(
+            [this](const std::shared_ptr<const android::mediametrics::Item> &item){
+                mAudioPowerUsage.checkTrackRecord(item, true /* isTrack */);
+            }));
+
+    mActions.addAction(
+        AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "*." AMEDIAMETRICS_PROP_EVENT,
+        std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAUDIOINTERVALGROUP),
+        std::make_shared<AnalyticsActions::Function>(
+            [this](const std::shared_ptr<const android::mediametrics::Item> &item){
+                mAudioPowerUsage.checkTrackRecord(item, false /* isTrack */);
+            }));
+
+    mActions.addAction(
+        AMEDIAMETRICS_KEY_AUDIO_FLINGER "." AMEDIAMETRICS_PROP_EVENT,
+        std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_SETMODE),
+        std::make_shared<AnalyticsActions::Function>(
+            [this](const std::shared_ptr<const android::mediametrics::Item> &item){
+                // ALOGD("(key=%s) Audioflinger setMode", item->getKey().c_str());
+                mAudioPowerUsage.checkMode(item);
+            }));
+
+    mActions.addAction(
+        AMEDIAMETRICS_KEY_AUDIO_FLINGER "." AMEDIAMETRICS_PROP_EVENT,
+        std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOICEVOLUME),
+        std::make_shared<AnalyticsActions::Function>(
+            [this](const std::shared_ptr<const android::mediametrics::Item> &item){
+                // ALOGD("(key=%s) Audioflinger setVoiceVolume", item->getKey().c_str());
+                mAudioPowerUsage.checkVoiceVolume(item);
+            }));
+
+    mActions.addAction(
+        AMEDIAMETRICS_KEY_PREFIX_AUDIO_THREAD "*." AMEDIAMETRICS_PROP_EVENT,
+        std::string("createAudioPatch"),
+        std::make_shared<AnalyticsActions::Function>(
+            [this](const std::shared_ptr<const android::mediametrics::Item> &item){
+                mAudioPowerUsage.checkCreatePatch(item);
+            }));
 }
 
 AudioAnalytics::~AudioAnalytics()
@@ -173,6 +216,12 @@
         ss << s;
         ll -= l;
     }
+
+    if (ll > 0 && prefix == nullptr) {
+        auto [s, l] = mAudioPowerUsage.dump(ll);
+        ss << s;
+        ll -= l;
+    }
     return { ss.str(), lines - ll };
 }
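The new actions use wildcard keys, where the '*' between the key prefix and the property name stands for the per-instance id (for example an AudioTrack id), so a single registration covers every track or record stream. A rough sketch of that kind of prefix/suffix wildcard dispatch, with made-up key strings and without the real AnalyticsActions types:

    #include <functional>
    #include <map>
    #include <string>

    using Handler = std::function<void(const std::string& key)>;

    // '*' matches any run of characters, e.g. "audio.track.*.event" vs "audio.track.7.event".
    static bool matchesWildcard(const std::string& pattern, const std::string& key) {
        const size_t star = pattern.find('*');
        if (star == std::string::npos) return pattern == key;
        const std::string prefix = pattern.substr(0, star);
        const std::string suffix = pattern.substr(star + 1);
        return key.size() >= prefix.size() + suffix.size()
                && key.compare(0, prefix.size(), prefix) == 0
                && key.compare(key.size() - suffix.size(), suffix.size(), suffix) == 0;
    }

    int main() {
        std::multimap<std::string, Handler> actions;
        actions.emplace("audio.track.*.event", [](const std::string& key) {
            // would forward to something like checkTrackRecord(item, true /* isTrack */)
        });
        const std::string incoming = "audio.track.7.event";
        for (const auto& [pattern, handler] : actions) {
            if (matchesWildcard(pattern, incoming)) handler(incoming);
        }
        return 0;
    }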
 
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index eb9c228..f9c776d 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -19,6 +19,7 @@
 #include <android-base/thread_annotations.h>
 #include "AnalyticsActions.h"
 #include "AnalyticsState.h"
+#include "AudioPowerUsage.h"
 #include "TimedAction.h"
 #include "Wrap.h"
 
@@ -26,6 +27,9 @@
 
 class AudioAnalytics
 {
+    // AudioAnalytics action / state helper classes
+    friend AudioPowerUsage;
+
 public:
     AudioAnalytics();
     ~AudioAnalytics();
@@ -72,6 +76,9 @@
         // underlying state is locked.
         mPreviousAnalyticsState->clear();
         mAnalyticsState->clear();
+
+        // Clear power usage state.
+        mAudioPowerUsage.clear();
     }
 
 private:
@@ -149,6 +156,8 @@
         int32_t mA2dpConnectedSuccesses GUARDED_BY(mLock) = 0;
         int32_t mA2dpConnectedFailures GUARDED_BY(mLock) = 0;
     } mDeviceConnection{*this};
+
+    AudioPowerUsage mAudioPowerUsage{this};
 };
 
 } // namespace android::mediametrics
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
new file mode 100644
index 0000000..e311bc8
--- /dev/null
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AudioPowerUsage"
+#include <utils/Log.h>
+
+#include "AudioAnalytics.h"
+#include "MediaMetricsService.h"
+#include <map>
+#include <sstream>
+#include <string>
+#include <audio_utils/clock.h>
+#include <cutils/properties.h>
+#include <statslog.h>
+#include <sys/timerfd.h>
+#include <system/audio-base.h>
+
+// property to disable audio power use metrics feature, default is enabled
+#define PROP_AUDIO_METRICS_DISABLED "persist.media.audio_metrics.power_usage_disabled"
+#define AUDIO_METRICS_DISABLED_DEFAULT (false)
+
+// property to set how often to send audio power use metrics data to westworld, default is 24hrs
+#define PROP_AUDIO_METRICS_INTERVAL_HR "persist.media.audio_metrics.interval_hr"
+#define INTERVAL_HR_DEFAULT (24)
+
+// for Audio Power Usage Metrics
+#define AUDIO_POWER_USAGE_KEY_AUDIO_USAGE     "audio.power.usage"
+
+#define AUDIO_POWER_USAGE_PROP_DEVICE         "device"     // int32
+#define AUDIO_POWER_USAGE_PROP_DURATION_NS    "durationNs" // int64
+#define AUDIO_POWER_USAGE_PROP_TYPE           "type"       // int32
+#define AUDIO_POWER_USAGE_PROP_VOLUME         "volume"     // double
+
+namespace android::mediametrics {
+
+/* static */
+bool AudioPowerUsage::typeFromString(const std::string& type_string, int32_t& type) {
+    static std::map<std::string, int32_t> typeTable = {
+        { "AUDIO_STREAM_VOICE_CALL",          VOIP_CALL_TYPE },
+        { "AUDIO_STREAM_SYSTEM",              MEDIA_TYPE },
+        { "AUDIO_STREAM_RING",                RINGTONE_NOTIFICATION_TYPE },
+        { "AUDIO_STREAM_MUSIC",               MEDIA_TYPE },
+        { "AUDIO_STREAM_ALARM",               ALARM_TYPE },
+        { "AUDIO_STREAM_NOTIFICATION",        RINGTONE_NOTIFICATION_TYPE },
+
+        { "AUDIO_CONTENT_TYPE_SPEECH",        VOIP_CALL_TYPE },
+        { "AUDIO_CONTENT_TYPE_MUSIC",         MEDIA_TYPE },
+        { "AUDIO_CONTENT_TYPE_MOVIE",         MEDIA_TYPE },
+        { "AUDIO_CONTENT_TYPE_SONIFICATION",  RINGTONE_NOTIFICATION_TYPE },
+
+        { "AUDIO_USAGE_MEDIA",                MEDIA_TYPE },
+        { "AUDIO_USAGE_VOICE_COMMUNICATION",  VOIP_CALL_TYPE },
+        { "AUDIO_USAGE_ALARM",                ALARM_TYPE },
+        { "AUDIO_USAGE_NOTIFICATION",         RINGTONE_NOTIFICATION_TYPE },
+
+        { "AUDIO_SOURCE_CAMCORDER",           CAMCORDER_TYPE },
+        { "AUDIO_SOURCE_VOICE_COMMUNICATION", VOIP_CALL_TYPE },
+        { "AUDIO_SOURCE_DEFAULT",             RECORD_TYPE },
+        { "AUDIO_SOURCE_MIC",                 RECORD_TYPE },
+        { "AUDIO_SOURCE_UNPROCESSED",         RECORD_TYPE },
+        { "AUDIO_SOURCE_VOICE_RECOGNITION",   RECORD_TYPE },
+    };
+
+    auto it = typeTable.find(type_string);
+    if (it == typeTable.end()) {
+        type = UNKNOWN_TYPE;
+        return false;
+    }
+
+    type = it->second;
+    return true;
+}
+
+/* static */
+bool AudioPowerUsage::deviceFromString(const std::string& device_string, int32_t& device) {
+    static std::map<std::string, int32_t> deviceTable = {
+        { "AUDIO_DEVICE_OUT_EARPIECE",             OUTPUT_EARPIECE },
+        { "AUDIO_DEVICE_OUT_SPEAKER_SAFE",         OUTPUT_SPEAKER_SAFE },
+        { "AUDIO_DEVICE_OUT_SPEAKER",              OUTPUT_SPEAKER },
+        { "AUDIO_DEVICE_OUT_WIRED_HEADSET",        OUTPUT_WIRED_HEADSET },
+        { "AUDIO_DEVICE_OUT_WIRED_HEADPHONE",      OUTPUT_WIRED_HEADSET },
+        { "AUDIO_DEVICE_OUT_BLUETOOTH_SCO",        OUTPUT_BLUETOOTH_SCO },
+        { "AUDIO_DEVICE_OUT_BLUETOOTH_A2DP",       OUTPUT_BLUETOOTH_A2DP },
+        { "AUDIO_DEVICE_OUT_USB_HEADSET",          OUTPUT_USB_HEADSET },
+        { "AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET", OUTPUT_BLUETOOTH_SCO },
+
+        { "AUDIO_DEVICE_IN_BUILTIN_MIC",           INPUT_BUILTIN_MIC },
+        { "AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET", INPUT_BLUETOOTH_SCO },
+        { "AUDIO_DEVICE_IN_WIRED_HEADSET",         INPUT_WIRED_HEADSET_MIC },
+        { "AUDIO_DEVICE_IN_USB_DEVICE",            INPUT_USB_HEADSET_MIC },
+        { "AUDIO_DEVICE_IN_BACK_MIC",              INPUT_BUILTIN_BACK_MIC },
+    };
+
+    auto it = deviceTable.find(device_string);
+    if (it == deviceTable.end()) {
+        device = 0;
+        return false;
+    }
+
+    device = it->second;
+    return true;
+}
+
+int32_t AudioPowerUsage::deviceFromStringPairs(const std::string& device_strings) {
+    int32_t deviceMask = 0;
+    const auto devaddrvec = MediaMetricsService::getDeviceAddressPairs(device_strings);
+    for (const auto &[device, addr] : devaddrvec) {
+        int32_t combo_device = 0;
+        deviceFromString(device, combo_device);
+        deviceMask |= combo_device;
+    }
+    return deviceMask;
+}
+
+/* static */
+void AudioPowerUsage::sendItem(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    int32_t type;
+    if (!item->getInt32(AUDIO_POWER_USAGE_PROP_TYPE, &type)) return;
+
+    int32_t device;
+    if (!item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &device)) return;
+
+    int64_t duration_ns;
+    if (!item->getInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, &duration_ns)) return;
+
+    double volume;
+    if (!item->getDouble(AUDIO_POWER_USAGE_PROP_VOLUME, &volume)) return;
+
+#ifdef STATSD
+    (void)android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
+                                         device,
+                                         (int32_t)(duration_ns / NANOS_PER_SECOND),
+                                         (float)volume,
+                                         type);
+#endif
+}
+
+bool AudioPowerUsage::saveAsItem_l(
+        int32_t device, int64_t duration_ns, int32_t type, double average_vol)
+{
+    ALOGV("%s: (%#x, %d, %lld, %f)", __func__, device, type,
+                                   (long long)duration_ns, average_vol );
+    if (duration_ns == 0) {
+        return true; // skip duration 0 usage
+    }
+    if (device == 0) {
+        return true; //ignore unknown device
+    }
+
+    for (auto item : mItems) {
+        int32_t item_type = 0, item_device = 0;
+        double item_volume = 0.;
+        int64_t item_duration_ns = 0;
+        item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &item_device);
+        item->getInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, &item_duration_ns);
+        item->getInt32(AUDIO_POWER_USAGE_PROP_TYPE, &item_type);
+        item->getDouble(AUDIO_POWER_USAGE_PROP_VOLUME, &item_volume);
+
+        // aggregate by device and type
+        if (item_device == device && item_type == type) {
+            int64_t final_duration_ns = item_duration_ns + duration_ns;
+            double final_volume = (device & INPUT_DEVICE_BIT) ? 1.0:
+                            ((item_volume * item_duration_ns +
+                            average_vol * duration_ns) / final_duration_ns);
+
+            item->setInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, final_duration_ns);
+            item->setDouble(AUDIO_POWER_USAGE_PROP_VOLUME, final_volume);
+            item->setTimestamp(systemTime(SYSTEM_TIME_REALTIME));
+
+            ALOGV("%s: update (%#x, %d, %lld, %f) --> (%lld, %f)", __func__,
+                  device, type,
+                  (long long)item_duration_ns, item_volume,
+                  (long long)final_duration_ns, final_volume);
+
+            return true;
+        }
+    }
+
+    auto sitem = std::make_shared<mediametrics::Item>(AUDIO_POWER_USAGE_KEY_AUDIO_USAGE);
+    sitem->setTimestamp(systemTime(SYSTEM_TIME_REALTIME));
+    sitem->setInt32(AUDIO_POWER_USAGE_PROP_DEVICE, device);
+    sitem->setInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, duration_ns);
+    sitem->setInt32(AUDIO_POWER_USAGE_PROP_TYPE, type);
+    sitem->setDouble(AUDIO_POWER_USAGE_PROP_VOLUME, average_vol);
+    mItems.emplace_back(sitem);
+    return true;
+}
+
+void AudioPowerUsage::checkTrackRecord(
+        const std::shared_ptr<const mediametrics::Item>& item, bool isTrack)
+{
+    const std::string key = item->getKey();
+
+    int64_t deviceTimeNs = 0;
+    if (!item->getInt64(AMEDIAMETRICS_PROP_DEVICETIMENS, &deviceTimeNs)) {
+        return;
+    }
+    double deviceVolume = 1.;
+    if (isTrack && !item->getDouble(AMEDIAMETRICS_PROP_DEVICEVOLUME, &deviceVolume)) {
+        return;
+    }
+    int32_t type = 0;
+    std::string type_string;
+    if ((isTrack && mAudioAnalytics->mAnalyticsState->timeMachine().get(
+               key, AMEDIAMETRICS_PROP_STREAMTYPE, &type_string) == OK) ||
+        (!isTrack && mAudioAnalytics->mAnalyticsState->timeMachine().get(
+               key, AMEDIAMETRICS_PROP_SOURCE, &type_string) == OK)) {
+        typeFromString(type_string, type);
+
+        if (isTrack && type == UNKNOWN_TYPE &&
+                   mAudioAnalytics->mAnalyticsState->timeMachine().get(
+                   key, AMEDIAMETRICS_PROP_USAGE, &type_string) == OK) {
+            typeFromString(type_string, type);
+        }
+        if (isTrack && type == UNKNOWN_TYPE &&
+                   mAudioAnalytics->mAnalyticsState->timeMachine().get(
+                   key, AMEDIAMETRICS_PROP_CONTENTTYPE, &type_string) == OK) {
+            typeFromString(type_string, type);
+        }
+        ALOGV("type = %s => %d", type_string.c_str(), type);
+    }
+
+    int32_t device = 0;
+    std::string device_strings;
+    if ((isTrack && mAudioAnalytics->mAnalyticsState->timeMachine().get(
+         key, AMEDIAMETRICS_PROP_OUTPUTDEVICES, &device_strings) == OK) ||
+        (!isTrack && mAudioAnalytics->mAnalyticsState->timeMachine().get(
+         key, AMEDIAMETRICS_PROP_INPUTDEVICES, &device_strings) == OK)) {
+
+        device = deviceFromStringPairs(device_strings);
+        ALOGV("device = %s => %d", device_strings.c_str(), device);
+    }
+    std::lock_guard l(mLock);
+    saveAsItem_l(device, deviceTimeNs, type, deviceVolume);
+}
+
+void AudioPowerUsage::checkMode(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    std::string mode;
+    if (!item->getString(AMEDIAMETRICS_PROP_AUDIOMODE, &mode)) return;
+
+    std::lock_guard l(mLock);
+    if (mode == mMode) return;  // no change in mode.
+
+    if (mMode == "AUDIO_MODE_IN_CALL") { // leaving call mode
+        const int64_t endCallNs = item->getTimestamp();
+        const int64_t durationNs = endCallNs - mDeviceTimeNs;
+        if (durationNs > 0) {
+            mDeviceVolume = (mDeviceVolume * (mVolumeTimeNs - mDeviceTimeNs) +
+                    mVoiceVolume * (endCallNs - mVolumeTimeNs)) / durationNs;
+            saveAsItem_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+        }
+    } else if (mode == "AUDIO_MODE_IN_CALL") { // entering call mode
+        mStartCallNs = item->getTimestamp(); // advisory only
+
+        mDeviceVolume = 0;
+        mVolumeTimeNs = mStartCallNs;
+        mDeviceTimeNs = mStartCallNs;
+    }
+    ALOGV("%s: new mode:%s  old mode:%s", __func__, mode.c_str(), mMode.c_str());
+    mMode = mode;
+}
+
+void AudioPowerUsage::checkVoiceVolume(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    double voiceVolume = 0.;
+    if (!item->getDouble(AMEDIAMETRICS_PROP_VOICEVOLUME, &voiceVolume)) return;
+
+    std::lock_guard l(mLock);
+    if (voiceVolume == mVoiceVolume) return;  // no change in volume
+
+    // we only track average device volume when we are in-call
+    if (mMode == "AUDIO_MODE_IN_CALL") {
+        const int64_t timeNs = item->getTimestamp();
+        const int64_t durationNs = timeNs - mDeviceTimeNs;
+        if (durationNs > 0) {
+            mDeviceVolume = (mDeviceVolume * (mVolumeTimeNs - mDeviceTimeNs) +
+                    mVoiceVolume * (timeNs - mVolumeTimeNs)) / durationNs;
+            mVolumeTimeNs = timeNs;
+        }
+    }
+    ALOGV("%s: new voice volume:%lf  old voice volume:%lf", __func__, voiceVolume, mVoiceVolume);
+    mVoiceVolume = voiceVolume;
+}
+
+void AudioPowerUsage::checkCreatePatch(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    std::string outputDevices;
+    if (!item->get(AMEDIAMETRICS_PROP_OUTPUTDEVICES, &outputDevices)) return;
+
+    const std::string& key = item->getKey();
+    std::string flags;
+    if (mAudioAnalytics->mAnalyticsState->timeMachine().get(
+         key, AMEDIAMETRICS_PROP_FLAGS, &flags) != OK) return;
+
+    if (flags.find("AUDIO_OUTPUT_FLAG_PRIMARY") == std::string::npos) return;
+
+    const int32_t device = deviceFromStringPairs(outputDevices);
+
+    std::lock_guard l(mLock);
+    if (mPrimaryDevice == device) return;
+
+    if (mMode == "AUDIO_MODE_IN_CALL") {
+        // Save statistics
+        const int64_t endDeviceNs = item->getTimestamp();
+        const int64_t durationNs = endDeviceNs - mDeviceTimeNs;
+        if (durationNs > 0) {
+            mDeviceVolume = (mDeviceVolume * (mVolumeTimeNs - mDeviceTimeNs) +
+                    mVoiceVolume * (endDeviceNs - mVolumeTimeNs)) / durationNs;
+            saveAsItem_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+        }
+        // reset statistics
+        mDeviceVolume = 0;
+        mDeviceTimeNs = endDeviceNs;
+        mVolumeTimeNs = endDeviceNs;
+    }
+    ALOGV("%s: new primary device:%#x  old primary device:%#x", __func__, device, mPrimaryDevice);
+    mPrimaryDevice = device;
+}
+
+AudioPowerUsage::AudioPowerUsage(AudioAnalytics *audioAnalytics)
+    : mAudioAnalytics(audioAnalytics)
+    , mDisabled(property_get_bool(PROP_AUDIO_METRICS_DISABLED, AUDIO_METRICS_DISABLED_DEFAULT))
+    , mIntervalHours(property_get_int32(PROP_AUDIO_METRICS_INTERVAL_HR, INTERVAL_HR_DEFAULT))
+{
+    ALOGD("%s", __func__);
+    ALOGI_IF(mDisabled, "AudioPowerUsage is disabled.");
+    collect(); // send items
+}
+
+AudioPowerUsage::~AudioPowerUsage()
+{
+    ALOGD("%s", __func__);
+}
+
+void AudioPowerUsage::clear()
+{
+    std::lock_guard _l(mLock);
+    mItems.clear();
+}
+
+void AudioPowerUsage::collect()
+{
+    std::lock_guard _l(mLock);
+    for (const auto &item : mItems) {
+        sendItem(item);
+    }
+    mItems.clear();
+    mAudioAnalytics->mTimedAction.postIn(
+        mIntervalHours <= 0 ? std::chrono::seconds(5) : std::chrono::hours(mIntervalHours),
+        [this](){ collect(); });
+}
+
+std::pair<std::string, int32_t> AudioPowerUsage::dump(int limit) const {
+    if (limit <= 2) {
+        return {{}, 0};
+    }
+    std::lock_guard _l(mLock);
+    if (mDisabled) {
+        return {"AudioPowerUsage disabled\n", 1};
+    }
+    if (mItems.empty()) {
+        return {"AudioPowerUsage empty\n", 1};
+    }
+
+    int slot = 1;
+    std::stringstream ss;
+    ss << "AudioPowerUsage:\n";
+    for (const auto &item : mItems) {
+        if (slot >= limit - 1) {
+            ss << "-- AudioPowerUsage may be truncated!\n";
+            ++slot;
+            break;
+        }
+        ss << " " << slot << " " << item->toString() << "\n";
+        slot++;
+    }
+    return { ss.str(), slot };
+}
+
+} // namespace android::mediametrics
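The in-call volume bookkeeping above is a running time-weighted average: mDeviceVolume covers [mDeviceTimeNs, mVolumeTimeNs] and mVoiceVolume covers the segment since the last volume change, so each update reweights the two spans by their durations. A standalone sketch of the same arithmetic (local names, not the class members):

    #include <cstdint>

    struct CallVolumeTracker {
        int64_t deviceTimeNs = 0;  // start of the current device/call segment
        int64_t volumeTimeNs = 0;  // when the voice volume last changed
        double  avgVolume = 0.;    // time-weighted average over [deviceTimeNs, volumeTimeNs]
        double  voiceVolume = 0.;  // volume in effect since volumeTimeNs

        // Fold the segment [volumeTimeNs, nowNs] at voiceVolume into the average.
        double averageUpTo(int64_t nowNs) const {
            const int64_t durationNs = nowNs - deviceTimeNs;
            if (durationNs <= 0) return avgVolume;
            return (avgVolume * (volumeTimeNs - deviceTimeNs) +
                    voiceVolume * (nowNs - volumeTimeNs)) / durationNs;
        }
    };

    // Example: volume 0.5 for the first 6 s of a call, then 1.0 for the last 4 s:
    // average = (0.5 * 6 + 1.0 * 4) / 10 = 0.7, matching the expression used in
    // checkMode() and checkVoiceVolume().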
diff --git a/services/mediametrics/AudioPowerUsage.h b/services/mediametrics/AudioPowerUsage.h
new file mode 100644
index 0000000..446ff4f
--- /dev/null
+++ b/services/mediametrics/AudioPowerUsage.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <deque>
+#include <media/MediaMetricsItem.h>
+#include <mutex>
+#include <thread>
+
+namespace android::mediametrics {
+
+class AudioAnalytics;
+
+class AudioPowerUsage {
+public:
+    explicit AudioPowerUsage(AudioAnalytics *audioAnalytics);
+    ~AudioPowerUsage();
+
+    void checkTrackRecord(const std::shared_ptr<const mediametrics::Item>& item, bool isTrack);
+    void checkMode(const std::shared_ptr<const mediametrics::Item>& item);
+    void checkVoiceVolume(const std::shared_ptr<const mediametrics::Item>& item);
+    void checkCreatePatch(const std::shared_ptr<const mediametrics::Item>& item);
+    void clear();
+
+    /**
+     * Returns a pair consisting of the dump string, and the number of lines in the string.
+     *
+     * The number of lines in the returned pair is used as an optimization
+     * for subsequent line limiting.
+     *
+     * \param lines the maximum number of lines in the string returned.
+     */
+    std::pair<std::string, int32_t> dump(int32_t lines = INT32_MAX) const;
+
+    // align with message AudioUsageDataReported in frameworks/base/cmds/statsd/src/atoms.proto
+    enum AudioType {
+        UNKNOWN_TYPE = 0,
+        VOICE_CALL_TYPE = 1,            // voice call
+        VOIP_CALL_TYPE = 2,             // voip call, including uplink and downlink
+        MEDIA_TYPE = 3,                 // music and system sound
+        RINGTONE_NOTIFICATION_TYPE = 4, // ringtone and notification
+        ALARM_TYPE = 5,                 // alarm type
+        // record type
+        CAMCORDER_TYPE = 6,             // camcorder
+        RECORD_TYPE = 7,                // other recording
+    };
+
+    enum AudioDevice {
+        OUTPUT_EARPIECE         = 0x1,
+        OUTPUT_SPEAKER          = 0x2,
+        OUTPUT_WIRED_HEADSET    = 0x4,
+        OUTPUT_USB_HEADSET      = 0x8,
+        OUTPUT_BLUETOOTH_SCO    = 0x10,
+        OUTPUT_BLUETOOTH_A2DP   = 0x20,
+        OUTPUT_SPEAKER_SAFE     = 0x40,
+
+        INPUT_DEVICE_BIT        = 0x40000000,
+        INPUT_BUILTIN_MIC       = INPUT_DEVICE_BIT | 0x1, // stays a positive int32.
+        INPUT_BUILTIN_BACK_MIC  = INPUT_DEVICE_BIT | 0x2,
+        INPUT_WIRED_HEADSET_MIC = INPUT_DEVICE_BIT | 0x4,
+        INPUT_USB_HEADSET_MIC   = INPUT_DEVICE_BIT | 0x8,
+        INPUT_BLUETOOTH_SCO     = INPUT_DEVICE_BIT | 0x10,
+    };
+
+    static bool typeFromString(const std::string& type_string, int32_t& type);
+    static bool deviceFromString(const std::string& device_string, int32_t& device);
+    static int32_t deviceFromStringPairs(const std::string& device_strings);
+private:
+    bool saveAsItem_l(int32_t device, int64_t duration, int32_t type, double average_vol)
+         REQUIRES(mLock);
+    static void sendItem(const std::shared_ptr<const mediametrics::Item>& item);
+    void collect();
+
+    AudioAnalytics * const mAudioAnalytics;
+    const bool mDisabled;
+    const int32_t mIntervalHours;
+
+    mutable std::mutex mLock;
+    std::deque<std::shared_ptr<mediametrics::Item>> mItems GUARDED_BY(mLock);
+
+    double mVoiceVolume GUARDED_BY(mLock) = 0.;
+    double mDeviceVolume GUARDED_BY(mLock) = 0.;
+    int64_t mStartCallNs GUARDED_BY(mLock) = 0; // advisory only
+    int64_t mVolumeTimeNs GUARDED_BY(mLock) = 0;
+    int64_t mDeviceTimeNs GUARDED_BY(mLock) = 0;
+    int32_t mPrimaryDevice GUARDED_BY(mLock) = OUTPUT_SPEAKER;
+    std::string mMode GUARDED_BY(mLock) {"AUDIO_MODE_NORMAL"};
+};
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index b465ecd..f7988f1 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -803,7 +803,7 @@
 
   // TODO: Verify contents of AudioAnalytics.
   // Currently there is no getter API in AudioAnalytics besides dump.
-  ASSERT_EQ(9, audioAnalytics.dump(1000).second /* lines */);
+  ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
 
   ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
   // untrusted entities can add to an existing key
@@ -839,7 +839,7 @@
 
   // TODO: Verify contents of AudioAnalytics.
   // Currently there is no getter API in AudioAnalytics besides dump.
-  ASSERT_EQ(9, audioAnalytics.dump(1000).second /* lines */);
+  ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
 
   ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
   // untrusted entities can add to an existing key
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 9dab770..0843e0b 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -23,10 +23,10 @@
 #include <map>
 #include <mutex>
 #include <sstream>
+#include <thread>
 #include <utils/Singleton.h>
 #include <vector>
 
-
 #include "AAudioEndpointManager.h"
 #include "AAudioServiceEndpoint.h"
 
@@ -36,7 +36,6 @@
 #include "AAudioServiceEndpointPlay.h"
 #include "AAudioServiceEndpointMMAP.h"
 
-
 #define AAUDIO_BUFFER_CAPACITY_MIN    4 * 512
 #define AAUDIO_SAMPLE_RATE_DEFAULT    48000
 
@@ -48,7 +47,6 @@
 using namespace android;  // TODO just import names needed
 using namespace aaudio;   // TODO just import names needed
 
-
 AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP(AAudioService &audioService)
         : mMmapStream(nullptr)
         , mAAudioService(audioService) {}
@@ -318,9 +316,8 @@
     return 0; // TODO
 }
 
-// This is called by AudioFlinger when it wants to destroy a stream.
-void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
-    ALOGD("%s(portHandle = %d) called", __func__, portHandle);
+// This is called by onTearDown() in a separate thread to avoid deadlocks.
+void AAudioServiceEndpointMMAP::handleTearDownAsync(audio_port_handle_t portHandle) {
     // Are we tearing down the EXCLUSIVE MMAP stream?
     if (isStreamRegistered(portHandle)) {
         ALOGD("%s(%d) tearing down this entire MMAP endpoint", __func__, portHandle);
@@ -333,6 +330,13 @@
     }
 };
 
+// This is called by AudioFlinger when it wants to destroy a stream.
+void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
+    ALOGD("%s(portHandle = %d) called", __func__, portHandle);
+    std::thread asyncTask(&AAudioServiceEndpointMMAP::handleTearDownAsync, this, portHandle);
+    asyncTask.detach();
+}
+
 void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
                                               android::Vector<float> values) {
     // TODO Do we really need a different volume for each channel?
@@ -345,12 +349,20 @@
     }
 };
 
-void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
+void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t portHandle) {
+    const int32_t deviceId = static_cast<int32_t>(portHandle);
     ALOGD("%s() called with dev %d, old = %d", __func__, deviceId, getDeviceId());
-    if (getDeviceId() != AUDIO_PORT_HANDLE_NONE  && getDeviceId() != deviceId) {
-        disconnectRegisteredStreams();
+    if (getDeviceId() != deviceId) {
+        if (getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
+            std::thread asyncTask([this, deviceId]() {
+                disconnectRegisteredStreams();
+                setDeviceId(deviceId);
+            });
+            asyncTask.detach();
+        } else {
+            setDeviceId(deviceId);
+        }
     }
-    setDeviceId(deviceId);
 };
 
 /**
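Moving the teardown to a detached thread keeps onTearDown() from blocking or re-entering locks held on the AudioFlinger call path. A minimal sketch of the pattern with generic names (not the AAudio classes); it assumes the object outlives the detached task, as a long-lived service endpoint would:

    #include <mutex>
    #include <thread>

    class Endpoint {
    public:
        // Called by the framework, possibly while the caller holds locks that the
        // teardown path needs again; never block here.
        void onTearDown(int portHandle) {
            std::thread([this, portHandle] { handleTearDownAsync(portHandle); }).detach();
        }

    private:
        void handleTearDownAsync(int portHandle) {
            std::lock_guard<std::mutex> guard(mLock);  // safe: no longer on the caller's stack
            // ... unregister the port, disconnect streams, notify clients ...
            (void)portHandle;
        }

        std::mutex mLock;
    };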
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index f599066..3d10861 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -68,13 +68,15 @@
 
     aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
 
+    void handleTearDownAsync(audio_port_handle_t portHandle);
+
     // -------------- Callback functions for MmapStreamCallback ---------------------
-    void onTearDown(audio_port_handle_t handle) override;
+    void onTearDown(audio_port_handle_t portHandle) override;
 
     void onVolumeChanged(audio_channel_mask_t channels,
                          android::Vector<float> values) override;
 
-    void onRoutingChanged(audio_port_handle_t deviceId) override;
+    void onRoutingChanged(audio_port_handle_t portHandle) override;
     // ------------------------------------------------------------------------------
 
     aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable);