Merge "Fix FLAC codec VTS failures" into pi-dev
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index a1a8cd6..c59d0e7 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -221,7 +221,7 @@
     mCallbacks.erase(cb);
 }
 
-void CameraManagerGlobal::getCameraIdList(std::vector<String8> *cameraIds) {
+void CameraManagerGlobal::getCameraIdList(std::vector<String8>* cameraIds) {
     // Ensure that we have initialized/refreshed the list of available devices
     auto cs = getCameraService();
     Mutex::Autolock _l(mLock);
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 4a172f3..cc42f77 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -19,6 +19,7 @@
 
 #include <camera/NdkCameraManager.h>
 
+#include <android-base/parseint.h>
 #include <android/hardware/ICameraService.h>
 #include <android/hardware/BnCameraServiceListener.h>
 #include <camera/CameraMetadata.h>
@@ -140,8 +141,29 @@
     static bool validStatus(int32_t status);
     static bool isStatusAvailable(int32_t status);
 
+    // The sort logic must match the logic in
+    // libcameraservice/common/CameraProviderManager.cpp::getAPI1CompatibleCameraDeviceIds
+    struct CameraIdComparator {
+        bool operator()(const String8& a, const String8& b) const {
+            uint32_t aUint = 0, bUint = 0;
+            bool aIsUint = base::ParseUint(a.c_str(), &aUint);
+            bool bIsUint = base::ParseUint(b.c_str(), &bUint);
+
+            // Uint device IDs first
+            if (aIsUint && bIsUint) {
+                return aUint < bUint;
+            } else if (aIsUint) {
+                return true;
+            } else if (bIsUint) {
+                return false;
+            }
+            // Simple string compare if neither id is a uint
+            return a < b;
+        }
+    };
+
     // Map camera_id -> status
-    std::map<String8, int32_t> mDeviceStatusMap;
+    std::map<String8, int32_t, CameraIdComparator> mDeviceStatusMap;
 
     // For the singleton instance
     static Mutex sLock;
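
A minimal standalone sketch of the ordering CameraIdComparator produces, using a local parseUint stand-in for android::base::ParseUint (the stand-in, the sample IDs and main() are illustrative only, not part of the change): numeric IDs sort first by value, everything else falls back to plain string order.

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// Rough stand-in for android::base::ParseUint: accept only non-empty, all-digit
// strings (the real helper also rejects overflow and sign characters).
static bool parseUint(const std::string& s, uint32_t* out) {
    if (s.empty() || s.find_first_not_of("0123456789") != std::string::npos) return false;
    *out = static_cast<uint32_t>(std::strtoul(s.c_str(), nullptr, 10));
    return true;
}

struct CameraIdComparator {
    bool operator()(const std::string& a, const std::string& b) const {
        uint32_t aUint = 0, bUint = 0;
        const bool aIsUint = parseUint(a, &aUint);
        const bool bIsUint = parseUint(b, &bUint);
        if (aIsUint && bIsUint) return aUint < bUint;  // numeric IDs first, ordered by value
        if (aIsUint) return true;
        if (bIsUint) return false;
        return a < b;                                  // plain string compare otherwise
    }
};

int main() {
    std::vector<std::string> ids = {"10", "ext-cam", "2", "0", "back"};
    std::sort(ids.begin(), ids.end(), CameraIdComparator());
    for (const auto& id : ids) std::cout << id << ' ';  // prints: 0 2 10 back ext-cam
    std::cout << '\n';
}
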
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2829b90..c7d2545 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1913,8 +1913,8 @@
      * the thumbnail data will also be rotated.</p>
      * <p>Note that this orientation is relative to the orientation of the camera sensor, given
      * by ACAMERA_SENSOR_ORIENTATION.</p>
-     * <p>To translate from the device orientation given by the Android sensor APIs, the following
-     * sample code may be used:</p>
+     * <p>To translate from the device orientation given by the Android sensor APIs for camera
+     * sensors which are not EXTERNAL, the following sample code may be used:</p>
      * <pre><code>private int getJpegOrientation(CameraCharacteristics c, int deviceOrientation) {
      *     if (deviceOrientation == android.view.OrientationEventListener.ORIENTATION_UNKNOWN) return 0;
      *     int sensorOrientation = c.get(CameraCharacteristics.SENSOR_ORIENTATION);
@@ -1933,6 +1933,8 @@
      *     return jpegOrientation;
      * }
      * </code></pre>
+     * <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
+     * also be set to EXTERNAL. The above code is not relevant in such a case.</p>
      *
      * @see ACAMERA_SENSOR_ORIENTATION
      */
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 40aeb9f..73ecda1 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -341,10 +341,10 @@
                 return OK;
             }
 
-            CryptoPlugin::SubSample *subSamples =
-                    new CryptoPlugin::SubSample[numSubSamples];
+            std::unique_ptr<CryptoPlugin::SubSample[]> subSamples =
+                    std::make_unique<CryptoPlugin::SubSample[]>(numSubSamples);
 
-            data.read(subSamples,
+            data.read(subSamples.get(),
                     sizeof(CryptoPlugin::SubSample) * numSubSamples);
 
             DestinationBuffer destination;
@@ -402,7 +402,7 @@
                 result = -EINVAL;
             } else {
                 result = decrypt(key, iv, mode, pattern, source, offset,
-                        subSamples, numSubSamples, destination, &errorDetailMsg);
+                        subSamples.get(), numSubSamples, destination, &errorDetailMsg);
             }
 
             reply->writeInt32(result);
@@ -421,9 +421,7 @@
                 }
             }
 
-            delete[] subSamples;
-            subSamples = NULL;
-
+            subSamples.reset();
             return OK;
         }
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index 300c688..d51e29d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -400,7 +400,7 @@
 
     if (level > SecurityLevel::SW_SECURE_CRYPTO) {
         ALOGE("Cannot set security level > max");
-        return Status::BAD_VALUE;
+        return Status::ERROR_DRM_CANNOT_HANDLE;
     }
 
     std::vector<uint8_t> sid = toVector(sessionId);
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index b4fa3c5..ca119d5 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -60,6 +60,8 @@
     volatile int32_t mRear;     // written by producer (output: client, input: server)
     volatile int32_t mFlush;    // incremented by client to indicate a request to flush;
                                 // server notices and discards all data between mFront and mRear
+    volatile int32_t mStop;     // set by client to indicate a stop frame position; server
+                                // will not read beyond this position until start is called.
     volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
     volatile uint32_t mUnderrunCount;  // server increments for each underrun occurrence
 };
@@ -335,6 +337,8 @@
         mTimestamp.clear();
     }
 
+    virtual void stop() { }; // called by client in AudioTrack::stop()
+
 private:
     // This is a copy of mCblk->mBufferSizeInFrames
     uint32_t   mBufferSizeInFrames;  // effective size of the buffer
@@ -383,8 +387,14 @@
         mPlaybackRateMutator.push(playbackRate);
     }
 
+    // Sends flush and stop position information from the client to the server,
+    // used by streaming AudioTrack flush() or stop().
+    void sendStreamingFlushStop(bool flush);
+
     virtual void flush();
 
+            void stop() override;
+
     virtual uint32_t    getUnderrunFrames() const {
         return mCblk->u.mStreaming.mUnderrunFrames;
     }
@@ -410,6 +420,8 @@
 
     virtual void    flush();
 
+    void stop() override;
+
 #define MIN_LOOP    16  // minimum length of each loop iteration in frames
 
             // setLoop(), setBufferPosition(), and setBufferPositionAndLoop() set the
@@ -532,6 +544,10 @@
     //   client will be notified via Futex
     virtual void    flushBufferIfNeeded();
 
+    // Returns the rear position of the AudioTrack shared ring buffer, limited by
+    // the stop frame position level.
+    virtual int32_t getRear() const = 0;
+
     // Total count of the number of flushed frames since creation (never reset).
     virtual int64_t     framesFlushed() const { return mFlushed; }
 
@@ -607,10 +623,18 @@
         return mDrained.load();
     }
 
+    int32_t             getRear() const override;
+
+    // Called on server side track start().
+    virtual void        start();
+
 private:
     AudioPlaybackRate             mPlaybackRate;  // last observed playback rate
     PlaybackRateQueue::Observer   mPlaybackRateObserver;
 
+    // Last client stop-at position when start() was called. Used for streaming AudioTracks.
+    std::atomic<int32_t>          mStopLast{0};
+
     // The server keeps a copy here where it is safe from the client.
     uint32_t                      mUnderrunCount; // echoed to mCblk
     bool                          mUnderrunning;  // used to detect edge of underrun
@@ -634,6 +658,10 @@
     virtual void        tallyUnderrunFrames(uint32_t frameCount);
     virtual uint32_t    getUnderrunFrames() const { return 0; }
 
+    int32_t getRear() const override;
+
+    void start() override { } // ignore for static tracks
+
 private:
     status_t            updateStateWithLoop(StaticAudioTrackState *localState,
                                             const StaticAudioTrackState &update) const;
@@ -661,6 +689,10 @@
             size_t frameSize, bool clientInServer)
         : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
 
+    int32_t getRear() const override {
+        return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
+    }
+
 protected:
     virtual ~AudioRecordServerProxy() { }
 };
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 07ef0e3..99f32d5 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1123,19 +1123,33 @@
 
         case FOURCC('t', 'r', 'e', 'f'):
         {
-            *offset += chunk_size;
-
-            if (mLastTrack == NULL) {
+            off64_t stop_offset = *offset + chunk_size;
+            *offset = data_offset;
+            while (*offset < stop_offset) {
+                status_t err = parseChunk(offset, depth + 1);
+                if (err != OK) {
+                    return err;
+                }
+            }
+            if (*offset != stop_offset) {
                 return ERROR_MALFORMED;
             }
+            break;
+        }
 
-            // Skip thumbnail track for now since we don't have an
-            // API to retrieve it yet.
-            // The thumbnail track can't be accessed by negative index or time,
-            // because each timed sample has its own corresponding thumbnail
-            // in the thumbnail track. We'll need a dedicated API to retrieve
-            // thumbnail at time instead.
-            mLastTrack->skipTrack = true;
+        case FOURCC('t', 'h', 'm', 'b'):
+        {
+            *offset += chunk_size;
+
+            if (mLastTrack != NULL) {
+                // Skip thumbnail track for now since we don't have an
+                // API to retrieve it yet.
+                // The thumbnail track can't be accessed by negative index or time,
+                // because each timed sample has its own corresponding thumbnail
+                // in the thumbnail track. We'll need a dedicated API to retrieve
+                // thumbnail at time instead.
+                mLastTrack->skipTrack = true;
+            }
 
             break;
         }
@@ -2353,7 +2367,9 @@
                     // This means that the file should have moov box.
                     // It could be any iso files (mp4, heifs, etc.)
                     mHasMoovBox = true;
-                    ALOGV("identified HEIF image with other tracks");
+                    if (mIsHeif) {
+                        ALOGV("identified HEIF image with other tracks");
+                    }
                 }
             }
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 50c1295..86791c2 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -770,6 +770,7 @@
         mReleased = 0;
     }
 
+    mProxy->stop(); // notify server not to read beyond current client position until start().
     mProxy->interrupt();
     mAudioTrack->stop();
 
@@ -2248,6 +2249,16 @@
         staticPosition = mStaticProxy->getPosition().unsignedValue();
     }
 
+    // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
+    // causes a lot of churn on the service side, and it can reject starting
+    // playback of a previously created track. May also apply to other cases.
+    const int INITIAL_RETRIES = 3;
+    int retries = INITIAL_RETRIES;
+retry:
+    if (retries < INITIAL_RETRIES) {
+        // See the comment for clearAudioConfigCache at the start of the function.
+        AudioSystem::clearAudioConfigCache();
+    }
     mFlags = mOrigFlags;
 
     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
@@ -2256,7 +2267,10 @@
     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
     status_t result = createTrack_l();
 
-    if (result == NO_ERROR) {
+    if (result != NO_ERROR) {
+        ALOGW("%s(): createTrack_l failed, do not retry", __func__);
+        retries = 0;
+    } else {
         // take the frames that will be lost by track recreation into account in saved position
         // For streaming tracks, this is the amount we obtained from the user/client
         // (not the number actually consumed at the server - those are already lost).
@@ -2301,7 +2315,10 @@
         mFramesWrittenAtRestore = mFramesWrittenServerOffset;
     }
     if (result != NO_ERROR) {
-        ALOGW("restoreTrack_l() failed status %d", result);
+        ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+        if (--retries > 0) {
+            goto retry;
+        }
         mState = STATE_STOPPED;
         mReleased = 0;
     }
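
The bounded-retry control flow that restoreTrack_l() now implements with a goto, rewritten here as a loop for illustration; clearConfigCache(), createTrack() and restoreState() are hypothetical stand-ins for AudioSystem::clearAudioConfigCache(), createTrack_l() and the position/loop/marker restore steps. Creation failures are not retried; failures in the later restore steps are retried up to three attempts, clearing the cached audio configuration before each re-attempt.

#include <cstdio>

// Hypothetical stand-ins for AudioSystem::clearAudioConfigCache(), createTrack_l(),
// and the post-creation position/loop/marker restore in restoreTrack_l().
static void clearConfigCache() { std::puts("audio config cache cleared"); }
static int  createTrack()      { return 0; }                                         // 0 == NO_ERROR
static int  restoreState()     { static int calls = 0; return ++calls < 2 ? -1 : 0; } // fail once, then succeed

// Same control flow as the goto-based retry, written as a loop.
int restoreTrack() {
    const int kInitialRetries = 3;
    int retries = kInitialRetries;
    int result;
    while (true) {
        if (retries < kInitialRetries) {
            clearConfigCache();       // only before a re-attempt, never on the first try
        }
        result = createTrack();
        if (result != 0) {
            retries = 0;              // creation itself failed: never retry
        } else {
            result = restoreState();  // restoring position/loop/marker may still fail
        }
        if (result == 0 || --retries <= 0) {
            break;                    // success, or out of retries (track left stopped)
        }
    }
    return result;
}

int main() { return restoreTrack(); }  // first restoreState() fails, the second attempt succeeds
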
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 7bf4f99..b4c179d 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -393,19 +393,50 @@
 
 // ---------------------------------------------------------------------------
 
-__attribute__((no_sanitize("integer")))
 void AudioTrackClientProxy::flush()
 {
+    sendStreamingFlushStop(true /* flush */);
+}
+
+void AudioTrackClientProxy::stop()
+{
+    sendStreamingFlushStop(false /* flush */);
+}
+
+// Sets the client-written mFlush and mStop positions, which control server behavior.
+//
+// @param flush indicates whether the operation is a flush or stop.
+// A client stop sets mStop to the current write position;
+// the server will not read past this point until start() or subsequent flush().
+// A client flush sets both mStop and mFlush to the current write position.
+// This advances any previously set server read limit and, on the next
+// server read, moves the server read position up to this limit.
+//
+void AudioTrackClientProxy::sendStreamingFlushStop(bool flush)
+{
+    // TODO: Replace this with 64-bit counters to avoid wrap complications.
     // This works for mFrameCountP2 <= 2^30
-    size_t increment = mFrameCountP2 << 1;
-    size_t mask = increment - 1;
-    audio_track_cblk_t* cblk = mCblk;
     // mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
     // Should newFlush = cblk->u.mStreaming.mRear?  Only problem is
     // if you want to flush twice to the same rear location after a 32 bit wrap.
-    int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
-                        ((cblk->u.mStreaming.mFlush & ~mask) + increment);
-    android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
+
+    const size_t increment = mFrameCountP2 << 1;
+    const size_t mask = increment - 1;
+    // No need for client atomic synchronization on mRear, mStop, mFlush
+    // as the AudioTrack client only reads/writes them under the client lock. The server only reads.
+    const int32_t rearMasked = mCblk->u.mStreaming.mRear & mask;
+
+    // update stop before flush so that the server front
+    // never advances beyond a (potential) previous stop's rear limit.
+    int32_t stopBits; // the following add can overflow
+    __builtin_add_overflow(mCblk->u.mStreaming.mStop & ~mask, increment, &stopBits);
+    android_atomic_release_store(rearMasked | stopBits, &mCblk->u.mStreaming.mStop);
+
+    if (flush) {
+        int32_t flushBits; // the following add can overflow
+        __builtin_add_overflow(mCblk->u.mStreaming.mFlush & ~mask, increment, &flushBits);
+        android_atomic_release_store(rearMasked | flushBits, &mCblk->u.mStreaming.mFlush);
+    }
 }
 
 bool AudioTrackClientProxy::clearStreamEndDone() {
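
A worked illustration of the [ counter | newfront_offset ] packing that sendStreamingFlushStop() writes into mStop and mFlush (the concrete numbers below are made up): the low bits carry the masked rear position and the high bits carry a counter bumped on every flush or stop, so the server can tell a fresh request from an old one even when the masked position repeats. The real code performs the add with __builtin_add_overflow because the counter is allowed to wrap.

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t frameCountP2 = 1u << 10;         // 1024-frame buffer (mFrameCountP2)
    const uint32_t increment = frameCountP2 << 1;   // 0x800: one step of the high counter
    const uint32_t mask = increment - 1;            // 0x7ff: low bits hold rear modulo 2*frameCountP2

    const uint32_t rear = 0x12345;                  // free-running client write position
    uint32_t stop = 0;                              // previously published stop word

    // Same packing as sendStreamingFlushStop(): masked rear in the low bits,
    // counter bumped in the high bits (unsigned add, so wrapping is well defined).
    stop = (rear & mask) | ((stop & ~mask) + increment);

    std::printf("rear=0x%x -> stop word=0x%x (offset=0x%x, counter=%u)\n",
                rear, stop, stop & mask, (stop & ~mask) / increment);
    // rear=0x12345 -> stop word=0xb45 (offset=0x345, counter=1)
}
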
@@ -540,6 +571,11 @@
     LOG_ALWAYS_FATAL("static flush");
 }
 
+void StaticAudioTrackClientProxy::stop()
+{
+    ; // no special handling required for static tracks.
+}
+
 void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
 {
     // This can only happen on a 64-bit client
@@ -638,6 +674,7 @@
     if (flush != mFlush) {
         ALOGV("ServerProxy::flushBufferIfNeeded() mStreaming.mFlush = 0x%x, mFlush = 0x%0x",
                 flush, mFlush);
+        // shouldn't matter, but for range safety use mRear instead of getRear().
         int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
         int32_t front = cblk->u.mStreaming.mFront;
 
@@ -677,6 +714,45 @@
 }
 
 __attribute__((no_sanitize("integer")))
+int32_t AudioTrackServerProxy::getRear() const
+{
+    const int32_t stop = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+    const int32_t rear = android_atomic_acquire_load(&mCblk->u.mStreaming.mRear);
+    const int32_t stopLast = mStopLast.load(std::memory_order_acquire);
+    if (stop != stopLast) {
+        const int32_t front = mCblk->u.mStreaming.mFront;
+        const size_t overflowBit = mFrameCountP2 << 1;
+        const size_t mask = overflowBit - 1;
+        int32_t newRear = (rear & ~mask) | (stop & mask);
+        ssize_t filled = newRear - front;
+        if (filled >= (ssize_t)overflowBit) {
+            // front and rear offsets span the overflow bit of the p2 mask,
+            // so rebase newRear onto the mask to get the correct offset.
+            ALOGV("stop wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+            newRear -= overflowBit;
+            filled -= overflowBit;
+        }
+        if (0 <= filled && (size_t) filled <= mFrameCount) {
+            // we're stopped, return the stop level as newRear
+            return newRear;
+        }
+
+        // A corrupt stop. Log error and ignore.
+        ALOGE("mStopLast %#x -> stop %#x, front %#x, rear %#x, mask %#x, newRear %#x, "
+                "filled %zd=%#x",
+                stopLast, stop, front, rear,
+                (unsigned)mask, newRear, filled, (unsigned)filled);
+        // Don't reset mStopLast as this is const.
+    }
+    return rear;
+}
+
+void AudioTrackServerProxy::start()
+{
+    mStopLast = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+}
+
+__attribute__((no_sanitize("integer")))
 status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
 {
     LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0,
@@ -693,7 +769,7 @@
     // See notes on barriers at ClientProxy::obtainBuffer()
     if (mIsOut) {
         flushBufferIfNeeded(); // might modify mFront
-        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+        rear = getRear();
         front = cblk->u.mStreaming.mFront;
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -825,8 +901,7 @@
         // FIXME should return an accurate value, but over-estimate is better than under-estimate
         return mFrameCount;
     }
-    // the acquire might not be necessary since not doing a subsequent read
-    int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+    const int32_t rear = getRear();
     ssize_t filled = rear - cblk->u.mStreaming.mFront;
     // pipe should not already be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
@@ -852,7 +927,7 @@
     if (flush != mFlush) {
         return mFrameCount;
     }
-    const int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+    const int32_t rear = getRear();
     const ssize_t filled = rear - cblk->u.mStreaming.mFront;
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
         return 0; // error condition, silently return 0.
@@ -1149,6 +1224,12 @@
     }
 }
 
+int32_t StaticAudioTrackServerProxy::getRear() const
+{
+    LOG_ALWAYS_FATAL("getRear() not permitted for static tracks");
+    return 0;
+}
+
 // ---------------------------------------------------------------------------
 
 }   // namespace android
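
A numeric sketch of the server-side clamp in AudioTrackServerProxy::getRear(), with made-up values and assuming mFrameCount equals mFrameCountP2: the stop word's masked offset is spliced into rear's high bits, and when the resulting fill level passes the sanity check the server treats that spliced value, not the raw rear, as the end of readable data.

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t frameCountP2 = 1u << 10;            // 1024-frame buffer (mFrameCountP2)
    const uint32_t overflowBit  = frameCountP2 << 1;   // 0x800
    const uint32_t mask         = overflowBit - 1;     // 0x7ff

    const int32_t front = 0x400;          // server read position
    const int32_t rear  = 0x480;          // client kept writing after calling stop()
    const int32_t stop  = 0x800 | 0x450;  // stop word: counter 1, stop position masked to 0x450

    // Same arithmetic as getRear(): splice the stop offset into rear's high bits,
    // then sanity-check the resulting fill level.
    int32_t newRear = (rear & ~(int32_t)mask) | (stop & (int32_t)mask);
    int32_t filled  = newRear - front;
    if (filled >= (int32_t)overflowBit) { // rear crossed the p2 boundary after stop(): rebase
        newRear -= (int32_t)overflowBit;
        filled  -= (int32_t)overflowBit;
    }
    const bool stopHonored = filled >= 0 && (uint32_t)filled <= frameCountP2;  // assumes mFrameCount == mFrameCountP2
    std::printf("front=0x%x rear=0x%x stop offset=0x%x -> readable rear=0x%x\n",
                (unsigned)front, (unsigned)rear, (unsigned)(stop & (int32_t)mask),
                (unsigned)(stopHonored ? newRear : rear));
    // front=0x400 rear=0x480 stop offset=0x450 -> readable rear=0x450
}
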
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index a20f1f2..77cfe4d 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -87,7 +87,7 @@
     GET_AUDIO_HW_SYNC_FOR_SESSION,
     SYSTEM_READY,
     FRAME_COUNT_HAL,
-    LIST_MICROPHONES,
+    GET_MICROPHONES,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -849,7 +849,7 @@
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        status_t status = remote()->transact(LIST_MICROPHONES, data, &reply);
+        status_t status = remote()->transact(GET_MICROPHONES, data, &reply);
         if (status != NO_ERROR ||
                 (status = (status_t)reply.readInt32()) != NO_ERROR) {
             return status;
@@ -1444,7 +1444,7 @@
             reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
-        case LIST_MICROPHONES: {
+        case GET_MICROPHONES: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             std::vector<media::MicrophoneInfo> microphones;
             status_t status = getMicrophones(&microphones);
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
index 53c1652..5b99d70 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/2.0/DeviceHalHidl.cpp
@@ -346,6 +346,12 @@
     return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
 }
 
+status_t DeviceHalHidl::getMicrophones(
+        std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+    if (mDevice == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
+
 status_t DeviceHalHidl::dump(int fd) {
     if (mDevice == 0) return NO_INIT;
     native_handle_t* hidlHandle = native_handle_create(1, 0);
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/2.0/DeviceHalHidl.h
index 8651b51..3c1cb59 100644
--- a/media/libaudiohal/2.0/DeviceHalHidl.h
+++ b/media/libaudiohal/2.0/DeviceHalHidl.h
@@ -107,6 +107,9 @@
     // Set audio port configuration.
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    // List microphones
+    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
     virtual status_t dump(int fd);
 
   private:
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
index fc098f5..ec3bf78 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/2.0/DeviceHalLocal.cpp
@@ -184,6 +184,11 @@
         return INVALID_OPERATION;
 }
 
+status_t DeviceHalLocal::getMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    return INVALID_OPERATION;
+}
+
 status_t DeviceHalLocal::dump(int fd) {
     return mDev->dump(mDev, fd);
 }
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/2.0/DeviceHalLocal.h
index 865f296..aec201a 100644
--- a/media/libaudiohal/2.0/DeviceHalLocal.h
+++ b/media/libaudiohal/2.0/DeviceHalLocal.h
@@ -100,6 +100,9 @@
     // Set audio port configuration.
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    // List microphones
+    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
     virtual status_t dump(int fd);
 
     void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
index 0cafa36..9869cd2 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/2.0/StreamHalHidl.cpp
@@ -555,6 +555,11 @@
     }
 }
 
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+    // Audio HAL V2.0 does not support propagating source metadata
+    return INVALID_OPERATION;
+}
+
 void StreamOutHalHidl::onWriteReady() {
     sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
     if (callback == 0) return;
@@ -749,4 +754,15 @@
     }
 }
 
+status_t StreamInHalHidl::getActiveMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    if (mStream == 0) return NO_INIT;
+    return INVALID_OPERATION;
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+    // Audio HAL V2.0 does not support propagating sink metadata
+    return INVALID_OPERATION;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/2.0/StreamHalHidl.h
index d4ab943..ebad8ae 100644
--- a/media/libaudiohal/2.0/StreamHalHidl.h
+++ b/media/libaudiohal/2.0/StreamHalHidl.h
@@ -161,6 +161,9 @@
     // Return a recent count of the number of audio frames presented to an external observer.
     virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
 
+    // Called when the metadata of the stream's source has been changed.
+    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
     // Methods used by StreamOutCallback (HIDL).
     void onWriteReady();
     void onDrainReady();
@@ -210,6 +213,12 @@
     // the clock time associated with that frame count.
     virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
 
+    // Get active microphones
+    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+    // Called when the metadata of the stream's sink has been changed.
+    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
   private:
     friend class DeviceHalHidl;
     typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
index 8d61e24..98107e5 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/2.0/StreamHalLocal.cpp
@@ -231,6 +231,19 @@
     return mStream->get_presentation_position(mStream, frames, timestamp);
 }
 
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+    if (mStream->update_source_metadata == nullptr) {
+        return INVALID_OPERATION;
+    }
+    const source_metadata_t metadata {
+        .track_count = sourceMetadata.tracks.size(),
+        // const cast is fine as it is in a const structure
+        .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+    };
+    mStream->update_source_metadata(mStream, &metadata);
+    return OK;
+}
+
 status_t StreamOutHalLocal::start() {
     if (mStream->start == NULL) return INVALID_OPERATION;
     return mStream->start(mStream);
@@ -292,6 +305,19 @@
     return mStream->get_capture_position(mStream, frames, time);
 }
 
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+    if (mStream->update_sink_metadata == nullptr) {
+        return INVALID_OPERATION;
+    }
+    const sink_metadata_t metadata {
+        .track_count = sinkMetadata.tracks.size(),
+        // const cast is fine as it is in a const structure
+        .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+    };
+    mStream->update_sink_metadata(mStream, &metadata);
+    return OK;
+}
+
 status_t StreamInHalLocal::start() {
     if (mStream->start == NULL) return INVALID_OPERATION;
     return mStream->start(mStream);
@@ -313,4 +339,9 @@
     return mStream->get_mmap_position(mStream, position);
 }
 
+status_t StreamInHalLocal::getActiveMicrophones(
+        std::vector<media::MicrophoneInfo> *microphones __unused) {
+    return INVALID_OPERATION;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/2.0/StreamHalLocal.h
index c7136df..cda8d0c 100644
--- a/media/libaudiohal/2.0/StreamHalLocal.h
+++ b/media/libaudiohal/2.0/StreamHalLocal.h
@@ -149,6 +149,9 @@
     // Get current read/write position in the mmap buffer
     virtual status_t getMmapPosition(struct audio_mmap_position *position);
 
+    // Called when the metadata of the stream's source has been changed.
+    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
   private:
     audio_stream_out_t *mStream;
     wp<StreamOutHalInterfaceCallback> mCallback;
@@ -194,6 +197,12 @@
     // Get current read/write position in the mmap buffer
     virtual status_t getMmapPosition(struct audio_mmap_position *position);
 
+    // Get active microphones
+    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+    // Called when the metadata of the stream's sink has been changed.
+    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
   private:
     audio_stream_in_t *mStream;
 
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/4.0/Android.bp
index 3d104ab..833defa 100644
--- a/media/libaudiohal/4.0/Android.bp
+++ b/media/libaudiohal/4.0/Android.bp
@@ -26,6 +26,7 @@
     shared_libs: [
         "libaudiohal_deathhandler",
         "libaudioutils",
+        "libbinder",
         "libcutils",
         "liblog",
         "libutils",
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
index a3cc28f..fe27504 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
@@ -22,6 +22,11 @@
 
 #include "ConversionHelperHidl.h"
 
+using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
+using ::android::hardware::audio::V4_0::DeviceAddress;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
 using ::android::hardware::audio::V4_0::Result;
 
 namespace android {
@@ -101,5 +106,132 @@
     ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
 }
 
+// TODO: Use the same implementation in the hal when it moves to a util library.
+std::string deviceAddressToHal(const DeviceAddress& address) {
+    // HAL assumes that the address is NUL-terminated.
+    char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+    memset(halAddress, 0, sizeof(halAddress));
+    audio_devices_t halDevice = static_cast<audio_devices_t>(address.device);
+    const bool isInput = (halDevice & AUDIO_DEVICE_BIT_IN) != 0;
+    if (isInput) halDevice &= ~AUDIO_DEVICE_BIT_IN;
+    if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != 0) ||
+        (isInput && (halDevice & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+        snprintf(halAddress, sizeof(halAddress), "%02X:%02X:%02X:%02X:%02X:%02X",
+                 address.address.mac[0], address.address.mac[1], address.address.mac[2],
+                 address.address.mac[3], address.address.mac[4], address.address.mac[5]);
+    } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_IP) != 0) ||
+               (isInput && (halDevice & AUDIO_DEVICE_IN_IP) != 0)) {
+        snprintf(halAddress, sizeof(halAddress), "%d.%d.%d.%d", address.address.ipv4[0],
+                 address.address.ipv4[1], address.address.ipv4[2], address.address.ipv4[3]);
+    } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_USB) != 0) ||
+               (isInput && (halDevice & AUDIO_DEVICE_IN_ALL_USB) != 0)) {
+        snprintf(halAddress, sizeof(halAddress), "card=%d;device=%d", address.address.alsa.card,
+                 address.address.alsa.device);
+    } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_BUS) != 0) ||
+               (isInput && (halDevice & AUDIO_DEVICE_IN_BUS) != 0)) {
+        snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+    } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_REMOTE_SUBMIX) != 0) ||
+               (isInput && (halDevice & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+        snprintf(halAddress, sizeof(halAddress), "%s", address.rSubmixAddress.c_str());
+    } else {
+        snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+    }
+    return halAddress;
+}
+
+// local conversion helpers
+
+audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+    switch (mapping) {
+        case AudioMicrophoneChannelMapping::UNUSED:
+            return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+        case AudioMicrophoneChannelMapping::DIRECT:
+            return AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT;
+        case AudioMicrophoneChannelMapping::PROCESSED:
+            return AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED;
+        default:
+            LOG_ALWAYS_FATAL("Unknown channelMappingToHal conversion %d", mapping);
+    }
+}
+
+audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+    switch (location) {
+        case AudioMicrophoneLocation::UNKNOWN:
+            return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
+        case AudioMicrophoneLocation::MAINBODY:
+            return AUDIO_MICROPHONE_LOCATION_MAINBODY;
+        case AudioMicrophoneLocation::MAINBODY_MOVABLE:
+            return AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE;
+        case AudioMicrophoneLocation::PERIPHERAL:
+            return AUDIO_MICROPHONE_LOCATION_PERIPHERAL;
+        default:
+            LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
+    }
+}
+audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+    switch (dir) {
+        case AudioMicrophoneDirectionality::UNKNOWN:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
+        case AudioMicrophoneDirectionality::OMNI:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
+        case AudioMicrophoneDirectionality::BI_DIRECTIONAL:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL;
+        case AudioMicrophoneDirectionality::CARDIOID:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID;
+        case AudioMicrophoneDirectionality::HYPER_CARDIOID:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID;
+        case AudioMicrophoneDirectionality::SUPER_CARDIOID:
+            return AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID;
+        default:
+            LOG_ALWAYS_FATAL("Unknown directionalityToHal conversion %d", dir);
+    }
+}
+
+// static
+void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
+                                                     audio_microphone_characteristic_t *pDst) {
+    if (pDst != NULL) {
+        snprintf(pDst->device_id, sizeof(pDst->device_id),
+                 "%s", src.deviceId.c_str());
+        pDst->device = static_cast<audio_devices_t>(src.deviceAddress.device);
+        snprintf(pDst->address, sizeof(pDst->address),
+                 "%s", deviceAddressToHal(src.deviceAddress).c_str());
+        if (src.channelMapping.size() > AUDIO_CHANNEL_COUNT_MAX) {
+            ALOGW("microphoneInfoToStruct found %zu channelMapping elements. Max expected is %d",
+                  src.channelMapping.size(), AUDIO_CHANNEL_COUNT_MAX);
+        }
+        size_t ch;
+        for (ch = 0; ch < src.channelMapping.size() && ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+            pDst->channel_mapping[ch] = channelMappingToHal(src.channelMapping[ch]);
+        }
+        for (; ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+            pDst->channel_mapping[ch] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+        }
+        pDst->location = locationToHal(src.location);
+        pDst->group = (audio_microphone_group_t)src.group;
+        pDst->index_in_the_group = (unsigned int)src.indexInTheGroup;
+        pDst->sensitivity = src.sensitivity;
+        pDst->max_spl = src.maxSpl;
+        pDst->min_spl = src.minSpl;
+        pDst->directionality = directionalityToHal(src.directionality);
+        pDst->num_frequency_responses = (unsigned int)src.frequencyResponse.size();
+        if (pDst->num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+            ALOGW("microphoneInfoToStruct found %d frequency responses. Max expected is %d",
+                  pDst->num_frequency_responses, AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES);
+            pDst->num_frequency_responses = AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES;
+        }
+        for (size_t k = 0; k < pDst->num_frequency_responses; k++) {
+            pDst->frequency_responses[0][k] = src.frequencyResponse[k].frequency;
+            pDst->frequency_responses[1][k] = src.frequencyResponse[k].level;
+        }
+        pDst->geometric_location.x = src.position.x;
+        pDst->geometric_location.y = src.position.y;
+        pDst->geometric_location.z = src.position.z;
+        pDst->orientation.x = src.orientation.x;
+        pDst->orientation.y = src.orientation.y;
+        pDst->orientation.z = src.orientation.z;
+    }
+}
+
 }  // namespace V4_0
 }  // namespace android
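
For reference, the address string shapes that deviceAddressToHal() produces for the main device families, shown with made-up values; the snprintf formats below mirror the ones in the function, and the surrounding main() is illustrative only.

#include <cstdio>

int main() {
    char buf[32];

    // Bluetooth A2DP devices: colon-separated MAC address.
    const unsigned char mac[6] = {0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A};
    std::snprintf(buf, sizeof(buf), "%02X:%02X:%02X:%02X:%02X:%02X",
                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    std::printf("a2dp: %s\n", buf);                      // F0:12:34:56:78:9A

    // IP devices: dotted IPv4 address.
    const unsigned char ipv4[4] = {192, 168, 1, 20};
    std::snprintf(buf, sizeof(buf), "%d.%d.%d.%d", ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
    std::printf("ip:   %s\n", buf);                      // 192.168.1.20

    // USB devices: ALSA card/device pair.
    std::snprintf(buf, sizeof(buf), "card=%d;device=%d", 1, 0);
    std::printf("usb:  %s\n", buf);                      // card=1;device=0

    // Bus and remote-submix devices pass their address strings through unchanged.
    return 0;
}
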
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
index ddc8569..8823a8d 100644
--- a/media/libaudiohal/4.0/ConversionHelperHidl.h
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.h
@@ -19,9 +19,11 @@
 
 #include <android/hardware/audio/4.0/types.h>
 #include <hidl/HidlSupport.h>
+#include <system/audio.h>
 #include <utils/String8.h>
 
 using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
 using ::android::hardware::Return;
 using ::android::hardware::hidl_string;
 using ::android::hardware::hidl_vec;
@@ -34,6 +36,8 @@
     static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
     static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
     static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+    static void microphoneInfoToHal(const MicrophoneInfo& src,
+                                    audio_microphone_characteristic_t *pDst);
 
     ConversionHelperHidl(const char* className);
 
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/4.0/DeviceHalHidl.cpp
index 1f33b97..6facca9 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.cpp
+++ b/media/libaudiohal/4.0/DeviceHalHidl.cpp
@@ -359,6 +359,23 @@
     return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
 }
 
+status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
+    if (mDevice == 0) return NO_INIT;
+    Result retval;
+    Return<void> ret = mDevice->getMicrophones(
+            [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+        retval = r;
+        for (size_t k = 0; k < micArrayHal.size(); k++) {
+            audio_microphone_characteristic_t dst;
+            // convert
+            microphoneInfoToHal(micArrayHal[k], &dst);
+            media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+            microphonesInfo->push_back(microphone);
+        }
+    });
+    return processReturn("getMicrophones", ret, retval);
+}
+
 status_t DeviceHalHidl::dump(int fd) {
     if (mDevice == 0) return NO_INIT;
     native_handle_t* hidlHandle = native_handle_create(1, 0);
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
index f460add..0bd2175 100644
--- a/media/libaudiohal/4.0/DeviceHalHidl.h
+++ b/media/libaudiohal/4.0/DeviceHalHidl.h
@@ -108,6 +108,9 @@
     // Set audio port configuration.
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    // List microphones
+    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
     virtual status_t dump(int fd);
 
   private:
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/4.0/DeviceHalLocal.cpp
index e64eee1..a245dd9 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.cpp
+++ b/media/libaudiohal/4.0/DeviceHalLocal.cpp
@@ -185,6 +185,18 @@
         return INVALID_OPERATION;
 }
 
+status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+    if (mDev->get_microphones == NULL) return INVALID_OPERATION;
+    size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+    audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+    status_t status = mDev->get_microphones(mDev, &mic_array[0], &actual_mics);
+    for (size_t i = 0; i < actual_mics; i++) {
+        media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+        microphones->push_back(microphoneInfo);
+    }
+    return status;
+}
+
 status_t DeviceHalLocal::dump(int fd) {
     return mDev->dump(mDev, fd);
 }
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
index daafdc7..08341a4 100644
--- a/media/libaudiohal/4.0/DeviceHalLocal.h
+++ b/media/libaudiohal/4.0/DeviceHalLocal.h
@@ -101,6 +101,9 @@
     // Set audio port configuration.
     virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+    // List microphones
+    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
     virtual status_t dump(int fd);
 
     void closeOutputStream(struct audio_stream_out *stream_out);
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/4.0/StreamHalHidl.cpp
index de16e98..1c2fdb0 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.cpp
+++ b/media/libaudiohal/4.0/StreamHalHidl.cpp
@@ -28,14 +28,20 @@
 #include "VersionUtils.h"
 
 using ::android::hardware::audio::common::V4_0::AudioChannelMask;
+using ::android::hardware::audio::common::V4_0::AudioContentType;
 using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::common::V4_0::AudioSource;
+using ::android::hardware::audio::common::V4_0::AudioUsage;
 using ::android::hardware::audio::common::V4_0::ThreadInfo;
 using ::android::hardware::audio::V4_0::AudioDrain;
 using ::android::hardware::audio::V4_0::IStreamOutCallback;
 using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
 using ::android::hardware::audio::V4_0::MmapBufferInfo;
 using ::android::hardware::audio::V4_0::MmapPosition;
 using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
+using ::android::hardware::audio::V4_0::RecordTrackMetadata;
 using ::android::hardware::audio::V4_0::Result;
 using ::android::hardware::audio::V4_0::TimeSpec;
 using ::android::hardware::MQDescriptorSync;
@@ -560,6 +566,28 @@
     }
 }
 
+/** Transform a standard collection to an HIDL vector. */
+template <class Values, class ElementConverter>
+static auto transformToHidlVec(const Values& values, ElementConverter converter) {
+    hidl_vec<decltype(converter(*values.begin()))> result{values.size()};
+    using namespace std;
+    transform(begin(values), end(values), begin(result), converter);
+    return result;
+}
+
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+    hardware::audio::V4_0::SourceMetadata halMetadata = {
+        .tracks = transformToHidlVec(sourceMetadata.tracks,
+              [](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
+                  return {
+                    .usage=static_cast<AudioUsage>(metadata.usage),
+                    .contentType=static_cast<AudioContentType>(metadata.content_type),
+                    .gain=metadata.gain,
+                  };
+              })};
+    return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
+}
+
 void StreamOutHalHidl::onWriteReady() {
     sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
     if (callback == 0) return;
@@ -754,5 +782,36 @@
     }
 }
 
+
+status_t StreamInHalHidl::getActiveMicrophones(
+        std::vector<media::MicrophoneInfo> *microphonesInfo) {
+    if (!mStream) return NO_INIT;
+    Result retval;
+    Return<void> ret = mStream->getActiveMicrophones(
+            [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+        retval = r;
+        for (size_t k = 0; k < micArrayHal.size(); k++) {
+            audio_microphone_characteristic_t dst;
+            // convert
+            microphoneInfoToHal(micArrayHal[k], &dst);
+            media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+            microphonesInfo->push_back(microphone);
+        }
+    });
+    return processReturn("getActiveMicrophones", ret, retval);
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+    hardware::audio::V4_0::SinkMetadata halMetadata = {
+        .tracks = transformToHidlVec(sinkMetadata.tracks,
+              [](const record_track_metadata& metadata) -> RecordTrackMetadata {
+                  return {
+                    .source=static_cast<AudioSource>(metadata.source),
+                    .gain=metadata.gain,
+                  };
+              })};
+    return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
+}
+
 } // namespace V4_0
 } // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
index 8d4dc8c..2dda0f8 100644
--- a/media/libaudiohal/4.0/StreamHalHidl.h
+++ b/media/libaudiohal/4.0/StreamHalHidl.h
@@ -162,6 +162,9 @@
     // Return a recent count of the number of audio frames presented to an external observer.
     virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
 
+    // Called when the metadata of the stream's source has been changed.
+    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
     // Methods used by StreamOutCallback (HIDL).
     void onWriteReady();
     void onDrainReady();
@@ -211,6 +214,12 @@
     // the clock time associated with that frame count.
     virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
 
+    // Get active microphones
+    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+    // Called when the metadata of the stream's sink has been changed.
+    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
   private:
     friend class DeviceHalHidl;
     typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/4.0/StreamHalLocal.cpp
index 592a931..e9d96bf 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.cpp
+++ b/media/libaudiohal/4.0/StreamHalLocal.cpp
@@ -233,6 +233,19 @@
     return mStream->get_presentation_position(mStream, frames, timestamp);
 }
 
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+    if (mStream->update_source_metadata == nullptr) {
+        return INVALID_OPERATION;
+    }
+    const source_metadata_t metadata {
+        .track_count = sourceMetadata.tracks.size(),
+        // const cast is fine as it is in a const structure
+        .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+    };
+    mStream->update_source_metadata(mStream, &metadata);
+    return OK;
+}
+
 status_t StreamOutHalLocal::start() {
     if (mStream->start == NULL) return INVALID_OPERATION;
     return mStream->start(mStream);
@@ -294,6 +307,19 @@
     return mStream->get_capture_position(mStream, frames, time);
 }
 
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+    if (mStream->update_sink_metadata == nullptr) {
+        return INVALID_OPERATION;
+    }
+    const sink_metadata_t metadata {
+        .track_count = sinkMetadata.tracks.size(),
+        // const cast is fine as it is in a const structure
+        .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+    };
+    mStream->update_sink_metadata(mStream, &metadata);
+    return OK;
+}
+
 status_t StreamInHalLocal::start() {
     if (mStream->start == NULL) return INVALID_OPERATION;
     return mStream->start(mStream);
@@ -315,5 +341,17 @@
     return mStream->get_mmap_position(mStream, position);
 }
 
+status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+    if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
+    size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+    audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+    status_t status = mStream->get_active_microphones(mStream, &mic_array[0], &actual_mics);
+    for (size_t i = 0; i < actual_mics; i++) {
+        media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+        microphones->push_back(microphoneInfo);
+    }
+    return status;
+}
+
 } // namespace V4_0
 } // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
index 076bc4c..7237509 100644
--- a/media/libaudiohal/4.0/StreamHalLocal.h
+++ b/media/libaudiohal/4.0/StreamHalLocal.h
@@ -150,6 +150,9 @@
     // Get current read/write position in the mmap buffer
     virtual status_t getMmapPosition(struct audio_mmap_position *position);
 
+    // Called when the metadata of the stream's source has been changed.
+    status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
   private:
     audio_stream_out_t *mStream;
     wp<StreamOutHalInterfaceCallback> mCallback;
@@ -195,6 +198,12 @@
     // Get current read/write position in the mmap buffer
     virtual status_t getMmapPosition(struct audio_mmap_position *position);
 
+    // Get active microphones
+    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+    // Called when the metadata of the stream's sink has been changed.
+    status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
   private:
     audio_stream_in_t *mStream;
 
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index caf01be..7de8eb3 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 #define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
 
+#include <media/MicrophoneInfo.h>
 #include <system/audio.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
@@ -105,6 +106,9 @@
     // Set audio port configuration.
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
 
+    // List microphones
+    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
     virtual status_t dump(int fd) = 0;
 
   protected:
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 7419c34..c969e28 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -17,7 +17,10 @@
 #ifndef ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
 #define ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
 
+#include <vector>
+
 #include <media/audiohal/EffectHalInterface.h>
+#include <media/MicrophoneInfo.h>
 #include <system/audio.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
@@ -142,6 +145,15 @@
     // Return a recent count of the number of audio frames presented to an external observer.
     virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) = 0;
 
+    struct SourceMetadata {
+        std::vector<playback_track_metadata_t> tracks;
+    };
+    /**
+     * Called when the metadata of the stream's source has been changed.
+     * @param sourceMetadata Description of the audio that is played by the clients.
+     */
+    virtual status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) = 0;
+
   protected:
     virtual ~StreamOutHalInterface() {}
 };
@@ -161,6 +173,18 @@
     // the clock time associated with that frame count.
     virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
 
+    // Get active microphones
+    virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
+    struct SinkMetadata {
+        std::vector<record_track_metadata_t> tracks;
+    };
+    /**
+     * Called when the metadata of the stream's sink has been changed.
+     * @param sinkMetadata Description of the audio that is suggested by the clients.
+     */
+    virtual status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) = 0;
+
   protected:
     virtual ~StreamInHalInterface() {}
 };
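
A sketch of how a caller might fill the new SourceMetadata struct and push it to an output stream; the local type definitions and FakeStreamOutHal are stand-ins for the real playback_track_metadata_t, StreamOutHalInterface and a playback thread, and the numeric usage/content-type values are assumed to correspond to AUDIO_USAGE_MEDIA and AUDIO_CONTENT_TYPE_MUSIC.

#include <vector>

// Stand-ins for the real types: playback_track_metadata_t lives in system/audio.h,
// SourceMetadata and updateSourceMetadata() on StreamOutHalInterface.
struct playback_track_metadata_t { int usage; int content_type; float gain; };
struct SourceMetadata { std::vector<playback_track_metadata_t> tracks; };

struct FakeStreamOutHal {
    int updateSourceMetadata(const SourceMetadata& metadata) {
        // A 4.0 HAL stream forwards this over HIDL; a 2.0 stream returns INVALID_OPERATION.
        return metadata.tracks.empty() ? -1 : 0;
    }
};

int main() {
    SourceMetadata metadata;
    // One entry per client track currently mixed into this output stream.
    metadata.tracks.push_back({/*usage=*/1 /* assumed AUDIO_USAGE_MEDIA */,
                               /*content_type=*/2 /* assumed AUDIO_CONTENT_TYPE_MUSIC */,
                               /*gain=*/1.0f});
    FakeStreamOutHal stream;
    return stream.updateSourceMetadata(metadata);
}
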
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 4ed3ba8..d79501f 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -203,7 +203,7 @@
         auto parseProxy = [&xmlEffect, &parseImpl](const char* tag, EffectImpl& proxyLib) {
             auto* xmlProxyLib = xmlEffect.FirstChildElement(tag);
             if (xmlProxyLib == nullptr) {
-                ALOGE("effectProxy must contain a <%s>: %s", tag, dump(*xmlProxyLib));
+                ALOGE("effectProxy must contain a <%s>: %s", tag, dump(xmlEffect));
                 return false;
             }
             return parseImpl(*xmlProxyLib, proxyLib);
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index 14a171b..dd729c5 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -38,6 +38,9 @@
   loudness_enhancer {
     path /vendor/lib/soundfx/libldnhncr.so
   }
+  dynamics_processing {
+    path /vendor/lib/soundfx/libdynproc.so
+  }
 }
 
 # Default pre-processing library. Add to audio_effect.conf "libraries" section if
@@ -129,6 +132,10 @@
     library loudness_enhancer
     uuid fa415329-2034-4bea-b5dc-5b381c8d1e2c
   }
+  dynamics_processing {
+    library dynamics_processing
+    uuid e0e6539b-1781-7261-676f-6d7573696340
+  }
 }
 
 # Default pre-processing effects. Add to audio_effect.conf "effects" section if
diff --git a/media/libeffects/dynamicsproc/Android.mk b/media/libeffects/dynamicsproc/Android.mk
new file mode 100644
index 0000000..7be0c49
--- /dev/null
+++ b/media/libeffects/dynamicsproc/Android.mk
@@ -0,0 +1,43 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+# DynamicsProcessing library
+include $(CLEAR_VARS)
+
+LOCAL_VENDOR_MODULE := true
+
+EIGEN_PATH := external/eigen
+LOCAL_C_INCLUDES += $(EIGEN_PATH)
+
+LOCAL_SRC_FILES:= \
+    EffectDynamicsProcessing.cpp \
+    dsp/DPBase.cpp \
+    dsp/DPFrequency.cpp
+
+LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    liblog \
+
+LOCAL_MODULE_RELATIVE_PATH := soundfx
+LOCAL_MODULE:= libdynproc
+
+LOCAL_HEADER_LIBRARIES := \
+    libaudioeffects
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
new file mode 100644
index 0000000..55383eb
--- /dev/null
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -0,0 +1,1259 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectDP"
+//#define LOG_NDEBUG 0
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <new>
+
+#include <log/log.h>
+
+#include <audio_effects/effect_dynamicsprocessing.h>
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while (false)
+#endif
+
+// union to hold command values
+using value_t = union {
+    int32_t i;
+    float f;
+};
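+// Parameter values are exchanged with the framework as a packed array of
+// 32-bit words that can hold either int32_t or float entries; value_t lets the
+// parameter handlers read and write those entries without per-entry casts.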
+
+// effect_handle_t interface implementation for DP effect
+extern const struct effect_interface_s gDPInterface;
+
+// AOSP Dynamics Processing UUID: e0e6539b-1781-7261-676f-6d7573696340
+const effect_descriptor_t gDPDescriptor = {
+        {0x7261676f, 0x6d75, 0x7369, 0x6364, {0x28, 0xe2, 0xfd, 0x3a, 0xc3, 0x9e}}, // type
+        {0xe0e6539b, 0x1781, 0x7261, 0x676f, {0x6d, 0x75, 0x73, 0x69, 0x63, 0x40}}, // uuid
+        EFFECT_CONTROL_API_VERSION,
+        (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST),
+        0, // TODO
+        1,
+        "Dynamics Processing",
+        "The Android Open Source Project",
+};
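+// This implementation uuid matches the dynamics_processing entry added to
+// audio_effects.conf in this change, which is how the framework maps the
+// effect to libdynproc.so.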
+
+enum dp_state_e {
+    DYNAMICS_PROCESSING_STATE_UNINITIALIZED,
+    DYNAMICS_PROCESSING_STATE_INITIALIZED,
+    DYNAMICS_PROCESSING_STATE_ACTIVE,
+};
+
+struct DynamicsProcessingContext {
+    const struct effect_interface_s *mItfe;
+    effect_config_t mConfig;
+    uint8_t mState;
+
+    dp_fx::DPBase *mPDynamics; //engine for the currently selected variant; owned by this context
+    int32_t mCurrentVariant;
+    float mPreferredFrameDuration;
+};
+
+// The value offset of an effect parameter is computed by rounding up
+// the parameter size to the next 32 bit alignment.
+static inline uint32_t computeParamVOffset(const effect_param_t *p) {
+    return ((p->psize + sizeof(int32_t) - 1) / sizeof(int32_t)) *
+            sizeof(int32_t);
+}
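+// As used below, the effect_param_t blob is laid out as
+//   [status][psize][vsize][psize bytes of params][padding][vsize bytes of values]
+// so the values start at data + voffset. For example, three int32_t params
+// (psize == 12) give voffset == 12, while psize == 5 would round up to 8.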
+
+//--- local function prototypes
+int DP_setParameter(DynamicsProcessingContext *pContext,
+        uint32_t paramSize,
+        void *pParam,
+        uint32_t valueSize,
+        void *pValue);
+int DP_getParameter(DynamicsProcessingContext *pContext,
+        uint32_t paramSize,
+        void *pParam,
+        uint32_t *pValueSize,
+        void *pValue);
+int DP_getParameterCmdSize(uint32_t paramSize,
+        void *pParam);
+void DP_expectedParamValueSizes(uint32_t paramSize,
+        void *pParam,
+        bool isSet,
+        uint32_t *pCmdSize,
+        uint32_t *pValueSize);
+//
+//--- Local functions (not directly used by effect interface)
+//
+
+void DP_reset(DynamicsProcessingContext *pContext)
+{
+    ALOGV("> DP_reset(%p)", pContext);
+    if (pContext->mPDynamics != NULL) {
+        pContext->mPDynamics->reset();
+    } else {
+        ALOGE("DP_reset(%p): null DynamicsProcessing", pContext);
+    }
+}
+
+//----------------------------------------------------------------------------
+// DP_setConfig()
+//----------------------------------------------------------------------------
+// Purpose: Set input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_setConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+    ALOGV("DP_setConfig(%p)", pContext);
+
+    if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate) return -EINVAL;
+    if (pConfig->inputCfg.channels != pConfig->outputCfg.channels) return -EINVAL;
+    if (pConfig->inputCfg.format != pConfig->outputCfg.format) return -EINVAL;
+    if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
+            pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
+    if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_FLOAT) return -EINVAL;
+
+    pContext->mConfig = *pConfig;
+
+    DP_reset(pContext);
+
+    return 0;
+}
+
+//----------------------------------------------------------------------------
+// DP_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void DP_getConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+    *pConfig = pContext->mConfig;
+}
+
+//----------------------------------------------------------------------------
+// DP_init()
+//----------------------------------------------------------------------------
+// Purpose: Initialize engine with default configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_init(DynamicsProcessingContext *pContext)
+{
+    ALOGV("DP_init(%p)", pContext);
+
+    pContext->mItfe = &gDPInterface;
+    pContext->mPDynamics = NULL;
+    pContext->mState = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
+
+    pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+    pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+    pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+    pContext->mConfig.inputCfg.samplingRate = 48000;
+    pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
+    pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
+    pContext->mConfig.inputCfg.bufferProvider.cookie = NULL;
+    pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
+    pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+    pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+    pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+    pContext->mConfig.outputCfg.samplingRate = 48000;
+    pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
+    pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
+    pContext->mConfig.outputCfg.bufferProvider.cookie = NULL;
+    pContext->mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+    pContext->mCurrentVariant = -1; //none
+    pContext->mPreferredFrameDuration = 0; //none
+
+    DP_setConfig(pContext, &pContext->mConfig);
+    pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+    return 0;
+}
+
+void DP_changeVariant(DynamicsProcessingContext *pContext, int newVariant) {
+    ALOGV("DP_changeVariant from %d to %d", pContext->mCurrentVariant, newVariant);
+    switch(newVariant) {
+    case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+        pContext->mCurrentVariant = VARIANT_FAVOR_FREQUENCY_RESOLUTION;
+        delete pContext->mPDynamics;
+        pContext->mPDynamics = new dp_fx::DPFrequency();
+        break;
+    }
+    default: {
+        ALOGW("DynamicsProcessing variant %d not available for creation", newVariant);
+        break;
+    }
+    } //switch
+}
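+// Only VARIANT_FAVOR_FREQUENCY_RESOLUTION is currently implemented; any other
+// value only logs a warning and leaves mCurrentVariant and mPDynamics untouched.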
+
+static inline bool isPowerOf2(unsigned long n) {
+    return (n & (n - 1)) == 0;
+}
+
+void DP_configureVariant(DynamicsProcessingContext *pContext, int newVariant) {
+    ALOGV("DP_configureVariant %d", newVariant);
+    switch(newVariant) {
+    case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+        int32_t minBlockSize = (int32_t)dp_fx::DPFrequency::getMinBockSize();
+        int32_t desiredBlock = pContext->mPreferredFrameDuration *
+                pContext->mConfig.inputCfg.samplingRate / 1000.0f;
+        int32_t currentBlock = desiredBlock;
+        ALOGV(" sampling rate: %d, desiredBlock size %0.2f (%d) samples",
+                pContext->mConfig.inputCfg.samplingRate, pContext->mPreferredFrameDuration,
+                desiredBlock);
+        if (desiredBlock < minBlockSize) {
+            currentBlock = minBlockSize;
+        } else if (!isPowerOf2(desiredBlock)) {
+            //find next highest power of 2.
+            currentBlock = 1 << (32 - __builtin_clz(desiredBlock));
+        }
+        ((dp_fx::DPFrequency*)pContext->mPDynamics)->configure(currentBlock,
+                currentBlock/2,
+                pContext->mConfig.inputCfg.samplingRate);
+        break;
+    }
+    default: {
+        ALOGE("DynamicsProcessing variant %d not available to configure", newVariant);
+        break;
+    }
+    }
+}
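+// Worked example: at 48 kHz with a 10 ms preferred frame duration the desired
+// block is 480 samples; 480 is not a power of two, so it is rounded up to 512
+// (assuming the minimum block size reported by DPFrequency is <= 480), and half
+// of the resulting block size is passed as the second configure() argument.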
+
+//
+//--- Effect Library Interface Implementation
+//
+
+int DPLib_Release(effect_handle_t handle) {
+    DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)handle;
+
+    ALOGV("DPLib_Release %p", handle);
+    if (pContext == NULL) {
+        return -EINVAL;
+    }
+    delete pContext->mPDynamics;
+    delete pContext;
+
+    return 0;
+}
+
+int DPLib_Create(const effect_uuid_t *uuid,
+                         int32_t sessionId __unused,
+                         int32_t ioId __unused,
+                         effect_handle_t *pHandle) {
+    ALOGV("DPLib_Create()");
+
+    if (pHandle == NULL || uuid == NULL) {
+        return -EINVAL;
+    }
+
+    if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) != 0) {
+        return -EINVAL;
+    }
+
+    DynamicsProcessingContext *pContext = new DynamicsProcessingContext;
+    *pHandle = (effect_handle_t)pContext;
+    int ret = DP_init(pContext);
+    if (ret < 0) {
+        ALOGW("DPLib_Create() init failed");
+        DPLib_Release(*pHandle);
+        return ret;
+    }
+
+    ALOGV("DPLib_Create context is %p", pContext);
+    return 0;
+}
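+// Typical lifecycle, sketched from the standard effect framework flow: the
+// framework loads this library through AUDIO_EFFECT_LIBRARY_INFO_SYM, calls
+// create_effect() with the uuid listed in audio_effects.conf, configures and
+// enables the instance via DP_command() (EFFECT_CMD_SET_CONFIG,
+// EFFECT_CMD_SET_PARAM, EFFECT_CMD_ENABLE), runs DP_process() on audio
+// buffers, and finally calls release_effect().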
+
+int DPLib_GetDescriptor(const effect_uuid_t *uuid,
+                                effect_descriptor_t *pDescriptor) {
+
+    if (pDescriptor == NULL || uuid == NULL){
+        ALOGE("DPLib_GetDescriptor() called with NULL pointer");
+        return -EINVAL;
+    }
+
+    if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) == 0) {
+        *pDescriptor = gDPDescriptor;
+        return 0;
+    }
+
+    return -EINVAL;
+} /* end DPLib_GetDescriptor */
+
+//
+//--- Effect Control Interface Implementation
+//
+int DP_process(effect_handle_t self, audio_buffer_t *inBuffer,
+        audio_buffer_t *outBuffer) {
+    DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+    if (pContext == NULL) {
+        ALOGE("DP_process() called with NULL context");
+        return -EINVAL;
+    }
+
+    if (inBuffer == NULL || inBuffer->raw == NULL ||
+        outBuffer == NULL || outBuffer->raw == NULL ||
+        inBuffer->frameCount != outBuffer->frameCount ||
+        inBuffer->frameCount == 0) {
+        ALOGE("inBuffer or outBuffer are NULL or have problems with frame count");
+        return -EINVAL;
+    }
+    if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+        ALOGE("mState is not DYNAMICS_PROCESSING_STATE_ACTIVE. Current mState %d",
+                pContext->mState);
+        return -ENODATA;
+    }
+    //if the engine exists, process in place on the input buffer, then copy or accumulate into the output buffer
+    if (pContext->mPDynamics != NULL) {
+        int32_t channelCount = (int32_t)audio_channel_count_from_out_mask(
+                        pContext->mConfig.inputCfg.channels);
+        pContext->mPDynamics->processSamples(inBuffer->f32, inBuffer->f32,
+                inBuffer->frameCount * channelCount);
+
+        if (inBuffer->raw != outBuffer->raw) {
+            if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+                for (size_t i = 0; i < outBuffer->frameCount * channelCount; i++) {
+                    outBuffer->f32[i] += inBuffer->f32[i];
+                }
+            } else {
+                memcpy(outBuffer->raw, inBuffer->raw,
+                        outBuffer->frameCount * channelCount * sizeof(float));
+            }
+        }
+    } else {
+        //no engine has been created yet (DP_PARAM_ENGINE_ARCHITECTURE has not been set)
+        ALOGW("no DynamicsProcessing engine available");
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int DP_command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+        void *pCmdData, uint32_t *replySize, void *pReplyData) {
+
+    DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+    if (pContext == NULL || pContext->mState == DYNAMICS_PROCESSING_STATE_UNINITIALIZED) {
+        ALOGE("DP_command() called with NULL context or uninitialized state.");
+        return -EINVAL;
+    }
+
+    ALOGV("DP_command command %d cmdSize %d",cmdCode, cmdSize);
+    switch (cmdCode) {
+    case EFFECT_CMD_INIT:
+        if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+            ALOGE("EFFECT_CMD_INIT wrong replyData or replySize");
+            return -EINVAL;
+        }
+        *(int *) pReplyData = DP_init(pContext);
+        break;
+    case EFFECT_CMD_SET_CONFIG:
+        if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
+                || pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+            ALOGE("EFFECT_CMD_SET_CONFIG error with pCmdData, cmdSize, pReplyData or replySize");
+            return -EINVAL;
+        }
+        *(int *) pReplyData = DP_setConfig(pContext,
+                (effect_config_t *) pCmdData);
+        break;
+    case EFFECT_CMD_GET_CONFIG:
+        if (pReplyData == NULL || replySize == NULL ||
+            *replySize != sizeof(effect_config_t)) {
+            ALOGE("EFFECT_CMD_GET_CONFIG wrong replyData or replySize");
+            return -EINVAL;
+        }
+        DP_getConfig(pContext, (effect_config_t *)pReplyData);
+        break;
+    case EFFECT_CMD_RESET:
+        DP_reset(pContext);
+        break;
+    case EFFECT_CMD_ENABLE:
+        if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+            ALOGE("EFFECT_CMD_ENABLE wrong replyData or repySize");
+            return -EINVAL;
+        }
+        if (pContext->mState != DYNAMICS_PROCESSING_STATE_INITIALIZED) {
+            ALOGE("EFFECT_CMD_ENABLE state not initialized");
+            *(int *)pReplyData = -ENOSYS;
+        } else {
+            pContext->mState = DYNAMICS_PROCESSING_STATE_ACTIVE;
+            ALOGV("EFFECT_CMD_ENABLE() OK");
+            *(int *)pReplyData = 0;
+        }
+        break;
+    case EFFECT_CMD_DISABLE:
+        if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+            ALOGE("EFFECT_CMD_DISABLE wrong replyData or repySize");
+            return -EINVAL;
+        }
+        if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+            ALOGE("EFFECT_CMD_DISABLE state not active");
+            *(int *)pReplyData = -ENOSYS;
+        } else {
+            pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+            ALOGV("EFFECT_CMD_DISABLE() OK");
+            *(int *)pReplyData = 0;
+        }
+        break;
+    case EFFECT_CMD_GET_PARAM: {
+        if (pCmdData == NULL || pReplyData == NULL || replySize == NULL) {
+            ALOGE("null pCmdData or pReplyData or replySize");
+            return -EINVAL;
+        }
+        effect_param_t *pEffectParam = (effect_param_t *) pCmdData;
+        uint32_t expectedCmdSize = DP_getParameterCmdSize(pEffectParam->psize,
+                pEffectParam->data);
+        if (cmdSize != expectedCmdSize || *replySize < expectedCmdSize) {
+            ALOGE("error cmdSize: %d, expetedCmdSize: %d, replySize: %d",
+                    cmdSize, expectedCmdSize, *replySize);
+            return -EINVAL;
+        }
+
+        ALOGVV("DP_command expectedCmdSize: %d", expectedCmdSize);
+        memcpy(pReplyData, pCmdData, expectedCmdSize);
+        effect_param_t *p = (effect_param_t *)pReplyData;
+
+        uint32_t voffset = computeParamVOffset(p);
+
+        p->status = DP_getParameter(pContext,
+                p->psize,
+                p->data,
+                &p->vsize,
+                p->data + voffset);
+        *replySize = sizeof(effect_param_t) + voffset + p->vsize;
+
+        ALOGVV("DP_command replysize %u, status %d" , *replySize, p->status);
+        break;
+    }
+    case EFFECT_CMD_SET_PARAM: {
+        if (pCmdData == NULL ||
+                cmdSize < (sizeof(effect_param_t) + sizeof(int32_t) + sizeof(int32_t)) ||
+                pReplyData == NULL || replySize == NULL || *replySize != sizeof(int32_t)) {
+            ALOGE("\tLVM_ERROR : DynamicsProcessing cmdCode Case: "
+                    "EFFECT_CMD_SET_PARAM: ERROR");
+            return -EINVAL;
+        }
+
+        effect_param_t * const p = (effect_param_t *) pCmdData;
+        const uint32_t voffset = computeParamVOffset(p);
+
+        *(int *)pReplyData = DP_setParameter(pContext,
+                p->psize,
+                (void *)p->data,
+                p->vsize,
+                p->data + voffset);
+        break;
+    }
+    case EFFECT_CMD_SET_DEVICE:
+    case EFFECT_CMD_SET_VOLUME:
+    case EFFECT_CMD_SET_AUDIO_MODE:
+        break;
+
+    default:
+        ALOGW("DP_command invalid command %d",cmdCode);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+//register expected cmd size
+int DP_getParameterCmdSize(uint32_t paramSize,
+        void *pParam) {
+    if (paramSize < sizeof(int32_t)) {
+        return 0;
+    }
+    int32_t param = *(int32_t*)pParam;
+    switch(param) {
+    case DP_PARAM_GET_CHANNEL_COUNT: //paramcmd
+    case DP_PARAM_ENGINE_ARCHITECTURE:
+        //effect + param
+        return (int)(sizeof(effect_param_t) + sizeof(uint32_t));
+    case DP_PARAM_INPUT_GAIN: //paramcmd + param
+    case DP_PARAM_LIMITER:
+    case DP_PARAM_PRE_EQ:
+    case DP_PARAM_POST_EQ:
+    case DP_PARAM_MBC:
+        //effect + 2 params (command, channel)
+        return (int)(sizeof(effect_param_t) + 2 * sizeof(uint32_t));
+    case DP_PARAM_PRE_EQ_BAND:
+    case DP_PARAM_POST_EQ_BAND:
+    case DP_PARAM_MBC_BAND:
+        return (int)(sizeof(effect_param_t) + 3 * sizeof(uint32_t));
+    }
+    return 0;
+}
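+// Example: DP_PARAM_PRE_EQ_BAND is addressed by three int32_t params
+// (command, channel, band), hence sizeof(effect_param_t) + 3 * sizeof(uint32_t);
+// the per-channel queries such as DP_PARAM_INPUT_GAIN use (command, channel)
+// and need one word less.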
+
+//helper function
+bool DP_checkSizesInt(uint32_t paramSize, uint32_t valueSize, uint32_t expectedParams,
+        uint32_t expectedValues) {
+    if (paramSize < expectedParams * sizeof(int32_t)) {
+        ALOGE("Invalid paramSize: %u expected %u", paramSize,
+                (uint32_t) (expectedParams * sizeof(int32_t)));
+        return false;
+    }
+    if (valueSize < expectedValues * sizeof(int32_t)) {
+        ALOGE("Invalid valueSize %u expected %u", valueSize,
+                (uint32_t)(expectedValues * sizeof(int32_t)));
+        return false;
+    }
+    return true;
+}
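+// expectedParams/expectedValues are counted in 32-bit words while paramSize and
+// valueSize are in bytes; e.g. a get of DP_PARAM_INPUT_GAIN passes 2 params
+// (command, channel) and expects room for 1 value.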
+
+static dp_fx::DPChannel* DP_getChannel(DynamicsProcessingContext *pContext,
+        int32_t channel) {
+    if (pContext->mPDynamics == NULL) {
+        return NULL;
+    }
+    dp_fx::DPChannel *pChannel = pContext->mPDynamics->getChannel(channel);
+    ALOGE_IF(pChannel == NULL, "DPChannel NULL. invalid channel %d", channel);
+    return pChannel;
+}
+
+static dp_fx::DPEq* DP_getEq(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t eqType) {
+    dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+    if (pChannel == NULL) {
+        return NULL;
+    }
+    dp_fx::DPEq *pEq = (eqType == DP_PARAM_PRE_EQ ? pChannel->getPreEq() :
+            (eqType == DP_PARAM_POST_EQ ? pChannel->getPostEq() : NULL));
+    ALOGE_IF(pEq == NULL,"DPEq NULL invalid eq");
+    return pEq;
+}
+
+static dp_fx::DPEqBand* DP_getEqBand(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t eqType, int32_t band) {
+    dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqType);
+    if (pEq == NULL) {
+        return NULL;
+    }
+    dp_fx::DPEqBand *pEqBand = pEq->getBand(band);
+    ALOGE_IF(pEqBand == NULL, "DPEqBand NULL. invalid band %d", band);
+    return pEqBand;
+}
+
+static dp_fx::DPMbc* DP_getMbc(DynamicsProcessingContext *pContext, int32_t channel) {
+    dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+    if (pChannel == NULL) {
+        return NULL;
+    }
+    dp_fx::DPMbc *pMbc = pChannel->getMbc();
+    ALOGE_IF(pMbc == NULL, "DPMbc NULL invalid MBC");
+    return pMbc;
+}
+
+static dp_fx::DPMbcBand* DP_getMbcBand(DynamicsProcessingContext *pContext, int32_t channel,
+        int32_t band) {
+    dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+    if (pMbc == NULL) {
+        return NULL;
+    }
+    dp_fx::DPMbcBand *pMbcBand = pMbc->getBand(band);
+    ALOGE_IF(pMbcBand == NULL, "pMbcBand NULL. invalid band %d", band);
+    return pMbcBand;
+}
+
+int DP_getParameter(DynamicsProcessingContext *pContext,
+                           uint32_t paramSize,
+                           void *pParam,
+                           uint32_t *pValueSize,
+                           void *pValue) {
+    int status = 0;
+    int32_t *params = (int32_t *)pParam;
+    static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+            alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+            "Size/alignment mismatch for float/int32_t/value_t");
+    value_t *values = reinterpret_cast<value_t*>(pValue);
+
+    ALOGVV("%s start", __func__);
+#ifdef VERY_VERY_VERBOSE_LOGGING
+    for (size_t i = 0; i < paramSize/sizeof(int32_t); i++) {
+        ALOGVV("Param[%zu] %d", i, params[i]);
+    }
+#endif
+    if (paramSize < sizeof(int32_t)) {
+        ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+        return -EINVAL;
+    }
+    const int32_t command = params[0];
+    switch (command) {
+    case DP_PARAM_GET_CHANNEL_COUNT: {
+        if (!DP_checkSizesInt(paramSize,*pValueSize, 1 /*params*/, 1 /*values*/)) {
+            ALOGE("%s DP_PARAM_GET_CHANNEL_COUNT (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+        *pValueSize = sizeof(uint32_t);
+        *(uint32_t *)pValue = (uint32_t)audio_channel_count_from_out_mask(
+                pContext->mConfig.inputCfg.channels);
+        ALOGVV("%s DP_PARAM_GET_CHANNEL_COUNT channels %d", __func__, *(int32_t *)pValue);
+        break;
+    }
+    case DP_PARAM_ENGINE_ARCHITECTURE: {
+        ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 1 /*params*/, 9 /*values*/)) {
+            ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+//        Number[] values = { 0 /*0 variant */,
+//                0.0f /* 1 preferredFrameDuration */,
+//                0 /*2 preEqInUse */,
+//                0 /*3 preEqBandCount */,
+//                0 /*4 mbcInUse */,
+//                0 /*5 mbcBandCount*/,
+//                0 /*6 postEqInUse */,
+//                0 /*7 postEqBandCount */,
+//                0 /*8 limiterInUse */};
+        if (pContext->mPDynamics == NULL) {
+            ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error mPDynamics is NULL", __func__);
+            status = -EINVAL;
+            break;
+        }
+        values[0].i = pContext->mCurrentVariant;
+        values[1].f = pContext->mPreferredFrameDuration;
+        values[2].i = pContext->mPDynamics->isPreEQInUse();
+        values[3].i = pContext->mPDynamics->getPreEqBandCount();
+        values[4].i = pContext->mPDynamics->isMbcInUse();
+        values[5].i = pContext->mPDynamics->getMbcBandCount();
+        values[6].i = pContext->mPDynamics->isPostEqInUse();
+        values[7].i = pContext->mPDynamics->getPostEqBandCount();
+        values[8].i = pContext->mPDynamics->isLimiterInUse();
+
+        *pValueSize = sizeof(value_t) * 9;
+
+        ALOGVV(" variant %d, preferredFrameDuration: %f, preEqInuse %d, bands %d, mbcinuse %d,"
+                "mbcbands %d, posteqInUse %d, bands %d, limiterinuse %d",
+                values[0].i, values[1].f, values[2].i, values[3].i, values[4].i, values[5].i,
+                values[6].i, values[7].i, values[8].i);
+        break;
+    }
+    case DP_PARAM_INPUT_GAIN: {
+        ALOGVV("engine get PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 1 /*values*/)) {
+            ALOGE("%s get PARAM_INPUT_GAIN invalid sizes.", __func__);
+            status = -EINVAL;
+            break;
+        }
+
+        const int32_t channel = params[1];
+        dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+        if (pChannel == NULL) {
+            ALOGE("%s get PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+        values[0].f = pChannel->getInputGain();
+        *pValueSize = sizeof(value_t) * 1;
+
+        ALOGVV(" channel: %d, input gain %f\n", channel, values[0].f);
+        break;
+    }
+    case DP_PARAM_PRE_EQ:
+    case DP_PARAM_POST_EQ: {
+        ALOGVV("engine get PARAM_*_EQ paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+            ALOGE("%s get PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = {paramSet == PARAM_PRE_EQ ? PARAM_PRE_EQ : PARAM_POST_EQ,
+//                       channelIndex};
+//               Number[] values = {0 /*0 in use */,
+//                                   0 /*1 enabled*/,
+//                                   0 /*2 band count */};
+        const int32_t channel = params[1];
+
+        dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+        if (pEq == NULL) {
+            ALOGE("%s get PARAM_*_EQ invalid eq", __func__);
+            status = -EINVAL;
+            break;
+        }
+        values[0].i = pEq->isInUse();
+        values[1].i = pEq->isEnabled();
+        values[2].i = pEq->getBandCount();
+        *pValueSize = sizeof(value_t) * 3;
+
+        ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+                (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel,
+                values[0].i, values[1].i, values[2].i);
+        break;
+    }
+    case DP_PARAM_PRE_EQ_BAND:
+    case DP_PARAM_POST_EQ_BAND: {
+        ALOGVV("engine get PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 3 /*values*/)) {
+            ALOGE("%s get PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = {paramSet,
+//                channelIndex,
+//                bandIndex};
+//        Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+//              eqBand.getCutoffFrequency(),
+//              eqBand.getGain()};
+        const int32_t channel = params[1];
+        const int32_t band = params[2];
+        int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+                (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+
+        dp_fx::DPEqBand *pEqBand = DP_getEqBand(pContext, channel, eqCommand, band);
+        if (pEqBand == NULL) {
+            ALOGE("%s get PARAM_*_EQ_BAND invalid channel %d or band %d", __func__, channel, band);
+            status = -EINVAL;
+            break;
+        }
+
+        values[0].i = pEqBand->isEnabled();
+        values[1].f = pEqBand->getCutoffFrequency();
+        values[2].f = pEqBand->getGain();
+        *pValueSize = sizeof(value_t) * 3;
+
+        ALOGVV("%s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+                (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+                values[0].i, values[1].f, values[2].f);
+        break;
+    }
+    case DP_PARAM_MBC: {
+        ALOGVV("engine get PDP_PARAM_MBC paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+            ALOGE("%s get PDP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+
+//           Number[] params = {PARAM_MBC,
+//                    channelIndex};
+//            Number[] values = {0 /*0 in use */,
+//                                0 /*1 enabled*/,
+//                                0 /*2 band count */};
+
+        const int32_t channel = params[1];
+
+        dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+        if (pMbc == NULL) {
+            ALOGE("%s get PDP_PARAM_MBC invalid MBC", __func__);
+            status = -EINVAL;
+            break;
+        }
+
+        values[0].i = pMbc->isInUse();
+        values[1].i = pMbc->isEnabled();
+        values[2].i = pMbc->getBandCount();
+        *pValueSize = sizeof(value_t) * 3;
+
+        ALOGVV("DP_PARAM_MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel,
+                values[0].i, values[1].i, values[2].i);
+        break;
+    }
+    case DP_PARAM_MBC_BAND: {
+        ALOGVV("engine get DP_PARAM_MBC_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 11 /*values*/)) {
+            ALOGE("%s get DP_PARAM_MBC_BAND (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = {PARAM_MBC_BAND,
+//                        channelIndex,
+//                        bandIndex};
+//                Number[] values = {0 /*0 enabled */,
+//                        0.0f /*1 cutoffFrequency */,
+//                        0.0f /*2 AttackTime */,
+//                        0.0f /*3 ReleaseTime */,
+//                        0.0f /*4 Ratio */,
+//                        0.0f /*5 Threshold */,
+//                        0.0f /*6 KneeWidth */,
+//                        0.0f /*7 NoiseGateThreshold */,
+//                        0.0f /*8 ExpanderRatio */,
+//                        0.0f /*9 PreGain */,
+//                        0.0f /*10 PostGain*/};
+
+        const int32_t channel = params[1];
+        const int32_t band = params[2];
+
+        dp_fx::DPMbcBand *pMbcBand = DP_getMbcBand(pContext, channel, band);
+        if (pMbcBand == NULL) {
+            ALOGE("%s get PARAM_MBC_BAND invalid channel %d or band %d", __func__, channel, band);
+            status = -EINVAL;
+            break;
+        }
+
+        values[0].i = pMbcBand->isEnabled();
+        values[1].f = pMbcBand->getCutoffFrequency();
+        values[2].f = pMbcBand->getAttackTime();
+        values[3].f = pMbcBand->getReleaseTime();
+        values[4].f = pMbcBand->getRatio();
+        values[5].f = pMbcBand->getThreshold();
+        values[6].f = pMbcBand->getKneeWidth();
+        values[7].f = pMbcBand->getNoiseGateThreshold();
+        values[8].f = pMbcBand->getExpanderRatio();
+        values[9].f = pMbcBand->getPreGain();
+        values[10].f = pMbcBand->getPostGain();
+
+        *pValueSize = sizeof(value_t) * 11;
+        ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+                "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+                "expanderRatio:%f, preGain:%f, postGain:%f\n", channel, band, values[0].i,
+                values[1].f, values[2].f, values[3].f, values[4].f, values[5].f, values[6].f,
+                values[7].f, values[8].f, values[9].f, values[10].f);
+        break;
+    }
+    case DP_PARAM_LIMITER: {
+        ALOGVV("engine get DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, *pValueSize);
+        if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 8 /*values*/)) {
+            ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+
+        int32_t channel = params[1];
+//      Number[] values = {0 /*0 in use (int)*/,
+//              0 /*1 enabled (int)*/,
+//              0 /*2 link group (int)*/,
+//              0.0f /*3 attack time (float)*/,
+//              0.0f /*4 release time (float)*/,
+//              0.0f /*5 ratio (float)*/,
+//              0.0f /*6 threshold (float)*/,
+//              0.0f /*7 post gain(float)*/};
+        dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+        if (pChannel == NULL) {
+            ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+        dp_fx::DPLimiter *pLimiter = pChannel->getLimiter();
+        if (pLimiter == NULL) {
+            ALOGE("%s DP_PARAM_LIMITER null LIMITER", __func__);
+            status = -EINVAL;
+            break;
+        }
+        values[0].i = pLimiter->isInUse();
+        values[1].i = pLimiter->isEnabled();
+        values[2].i = pLimiter->getLinkGroup();
+        values[3].f = pLimiter->getAttackTime();
+        values[4].f = pLimiter->getReleaseTime();
+        values[5].f = pLimiter->getRatio();
+        values[6].f = pLimiter->getThreshold();
+        values[7].f = pLimiter->getPostGain();
+
+        *pValueSize = sizeof(value_t) * 8;
+
+        ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+                "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n",
+                channel, values[0].i/*inUse*/, values[1].i/*enabled*/, values[2].i/*linkGroup*/,
+                values[3].f/*attackTime*/, values[4].f/*releaseTime*/,
+                values[5].f/*ratio*/, values[6].f/*threshold*/,
+                values[7].f/*postGain*/);
+        break;
+    }
+    default:
+        ALOGE("%s invalid param %d", __func__, params[0]);
+        status = -EINVAL;
+        break;
+    }
+
+    ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+    return status;
+} /* end DP_getParameter */
+
+int DP_setParameter(DynamicsProcessingContext *pContext,
+                           uint32_t paramSize,
+                           void *pParam,
+                           uint32_t valueSize,
+                           void *pValue) {
+    int status = 0;
+    int32_t *params = (int32_t *)pParam;
+    static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+            alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+            "Size/alignment mismatch for float/int32_t/value_t");
+    value_t *values = reinterpret_cast<value_t*>(pValue);
+
+    ALOGVV("%s start", __func__);
+    if (paramSize < sizeof(int32_t)) {
+        ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+        return -EINVAL;
+    }
+    const int32_t command = params[0];
+    switch (command) {
+    case DP_PARAM_ENGINE_ARCHITECTURE: {
+        ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 1 /*params*/, 9 /*values*/)) {
+            ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+//        Number[] values = { variant /* variant */,
+//                preferredFrameDuration,
+//                (preEqInUse ? 1 : 0),
+//                preEqBandCount,
+//                (mbcInUse ? 1 : 0),
+//                mbcBandCount,
+//                (postEqInUse ? 1 : 0),
+//                postEqBandCount,
+//                (limiterInUse ? 1 : 0)};
+        const int32_t variant = values[0].i;
+        const float preferredFrameDuration = values[1].f;
+        const int32_t preEqInUse = values[2].i;
+        const int32_t preEqBandCount = values[3].i;
+        const int32_t mbcInUse = values[4].i;
+        const int32_t mbcBandCount = values[5].i;
+        const int32_t postEqInUse = values[6].i;
+        const int32_t postEqBandCount = values[7].i;
+        const int32_t limiterInUse = values[8].i;
+        ALOGVV("variant %d, preEqInuse %d, bands %d, mbcinuse %d, mbcbands %d, posteqInUse %d,"
+                "bands %d, limiterinuse %d", variant, preEqInUse, preEqBandCount, mbcInUse,
+                mbcBandCount, postEqInUse, postEqBandCount, limiterInUse);
+
+        //set variant (instantiate effect)
+        //initArchitecture for effect
+        DP_changeVariant(pContext, variant);
+        if (pContext->mPDynamics == NULL) {
+            ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error setting variant %d", __func__, variant);
+            status = -EINVAL;
+            break;
+        }
+        pContext->mPreferredFrameDuration = preferredFrameDuration;
+        pContext->mPDynamics->init((uint32_t)audio_channel_count_from_out_mask(
+                pContext->mConfig.inputCfg.channels),
+                preEqInUse != 0, (uint32_t)preEqBandCount,
+                mbcInUse != 0, (uint32_t)mbcBandCount,
+                postEqInUse != 0, (uint32_t)postEqBandCount,
+                limiterInUse != 0);
+
+        DP_configureVariant(pContext, variant);
+        break;
+    }
+    case DP_PARAM_INPUT_GAIN: {
+        ALOGVV("engine DP_PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 1 /*values*/)) {
+            ALOGE("%s DP_PARAM_INPUT_GAIN invalid sizes.", __func__);
+            status = -EINVAL;
+            break;
+        }
+
+        const int32_t channel = params[1];
+        dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+        if (pChannel == NULL) {
+            ALOGE("%s DP_PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+        const float gain = values[0].f;
+        ALOGVV("%s DP_PARAM_INPUT_GAIN channel %d, level %f", __func__, channel, gain);
+        pChannel->setInputGain(gain);
+        break;
+    }
+    case DP_PARAM_PRE_EQ:
+    case DP_PARAM_POST_EQ: {
+        ALOGVV("engine DP_PARAM_*_EQ paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+            ALOGE("%s DP_PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = {paramSet,
+//                channelIndex};
+//        Number[] values = { (eq.isInUse() ? 1 : 0),
+//                (eq.isEnabled() ? 1 : 0),
+//                bandCount};
+        const int32_t channel = params[1];
+
+        const int32_t enabled = values[1].i;
+        const int32_t bandCount = values[2].i;
+        ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+                (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel, values[0].i,
+                enabled, bandCount);
+
+        dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+        if (pEq == NULL) {
+            ALOGE("%s set PARAM_*_EQ invalid channel %d or command %d", __func__, channel,
+                    command);
+            status = -EINVAL;
+            break;
+        }
+
+        pEq->setEnabled(enabled != 0);
+        //only warn (do not fail) if the requested band count differs from the configured one
+        if ((int32_t)pEq->getBandCount() != bandCount) {
+            ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+                    pEq->getBandCount(), bandCount);
+        }
+        break;
+    }
+    case DP_PARAM_PRE_EQ_BAND:
+    case DP_PARAM_POST_EQ_BAND: {
+        ALOGVV("engine set PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 3 /*values*/)) {
+            ALOGE("%s PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = {paramSet,
+//                channelIndex,
+//                bandIndex};
+//        Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+//              eqBand.getCutoffFrequency(),
+//              eqBand.getGain()};
+
+        const int32_t channel = params[1];
+        const int32_t band = params[2];
+
+        const int32_t enabled = values[0].i;
+        const float cutoffFrequency = values[1].f;
+        const float gain = values[2].f;
+
+        ALOGVV(" %s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+                (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+                enabled, cutoffFrequency, gain);
+
+        int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+                (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+        dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqCommand);
+        if (pEq == NULL) {
+            ALOGE("%s set PARAM_*_EQ_BAND invalid channel %d or command %d", __func__, channel,
+                    command);
+            status = -EINVAL;
+            break;
+        }
+
+        dp_fx::DPEqBand eqBand;
+        eqBand.init(enabled != 0, cutoffFrequency, gain);
+        pEq->setBand(band, eqBand);
+        break;
+    }
+    case DP_PARAM_MBC: {
+        ALOGVV("engine DP_PARAM_MBC paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+            ALOGE("%s DP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//            Number[] params = { PARAM_MBC,
+//                    channelIndex};
+//            Number[] values = {(mbc.isInUse() ? 1 : 0),
+//                    (mbc.isEnabled() ? 1 : 0),
+//                    bandCount};
+        const int32_t channel = params[1];
+
+        const int32_t enabled = values[1].i;
+        const int32_t bandCount = values[2].i;
+        ALOGVV("MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel, values[0].i,
+                enabled, bandCount);
+
+        dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+        if (pMbc == NULL) {
+            ALOGE("%s set DP_PARAM_MBC invalid channel %d ", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+
+        pMbc->setEnabled(enabled != 0);
+        //only warn (do not fail) if the requested band count differs from the configured one
+        if ((int32_t)pMbc->getBandCount() != bandCount) {
+            ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+                    pMbc->getBandCount(), bandCount);
+        }
+        break;
+    }
+    case DP_PARAM_MBC_BAND: {
+        ALOGVV("engine set DP_PARAM_MBC_BAND paramsize: %d valuesize %d ",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 11 /*values*/)) {
+            ALOGE("%s DP_PARAM_MBC_BAND: (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//        Number[] params = { PARAM_MBC_BAND,
+//                channelIndex,
+//                bandIndex};
+//        Number[] values = {(mbcBand.isEnabled() ? 1 : 0),
+//                mbcBand.getCutoffFrequency(),
+//                mbcBand.getAttackTime(),
+//                mbcBand.getReleaseTime(),
+//                mbcBand.getRatio(),
+//                mbcBand.getThreshold(),
+//                mbcBand.getKneeWidth(),
+//                mbcBand.getNoiseGateThreshold(),
+//                mbcBand.getExpanderRatio(),
+//                mbcBand.getPreGain(),
+//                mbcBand.getPostGain()};
+
+        const int32_t channel = params[1];
+        const int32_t band = params[2];
+
+        const int32_t enabled = values[0].i;
+        const float cutoffFrequency = values[1].f;
+        const float attackTime = values[2].f;
+        const float releaseTime = values[3].f;
+        const float ratio = values[4].f;
+        const float threshold = values[5].f;
+        const float kneeWidth = values[6].f;
+        const float noiseGateThreshold = values[7].f;
+        const float expanderRatio = values[8].f;
+        const float preGain = values[9].f;
+        const float postGain = values[10].f;
+
+        ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+                "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+                "expanderRatio:%f, preGain:%f, postGain:%f\n",
+                channel, band, enabled, cutoffFrequency, attackTime, releaseTime, ratio,
+                threshold, kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+
+        dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+        if (pMbc == NULL) {
+            ALOGE("%s set DP_PARAM_MBC_BAND invalid channel %d", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+
+        dp_fx::DPMbcBand mbcBand;
+        mbcBand.init(enabled != 0, cutoffFrequency, attackTime, releaseTime, ratio, threshold,
+                kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+        pMbc->setBand(band, mbcBand);
+        break;
+    }
+    case DP_PARAM_LIMITER: {
+        ALOGVV("engine DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, valueSize);
+        if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 8 /*values*/)) {
+            ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+            status = -EINVAL;
+            break;
+        }
+//            Number[] params = { PARAM_LIMITER,
+//                             channelIndex};
+//                     Number[] values = {(limiter.isInUse() ? 1 : 0),
+//                             (limiter.isEnabled() ? 1 : 0),
+//                             limiter.getLinkGroup(),
+//                             limiter.getAttackTime(),
+//                             limiter.getReleaseTime(),
+//                             limiter.getRatio(),
+//                             limiter.getThreshold(),
+//                             limiter.getPostGain()};
+
+        const int32_t channel = params[1];
+
+        const int32_t inUse = values[0].i;
+        const int32_t enabled = values[1].i;
+        const int32_t linkGroup = values[2].i;
+        const float attackTime = values[3].f;
+        const float releaseTime = values[4].f;
+        const float ratio = values[5].f;
+        const float threshold = values[6].f;
+        const float postGain = values[7].f;
+
+        ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+                "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n", channel, inUse,
+                enabled, linkGroup, attackTime, releaseTime, ratio, threshold, postGain);
+
+        dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+        if (pChannel == NULL) {
+            ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+            status = -EINVAL;
+            break;
+        }
+        dp_fx::DPLimiter limiter;
+        limiter.init(inUse != 0, enabled != 0, linkGroup, attackTime, releaseTime, ratio,
+                threshold, postGain);
+        pChannel->setLimiter(limiter);
+        break;
+    }
+    default:
+        ALOGE("%s invalid param %d", __func__, params[0]);
+        status = -EINVAL;
+        break;
+    }
+
+    ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+    return status;
+} /* end DP_setParameter */
+
+/* Effect Control Interface Implementation: get_descriptor */
+int DP_getDescriptor(effect_handle_t self,
+        effect_descriptor_t *pDescriptor)
+{
+    DynamicsProcessingContext * pContext = (DynamicsProcessingContext *) self;
+
+    if (pContext == NULL || pDescriptor == NULL) {
+        ALOGE("DP_getDescriptor() invalid param");
+        return -EINVAL;
+    }
+
+    *pDescriptor = gDPDescriptor;
+
+    return 0;
+} /* end DP_getDescriptor */
+
+
+// effect_handle_t interface implementation for Dynamics Processing effect
+const struct effect_interface_s gDPInterface = {
+        DP_process,
+        DP_command,
+        DP_getDescriptor,
+        NULL,
+};
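+// The last member (process_reverse) is left NULL: this effect does not
+// implement reverse-stream processing.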
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+    .tag = AUDIO_EFFECT_LIBRARY_TAG,
+    .version = EFFECT_LIBRARY_API_VERSION,
+    .name = "Dynamics Processing Library",
+    .implementor = "The Android Open Source Project",
+    .create_effect = DPLib_Create,
+    .release_effect = DPLib_Release,
+    .get_descriptor = DPLib_GetDescriptor,
+};
+
+}; // extern "C"
+
diff --git a/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2 b/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
diff --git a/media/libeffects/dynamicsproc/NOTICE b/media/libeffects/dynamicsproc/NOTICE
new file mode 100644
index 0000000..31cc6e9
--- /dev/null
+++ b/media/libeffects/dynamicsproc/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2005-2018, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.cpp b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
new file mode 100644
index 0000000..8b79991
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPBase"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPBase.h"
+#include "DPFrequency.h"
+
+namespace dp_fx {
+
+DPStage::DPStage() : mInUse(DP_DEFAULT_STAGE_INUSE),
+        mEnabled(DP_DEFAULT_STAGE_ENABLED) {
+}
+
+void DPStage::init(bool inUse, bool enabled) {
+    mInUse = inUse;
+    mEnabled = enabled;
+}
+
+//----
+DPBandStage::DPBandStage() : mBandCount(0) {
+}
+
+void DPBandStage::init(bool inUse, bool enabled, int bandCount) {
+    DPStage::init(inUse, enabled);
+    mBandCount = inUse ? bandCount : 0;
+}
+
+//---
+DPBandBase::DPBandBase() {
+    init(DP_DEFAULT_BAND_ENABLED,
+            DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ);
+}
+
+void DPBandBase::init(bool enabled, float cutoffFrequency) {
+    mEnabled = enabled;
+    mCutoffFrequencyHz = cutoffFrequency;
+}
+
+//-----
+DPEqBand::DPEqBand() {
+    init(DP_DEFAULT_BAND_ENABLED,
+            DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+            DP_DEFAULT_GAIN_DB);
+}
+
+void DPEqBand::init(bool enabled, float cutoffFrequency, float gain) {
+    DPBandBase::init(enabled, cutoffFrequency);
+    setGain(gain);
+}
+
+float DPEqBand::getGain() const {
+    return mGainDb;
+}
+
+void DPEqBand::setGain(float gain) {
+    mGainDb = gain;
+}
+
+//------
+DPMbcBand::DPMbcBand() {
+    init(DP_DEFAULT_BAND_ENABLED,
+            DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+            DP_DEFAULT_ATTACK_TIME_MS,
+            DP_DEFAULT_RELEASE_TIME_MS,
+            DP_DEFAULT_RATIO,
+            DP_DEFAULT_THRESHOLD_DB,
+            DP_DEFAULT_KNEE_WIDTH_DB,
+            DP_DEFAULT_NOISE_GATE_THRESHOLD_DB,
+            DP_DEFAULT_EXPANDER_RATIO,
+            DP_DEFAULT_GAIN_DB,
+            DP_DEFAULT_GAIN_DB);
+}
+
+void DPMbcBand::init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+        float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+        float expanderRatio, float preGain, float postGain) {
+    DPBandBase::init(enabled, cutoffFrequency);
+    setAttackTime(attackTime);
+    setReleaseTime(releaseTime);
+    setRatio(ratio);
+    setThreshold(threshold);
+    setKneeWidth(kneeWidth);
+    setNoiseGateThreshold(noiseGateThreshold);
+    setExpanderRatio(expanderRatio);
+    setPreGain(preGain);
+    setPostGain(postGain);
+}
+
+//------
+DPEq::DPEq() {
+}
+
+void DPEq::init(bool inUse, bool enabled, uint32_t bandCount) {
+    DPBandStage::init(inUse, enabled, bandCount);
+    mBands.resize(getBandCount());
+}
+
+DPEqBand * DPEq::getBand(uint32_t band) {
+    if (band < getBandCount()) {
+        return &mBands[band];
+    }
+    return NULL;
+}
+
+void DPEq::setBand(uint32_t band, DPEqBand &src) {
+    if (band < getBandCount()) {
+        mBands[band] = src;
+    }
+}
+
+//------
+DPMbc::DPMbc() {
+}
+
+void DPMbc::init(bool inUse, bool enabled, uint32_t bandCount) {
+    DPBandStage::init(inUse, enabled, bandCount);
+    if (isInUse()) {
+        mBands.resize(bandCount);
+    } else {
+        mBands.resize(0);
+    }
+}
+
+DPMbcBand * DPMbc::getBand(uint32_t band) {
+    if (band < getBandCount()) {
+        return &mBands[band];
+    }
+    return NULL;
+}
+
+void DPMbc::setBand(uint32_t band, DPMbcBand &src) {
+    if (band < getBandCount()) {
+        mBands[band] = src;
+    }
+}
+
+//------
+DPLimiter::DPLimiter() {
+    init(DP_DEFAULT_STAGE_INUSE,
+            DP_DEFAULT_STAGE_ENABLED,
+            DP_DEFAULT_LINK_GROUP,
+            DP_DEFAULT_ATTACK_TIME_MS,
+            DP_DEFAULT_RELEASE_TIME_MS,
+            DP_DEFAULT_RATIO,
+            DP_DEFAULT_THRESHOLD_DB,
+            DP_DEFAULT_GAIN_DB);
+}
+
+void DPLimiter::init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+        float ratio, float threshold, float postGain) {
+    DPStage::init(inUse, enabled);
+    setLinkGroup(linkGroup);
+    setAttackTime(attackTime);
+    setReleaseTime(releaseTime);
+    setRatio(ratio);
+    setThreshold(threshold);
+    setPostGain(postGain);
+}
+
+//----
+DPChannel::DPChannel() : mInitialized(false), mInputGainDb(0), mPreEqInUse(false), mMbcInUse(false),
+        mPostEqInUse(false), mLimiterInUse(false) {
+}
+
+void DPChannel::init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+        bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+        bool limiterInUse) {
+    setInputGain(inputGain);
+    mPreEqInUse = preEqInUse;
+    mMbcInUse = mbcInUse;
+    mPostEqInUse = postEqInUse;
+    mLimiterInUse = limiterInUse;
+
+    mPreEq.init(mPreEqInUse, false, preEqBandCount);
+    mMbc.init(mMbcInUse, false, mbcBandCount);
+    mPostEq.init(mPostEqInUse, false, postEqBandCount);
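+    // Limiter defaults below match the DP_DEFAULT_* values: linkGroup 0, attack 50 ms,
+    // release 120 ms, ratio 2, threshold -30 dB, post gain 0 dB.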
+    mLimiter.init(mLimiterInUse, false, 0, 50, 120, 2, -30, 0);
+    mInitialized = true;
+}
+
+DPEq* DPChannel::getPreEq() {
+    if (!mInitialized) {
+        return NULL;
+    }
+    return &mPreEq;
+}
+
+DPMbc* DPChannel::getMbc() {
+    if (!mInitialized) {
+        return NULL;
+    }
+    return &mMbc;
+}
+
+DPEq* DPChannel::getPostEq() {
+    if (!mInitialized) {
+        return NULL;
+    }
+    return &mPostEq;
+}
+
+DPLimiter* DPChannel::getLimiter() {
+    if (!mInitialized) {
+        return NULL;
+    }
+    return &mLimiter;
+}
+
+void DPChannel::setLimiter(DPLimiter &limiter) {
+    if (!mInitialized) {
+        return;
+    }
+    mLimiter = limiter;
+}
+
+//----
+DPBase::DPBase() : mInitialized(false), mChannelCount(0), mPreEqInUse(false), mPreEqBandCount(0),
+        mMbcInUse(false), mMbcBandCount(0), mPostEqInUse(false), mPostEqBandCount(0),
+        mLimiterInUse(false) {
+}
+
+void DPBase::init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+        bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+        bool limiterInUse) {
+    ALOGV("DPBase::init");
+    mChannelCount = channelCount;
+    mPreEqInUse = preEqInUse;
+    mPreEqBandCount = preEqBandCount;
+    mMbcInUse = mbcInUse;
+    mMbcBandCount = mbcBandCount;
+    mPostEqInUse = postEqInUse;
+    mPostEqBandCount = postEqBandCount;
+    mLimiterInUse = limiterInUse;
+    mChannel.resize(mChannelCount);
+    for (size_t ch = 0; ch < mChannelCount; ch++) {
+        mChannel[ch].init(0, preEqInUse, preEqBandCount, mbcInUse, mbcBandCount,
+                postEqInUse, postEqBandCount, limiterInUse);
+    }
+    mInitialized = true;
+}
+
+DPChannel* DPBase::getChannel(uint32_t channelIndex) {
+    if (!mInitialized || channelIndex >= mChannel.size()) {
+        return NULL;
+    }
+    return &mChannel[channelIndex];
+}
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.h b/media/libeffects/dynamicsproc/dsp/DPBase.h
new file mode 100644
index 0000000..355f64b
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DPBASE_H_
+#define DPBASE_H_
+
+
+#include <stdint.h>
+#include <cmath>
+#include <vector>
+#include <android/log.h>
+
+namespace dp_fx {
+
+#define DP_DEFAULT_BAND_ENABLED false
+#define DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ 1000
+#define DP_DEFAULT_ATTACK_TIME_MS 50
+#define DP_DEFAULT_RELEASE_TIME_MS 120
+#define DP_DEFAULT_RATIO 2
+#define DP_DEFAULT_THRESHOLD_DB -30
+#define DP_DEFAULT_KNEE_WIDTH_DB 0
+#define DP_DEFAULT_NOISE_GATE_THRESHOLD_DB -90
+#define DP_DEFAULT_EXPANDER_RATIO 1
+#define DP_DEFAULT_GAIN_DB 0
+#define DP_DEFAULT_STAGE_INUSE false
+#define DP_DEFAULT_STAGE_ENABLED false
+#define DP_DEFAULT_LINK_GROUP 0
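+
+// Usage sketch (illustrative only, not part of the API): an engine derived from DPBase is
+// initialized with the per-stage band counts, then parameters are edited through the
+// per-channel accessors, e.g.
+//
+//   engine.init(2 /*channels*/, true, 3, true, 3, true, 3, true);
+//   DPEqBand band;
+//   band.init(true /*enabled*/, 1000 /*cutoff Hz*/, 3 /*gain dB*/);
+//   engine.getChannel(0)->getPreEq()->setBand(0, band);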
+
+
+
+class DPStage {
+public:
+    DPStage();
+    ~DPStage() = default;
+    void init(bool inUse, bool enabled);
+    bool isInUse() const {
+        return mInUse;
+    }
+    bool isEnabled() const {
+        return mEnabled;
+    }
+    void setEnabled(bool enabled) {
+        mEnabled = enabled;
+    }
+private:
+    bool mInUse;
+    bool mEnabled;
+};
+
+class DPBandStage : public DPStage {
+public:
+    DPBandStage();
+    ~DPBandStage() = default;
+    void init(bool inUse, bool enabled, int bandCount);
+    uint32_t getBandCount() const {
+        return mBandCount;
+    }
+    void setBandCount(uint32_t bandCount) {
+        mBandCount = bandCount;
+    }
+private:
+    uint32_t mBandCount;
+};
+
+class DPBandBase {
+public:
+    DPBandBase();
+    ~DPBandBase() = default;
+    void init(bool enabled, float cutoffFrequency);
+    bool isEnabled() const {
+        return mEnabled;
+    }
+    void setEnabled(bool enabled) {
+        mEnabled = enabled;
+    }
+    float getCutoffFrequency() const {
+        return mCutoffFrequencyHz;
+    }
+    void setCutoffFrequency(float cutoffFrequency) {
+        mCutoffFrequencyHz = cutoffFrequency;
+    }
+private:
+    bool mEnabled;
+    float mCutoffFrequencyHz;
+};
+
+class DPEqBand : public DPBandBase {
+public:
+    DPEqBand();
+    ~DPEqBand() = default;
+    void init(bool enabled, float cutoffFrequency, float gain);
+    float getGain() const;
+    void setGain(float gain);
+private:
+    float mGainDb;
+};
+
+class DPMbcBand : public DPBandBase {
+public:
+    DPMbcBand();
+    ~DPMbcBand() = default;
+    void init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+            float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+            float expanderRatio, float preGain, float postGain);
+    float getAttackTime() const {
+        return mAttackTimeMs;
+    }
+    void setAttackTime(float attackTime) {
+        mAttackTimeMs = attackTime;
+    }
+    float getReleaseTime() const {
+        return mReleaseTimeMs;
+    }
+    void setReleaseTime(float releaseTime) {
+        mReleaseTimeMs = releaseTime;
+    }
+    float getRatio() const {
+        return mRatio;
+    }
+    void setRatio(float ratio) {
+        mRatio = ratio;
+    }
+    float getThreshold() const {
+        return mThresholdDb;
+    }
+    void setThreshold(float threshold) {
+        mThresholdDb = threshold;
+    }
+    float getKneeWidth() const {
+        return mKneeWidthDb;
+    }
+    void setKneeWidth(float kneeWidth) {
+        mKneeWidthDb = kneeWidth;
+    }
+    float getNoiseGateThreshold() const {
+        return mNoiseGateThresholdDb;
+    }
+    void setNoiseGateThreshold(float noiseGateThreshold) {
+        mNoiseGateThresholdDb = noiseGateThreshold;
+    }
+    float getExpanderRatio() const {
+        return mExpanderRatio;
+    }
+    void setExpanderRatio(float expanderRatio) {
+        mExpanderRatio = expanderRatio;
+    }
+    float getPreGain() const {
+        return mPreGainDb;
+    }
+    void setPreGain(float preGain) {
+        mPreGainDb = preGain;
+    }
+    float getPostGain() const {
+        return mPostGainDb;
+    }
+    void setPostGain(float postGain) {
+        mPostGainDb = postGain;
+    }
+private:
+    float mAttackTimeMs;
+    float mReleaseTimeMs;
+    float mRatio;
+    float mThresholdDb;
+    float mKneeWidthDb;
+    float mNoiseGateThresholdDb;
+    float mExpanderRatio;
+    float mPreGainDb;
+    float mPostGainDb;
+};
+
+class DPEq : public DPBandStage {
+public:
+    DPEq();
+    ~DPEq() = default;
+    void init(bool inUse, bool enabled, uint32_t bandCount);
+    DPEqBand * getBand(uint32_t band);
+    void setBand(uint32_t band, DPEqBand &src);
+private:
+    std::vector<DPEqBand> mBands;
+};
+
+class DPMbc : public DPBandStage {
+public:
+    DPMbc();
+    ~DPMbc() = default;
+    void init(bool inUse, bool enabled, uint32_t bandCount);
+    DPMbcBand * getBand(uint32_t band);
+    void setBand(uint32_t band, DPMbcBand &src);
+private:
+    std::vector<DPMbcBand> mBands;
+};
+
+class DPLimiter : public DPStage {
+public:
+    DPLimiter();
+    ~DPLimiter() = default;
+    void init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+            float ratio, float threshold, float postGain);
+    uint32_t getLinkGroup() const {
+        return mLinkGroup;
+    }
+    void setLinkGroup(uint32_t linkGroup) {
+        mLinkGroup = linkGroup;
+    }
+    float getAttackTime() const {
+        return mAttackTimeMs;
+    }
+    void setAttackTime(float attackTime) {
+        mAttackTimeMs = attackTime;
+    }
+    float getReleaseTime() const {
+        return mReleaseTimeMs;
+    }
+    void setReleaseTime(float releaseTime) {
+        mReleaseTimeMs = releaseTime;
+    }
+    float getRatio() const {
+        return mRatio;
+    }
+    void setRatio(float ratio) {
+        mRatio = ratio;
+    }
+    float getThreshold() const {
+        return mThresholdDb;
+    }
+    void setThreshold(float threshold) {
+        mThresholdDb = threshold;
+    }
+    float getPostGain() const {
+        return mPostGainDb;
+    }
+    void setPostGain(float postGain) {
+        mPostGainDb = postGain;
+    }
+private:
+    uint32_t mLinkGroup;
+    float mAttackTimeMs;
+    float mReleaseTimeMs;
+    float mRatio;
+    float mThresholdDb;
+    float mPostGainDb;
+};
+
+class DPChannel {
+public:
+    DPChannel();
+    ~DPChannel() = default;
+    void init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+            bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+            bool limiterInUse);
+
+    float getInputGain() const {
+        if (!mInitialized) {
+            return 0;
+        }
+        return mInputGainDb;
+    }
+    void setInputGain(float gain) {
+        mInputGainDb = gain;
+    }
+
+    DPEq* getPreEq();
+    DPMbc* getMbc();
+    DPEq* getPostEq();
+    DPLimiter* getLimiter();
+    void setLimiter(DPLimiter &limiter);
+
+private:
+    bool mInitialized;
+    float mInputGainDb;
+
+    DPEq mPreEq;
+    DPMbc mMbc;
+    DPEq mPostEq;
+    DPLimiter mLimiter;
+
+    bool mPreEqInUse;
+    bool mMbcInUse;
+    bool mPostEqInUse;
+    bool mLimiterInUse;
+};
+
+class DPBase {
+public:
+    DPBase();
+    virtual ~DPBase() = default;
+
+    void init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+            bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+            bool limiterInUse);
+    virtual size_t processSamples(const float *in, float *out, size_t samples) = 0;
+    virtual void reset() = 0;
+
+    DPChannel* getChannel(uint32_t channelIndex);
+    uint32_t getChannelCount() const {
+        return mChannelCount;
+    }
+    uint32_t getPreEqBandCount() const {
+        return mPreEqBandCount;
+    }
+    uint32_t getMbcBandCount() const {
+        return mMbcBandCount;
+    }
+    uint32_t getPostEqBandCount() const {
+        return mPostEqBandCount;
+    }
+    bool isPreEQInUse() const {
+        return mPreEqInUse;
+    }
+    bool isMbcInUse() const {
+        return mMbcInUse;
+    }
+    bool isPostEqInUse() const {
+        return mPostEqInUse;
+    }
+    bool isLimiterInUse() const {
+        return mLimiterInUse;
+    }
+
+private:
+    bool mInitialized;
+    //general
+    uint32_t mChannelCount;
+    bool mPreEqInUse;
+    uint32_t mPreEqBandCount;
+    bool mMbcInUse;
+    uint32_t mMbcBandCount;
+    bool mPostEqInUse;
+    uint32_t mPostEqBandCount;
+    bool mLimiterInUse;
+
+    std::vector<DPChannel> mChannel;
+};
+
+} //namespace dp_fx
+
+
+#endif  // DPBASE_H_
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
new file mode 100644
index 0000000..59195fc
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPFrequency"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPFrequency.h"
+#include <algorithm>
+
+namespace dp_fx {
+
+using Eigen::MatrixXd;
+#define MAX_BLOCKSIZE 16384 //For this implementation
+#define MIN_BLOCKSIZE 8
+
+#define CIRCULAR_BUFFER_UPSAMPLE 4  //4 times buffer size
+
+static constexpr float MIN_ENVELOPE = 0.000001f;
+//helper functions
+static inline bool isPowerOf2(unsigned long n) {
+    return (n & (n - 1)) == 0;
+}
+static constexpr float EPSILON = 0.0000001f;
+
+static inline bool isZero(float f) {
+    return fabs(f) <= EPSILON;
+}
+
+template <class T>
+bool compareEquality(T a, T b) {
+    return (a == b);
+}
+
+template <> bool compareEquality<float>(float a, float b) {
+    return isZero(a - b);
+}
+
+//TODO: avoid using macro for estimating change and assignment.
+#define IS_CHANGED(c, a, b) { c |= !compareEquality(a,b); \
+    (a) = (b); }
+
+float dBtoLinear(float valueDb) {
+    return pow(10, valueDb / 20.0);
+}
+
+float linearToDb(float value) {
+    return 20 * log10(value);
+}
+
+//ChannelBuffers helper
+void ChannelBuffer::initBuffers(unsigned int blockSize, unsigned int overlapSize,
+        unsigned int halfFftSize, unsigned int samplingRate, DPBase &dpBase) {
+    ALOGV("ChannelBuffer::initBuffers blockSize %d, overlap %d, halfFft %d",
+            blockSize, overlapSize, halfFftSize);
+
+    mSamplingRate = samplingRate;
+    mBlockSize = blockSize;
+
+    cBInput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+    cBOutput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+
+    //fill input with half block size...
+    for (unsigned int k = 0;  k < mBlockSize/2; k++) {
+        cBInput.write(0);
+    }
+
+    //temp vectors
+    input.resize(mBlockSize);
+    output.resize(mBlockSize);
+    outTail.resize(overlapSize);
+
+    //module vectors
+    mPreEqFactorVector.resize(halfFftSize, 1.0);
+    mPostEqFactorVector.resize(halfFftSize, 1.0);
+
+    mPreEqBands.resize(dpBase.getPreEqBandCount());
+    mMbcBands.resize(dpBase.getMbcBandCount());
+    mPostEqBands.resize(dpBase.getPostEqBandCount());
+    ALOGV("mPreEqBands %zu, mMbcBands %zu, mPostEqBands %zu", mPreEqBands.size(),
+            mMbcBands.size(), mPostEqBands.size());
+
+    DPChannel *pChannel = dpBase.getChannel(0);
+    if (pChannel != NULL) {
+        mPreEqInUse = pChannel->getPreEq()->isInUse();
+        mMbcInUse = pChannel->getMbc()->isInUse();
+        mPostEqInUse = pChannel->getPostEq()->isInUse();
+        mLimiterInUse = pChannel->getLimiter()->isInUse();
+    }
+}
+
+void ChannelBuffer::computeBinStartStop(BandParams &bp, size_t binStart) {
+
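+    // Map the band cutoff frequency to an FFT bin: binStop = round(freqHz * blockSize / fs).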
+    bp.binStart = binStart;
+    bp.binStop = (int)(0.5 + bp.freqCutoffHz * mBlockSize / mSamplingRate);
+}
+
+//== DPFrequency
+
+void DPFrequency::reset() {
+}
+
+size_t DPFrequency::getMinBockSize() {
+    return MIN_BLOCKSIZE;
+}
+
+size_t DPFrequency::getMaxBockSize() {
+    return MAX_BLOCKSIZE;
+}
+
+void DPFrequency::configure(size_t blockSize, size_t overlapSize,
+        size_t samplingRate) {
+    ALOGV("configure");
+    mBlockSize = blockSize;
+    if (mBlockSize > MAX_BLOCKSIZE) {
+        mBlockSize = MAX_BLOCKSIZE;
+    } else if (mBlockSize < MIN_BLOCKSIZE) {
+        mBlockSize = MIN_BLOCKSIZE;
+    } else {
+        if (!isPowerOf2(blockSize)) {
+            //find next highest power of 2.
+            mBlockSize = 1 << (32 - __builtin_clz(blockSize));
+        }
+    }
+
+    mHalfFFTSize = 1 + mBlockSize / 2; //including Nyquist bin
+    mOverlapSize = std::min(overlapSize, mBlockSize/2);
+
+    int channelCount = getChannelCount();
+    mSamplingRate = samplingRate;
+    mChannelBuffers.resize(channelCount);
+    for (int ch = 0; ch < channelCount; ch++) {
+        mChannelBuffers[ch].initBuffers(mBlockSize, mOverlapSize, mHalfFFTSize,
+                mSamplingRate, *this);
+    }
+
+    //dsp
+    fill_window(mVWindow, RDSP_WINDOW_HANNING_FLAT_TOP, mBlockSize, mOverlapSize);
+}
+
+void DPFrequency::updateParameters(ChannelBuffer &cb, int channelIndex) {
+    DPChannel *pChannel = getChannel(channelIndex);
+
+    if (pChannel == NULL) {
+        ALOGE("Error: updateParameters null DPChannel %d", channelIndex);
+        return;
+    }
+
+    //===Input Gain and preEq
+    {
+        bool changed = false;
+        IS_CHANGED(changed, cb.inputGainDb, pChannel->getInputGain());
+        //===EqPre
+        if (cb.mPreEqInUse) {
+            DPEq *pPreEq = pChannel->getPreEq();
+            if (pPreEq == NULL) {
+                ALOGE("Error: updateParameters null PreEq for channel: %d", channelIndex);
+                return;
+            }
+            IS_CHANGED(changed, cb.mPreEqEnabled, pPreEq->isEnabled());
+            if (cb.mPreEqEnabled) {
+                for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+                    DPEqBand *pEqBand = pPreEq->getBand(b);
+                    if (pEqBand == NULL) {
+                        ALOGE("Error: updateParameters null PreEqBand for band %d", b);
+                        return; //failed.
+                    }
+                    ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+                    IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+                    IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+                            pEqBand->getCutoffFrequency());
+                    IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+                }
+            }
+        }
+
+        if (changed) {
+            float inputGainFactor = dBtoLinear(cb.inputGainDb);
+            if (cb.mPreEqInUse && cb.mPreEqEnabled) {
+                ALOGV("preEq changed, recomputing! channel %d", channelIndex);
+                size_t binNext = 0;
+                for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+                    ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+
+                    //frequency translation
+                    cb.computeBinStartStop(*pEqBandParams, binNext);
+                    binNext = pEqBandParams->binStop + 1;
+                    float factor = dBtoLinear(pEqBandParams->gainDb);
+                    if (!pEqBandParams->enabled) {
+                        factor = inputGainFactor;
+                    }
+                    for (size_t k = pEqBandParams->binStart;
+                            k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+                        cb.mPreEqFactorVector[k] = factor * inputGainFactor;
+                    }
+                }
+            } else {
+                ALOGV("only input gain changed, recomputing!");
+                //populate PreEq factor with input gain factor.
+                for (size_t k = 0; k < mHalfFFTSize; k++) {
+                    cb.mPreEqFactorVector[k] = inputGainFactor;
+                }
+            }
+        }
+    } //inputGain and preEq
+
+    //===EqPost
+    if (cb.mPostEqInUse) {
+        bool changed = false;
+
+        DPEq *pPostEq = pChannel->getPostEq();
+        if (pPostEq == NULL) {
+            ALOGE("Error: updateParameters null postEq for channel: %d", channelIndex);
+            return; //failed.
+        }
+        IS_CHANGED(changed, cb.mPostEqEnabled, pPostEq->isEnabled());
+        if (cb.mPostEqEnabled) {
+            for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+                DPEqBand *pEqBand = pPostEq->getBand(b);
+                if (pEqBand == NULL) {
+                    ALOGE("Error: updateParameters PostEqBand NULL for band %d", b);
+                    return; //failed.
+                }
+                ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+                IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+                IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+                        pEqBand->getCutoffFrequency());
+                IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+            }
+            if (changed) {
+                ALOGV("postEq changed, recomputing! channel %d", channelIndex);
+                size_t binNext = 0;
+                for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+                    ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+
+                    //frequency translation
+                    cb.computeBinStartStop(*pEqBandParams, binNext);
+                    binNext = pEqBandParams->binStop + 1;
+                    float factor = dBtoLinear(pEqBandParams->gainDb);
+                    if (!pEqBandParams->enabled) {
+                        factor = 1.0;
+                    }
+                    for (size_t k = pEqBandParams->binStart;
+                            k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+                        cb.mPostEqFactorVector[k] = factor;
+                    }
+                }
+            }
+        } //enabled
+    }
+
+    //===MBC
+    if (cb.mMbcInUse) {
+        DPMbc *pMbc = pChannel->getMbc();
+        if (pMbc == NULL) {
+            ALOGE("Error: updateParameters Mbc NULL for channel: %d", channelIndex);
+            return;
+        }
+        cb.mMbcEnabled = pMbc->isEnabled();
+        if (cb.mMbcEnabled) {
+            bool changed = false;
+            for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+                DPMbcBand *pMbcBand = pMbc->getBand(b);
+                if (pMbcBand == NULL) {
+                    ALOGE("Error: updateParameters MbcBand NULL for band %d", b);
+                    return; //failed.
+                }
+                ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+                pMbcBandParams->enabled = pMbcBand->isEnabled();
+                IS_CHANGED(changed, pMbcBandParams->freqCutoffHz,
+                        pMbcBand->getCutoffFrequency());
+
+                pMbcBandParams->gainPreDb = pMbcBand->getPreGain();
+                pMbcBandParams->gainPostDb = pMbcBand->getPostGain();
+                pMbcBandParams->attackTimeMs = pMbcBand->getAttackTime();
+                pMbcBandParams->releaseTimeMs = pMbcBand->getReleaseTime();
+                pMbcBandParams->ratio = pMbcBand->getRatio();
+                pMbcBandParams->thresholdDb = pMbcBand->getThreshold();
+                pMbcBandParams->kneeWidthDb = pMbcBand->getKneeWidth();
+                pMbcBandParams->noiseGateThresholdDb = pMbcBand->getNoiseGateThreshold();
+                pMbcBandParams->expanderRatio = pMbcBand->getExpanderRatio();
+
+            }
+
+            if (changed) {
+                ALOGV("mbc changed, recomputing! channel %d", channelIndex);
+                size_t binNext = 0;
+                for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+                    ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+
+                    pMbcBandParams->previousEnvelope = 0;
+
+                    //frequency translation
+                    cb.computeBinStartStop(*pMbcBandParams, binNext);
+                    binNext = pMbcBandParams->binStop + 1;
+                }
+
+            }
+
+        }
+    }
+}
+
+size_t DPFrequency::processSamples(const float *in, float *out, size_t samples) {
+       const float *pIn = in;
+       float *pOut = out;
+
+       int channelCount = mChannelBuffers.size();
+       if (channelCount < 1) {
+           ALOGW("warning: no Channels ready for processing");
+           return 0;
+       }
+
+       //**Check if parameters have changed and update
+       for (int ch = 0; ch < channelCount; ch++) {
+           updateParameters(mChannelBuffers[ch], ch);
+       }
+
+       //**separate into channels
+       for (size_t k = 0; k < samples; k += channelCount) {
+           for (int ch = 0; ch < channelCount; ch++) {
+               mChannelBuffers[ch].cBInput.write(*pIn++);
+           }
+       }
+
+       //TODO: lookahead limiters
+       //TODO: apply linked limiters to all channels.
+       //**Process each Channel
+       for (int ch = 0; ch < channelCount; ch++) {
+           processMono(mChannelBuffers[ch]);
+       }
+
+       //** estimate how much data is available in ALL channels
+       size_t available = mChannelBuffers[0].cBOutput.availableToRead();
+       for (int ch = 1; ch < channelCount; ch++) {
+           available = std::min(available, mChannelBuffers[ch].cBOutput.availableToRead());
+       }
+
+       //** make sure to output just what the buffer can handle
+       if (available > samples/channelCount) {
+           available = samples/channelCount;
+       }
+
+       //**Prepend zeroes if necessary
+       size_t fill = samples - (channelCount * available);
+       for (size_t k = 0; k < fill; k++) {
+           *pOut++ = 0;
+       }
+
+       //**interleave channels
+       for (size_t k = 0; k < available; k++) {
+           for (int ch = 0; ch < channelCount; ch++) {
+               *pOut++ = mChannelBuffers[ch].cBOutput.read();
+           }
+       }
+
+       return samples;
+}
+
+size_t DPFrequency::processMono(ChannelBuffer &cb) {
+
+    size_t processedSamples = 0;
+
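+    // Overlap-add (descriptive note): each pass keeps the last mOverlapSize input samples,
+    // reads (mBlockSize - mOverlapSize) new ones, processes one windowed block, and then
+    // mixes the head of the result with the tail saved from the previous block.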
+    size_t available = cb.cBInput.availableToRead();
+    while (available >= mBlockSize - mOverlapSize) {
+
+        //move tail of previous
+        for (unsigned int k = 0; k < mOverlapSize; ++k) {
+            cb.input[k] = cb.input[mBlockSize - mOverlapSize + k];
+        }
+
+        //read new available data
+        for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
+            cb.input[mOverlapSize + k] = cb.cBInput.read();
+        }
+
+        //## Actual process
+        processOneVector(cb.output, cb.input, cb);
+        //##End of Process
+
+        //mix tail (and capture new tail)
+        for (unsigned int k = 0; k < mOverlapSize; k++) {
+            cb.output[k] += cb.outTail[k];
+            cb.outTail[k] = cb.output[mBlockSize - mOverlapSize + k]; //new tail
+        }
+
+        //output data
+        for (unsigned int k = 0; k < mBlockSize - mOverlapSize; k++) {
+            cb.cBOutput.write(cb.output[k]);
+        }
+        processedSamples += mBlockSize - mOverlapSize;
+
+        available = cb.cBInput.availableToRead();
+    }
+
+    return processedSamples;
+}
+
+size_t DPFrequency::processOneVector(FloatVec & output, FloatVec & input,
+        ChannelBuffer &cb) {
+
+    //##apply window
+    Eigen::Map<Eigen::VectorXf> eWindow(&mVWindow[0], mVWindow.size());
+    Eigen::Map<Eigen::VectorXf> eInput(&input[0], input.size());
+
+    Eigen::VectorXf eWin = eInput.cwiseProduct(eWindow); //apply window
+
+    //##fft //TODO: refactor frequency transformations away from other stages.
+    mFftServer.fwd(mComplexTemp, eWin);
+
+    size_t cSize = mComplexTemp.size();
+    size_t maxBin = std::min(cSize/2, mHalfFFTSize);
+
+    //== EqPre (always runs)
+    for (size_t k = 0; k < maxBin; k++) {
+        mComplexTemp[k] *= cb.mPreEqFactorVector[k];
+    }
+
+    //== MBC
+    if (cb.mMbcInUse && cb.mMbcEnabled) {
+        for (size_t band = 0; band < cb.mMbcBands.size(); band++) {
+            ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[band];
+            float fEnergySum = 0;
+
+            //apply pre gain.
+            float preGainFactor = dBtoLinear(pMbcBandParams->gainPreDb);
+            float preGainSquared = preGainFactor * preGainFactor;
+
+            for (size_t k = pMbcBandParams->binStart;
+                    k <= pMbcBandParams->binStop && k < maxBin; k++) {
+                float fReal = mComplexTemp[k].real();
+                float fImag = mComplexTemp[k].imag();
+                float fSquare = (fReal * fReal + fImag * fImag) * preGainSquared;
+
+                fEnergySum += fSquare;
+            }
+
+            fEnergySum = sqrt(fEnergySum / 2.0);
+            float fTheta = 0.0;
+            float fFAtt = pMbcBandParams->attackTimeMs;
+            float fFRel = pMbcBandParams->releaseTimeMs;
+
+            float fUpdatesPerSecond = 10; //TODO: compute from framerate
+
+
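+            // One-pole envelope follower (note): theta = exp(-1 / (t * updatesPerSecond)), with
+            // t the attack time while the level rises and the release time while it falls;
+            // env = (1 - theta) * energy + theta * previousEnvelope.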
+            if (fEnergySum > pMbcBandParams->previousEnvelope) {
+                fTheta = exp(-1.0 / (fFAtt * fUpdatesPerSecond));
+            } else {
+                fTheta = exp(-1.0 / (fFRel * fUpdatesPerSecond));
+            }
+
+            float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * pMbcBandParams->previousEnvelope;
+
+            //preserve for next iteration
+            pMbcBandParams->previousEnvelope = fEnv;
+
+            float fThreshold = dBtoLinear(pMbcBandParams->thresholdDb);
+            float fNoiseGateThreshold = dBtoLinear(pMbcBandParams->noiseGateThresholdDb);
+
+            float fNewFactor = 1.0;
+
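+            // Gain computer (note): both branches evaluate dbChange = dbDelta * (1 - 1/ratio),
+            // where dbDelta measures the envelope against the compression threshold (ratio)
+            // or against the noise gate threshold (expanderRatio).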
+            if (fEnv > fThreshold) {
+                float fDbAbove = linearToDb(fThreshold / fEnv);
+                float fDbTarget = fDbAbove / pMbcBandParams->ratio;
+                float fDbChange = fDbAbove - fDbTarget;
+                fNewFactor = dBtoLinear(fDbChange);
+            } else if (fEnv < fNoiseGateThreshold) {
+                if (fEnv < MIN_ENVELOPE) {
+                    fEnv = MIN_ENVELOPE;
+                }
+                float fDbBelow = linearToDb(fNoiseGateThreshold / fEnv);
+                float fDbTarget = fDbBelow / pMbcBandParams->expanderRatio;
+                float fDbChange = fDbBelow - fDbTarget;
+                fNewFactor = dBtoLinear(fDbChange);
+            }
+
+            //apply post gain.
+            fNewFactor *= dBtoLinear(pMbcBandParams->gainPostDb);
+
+            if (fNewFactor < 0) {
+                fNewFactor = 0;
+            }
+
+            //apply to this band
+            for (size_t k = pMbcBandParams->binStart;
+                    k <= pMbcBandParams->binStop && k < maxBin; k++) {
+                mComplexTemp[k] *= fNewFactor;
+            }
+
+        } //end per band process
+
+    } //end MBC
+
+    //== EqPost
+    if (cb.mPostEqInUse && cb.mPostEqEnabled) {
+        for (size_t k = 0; k < maxBin; k++) {
+            mComplexTemp[k] *= cb.mPostEqFactorVector[k];
+        }
+    }
+
+    //##ifft directly to output.
+    Eigen::Map<Eigen::VectorXf> eOutput(&output[0], output.size());
+    mFftServer.inv(eOutput, mComplexTemp);
+
+    return mBlockSize;
+}
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.h b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
new file mode 100644
index 0000000..9919142
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef DPFREQUENCY_H_
+#define DPFREQUENCY_H_
+
+#include <Eigen/Dense>
+#include <unsupported/Eigen/FFT>
+
+#include "RDsp.h"
+#include "SHCircularBuffer.h"
+
+#include "DPBase.h"
+
+
+namespace dp_fx {
+
+using FXBuffer = SHCircularBuffer<float>;
+
+class ChannelBuffer {
+public:
+    FXBuffer cBInput;   // Circular Buffer input
+    FXBuffer cBOutput;  // Circular Buffer output
+    FloatVec input;     // time domain temp vector for input
+    FloatVec output;    // time domain temp vector for output
+    FloatVec outTail;   // time domain temp vector for output tail (for overlap-add method)
+
+    //Current parameters
+    float inputGainDb;
+    struct BandParams {
+        bool enabled;
+        float freqCutoffHz;
+        size_t binStart;
+        size_t binStop;
+    };
+    struct EqBandParams : public BandParams {
+        float gainDb;
+    };
+    struct MbcBandParams : public BandParams {
+        float gainPreDb;
+        float gainPostDb;
+        float attackTimeMs;
+        float releaseTimeMs;
+        float ratio;
+        float thresholdDb;
+        float kneeWidthDb;
+        float noiseGateThresholdDb;
+        float expanderRatio;
+
+        //Historic values
+        float previousEnvelope;
+    };
+
+    bool mPreEqInUse;
+    bool mPreEqEnabled;
+    std::vector<EqBandParams> mPreEqBands;
+
+    bool mMbcInUse;
+    bool mMbcEnabled;
+    std::vector<MbcBandParams> mMbcBands;
+
+    bool mPostEqInUse;
+    bool mPostEqEnabled;
+    std::vector<EqBandParams> mPostEqBands;
+
+    bool mLimiterInUse;
+    bool mLimiterEnabled;
+    FloatVec mPreEqFactorVector; // temp pre-computed vector to shape spectrum at preEQ stage
+    FloatVec mPostEqFactorVector; // temp pre-computed vector to shape spectrum at postEQ stage
+
+    void initBuffers(unsigned int blockSize, unsigned int overlapSize, unsigned int halfFftSize,
+            unsigned int samplingRate, DPBase &dpBase);
+    void computeBinStartStop(BandParams &bp, size_t binStart);
+private:
+    unsigned int mSamplingRate;
+    unsigned int mBlockSize;
+
+};
+
+class DPFrequency : public DPBase {
+public:
+    virtual size_t processSamples(const float *in, float *out, size_t samples);
+    virtual void reset();
+    void configure(size_t blockSize, size_t overlapSize, size_t samplingRate);
+    static size_t getMinBockSize();
+    static size_t getMaxBockSize();
+
+private:
+    void updateParameters(ChannelBuffer &cb, int channelIndex);
+    size_t processMono(ChannelBuffer &cb);
+    size_t processOneVector(FloatVec &output, FloatVec &input, ChannelBuffer &cb);
+
+    size_t mBlockSize;
+    size_t mHalfFFTSize;
+    size_t mOverlapSize;
+    size_t mSamplingRate;
+
+    std::vector<ChannelBuffer> mChannelBuffers;
+
+    //dsp
+    FloatVec mVWindow;  //analysis window coefficients.
+    Eigen::VectorXcf mComplexTemp;
+    Eigen::FFT<float> mFftServer;
+};
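+
+// Usage sketch (assumed calling sequence, inferred from the methods above): DPBase::init()
+// must run before configure(), since configure() reads the channel and band counts, and
+// processSamples() consumes interleaved samples across all channels.
+//
+//   dp_fx::DPFrequency engine;
+//   engine.init(2, true, 3, true, 3, true, 3, true);
+//   engine.configure(2048 /*blockSize*/, 512 /*overlapSize*/, 48000 /*samplingRate*/);
+//   engine.processSamples(in, out, frameCount * 2 /*interleaved stereo samples*/);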
+
+} //namespace dp_fx
+
+#endif  // DPFREQUENCY_H_
diff --git a/media/libeffects/dynamicsproc/dsp/RDsp.h b/media/libeffects/dynamicsproc/dsp/RDsp.h
new file mode 100644
index 0000000..1048442
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/RDsp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RDSP_H
+#define RDSP_H
+
+#include <cmath>
+#include <complex>
+#include <log/log.h>
+#include <vector>
+using FloatVec = std::vector<float>;
+using ComplexVec  = std::vector<std::complex<float>>;
+
+// =======
+// DSP window creation
+// =======
+
+#define TWOPI (M_PI * 2)
+
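+// Note: the *_FLAT_TOP variants ramp only over the first and last 'overlap' samples and hold
+// 1.0 in between; the plain variants use ramps of size/2, i.e. they have no flat section.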
+enum rdsp_window_type {
+    RDSP_WINDOW_RECTANGULAR,
+    RDSP_WINDOW_TRIANGULAR,
+    RDSP_WINDOW_TRIANGULAR_FLAT_TOP,
+    RDSP_WINDOW_HAMMING,
+    RDSP_WINDOW_HAMMING_FLAT_TOP,
+    RDSP_WINDOW_HANNING,
+    RDSP_WINDOW_HANNING_FLAT_TOP,
+};
+
+template <typename T>
+static void fillRectangular(T &v) {
+    const size_t size = v.size();
+    for (size_t i = 0; i < size; i++) {
+        v[i] = 1.0;
+    }
+} //rectangular
+
+template <typename T>
+static void fillTriangular(T &v, size_t overlap) {
+    const size_t size = v.size();
+    //ramp up
+    size_t i = 0;
+    if (overlap > 0) {
+        for (; i < overlap; i++) {
+            v[i] = (2.0 * i + 1) / (2 * overlap);
+        }
+    }
+
+    //flat top
+    for (; i < size - overlap; i++) {
+        v[i] = 1.0;
+    }
+
+    //ramp down
+    if (overlap > 0) {
+        for (; i < size; i++) {
+            v[i] = (2.0 * (size - i) - 1) / (2 * overlap);
+        }
+    }
+} //triangular
+
+template <typename T>
+static void fillHamming(T &v, size_t overlap) {
+    const size_t size = v.size();
+    const size_t twoOverlap = 2 * overlap;
+    size_t i = 0;
+    if (overlap > 0) {
+        for (; i < overlap; i++) {
+            v[i] = 0.54 - 0.46 * cos(TWOPI * i /(twoOverlap - 1));
+        }
+    }
+
+    //flat top
+    for (; i < size - overlap; i++) {
+        v[i] = 1.0;
+    }
+
+    //ramp down
+    if (overlap > 0) {
+        for (; i < size; i++) {
+            int k = i - ((int)size - 2 * overlap);
+            v[i] = 0.54 - 0.46 * cos(TWOPI * k / (twoOverlap - 1));
+        }
+    }
+} //hamming
+
+template <typename T>
+static void fillHanning(T &v, size_t overlap) {
+    const size_t size = v.size();
+    const size_t twoOverlap = 2 * overlap;
+    //ramp up
+    size_t i = 0;
+    if (overlap > 0) {
+        for (; i < overlap; i++) {
+            v[i] = 0.5 * (1.0 - cos(TWOPI * i / (twoOverlap - 1)));
+        }
+    }
+
+    //flat top
+    for (; i < size - overlap; i++) {
+        v[i] = 1.0;
+    }
+
+    //ramp down
+    if (overlap > 0) {
+        for (; i < size; i++) {
+            int k = i - ((int)size - 2 * overlap);
+            v[i] = 0.5 * (1.0 - cos(TWOPI * k / (twoOverlap - 1)));
+        }
+    }
+}
+
+template <typename T>
+static void fill_window(T &v, int type, size_t size, size_t overlap) {
+    if (overlap > size / 2) {
+        overlap = size / 2;
+    }
+    v.resize(size);
+
+    switch (type) {
+    case RDSP_WINDOW_RECTANGULAR:
+        fillRectangular(v);
+        break;
+    case RDSP_WINDOW_TRIANGULAR:
+        fillTriangular(v, size / 2);
+        break;
+    case RDSP_WINDOW_TRIANGULAR_FLAT_TOP:
+        fillTriangular(v, overlap);
+        break;
+    case RDSP_WINDOW_HAMMING:
+        fillHamming(v, size / 2);
+        break;
+    case RDSP_WINDOW_HAMMING_FLAT_TOP:
+        fillHamming(v, overlap);
+        break;
+    case RDSP_WINDOW_HANNING:
+        fillHanning(v, size / 2);
+        break;
+    case RDSP_WINDOW_HANNING_FLAT_TOP:
+        fillHanning(v, overlap);
+        break;
+    default:
+        ALOGE("Error: unknown window type %d", type);
+    }
+}
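+
+// Example (illustrative): build a flat-top Hanning window for a 2048-sample block with
+// 512-sample overlap ramps, as DPFrequency::configure() does for its overlap-add processing:
+//
+//   FloatVec window;
+//   fill_window(window, RDSP_WINDOW_HANNING_FLAT_TOP, 2048, 512);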
+
+#endif //RDSP_H
diff --git a/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
new file mode 100644
index 0000000..c139cd8
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHCIRCULARBUFFER_H
+#define SHCIRCULARBUFFER_H
+
+#include <log/log.h>
+#include <vector>
+
+template <class T>
+class SHCircularBuffer {
+
+public:
+    SHCircularBuffer() : mReadIndex(0), mWriteIndex(0), mReadAvailable(0) {
+    }
+
+    explicit SHCircularBuffer(size_t maxSize) {
+        resize(maxSize);
+    }
+    void resize(size_t maxSize) {
+        mBuffer.resize(maxSize);
+        mReadIndex = 0;
+        mWriteIndex = 0;
+        mReadAvailable = 0;
+    }
+    inline void write(T value) {
+        if (availableToWrite()) {
+            mBuffer[mWriteIndex++] = value;
+            if (mWriteIndex >= getSize()) {
+                mWriteIndex = 0;
+            }
+            mReadAvailable++;
+        } else {
+            ALOGE("Error: SHCircularBuffer no space to write. allocated size %zu ", getSize());
+        }
+    }
+    inline T read() {
+        T value = T();
+        if (availableToRead()) {
+            value = mBuffer[mReadIndex++];
+            if (mReadIndex >= getSize()) {
+                mReadIndex = 0;
+            }
+            mReadAvailable--;
+        } else {
+            ALOGW("Warning: SHCircularBuffer no data available to read. Default value returned");
+        }
+        return value;
+    }
+    inline size_t availableToRead() const {
+        return mReadAvailable;
+    }
+    inline size_t availableToWrite() const {
+        return getSize() - mReadAvailable;
+    }
+    inline size_t getSize() const {
+        return mBuffer.size();
+    }
+
+private:
+    std::vector<T> mBuffer;
+    size_t mReadIndex;
+    size_t mWriteIndex;
+    size_t mReadAvailable;
+};
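+
+// Usage sketch (illustrative): there is no internal locking, writes are dropped with an error
+// log once the buffer is full, and reads from an empty buffer return a default-constructed T.
+//
+//   SHCircularBuffer<float> buf(8);
+//   buf.write(1.0f);
+//   buf.write(2.0f);
+//   float first = buf.read();               // 1.0f
+//   size_t pending = buf.availableToRead(); // 1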
+
+
+#endif //SHCIRCULARBUFFER_H
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 03700bf..a3db754 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -333,6 +333,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
     MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
     MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
+    MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANT),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 3511253..b41da80 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -66,6 +66,8 @@
     METADATA_KEY_IMAGE_HEIGHT    = 30,
     METADATA_KEY_IMAGE_ROTATION  = 31,
     METADATA_KEY_VIDEO_FRAME_COUNT  = 32,
+    METADATA_KEY_EXIF_OFFSET     = 33,
+    METADATA_KEY_EXIF_LENGTH     = 34,
 
     // Add more here...
 };
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index c4dbf42..2335c5a 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -59,6 +59,7 @@
     MEDIA_SUBTITLE_DATA     = 201,
     MEDIA_META_DATA         = 202,
     MEDIA_DRM_INFO          = 210,
+    MEDIA_TIME_DISCONTINUITY = 211,
     MEDIA_AUDIO_ROUTING_CHANGED = 10000,
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index bd83c6d..14ffb1d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -220,8 +220,11 @@
     mUID = uid;
 }
 
-void NuPlayer::setDriver(const wp<NuPlayerDriver> &driver) {
+void NuPlayer::init(const wp<NuPlayerDriver> &driver) {
     mDriver = driver;
+
+    sp<AMessage> notify = new AMessage(kWhatMediaClockNotify, this);
+    mMediaClock->setNotificationMessage(notify);
 }
 
 void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) {
@@ -1425,6 +1428,24 @@
             break;
         }
 
+        case kWhatMediaClockNotify:
+        {
+            ALOGV("kWhatMediaClockNotify");
+            int64_t anchorMediaUs, anchorRealUs;
+            float playbackRate;
+            CHECK(msg->findInt64("anchor-media-us", &anchorMediaUs));
+            CHECK(msg->findInt64("anchor-real-us", &anchorRealUs));
+            CHECK(msg->findFloat("playback-rate", &playbackRate));
+
+            Parcel in;
+            in.writeInt64(anchorMediaUs);
+            in.writeInt64(anchorRealUs);
+            in.writeFloat(playbackRate);
+
+            notifyListener(MEDIA_TIME_DISCONTINUITY, 0, 0, &in);
+            break;
+        }
+
         default:
             TRESPASS();
             break;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 9481234..3a7ef4e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -39,7 +39,7 @@
 
     void setUID(uid_t uid);
 
-    void setDriver(const wp<NuPlayerDriver> &driver);
+    void init(const wp<NuPlayerDriver> &driver);
 
     void setDataSourceAsync(const sp<IStreamSource> &source);
 
@@ -158,6 +158,7 @@
         kWhatSetBufferingSettings       = 'sBuS',
         kWhatPrepareDrm                 = 'pDrm',
         kWhatReleaseDrm                 = 'rDrm',
+        kWhatMediaClockNotify           = 'mckN',
     };
 
     wp<NuPlayerDriver> mDriver;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 731fdba..63c887b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -104,7 +104,7 @@
 
     mLooper->registerHandler(mPlayer);
 
-    mPlayer->setDriver(this);
+    mPlayer->init(this);
 }
 
 NuPlayerDriver::~NuPlayerDriver() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index cc7f688..a762e76 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1617,14 +1617,7 @@
             // internal buffer before resuming playback.
             // FIXME: this is ignored after flush().
             mAudioSink->stop();
-            if (mPaused) {
-                // Race condition: if renderer is paused and audio sink is stopped,
-                // we need to make sure that the audio track buffer fully drains
-                // before delivering data.
-                // FIXME: remove this if we can detect if stop() is complete.
-                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
-                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
-            } else {
+            if (!mPaused) {
                 mAudioSink->start();
             }
             mNumFramesWritten = 0;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a8c6d15..3bbba49 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3291,6 +3291,22 @@
         return err;
     }
 
+    if (compressionFormat == OMX_VIDEO_CodingHEVC) {
+        int32_t profile;
+        if (msg->findInt32("profile", &profile)) {
+            // verify if Main10 profile is supported at all, and fail
+            // immediately if it's not supported.
+            if (profile == OMX_VIDEO_HEVCProfileMain10 ||
+                profile == OMX_VIDEO_HEVCProfileMain10HDR10) {
+                err = verifySupportForProfileAndLevel(
+                        kPortIndexInput, profile, 0);
+                if (err != OK) {
+                    return err;
+                }
+            }
+        }
+    }
+
     if (compressionFormat == OMX_VIDEO_CodingVP9) {
         OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
         InitOMXParams(&params);
@@ -4059,7 +4075,7 @@
             return INVALID_OPERATION;
         }
 
-        err = verifySupportForProfileAndLevel(profile, level);
+        err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
 
         if (err != OK) {
             return err;
@@ -4131,7 +4147,7 @@
             return INVALID_OPERATION;
         }
 
-        err = verifySupportForProfileAndLevel(profile, level);
+        err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
 
         if (err != OK) {
             return err;
@@ -4266,7 +4282,7 @@
             return INVALID_OPERATION;
         }
 
-        err = verifySupportForProfileAndLevel(profile, level);
+        err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
 
         if (err != OK) {
             return err;
@@ -4280,7 +4296,7 @@
         // Use largest supported profile for AVC recording if profile is not specified.
         for (OMX_VIDEO_AVCPROFILETYPE profile : {
                 OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCProfileMain }) {
-            if (verifySupportForProfileAndLevel(profile, 0) == OK) {
+            if (verifySupportForProfileAndLevel(kPortIndexOutput, profile, 0) == OK) {
                 h264type.eProfile = profile;
                 break;
             }
@@ -4457,7 +4473,7 @@
             return INVALID_OPERATION;
         }
 
-        err = verifySupportForProfileAndLevel(profile, level);
+        err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
         if (err != OK) {
             return err;
         }
@@ -4602,10 +4618,10 @@
 }
 
 status_t ACodec::verifySupportForProfileAndLevel(
-        int32_t profile, int32_t level) {
+        OMX_U32 portIndex, int32_t profile, int32_t level) {
     OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
     InitOMXParams(&params);
-    params.nPortIndex = kPortIndexOutput;
+    params.nPortIndex = portIndex;
 
     for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         params.nProfileIndex = index;
@@ -4906,8 +4922,8 @@
                             rect.nHeight = videoDef->nFrameHeight;
                         }
 
-                        if (rect.nLeft < 0 ||
-                            rect.nTop < 0 ||
+                        if (rect.nLeft < 0 || rect.nTop < 0 ||
+                            rect.nWidth == 0 || rect.nHeight == 0 ||
                             rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
                             rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
                             ALOGE("Wrong cropped rect (%d, %d, %u, %u) vs. frame (%u, %u)",
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 15843a2..41dbfd4 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -70,11 +70,9 @@
         it->mNotify->post();
         it = mTimers.erase(it);
     }
-    mAnchorTimeMediaUs = -1;
-    mAnchorTimeRealUs = -1;
     mMaxTimeMediaUs = INT64_MAX;
     mStartingTimeMediaUs = -1;
-    mPlaybackRate = 1.0;
+    updateAnchorTimesAndPlaybackRate_l(-1, -1, 1.0);
     ++mGeneration;
 }
 
@@ -85,8 +83,7 @@
 
 void MediaClock::clearAnchor() {
     Mutex::Autolock autoLock(mLock);
-    mAnchorTimeMediaUs = -1;
-    mAnchorTimeRealUs = -1;
+    updateAnchorTimesAndPlaybackRate_l(-1, -1, mPlaybackRate);
 }
 
 void MediaClock::updateAnchor(
@@ -118,8 +115,7 @@
             return;
         }
     }
-    mAnchorTimeRealUs = nowUs;
-    mAnchorTimeMediaUs = nowMediaUs;
+    updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, mPlaybackRate);
 
     ++mGeneration;
     processTimers_l();
@@ -139,13 +135,12 @@
     }
 
     int64_t nowUs = ALooper::GetNowUs();
-    mAnchorTimeMediaUs += (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
-    if (mAnchorTimeMediaUs < 0) {
+    int64_t nowMediaUs = mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
+    if (nowMediaUs < 0) {
         ALOGW("setRate: anchor time should not be negative, set to 0.");
-        mAnchorTimeMediaUs = 0;
+        nowMediaUs = 0;
     }
-    mAnchorTimeRealUs = nowUs;
-    mPlaybackRate = rate;
+    updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, rate);
 
     if (rate > 0.0) {
         ++mGeneration;
@@ -313,4 +308,31 @@
     msg->post(nextLapseRealUs);
 }
 
+void MediaClock::updateAnchorTimesAndPlaybackRate_l(int64_t anchorTimeMediaUs,
+        int64_t anchorTimeRealUs, float playbackRate) {
+    if (mAnchorTimeMediaUs != anchorTimeMediaUs
+            || mAnchorTimeRealUs != anchorTimeRealUs
+            || mPlaybackRate != playbackRate) {
+        mAnchorTimeMediaUs = anchorTimeMediaUs;
+        mAnchorTimeRealUs = anchorTimeRealUs;
+        mPlaybackRate = playbackRate;
+        notifyDiscontinuity_l();
+    }
+}
+
+void MediaClock::setNotificationMessage(const sp<AMessage> &msg) {
+    Mutex::Autolock autoLock(mLock);
+    mNotify = msg;
+}
+
+void MediaClock::notifyDiscontinuity_l() {
+    if (mNotify != nullptr) {
+        sp<AMessage> msg = mNotify->dup();
+        msg->setInt64("anchor-media-us", mAnchorTimeMediaUs);
+        msg->setInt64("anchor-real-us", mAnchorTimeRealUs);
+        msg->setFloat("playback-rate", mPlaybackRate);
+        msg->post();
+    }
+}
+
 }  // namespace android
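With the new setNotificationMessage()/notifyDiscontinuity_l() pair, every effective change of anchor time or playback rate is reported through a posted AMessage carrying "anchor-media-us", "anchor-real-us" and "playback-rate". A sketch of how a consumer might wire this up, assuming the usual ALooper/AHandler plumbing; the handler class and the 'mcDc' what-code are invented for this example and are not part of the patch:

    // Illustrative only; shown out of context.
    struct ClockObserver : public AHandler {
        static const uint32_t kWhatClockDiscontinuity = 'mcDc';

        void onMessageReceived(const sp<AMessage> &msg) override {
            if (msg->what() != kWhatClockDiscontinuity) {
                return;
            }
            int64_t anchorMediaUs, anchorRealUs;
            float rate;
            if (msg->findInt64("anchor-media-us", &anchorMediaUs)
                    && msg->findInt64("anchor-real-us", &anchorRealUs)
                    && msg->findFloat("playback-rate", &rate)) {
                ALOGV("clock discontinuity: media %lld us, real %lld us, rate %f",
                      (long long)anchorMediaUs, (long long)anchorRealUs, rate);
            }
        }
    };

    // Registration, e.g. by the component that owns the MediaClock:
    //     sp<ClockObserver> observer = new ClockObserver();
    //     looper->registerHandler(observer);
    //     mediaClock->setNotificationMessage(
    //             new AMessage(ClockObserver::kWhatClockDiscontinuity, observer));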
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index b874df4..f25d1f1 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -82,7 +82,6 @@
 
 // NB: These are not yet exposed as public Java API constants.
 static const char *kCodecCrypto = "android.media.mediacodec.crypto";   /* 0,1 */
-static const char *kCodecBytesIn = "android.media.mediacodec.bytesin";  /* 0..n */
 static const char *kCodecProfile = "android.media.mediacodec.profile";  /* 0..n */
 static const char *kCodecLevel = "android.media.mediacodec.level";  /* 0..n */
 static const char *kCodecMaxWidth = "android.media.mediacodec.maxwidth";  /* 0..n */
@@ -3202,10 +3201,6 @@
         info->mData.clear();
 
         statsBufferSent(timeUs);
-
-        if (mAnalyticsItem != NULL) {
-            mAnalyticsItem->addInt64(kCodecBytesIn, size);
-        }
     }
 
     return err;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 179e0e6..5ae5644 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -438,6 +438,15 @@
         mMetaData.add(METADATA_KEY_CAPTURE_FRAMERATE, String8(tmp));
     }
 
+    int64_t exifOffset, exifSize;
+    if (meta->findInt64(kKeyExifOffset, &exifOffset)
+     && meta->findInt64(kKeyExifSize, &exifSize)) {
+        sprintf(tmp, "%lld", (long long)exifOffset);
+        mMetaData.add(METADATA_KEY_EXIF_OFFSET, String8(tmp));
+        sprintf(tmp, "%lld", (long long)exifSize);
+        mMetaData.add(METADATA_KEY_EXIF_LENGTH, String8(tmp));
+    }
+
     bool hasAudio = false;
     bool hasVideo = false;
     int32_t videoWidth = -1;
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index eae73fc..1b38852 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -818,7 +818,8 @@
     uint16_t *dst_ptr = (uint16_t *)dst.mBits
         + dst.mCropTop * dst.mWidth + dst.mCropLeft;
 
-    const uint8_t *src_y = (const uint8_t *)src.mBits;
+    const uint8_t *src_y =
+        (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
 
     const uint8_t *src_u =
         (const uint8_t *)src_y + src.mWidth * (src.mHeight - src.mCropTop / 2);
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index f55de64..3b84018 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -944,4 +944,47 @@
     return mItems[index].mName;
 }
 
+status_t AMessage::setEntryNameAt(size_t index, const char *name) {
+    if (index >= mNumItems) {
+        return BAD_INDEX;
+    }
+    if (name == nullptr) {
+        return BAD_VALUE;
+    }
+    if (!strcmp(name, mItems[index].mName)) {
+        return OK; // name has not changed
+    }
+    size_t len = strlen(name);
+    if (findItemIndex(name, len) < mNumItems) {
+        return ALREADY_EXISTS;
+    }
+    delete[] mItems[index].mName;
+    mItems[index].mName = nullptr;
+    mItems[index].setName(name, len);
+    return OK;
+}
+
+status_t AMessage::removeEntryAt(size_t index) {
+    if (index >= mNumItems) {
+        return BAD_INDEX;
+    }
+    // delete entry data and objects
+    --mNumItems;
+    delete[] mItems[index].mName;
+    mItems[index].mName = nullptr;
+    freeItemValue(&mItems[index]);
+
+    // swap entry with last entry and clear last entry's data
+    if (index < mNumItems) {
+        mItems[index] = mItems[mNumItems];
+        mItems[mNumItems].mName = nullptr;
+        mItems[mNumItems].mType = kTypeInt32;
+    }
+    return OK;
+}
+
+size_t AMessage::findEntryByName(const char *name) const {
+    return name == nullptr ? countEntries() : findItemIndex(name, strlen(name));
+}
+
 }  // namespace android
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index b343c16..f663542 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -24,10 +24,12 @@
     header_libs: [
         "libhardware_headers",
         "libstagefright_foundation_headers",
+        "media_plugin_headers",
     ],
 
     export_header_lib_headers: [
         "libstagefright_foundation_headers",
+        "media_plugin_headers",
     ],
 
     export_shared_lib_headers: [
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 8580eb5..d90a0de 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -183,6 +183,36 @@
     size_t countEntries() const;
     const char *getEntryNameAt(size_t index, Type *type) const;
 
+    /**
+     * Finds an entry by name and returns its index.
+     *
+     * \retval countEntries() if the entry is not found.
+     */
+    size_t findEntryByName(const char *name) const;
+
+    /**
+     * Sets the name of an entry based on index.
+     *
+     * \param index index of the entry
+     * \param name (new) name of the entry
+     *
+     * \retval OK the name was set successfully
+     * \retval BAD_INDEX invalid index
+     * \retval BAD_VALUE name is invalid (null)
+     * \retval ALREADY_EXISTS name is already used by another entry
+     */
+    status_t setEntryNameAt(size_t index, const char *name);
+
+    /**
+     * Removes an entry based on index.
+     *
+     * \param index index of the entry
+     *
+     * \retval OK the entry was removed successfully
+     * \retval BAD_INDEX invalid index
+     */
+    status_t removeEntryAt(size_t index);
+
 protected:
     virtual ~AMessage();
 
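A short usage sketch for the entry-editing API declared above; the key names, the buffer and the rectangle are arbitrary illustration values:

    // Rename one format entry if present, then drop another one.
    sp<ABuffer> csd = new ABuffer(1024);
    sp<AMessage> format = new AMessage;
    format->setBuffer("csd-0", csd);
    format->setRect("crop", 0, 0, 1919, 1079);

    size_t idx = format->findEntryByName("csd-0");
    if (idx < format->countEntries()) {
        // Returns ALREADY_EXISTS if another entry is already named "csd-1".
        CHECK_EQ(format->setEntryNameAt(idx, "csd-1"), (status_t)OK);
    }

    idx = format->findEntryByName("crop");
    if (idx < format->countEntries()) {
        // removeEntryAt() swaps the last entry into this slot, so any indices
        // obtained before the call are invalidated.
        format->removeEntryAt(idx);
    }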
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 1a5304b..64caeed 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -493,7 +493,8 @@
     status_t setupHEVCEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
     status_t setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
 
-    status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
+    status_t verifySupportForProfileAndLevel(
+            OMX_U32 portIndex, int32_t profile, int32_t level);
 
     status_t configureImageGrid(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
     status_t configureBitrate(
diff --git a/media/libstagefright/include/media/stagefright/MediaClock.h b/media/libstagefright/include/media/stagefright/MediaClock.h
index 7511913..3ddeb82 100644
--- a/media/libstagefright/include/media/stagefright/MediaClock.h
+++ b/media/libstagefright/include/media/stagefright/MediaClock.h
@@ -66,6 +66,8 @@
     // mediaTimeUs + (adjustRealUs / playbackRate)
     void addTimer(const sp<AMessage> &notify, int64_t mediaTimeUs, int64_t adjustRealUs = 0);
 
+    void setNotificationMessage(const sp<AMessage> &msg);
+
     void reset();
 
 protected:
@@ -92,6 +94,11 @@
 
     void processTimers_l();
 
+    void updateAnchorTimesAndPlaybackRate_l(
+            int64_t anchorTimeMediaUs, int64_t anchorTimeRealUs, float playbackRate);
+
+    void notifyDiscontinuity_l();
+
     sp<ALooper> mLooper;
     mutable Mutex mLock;
 
@@ -104,6 +111,7 @@
 
     int32_t mGeneration;
     std::list<Timer> mTimers;
+    sp<AMessage> mNotify;
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaClock);
 };
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d8c41d2..28524b0 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -49,10 +49,8 @@
     AMEDIAFORMAT_KEY_DURATION; # var
     AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
     AMEDIAFORMAT_KEY_FRAME_RATE; # var
-    AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
-    AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
     AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
-    AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
     AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
     AMEDIAFORMAT_KEY_HEIGHT; # var
     AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
@@ -79,6 +77,8 @@
     AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_STRIDE; # var
     AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+    AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
     AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
     AMEDIAFORMAT_KEY_WIDTH; # var
     AMediaCodecActionCode_isRecoverable; # introduced=28
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ea06b6c..b38d37f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -753,8 +753,8 @@
         output.notificationFrameCount = input.notificationFrameCount;
         output.flags = input.flags;
 
-        track = thread->createTrack_l(client, streamType, &output.sampleRate, input.config.format,
-                                      input.config.channel_mask,
+        track = thread->createTrack_l(client, streamType, input.attr, &output.sampleRate,
+                                      input.config.format, input.config.channel_mask,
                                       &output.frameCount, &output.notificationFrameCount,
                                       input.notificationsPerBuffer, input.speed,
                                       input.sharedBuffer, sessionId, &output.flags,
@@ -1673,7 +1673,7 @@
         output.frameCount = input.frameCount;
         output.notificationFrameCount = input.notificationFrameCount;
 
-        recordTrack = thread->createRecordTrack_l(client, &output.sampleRate,
+        recordTrack = thread->createRecordTrack_l(client, input.attr, &output.sampleRate,
                                                   input.config.format, input.config.channel_mask,
                                                   &output.frameCount, sessionId,
                                                   &output.notificationFrameCount,
@@ -1962,39 +1962,10 @@
 
 status_t AudioFlinger::getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
 {
-    // Fake data
-    size_t fakeNum = 2;
-    audio_devices_t fakeTypes[] = { AUDIO_DEVICE_IN_BUILTIN_MIC, AUDIO_DEVICE_IN_BACK_MIC };
-    for (size_t i = 0; i < fakeNum; i++) {
-        struct audio_microphone_characteristic_t characteristics;
-        sprintf(characteristics.device_id, "microphone:%zu", i);
-        characteristics.device = fakeTypes[i];
-        sprintf(characteristics.address, "");
-        characteristics.location = AUDIO_MICROPHONE_LOCATION_MAINBODY;
-        characteristics.group = 0;
-        characteristics.index_in_the_group = i;
-        characteristics.sensitivity = 1.0f;
-        characteristics.max_spl = 100.0f;
-        characteristics.min_spl = 0.0f;
-        characteristics.directionality = AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
-        characteristics.num_frequency_responses = 5 - i;
-        for (size_t j = 0; j < characteristics.num_frequency_responses; j++) {
-            characteristics.frequency_responses[0][j] = 100.0f - j;
-            characteristics.frequency_responses[1][j] = 100.0f + j;
-        }
-        for (size_t j = 0; j < AUDIO_CHANNEL_COUNT_MAX; j++) {
-            characteristics.channel_mapping[j] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
-        }
-        characteristics.geometric_location.x = 0.1f;
-        characteristics.geometric_location.y = 0.2f;
-        characteristics.geometric_location.z = 0.3f;
-        characteristics.orientation.x = 0.0f;
-        characteristics.orientation.y = 1.0f;
-        characteristics.orientation.z = 0.0f;
-        media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(characteristics);
-        microphones->push_back(microphoneInfo);
-    }
-    return NO_ERROR;
+    AutoMutex lock(mHardwareLock);
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
+    status_t status = dev->getMicrophones(microphones);
+    return status;
 }
 
 // setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index 366a164..a210a1b 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -23,6 +23,7 @@
 class MmapTrack : public TrackBase {
 public:
                 MmapTrack(ThreadBase *thread,
+                            const audio_attributes_t& attr,
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 6454be5..ea01a25 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -25,6 +25,7 @@
                         Track(  PlaybackThread *thread,
                                 const sp<Client>& client,
                                 audio_stream_type_t streamType,
+                                const audio_attributes_t& attr,
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 1733ef5..2b993ee 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -24,6 +24,7 @@
 public:
                         RecordTrack(RecordThread *thread,
                                 const sp<Client>& client,
+                                const audio_attributes_t& attr,
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 62e9fe7..b5b50f8 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -57,6 +57,7 @@
 #include <powermanager/PowerManager.h>
 
 #include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
 
 #include "AudioFlinger.h"
 #include "FastMixer.h"
@@ -1554,6 +1555,7 @@
     mActiveTracksGeneration++;
     mLatestActiveTrack = track;
     ++mBatteryCounter[track->uid()].second;
+    mHasChanged = true;
     return mActiveTracks.add(track);
 }
 
@@ -1568,6 +1570,7 @@
     mActiveTracksGeneration++;
     --mBatteryCounter[track->uid()].second;
     // mLatestActiveTrack is not cleared even if is the same as track.
+    mHasChanged = true;
     return index;
 }
 
@@ -1578,6 +1581,7 @@
         logTrack("clear", track);
     }
     mLastActiveTracksGeneration = mActiveTracksGeneration;
+    if (!mActiveTracks.empty()) { mHasChanged = true; }
     mActiveTracks.clear();
     mLatestActiveTrack.clear();
     mBatteryCounter.clear();
@@ -1615,6 +1619,13 @@
 }
 
 template <typename T>
+bool AudioFlinger::ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
+    const bool hasChanged = mHasChanged;
+    mHasChanged = false;
+    return hasChanged;
+}
+
+template <typename T>
 void AudioFlinger::ThreadBase::ActiveTracks<T>::logTrack(
         const char *funcName, const sp<T> &track) const {
     if (mLocalLog != nullptr) {
@@ -1847,6 +1858,7 @@
 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
         audio_stream_type_t streamType,
+        const audio_attributes_t& attr,
         uint32_t *pSampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
@@ -2125,7 +2137,7 @@
             }
         }
 
-        track = new Track(this, client, streamType, sampleRate, format,
+        track = new Track(this, client, streamType, attr, sampleRate, format,
                           channelMask, frameCount,
                           nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
                           sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
@@ -2609,6 +2621,24 @@
     }
 }
 
+void AudioFlinger::PlaybackThread::updateMetadata_l()
+{
+    // TODO: add volume support
+    if (mOutput == nullptr || mOutput->stream == nullptr ||
+            !mActiveTracks.readAndClearHasChanged()) {
+        return;
+    }
+    StreamOutHalInterface::SourceMetadata metadata;
+    for (const sp<Track> &track : mActiveTracks) {
+        // No track can be invalid here, as this is called after prepareTrack_l in the same critical section
+        metadata.tracks.push_back({
+                .usage = track->attributes().usage,
+                .content_type = track->attributes().content_type,
+                .gain = 1,
+        });
+    }
+    mOutput->stream->updateSourceMetadata(metadata);
+}
 
 status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
 {
@@ -3306,6 +3336,8 @@
 
             mActiveTracks.updatePowerState(this);
 
+            updateMetadata_l();
+
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
             // or modified if an effect is created or deleted
@@ -6117,6 +6149,17 @@
     return true;
 }
 
+void AudioFlinger::DuplicatingThread::updateMetadata_l()
+{
+    // TODO: The duplicated track metadata are stored in other threads
+    // (accessible through mActiveTracks::OutputTrack::thread()::mActiveTracks::Track::attributes())
+    // but this information can be mutated at any time by the owning threads.
+    // Taking the lock of any other owning thread is not possible due to timing constraints.
+    // Similarly, the other threads cannot push the metadata into this thread, as that could
+    // create a cross-thread deadlock.
+    // A lock-free structure needs to be used to share the metadata (maybe an atomic shared_ptr?).
+}
+
 uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
 {
     return (mWaitTimeMs * 1000) / 2;
@@ -6444,6 +6487,8 @@
 
             mActiveTracks.updatePowerState(this);
 
+            updateMetadata_l();
+
             if (allStopped) {
                 standbyIfNotAlreadyInStandby();
             }
@@ -6808,6 +6853,7 @@
 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
+        const audio_attributes_t& attr,
         uint32_t *pSampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
@@ -6941,7 +6987,7 @@
     { // scope for mLock
         Mutex::Autolock _l(mLock);
 
-        track = new RecordTrack(this, client, sampleRate,
+        track = new RecordTrack(this, client, attr, sampleRate,
                       format, channelMask, frameCount,
                       nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, uid,
                       *flags, TrackBase::TYPE_DEFAULT, portId);
@@ -7129,42 +7175,25 @@
 {
     ALOGV("RecordThread::getActiveMicrophones");
     AutoMutex _l(mLock);
-    // Fake data
-    struct audio_microphone_characteristic_t characteristic;
-    sprintf(characteristic.device_id, "builtin_mic");
-    characteristic.device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-    sprintf(characteristic.address, "");
-    characteristic.location = AUDIO_MICROPHONE_LOCATION_MAINBODY;
-    characteristic.group = 0;
-    characteristic.index_in_the_group = 0;
-    characteristic.sensitivity = 1.0f;
-    characteristic.max_spl = 100.0f;
-    characteristic.min_spl = 0.0f;
-    characteristic.directionality = AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
-    characteristic.num_frequency_responses = 5;
-    for (size_t i = 0; i < characteristic.num_frequency_responses; i++) {
-        characteristic.frequency_responses[0][i] = 100.0f - i;
-        characteristic.frequency_responses[1][i] = 100.0f + i;
+    status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
+    return status;
+}
+
+void AudioFlinger::RecordThread::updateMetadata_l()
+{
+    if (mInput == nullptr || mInput->stream == nullptr ||
+            !mActiveTracks.readAndClearHasChanged()) {
+        return;
     }
-    for (size_t i = 0; i < AUDIO_CHANNEL_COUNT_MAX; i++) {
-        characteristic.channel_mapping[i] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+    StreamInHalInterface::SinkMetadata metadata;
+    for (const sp<RecordTrack> &track : mActiveTracks) {
+        // No track can be invalid here, as this is called after prepareTrack_l in the same critical section
+        metadata.tracks.push_back({
+                .source = track->attributes().source,
+                .gain = 1, // capture tracks do not have volumes
+        });
     }
-    audio_microphone_channel_mapping_t channel_mappings[] = {
-        AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT,
-        AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED,
-    };
-    for (size_t i = 0; i < mChannelCount; i++) {
-        characteristic.channel_mapping[i] = channel_mappings[i % 2];
-    }
-    characteristic.geometric_location.x = 0.1f;
-    characteristic.geometric_location.y = 0.2f;
-    characteristic.geometric_location.z = 0.3f;
-    characteristic.orientation.x = 0.0f;
-    characteristic.orientation.y = 1.0f;
-    characteristic.orientation.z = 0.0f;
-    media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(characteristic);
-    activeMicrophones->push_back(microphoneInfo);
-    return NO_ERROR;
+    mInput->stream->updateSinkMetadata(metadata);
 }
 
 // destroyTrack_l() must be called with ThreadBase::mLock held
@@ -7994,7 +8023,8 @@
         return PERMISSION_DENIED;
     }
 
-    sp<MmapTrack> track = new MmapTrack(this, mSampleRate, mFormat, mChannelMask, mSessionId,
+    // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes?
+    sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
                                         client.clientUid, client.clientPid, portId);
 
     mActiveTracks.add(track);
@@ -8130,6 +8160,8 @@
 
         mActiveTracks.updatePowerState(this);
 
+        updateMetadata_l();
+
         lockEffectChains_l(effectChains);
         for (size_t i = 0; i < effectChains.size(); i ++) {
             effectChains[i]->process_l();
@@ -8677,6 +8709,24 @@
     }
 }
 
+void AudioFlinger::MmapPlaybackThread::updateMetadata_l()
+{
+    if (mOutput == nullptr || mOutput->stream == nullptr ||
+            !mActiveTracks.readAndClearHasChanged()) {
+        return;
+    }
+    StreamOutHalInterface::SourceMetadata metadata;
+    for (const sp<MmapTrack> &track : mActiveTracks) {
+        // No track can be invalid here, as this is called after prepareTrack_l in the same critical section
+        metadata.tracks.push_back({
+                .usage = track->attributes().usage,
+                .content_type = track->attributes().content_type,
+                .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
+        });
+    }
+    mOutput->stream->updateSourceMetadata(metadata);
+}
+
 void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
 {
     if (!mMasterMute) {
@@ -8721,4 +8771,22 @@
     mInput = NULL;
     return input;
 }
+
+void AudioFlinger::MmapCaptureThread::updateMetadata_l()
+{
+    if (mInput == nullptr || mInput->stream == nullptr ||
+            !mActiveTracks.readAndClearHasChanged()) {
+        return;
+    }
+    StreamInHalInterface::SinkMetadata metadata;
+    for (const sp<MmapTrack> &track : mActiveTracks) {
+        // No track can be invalid here, as this is called after prepareTrack_l in the same critical section
+        metadata.tracks.push_back({
+                .source = track->attributes().source,
+                .gain = 1, // capture tracks do not have volumes
+        });
+    }
+    mInput->stream->updateSinkMetadata(metadata);
+}
+
 } // namespace android
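The DuplicatingThread::updateMetadata_l() TODO above suggests a lock-free hand-off of track metadata between threads. One possible shape of that idea, sketched in standard C++ only (the types are stand-ins, and none of this is part of the patch): each owning thread publishes an immutable snapshot through an atomic shared_ptr, and the duplicating thread reads the latest snapshot without taking the owner's lock.

    #include <atomic>
    #include <memory>
    #include <vector>

    struct TrackMetadata { int usage; int contentType; float gain; };
    using MetadataSnapshot = std::vector<TrackMetadata>;

    class MetadataMailbox {
    public:
        // Writer side (the owning thread): publish a fresh, immutable snapshot.
        void publish(MetadataSnapshot snapshot) {
            std::atomic_store(&mSnapshot,
                    std::make_shared<const MetadataSnapshot>(std::move(snapshot)));
        }

        // Reader side (the duplicating thread): grab whatever was last published.
        std::shared_ptr<const MetadataSnapshot> read() const {
            return std::atomic_load(&mSnapshot);
        }

    private:
        std::shared_ptr<const MetadataSnapshot> mSnapshot;
    };

The writer allocates a fresh snapshot on every publish, so readers never observe a partially updated vector; the cost is one allocation per metadata change, which is in line with how rarely the active-track set changes.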
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7cd46a7..bb81224 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -425,6 +425,9 @@
                 // check if some effects must be suspended when an effect chain is added
                 void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
 
+                // sends the metadata of the active tracks to the HAL
+    virtual     void        updateMetadata_l() = 0;
+
                 String16 getWakeLockTag();
 
     virtual     void        preExit() { }
@@ -563,6 +566,10 @@
                     // periodically called in the threadLoop() to update power state uids.
                     void            updatePowerState(sp<ThreadBase> thread, bool force = false);
 
+                    /** @return true if the active tracks have changed since the last time
+                     *          this function was called or the vector was created. */
+                    bool            readAndClearHasChanged();
+
                 private:
                     void            logTrack(const char *funcName, const sp<T> &track) const;
 
@@ -581,6 +588,8 @@
                     int                 mLastActiveTracksGeneration;
                     wp<T>               mLatestActiveTrack; // latest track added to ActiveTracks
                     SimpleLog * const   mLocalLog;
+                    // If the active tracks have changed since the last call to readAndClearHasChanged
+                    bool                mHasChanged = false;
                 };
 
                 SimpleLog mLocalLog;
@@ -706,6 +715,7 @@
                 sp<Track>   createTrack_l(
                                 const sp<AudioFlinger::Client>& client,
                                 audio_stream_type_t streamType,
+                                const audio_attributes_t& attr,
                                 uint32_t *sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
@@ -917,6 +927,7 @@
     void        removeTrack_l(const sp<Track>& track);
 
     void        readOutputParameters_l();
+    void        updateMetadata_l() override;
 
     virtual void dumpInternals(int fd, const Vector<String16>& args);
     void        dumpTracks(int fd, const Vector<String16>& args);
@@ -1275,6 +1286,8 @@
                 void        addOutputTrack(MixerThread* thread);
                 void        removeOutputTrack(MixerThread* thread);
                 uint32_t    waitTimeMs() const { return mWaitTimeMs; }
+
+                void        updateMetadata_l() override;
 protected:
     virtual     uint32_t    activeSleepTimeUs() const;
 
@@ -1387,6 +1400,7 @@
 
             sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                     const sp<AudioFlinger::Client>& client,
+                    const audio_attributes_t& attr,
                     uint32_t *pSampleRate,
                     audio_format_t format,
                     audio_channel_mask_t channelMask,
@@ -1461,6 +1475,8 @@
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+            void        updateMetadata_l() override;
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
@@ -1658,6 +1674,8 @@
 
     virtual     bool        isOutput() const override { return true; }
 
+                void        updateMetadata_l() override;
+
 protected:
 
                 audio_stream_type_t         mStreamType;
@@ -1684,6 +1702,8 @@
 
     virtual     bool           isOutput() const override { return false; }
 
+                void           updateMetadata_l() override;
+
 protected:
 
                 AudioStreamIn*  mInput;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index a7e966f..ccfb69f 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -61,6 +61,7 @@
 
                         TrackBase(ThreadBase *thread,
                                 const sp<Client>& client,
+                                const audio_attributes_t& mAttr,
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
@@ -97,6 +98,7 @@
     virtual void        invalidate() { mIsInvalid = true; }
             bool        isInvalid() const { return mIsInvalid; }
 
+    audio_attributes_t  attributes() const { return mAttr; }
 
 protected:
     DISALLOW_COPY_AND_ASSIGN(TrackBase);
@@ -188,6 +190,7 @@
     size_t              mBufferSize; // size of mBuffer in bytes
     // we don't really need a lock for these
     track_state         mState;
+    const audio_attributes_t mAttr;
     const uint32_t      mSampleRate;    // initial sample rate only; for tracks which
                         // support dynamic rates, the current value is in control block
     const audio_format_t mFormat;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9b93939..236412b 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -63,6 +63,7 @@
 AudioFlinger::ThreadBase::TrackBase::TrackBase(
             ThreadBase *thread,
             const sp<Client>& client,
+            const audio_attributes_t& attr,
             uint32_t sampleRate,
             audio_format_t format,
             audio_channel_mask_t channelMask,
@@ -81,6 +82,7 @@
         mCblk(NULL),
         // mBuffer, mBufferSize
         mState(IDLE),
+        mAttr(attr),
         mSampleRate(sampleRate),
         mFormat(format),
         mChannelMask(channelMask),
@@ -372,6 +374,7 @@
             PlaybackThread *thread,
             const sp<Client>& client,
             audio_stream_type_t streamType,
+            const audio_attributes_t& attr,
             uint32_t sampleRate,
             audio_format_t format,
             audio_channel_mask_t channelMask,
@@ -384,7 +387,7 @@
             audio_output_flags_t flags,
             track_type type,
             audio_port_handle_t portId)
-    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+    :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
                   (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
                   (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
                   sessionId, uid, true /*isOut*/,
@@ -761,6 +764,12 @@
                 mState = state;
             }
         }
+
+        if (status == NO_ERROR || status == ALREADY_EXISTS) {
+            // for streaming tracks, remove the buffer read stop limit.
+            mAudioTrackServerProxy->start();
+        }
+
         // track was already in the active list, not a problem
         if (status == ALREADY_EXISTS) {
             status = NO_ERROR;
@@ -1259,6 +1268,7 @@
             size_t frameCount,
             uid_t uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
+              audio_attributes_t{} /* currently unused for output track */,
               sampleRate, format, channelMask, frameCount,
               nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
               AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
@@ -1461,6 +1471,7 @@
                                                      size_t bufferSize,
                                                      audio_output_flags_t flags)
     :   Track(playbackThread, NULL, streamType,
+              audio_attributes_t{} /* currently unused for patch track */,
               sampleRate, format, channelMask, frameCount,
               buffer, bufferSize, nullptr /* sharedBuffer */,
               AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
@@ -1595,6 +1606,7 @@
 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
             RecordThread *thread,
             const sp<Client>& client,
+            const audio_attributes_t& attr,
             uint32_t sampleRate,
             audio_format_t format,
             audio_channel_mask_t channelMask,
@@ -1606,7 +1618,7 @@
             audio_input_flags_t flags,
             track_type type,
             audio_port_handle_t portId)
-    :   TrackBase(thread, client, sampleRate, format,
+    :   TrackBase(thread, client, attr, sampleRate, format,
                   channelMask, frameCount, buffer, bufferSize, sessionId, uid, false /*isOut*/,
                   (type == TYPE_DEFAULT) ?
                           ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
@@ -1821,7 +1833,9 @@
                                                      void *buffer,
                                                      size_t bufferSize,
                                                      audio_input_flags_t flags)
-    :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
+    :   RecordTrack(recordThread, NULL,
+                audio_attributes_t{} /* currently unused for patch track */,
+                sampleRate, format, channelMask, frameCount,
                 buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
                 mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
 {
@@ -1882,6 +1896,7 @@
 
 
 AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
+        const audio_attributes_t& attr,
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
@@ -1889,7 +1904,7 @@
         uid_t uid,
         pid_t pid,
         audio_port_handle_t portId)
-    :   TrackBase(thread, NULL, sampleRate, format,
+    :   TrackBase(thread, NULL, attr, sampleRate, format,
                   channelMask, (size_t)0 /* frameCount */,
                   nullptr /* buffer */, (size_t)0 /* bufferSize */,
                   sessionId, uid, false /* isOut */,
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 4862684..fc012a2 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -38,6 +38,7 @@
     DEVICE_CATEGORY_SPEAKER,
     DEVICE_CATEGORY_EARPIECE,
     DEVICE_CATEGORY_EXT_MEDIA,
+    DEVICE_CATEGORY_HEARING_AID,
     DEVICE_CATEGORY_CNT
 };
 
@@ -125,8 +126,9 @@
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
         case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
         case AUDIO_DEVICE_OUT_USB_HEADSET:
-        case AUDIO_DEVICE_OUT_HEARING_AID:
             return DEVICE_CATEGORY_HEADSET;
+        case AUDIO_DEVICE_OUT_HEARING_AID:
+            return DEVICE_CATEGORY_HEARING_AID;
         case AUDIO_DEVICE_OUT_LINE:
         case AUDIO_DEVICE_OUT_AUX_DIGITAL:
         case AUDIO_DEVICE_OUT_USB_DEVICE:
diff --git a/services/audiopolicy/common/managerdefinitions/include/Gains.h b/services/audiopolicy/common/managerdefinitions/include/Gains.h
index 8332af9..cb229a4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Gains.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Gains.h
@@ -52,6 +52,7 @@
     static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
     static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
     static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sHearingAidVolumeCurve[Volume::VOLCNT];
     // default volume curves per stream and device category. See initializeVolumeCurves()
     static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
 };
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 094ff65..d85562e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -391,6 +391,7 @@
     mSamplingRate = 0;
     mChannelMask = AUDIO_CHANNEL_NONE;
     mFormat = AUDIO_FORMAT_INVALID;
+    memset(&mGain, 0, sizeof(struct audio_gain_config));
     mGain.index = -1;
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
index b2dafdd..6407a17 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
@@ -113,86 +113,104 @@
     {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
 };
 
+const VolumeCurvePoint
+Gains::sHearingAidVolumeCurve[Volume::VOLCNT] = {
+    {1, -128.0f}, {20, -80.0f}, {60, -40.0f}, {100, 0.0f}
+};
+
 const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
                                                   [DEVICE_CATEGORY_CNT] = {
     { // AUDIO_STREAM_VOICE_CALL
         Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_SYSTEM
         Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_RING
         Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_MUSIC
         Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_ALARM
         Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_NOTIFICATION
         Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_BLUETOOTH_SCO
         Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve      // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_ENFORCED_AUDIBLE
         Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
     },
     {  // AUDIO_STREAM_DTMF
         Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve       // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_TTS
       // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
         Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_HEADSET
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_EARPIECE
-        Gains::sSilentVolumeCurve     // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sSilentVolumeCurve,    // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_ACCESSIBILITY
         Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sHearingAidVolumeCurve     // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_REROUTING
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
     },
     { // AUDIO_STREAM_PATCH
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
         Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_HEARING_AID
     },
 };
 
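Each VolumeCurvePoint pair above maps a volume index to an attenuation in dB (the XML tables later in this change express the same curves in millibels). As a hedged illustration only, not code from this patch, such a table is commonly evaluated by linear interpolation in dB between the two points bracketing the requested index, clamped at the ends:

    #include <cstdio>

    struct CurvePoint { int index; float dbAttenuation; };

    static float attenuationDbForIndex(const CurvePoint *curve, int count, int volIndex) {
        if (volIndex <= curve[0].index) return curve[0].dbAttenuation;
        if (volIndex >= curve[count - 1].index) return curve[count - 1].dbAttenuation;
        for (int i = 1; i < count; ++i) {
            if (volIndex <= curve[i].index) {
                const float span = curve[i].index - curve[i - 1].index;
                const float frac = (volIndex - curve[i - 1].index) / span;
                return curve[i - 1].dbAttenuation
                        + frac * (curve[i].dbAttenuation - curve[i - 1].dbAttenuation);
            }
        }
        return curve[count - 1].dbAttenuation; // not reached
    }

    int main() {
        // Same shape as the new hearing aid curve: {1,-128}, {20,-80}, {60,-40}, {100,0}.
        const CurvePoint hearingAid[] = {{1, -128.0f}, {20, -80.0f}, {60, -40.0f}, {100, 0.0f}};
        printf("index 40 -> %.1f dB\n", attenuationDbForIndex(hearingAid, 4, 40)); // -60.0 dB
        return 0;
    }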
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 7273d0d..6f48eae 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -29,6 +29,7 @@
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEARING_AID),
     TERMINATOR
 };
 
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index 43a47b0..ec64a7c 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -43,6 +43,8 @@
     </volume>
     <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                              ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                             ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>1,-3000</point>
         <point>33,-2600</point>
@@ -55,6 +57,8 @@
                                          ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                          ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                         ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                        ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -67,6 +71,8 @@
                                        ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                        ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                       ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                         ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -75,18 +81,22 @@
                                         ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                         ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                        ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEADSET"
-                                        ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+                                        ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_SPEAKER">
-        <point>1,-2970</point>
+        <point>0,-2970</point>
         <point>33,-2010</point>
         <point>66,-1020</point>
         <point>100,0</point>
     </volume>
     <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
-                                        ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+                                        ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
-                                        ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+                                        ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                        ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                                ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -99,6 +109,8 @@
                                                ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                                ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                               ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>0,-4200</point>
         <point>33,-2800</point>
@@ -119,6 +131,8 @@
     </volume>
     <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                                 ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                                ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>1,-3000</point>
         <point>33,-2600</point>
@@ -131,6 +145,8 @@
                                                    ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                                    ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                                   ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>1,-3000</point>
         <point>33,-2600</point>
@@ -143,6 +159,8 @@
                                        ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                        ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                       ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                       ref="SILENT_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -151,14 +169,18 @@
                                       ref="SILENT_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                       ref="SILENT_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                      ref="SILENT_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEADSET"
-                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+                                                ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_SPEAKER"
-                                                ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+                                                ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EARPIECE"
-                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+                                                ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
-                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+                                                ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                                ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                             ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -167,6 +189,8 @@
                                             ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                             ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                            ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEADSET"
                                         ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -175,5 +199,7 @@
                                         ref="FULL_SCALE_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                         ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+                                        ref="FULL_SCALE_VOLUME_CURVE"/>
 </volumes>
 
diff --git a/services/audiopolicy/config/default_volume_tables.xml b/services/audiopolicy/config/default_volume_tables.xml
index 9a22b1d..207be41 100644
--- a/services/audiopolicy/config/default_volume_tables.xml
+++ b/services/audiopolicy/config/default_volume_tables.xml
@@ -67,4 +67,63 @@
         <point>60,-2100</point>
         <point>100,-1000</point>
     </reference>
+    <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+    <!-- Default Hearing Aid Volume Curve -->
+        <point>1,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
+    <!-- **************************************************************** -->
+    <!-- Non-mutable default volume curves:                               -->
+    <!--     * first point is always for index 0                          -->
+    <!--     * attenuation is small enough that stream can still be heard -->
+    <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+    <!-- Default non-mutable reference Volume Curve -->
+    <!--        based on DEFAULT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+    <!-- Default non-mutable Headset Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+    <!-- Default non-mutable Speaker Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+    <!-- Default non-mutable Earpiece Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+    <!-- Default non-mutable Ext Media System Volume Curve -->
+    <!--     based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+    <!-- Default non-mutable Hearing Aid Volume Curve -->
+    <!--     based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+        <point>0,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
 </volumes>
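
For context on how these tables are consumed: each <point> pairs a volume index on the
0-100 scale with an attenuation in millibels, and the policy manager derives the gain for a
given index by interpolating between the two surrounding points. A minimal sketch of that
mapping, assuming simple linear interpolation (the CurvePoint and volIndexToMb names below
are illustrative, not the AOSP types):

    // Minimal sketch only: linear interpolation over <point> entries as written
    // in the XML above; not the actual VolumeCurve implementation.
    #include <vector>

    struct CurvePoint {
        int index;          // volume index on the 0-100 scale used by <point>
        int attenuationMb;  // attenuation in millibels, e.g. -12700 == -127 dB
    };

    // Clamp outside the first/last points, interpolate linearly in between.
    int volIndexToMb(const std::vector<CurvePoint>& curve, int volIndex) {
        if (curve.empty()) return 0;
        if (volIndex <= curve.front().index) return curve.front().attenuationMb;
        if (volIndex >= curve.back().index) return curve.back().attenuationMb;
        for (size_t i = 1; i < curve.size(); i++) {
            if (volIndex <= curve[i].index) {
                const CurvePoint& a = curve[i - 1];
                const CurvePoint& b = curve[i];
                return a.attenuationMb + (volIndex - a.index) *
                        (b.attenuationMb - a.attenuationMb) / (b.index - a.index);
            }
        }
        return curve.back().attenuationMb;
    }

On this model, the new DEFAULT_HEARING_AID_VOLUME_CURVE would map index 40 to roughly
-6000 mB (-60 dB), halfway between its 20 and 60 points.
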
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 92a2030..ee68900 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2141,6 +2141,7 @@
         }
         inputDesc->close();
     }
+    mInputRoutes.clear();
     mInputs.clear();
     SoundTrigger::setCaptureState(false);
     nextAudioPortGeneration();
@@ -3777,6 +3778,16 @@
         ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
         status = NO_INIT;
     }
+    // If a microphone's address is empty, set it according to the device type.
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        if (mAvailableInputDevices[i]->mAddress.isEmpty()) {
+            if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                mAvailableInputDevices[i]->mAddress = String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+            } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
+                mAvailableInputDevices[i]->mAddress = String8(AUDIO_BACK_MICROPHONE_ADDRESS);
+            }
+        }
+    }
 
     if (mPrimaryOutput == 0) {
         ALOGE("Failed to open primary output");
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 5d90408..082923a 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -505,91 +505,129 @@
             | ActivityManager::UID_OBSERVER_ACTIVE,
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("audioserver"));
+    status_t res = am.linkToDeath(this);
+    if (!res) {
+        Mutex::Autolock _l(mLock);
+        mObserverRegistered = true;
+    } else {
+        ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
+        am.unregisterUidObserver(this);
+    }
 }
 
 void AudioPolicyService::UidPolicy::unregisterSelf() {
     ActivityManager am;
+    am.unlinkToDeath(this);
     am.unregisterUidObserver(this);
+    Mutex::Autolock _l(mLock);
+    mObserverRegistered = false;
 }
 
-void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
-    onUidIdle(uid, disabled);
-}
-
-void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
-    {
-        Mutex::Autolock _l(mUidLock);
-        mActiveUids.insert(uid);
-    }
-    sp<AudioPolicyService> service = mService.promote();
-    if (service != nullptr) {
-        service->setRecordSilenced(uid, false);
-    }
-}
-
-void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
-    bool deleted = false;
-    {
-        Mutex::Autolock _l(mUidLock);
-        if (mActiveUids.erase(uid) > 0) {
-            deleted = true;
-        }
-    }
-    if (deleted) {
-        sp<AudioPolicyService> service = mService.promote();
-        if (service != nullptr) {
-            service->setRecordSilenced(uid, true);
-        }
-    }
-}
-
-void AudioPolicyService::UidPolicy::addOverrideUid(uid_t uid, bool active) {
-    updateOverrideUid(uid, active, true);
-}
-
-void AudioPolicyService::UidPolicy::removeOverrideUid(uid_t uid) {
-    updateOverrideUid(uid, false, false);
-}
-
-void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
-    bool wasActive = false;
-    bool isActive = false;
-    {
-        Mutex::Autolock _l(mUidLock);
-        wasActive = isUidActiveLocked(uid);
-        mOverrideUids.erase(uid);
-        if (insert) {
-            mOverrideUids.insert(std::pair<uid_t, bool>(uid, active));
-        }
-        isActive = isUidActiveLocked(uid);
-    }
-    if (wasActive != isActive) {
-        sp<AudioPolicyService> service = mService.promote();
-        if (service != nullptr) {
-            service->setRecordSilenced(uid, !isActive);
-        }
-    }
+void AudioPolicyService::UidPolicy::binderDied(__unused const wp<IBinder> &who) {
+    Mutex::Autolock _l(mLock);
+    mCachedUids.clear();
+    mObserverRegistered = false;
 }
 
 bool AudioPolicyService::UidPolicy::isUidActive(uid_t uid) {
-    // Non-app UIDs are considered always active
-    if (uid < FIRST_APPLICATION_UID) {
-        return true;
+    if (isServiceUid(uid)) return true;
+    bool needToReregister = false;
+    {
+        Mutex::Autolock _l(mLock);
+        needToReregister = !mObserverRegistered;
     }
-    Mutex::Autolock _l(mUidLock);
-    return isUidActiveLocked(uid);
+    if (needToReregister) {
+        // Looks like ActivityManager has died previously; attempt to re-register.
+        registerSelf();
+    }
+    {
+        Mutex::Autolock _l(mLock);
+        auto overrideIter = mOverrideUids.find(uid);
+        if (overrideIter != mOverrideUids.end()) {
+            return overrideIter->second;
+        }
+        // In the absence of the ActivityManager, assume everything is active.
+        if (!mObserverRegistered) return true;
+        auto cacheIter = mCachedUids.find(uid);
+        if (cacheIter != mCachedUids.end()) {
+            return cacheIter->second;
+        }
+    }
+    ActivityManager am;
+    bool active = am.isUidActive(uid, String16("audioserver"));
+    {
+        Mutex::Autolock _l(mLock);
+        mCachedUids.insert(std::pair<uid_t, bool>(uid, active));
+    }
+    return active;
 }
 
-bool AudioPolicyService::UidPolicy::isUidActiveLocked(uid_t uid) {
-    // Non-app UIDs are considered always active
-    if (uid < FIRST_APPLICATION_UID) {
-        return true;
+void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
+    updateUidCache(uid, true, true);
+}
+
+void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
+    updateUidCache(uid, false, false);
+}
+
+void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
+    updateUidCache(uid, false, true);
+}
+
+bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
+    return uid % AID_USER_OFFSET < AID_APP_START;
+}
+
+void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
+    sp<AudioPolicyService> service = mService.promote();
+    if (service != nullptr) {
+        service->setRecordSilenced(uid, !active);
     }
-    auto it = mOverrideUids.find(uid);
-    if (it != mOverrideUids.end()) {
-        return it->second;
+}
+
+void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
+    if (isServiceUid(uid)) return;
+    bool wasOverridden = false, wasActive = false;
+    {
+        Mutex::Autolock _l(mLock);
+        updateUidLocked(&mOverrideUids, uid, active, insert, &wasOverridden, &wasActive);
     }
-    return mActiveUids.find(uid) != mActiveUids.end();
+    if (!wasOverridden && insert) {
+        notifyService(uid, active);  // Started to override.
+    } else if (wasOverridden && !insert) {
+        notifyService(uid, isUidActive(uid));  // Override ceased; notify with the ground truth.
+    } else if (wasActive != active) {
+        notifyService(uid, active);  // Override updated.
+    }
+}
+
+void AudioPolicyService::UidPolicy::updateUidCache(uid_t uid, bool active, bool insert) {
+    if (isServiceUid(uid)) return;
+    bool wasActive = false;
+    {
+        Mutex::Autolock _l(mLock);
+        updateUidLocked(&mCachedUids, uid, active, insert, nullptr, &wasActive);
+        // Do not notify service if currently overridden.
+        if (mOverrideUids.find(uid) != mOverrideUids.end()) return;
+    }
+    bool nowActive = active && insert;
+    if (wasActive != nowActive) notifyService(uid, nowActive);
+}
+
+void AudioPolicyService::UidPolicy::updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+        uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive) {
+    auto it = uids->find(uid);
+    if (it != uids->end()) {
+        if (wasThere != nullptr) *wasThere = true;
+        if (wasActive != nullptr) *wasActive = it->second;
+        if (insert) {
+            it->second = active;
+        } else {
+            uids->erase(it);
+        }
+    } else if (insert) {
+        uids->insert(std::pair<uid_t, bool>(uid, active));
+    }
 }
 
 // -----------  AudioPolicyService::AudioCommandThread implementation ----------
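
The UidPolicy rework above boils down to a fixed lookup order in isUidActive(): an explicit
override wins, everything is treated as active while no ActivityManager observer is
registered, and otherwise the state cached from the observer callbacks is used, falling back
to a one-off query whose result is then cached. A compact sketch of that order, with
locking, re-registration, and Binder plumbing elided (queryAm stands in for
ActivityManager::isUidActive()):

    // Sketch of the lookup order only; locking and re-registration are elided.
    #include <functional>
    #include <sys/types.h>
    #include <unordered_map>

    struct UidActivityState {
        bool observerRegistered = false;
        std::unordered_map<uid_t, bool> overrides;  // mirrors mOverrideUids
        std::unordered_map<uid_t, bool> cache;      // mirrors mCachedUids

        bool isUidActive(uid_t uid, const std::function<bool(uid_t)>& queryAm) {
            auto o = overrides.find(uid);
            if (o != overrides.end()) return o->second;  // explicit override wins
            if (!observerRegistered) return true;        // no ActivityManager: assume active
            auto c = cache.find(uid);
            if (c != cache.end()) return c->second;      // state reported by the observer
            bool active = queryAm(uid);                  // one-off query, cached for next time
            cache.emplace(uid, active);
            return active;
        }
    };
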
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index bfa3ef4..b3bc12b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -35,7 +35,6 @@
 #include "managerdefault/AudioPolicyManager.h"
 
 #include <unordered_map>
-#include <unordered_set>
 
 namespace android {
 
@@ -264,31 +263,40 @@
     // transparently handles recording while the UID transitions between the idle and active
     // states, avoiding getting stuck in a state receiving non-empty buffers while idle or in
     // a state receiving empty buffers while active.
-    class UidPolicy : public BnUidObserver {
+    class UidPolicy : public BnUidObserver, public virtual IBinder::DeathRecipient {
     public:
         explicit UidPolicy(wp<AudioPolicyService> service)
-                : mService(service) {}
+                : mService(service), mObserverRegistered(false) {}
 
         void registerSelf();
         void unregisterSelf();
 
+        // IBinder::DeathRecipient implementation
+        void binderDied(const wp<IBinder> &who) override;
+
         bool isUidActive(uid_t uid);
 
-        void onUidGone(uid_t uid, bool disabled);
-        void onUidActive(uid_t uid);
-        void onUidIdle(uid_t uid, bool disabled);
+        // BnUidObserver implementation
+        void onUidActive(uid_t uid) override;
+        void onUidGone(uid_t uid, bool disabled) override;
+        void onUidIdle(uid_t uid, bool disabled) override;
 
-        void addOverrideUid(uid_t uid, bool active);
-        void removeOverrideUid(uid_t uid);
+        void addOverrideUid(uid_t uid, bool active) { updateOverrideUid(uid, active, true); }
+        void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
 
     private:
-        bool isUidActiveLocked(uid_t uid);
+        bool isServiceUid(uid_t uid) const;
+        void notifyService(uid_t uid, bool active);
         void updateOverrideUid(uid_t uid, bool active, bool insert);
+        void updateUidCache(uid_t uid, bool active, bool insert);
+        void updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+                uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive);
 
-        Mutex mUidLock;
         wp<AudioPolicyService> mService;
-        std::unordered_set<uid_t> mActiveUids;
+        Mutex mLock;
+        bool mObserverRegistered;
         std::unordered_map<uid_t, bool> mOverrideUids;
+        std::unordered_map<uid_t, bool> mCachedUids;
     };
 
     // Thread used for tone playback and to send audio config commands to audio flinger