Merge changes I92f232e9,I955b4ef3

* changes:
  Added cc_defaults for decoder fuzzers
  Added mpeg4_enc_fuzzer and h263_enc_fuzzer
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 1843ec4..ebc09d7 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -94,12 +94,12 @@
     // Do not distinguish null arrays from 0-sized arrays.
     for (int32_t i = 0; i < size; ++i) {
         // Parcel.writeParcelableArray
-        size_t len;
-        const char16_t* className = parcel->readString16Inplace(&len);
+        std::optional<std::string> className;
+        parcel->readUtf8FromUtf16(&className);
         ALOGV("%s: Read surface class = %s", __FUNCTION__,
-              className != NULL ? String8(className).string() : "<null>");
+              className.value_or("<null>").c_str());
 
-        if (className == NULL) {
+        if (className == std::nullopt) {
             continue;
         }
 
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index ba19565..1654b11 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -625,21 +625,19 @@
         Mutexed<Output>::Locked output(mOutput);
         if (!output->buffers ||
                 output->buffers->hasPending() ||
-                output->buffers->numClientBuffers() >= output->numSlots) {
+                output->buffers->numActiveSlots() >= output->numSlots) {
             return;
         }
     }
-    size_t numInputSlots = mInput.lock()->numSlots;
-    for (size_t i = 0; i < numInputSlots; ++i) {
-        if (mPipelineWatcher.lock()->pipelineFull()) {
-            return;
-        }
+    size_t numActiveSlots = 0;
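+    // Keep feeding input buffers while the pipeline has room and the input
+    // slot budget is not exhausted.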
+    while (!mPipelineWatcher.lock()->pipelineFull()) {
         sp<MediaCodecBuffer> inBuffer;
         size_t index;
         {
             Mutexed<Input>::Locked input(mInput);
-            if (input->buffers->numClientBuffers() >= input->numSlots) {
-                return;
+            numActiveSlots = input->buffers->numActiveSlots();
+            if (numActiveSlots >= input->numSlots) {
+                break;
             }
             if (!input->buffers->requestNewBuffer(&index, &inBuffer)) {
                 ALOGV("[%s] no new buffer available", mName);
@@ -649,6 +647,7 @@
         ALOGV("[%s] new input index = %zu [%p]", mName, index, inBuffer.get());
         mCallback->onInputBufferAvailable(index, inBuffer);
     }
+    ALOGV("[%s] # active slots after feedInputBufferIfAvailable = %zu", mName, numActiveSlots);
 }
 
 status_t CCodecBufferChannel::renderOutputBuffer(
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 10f7e66..689e3bb 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -489,11 +489,12 @@
     mBuffers.clear();
 }
 
-size_t FlexBuffersImpl::numClientBuffers() const {
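+// A slot counts as active while its buffer is held by the client or is still
+// referenced by the component (i.e. compBuffer has not expired).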
+size_t FlexBuffersImpl::numActiveSlots() const {
     return std::count_if(
             mBuffers.begin(), mBuffers.end(),
             [](const Entry &entry) {
-                return (entry.clientBuffer != nullptr);
+                return (entry.clientBuffer != nullptr
+                        || !entry.compBuffer.expired());
             });
 }
 
@@ -639,11 +640,11 @@
     }
 }
 
-size_t BuffersArrayImpl::numClientBuffers() const {
+size_t BuffersArrayImpl::numActiveSlots() const {
     return std::count_if(
             mBuffers.begin(), mBuffers.end(),
             [](const Entry &entry) {
-                return entry.ownedByClient;
+                return entry.ownedByClient || !entry.compBuffer.expired();
             });
 }
 
@@ -693,8 +694,8 @@
     mImpl.flush();
 }
 
-size_t InputBuffersArray::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t InputBuffersArray::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> InputBuffersArray::createNewBuffer() {
@@ -731,8 +732,8 @@
     return nullptr;
 }
 
-size_t SlotInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t SlotInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> SlotInputBuffers::createNewBuffer() {
@@ -783,8 +784,8 @@
     return std::move(array);
 }
 
-size_t LinearInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t LinearInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 // static
@@ -960,8 +961,8 @@
     return std::move(array);
 }
 
-size_t GraphicMetadataInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t GraphicMetadataInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> GraphicMetadataInputBuffers::createNewBuffer() {
@@ -1025,8 +1026,8 @@
     return std::move(array);
 }
 
-size_t GraphicInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t GraphicInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> GraphicInputBuffers::createNewBuffer() {
@@ -1115,8 +1116,8 @@
     mImpl.getArray(array);
 }
 
-size_t OutputBuffersArray::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t OutputBuffersArray::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 void OutputBuffersArray::realloc(const std::shared_ptr<C2Buffer> &c2buffer) {
@@ -1226,8 +1227,8 @@
     return array;
 }
 
-size_t FlexOutputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t FlexOutputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 // LinearOutputBuffers
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 0d4fa81..4772ab5 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -72,7 +72,7 @@
     /**
      * Return number of buffers the client owns.
      */
-    virtual size_t numClientBuffers() const = 0;
+    virtual size_t numActiveSlots() const = 0;
 
     /**
      * Examine image data from the buffer and update the format if necessary.
@@ -584,7 +584,7 @@
      * Return the number of buffers that are sent to the client but not released
      * yet.
      */
-    size_t numClientBuffers() const;
+    size_t numActiveSlots() const;
 
     /**
      * Return the number of buffers that are sent to the component but not
@@ -705,7 +705,7 @@
      * Return the number of buffers that are sent to the client but not released
      * yet.
      */
-    size_t numClientBuffers() const;
+    size_t numActiveSlots() const;
 
     /**
      * Return the size of the array.
@@ -765,7 +765,7 @@
 
     void flush() override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -796,7 +796,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() final;
@@ -826,7 +826,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -894,7 +894,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -924,7 +924,7 @@
     std::unique_ptr<InputBuffers> toArrayMode(
             size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -965,7 +965,7 @@
         array->clear();
     }
 
-    size_t numClientBuffers() const final {
+    size_t numActiveSlots() const final {
         return 0u;
     }
 
@@ -1019,7 +1019,7 @@
 
     void getArray(Vector<sp<MediaCodecBuffer>> *array) const final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
     /**
      * Reallocate the array, filled with buffers with the same size as given
@@ -1073,7 +1073,7 @@
 
     std::unique_ptr<OutputBuffersArray> toArrayMode(size_t size) override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
     /**
      * Return an appropriate Codec2Buffer object for the type of buffers.
diff --git a/media/libeffects/lvm/benchmarks/Android.bp b/media/libeffects/lvm/benchmarks/Android.bp
new file mode 100644
index 0000000..420e172
--- /dev/null
+++ b/media/libeffects/lvm/benchmarks/Android.bp
@@ -0,0 +1,16 @@
+cc_benchmark {
+    name: "lvm_benchmark",
+    vendor: true,
+    srcs: ["lvm_benchmark.cpp"],
+    static_libs: [
+        "libbundlewrapper",
+        "libmusicbundle",
+    ],
+    shared_libs: [
+        "libaudioutils",
+        "liblog",
+    ],
+    header_libs: [
+        "libhardware_headers",
+    ],
+}
diff --git a/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
new file mode 100644
index 0000000..ee9da3f
--- /dev/null
+++ b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <array>
+#include <climits>
+#include <cstdlib>
+#include <random>
+#include <vector>
+#include <log/log.h>
+#include <benchmark/benchmark.h>
+#include <hardware/audio_effect.h>
+#include <system/audio.h>
+
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+constexpr effect_uuid_t kEffectUuids[] = {
+        // NXP SW BassBoost
+        {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Virtualizer
+        {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Equalizer
+        {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Volume
+        {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+};
+
+constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+
+constexpr size_t kFrameCount = 2048;
+
+constexpr audio_channel_mask_t kChMasks[] = {
+        AUDIO_CHANNEL_OUT_MONO,    AUDIO_CHANNEL_OUT_STEREO,  AUDIO_CHANNEL_OUT_2POINT1,
+        AUDIO_CHANNEL_OUT_QUAD,    AUDIO_CHANNEL_OUT_PENTA,   AUDIO_CHANNEL_OUT_5POINT1,
+        AUDIO_CHANNEL_OUT_6POINT1, AUDIO_CHANNEL_OUT_7POINT1,
+};
+
+constexpr size_t kNumChMasks = std::size(kChMasks);
+constexpr int kSampleRate = 44100;
+// TODO(b/131240940) Remove once effects are updated to produce mono output
+constexpr size_t kMinOutputChannelCount = 2;
+
+/*******************************************************************
+ * Reference results measured on a Pixel 3.
+ * The first parameter indicates the number of channels.
+ * The second parameter indicates the effect.
+ * 0: Bass Boost, 1: Virtualizer, 2: Equalizer, 3: Volume
+ * -----------------------------------------------------
+ * Benchmark           Time             CPU   Iterations
+ * -----------------------------------------------------
+ * BM_LVM/2/0     131279 ns       130855 ns         5195
+ * BM_LVM/2/1     184814 ns       184219 ns         3799
+ * BM_LVM/2/2      91935 ns        91649 ns         7647
+ * BM_LVM/2/3      26707 ns        26623 ns        26281
+ * BM_LVM/3/0     172130 ns       171562 ns         4085
+ * BM_LVM/3/1     192443 ns       191923 ns         3644
+ * BM_LVM/3/2     127444 ns       127107 ns         5483
+ * BM_LVM/3/3      26811 ns        26730 ns        26163
+ * BM_LVM/4/0     223688 ns       223076 ns         3133
+ * BM_LVM/4/1     204961 ns       204408 ns         3425
+ * BM_LVM/4/2     169162 ns       168708 ns         4143
+ * BM_LVM/4/3      37330 ns        37225 ns        18795
+ * BM_LVM/5/0     272628 ns       271668 ns         2568
+ * BM_LVM/5/1     218487 ns       217883 ns         3212
+ * BM_LVM/5/2     211049 ns       210479 ns         3324
+ * BM_LVM/5/3      46962 ns        46835 ns        15051
+ * BM_LVM/6/0     318881 ns       317734 ns         2216
+ * BM_LVM/6/1     231899 ns       231244 ns         3028
+ * BM_LVM/6/2     252655 ns       251963 ns         2771
+ * BM_LVM/6/3      54944 ns        54794 ns        12799
+ * BM_LVM/7/0     366622 ns       365262 ns         1916
+ * BM_LVM/7/1     245076 ns       244388 ns         2866
+ * BM_LVM/7/2     295105 ns       294304 ns         2379
+ * BM_LVM/7/3      63595 ns        63420 ns        11070
+ * BM_LVM/8/0     410957 ns       409387 ns         1706
+ * BM_LVM/8/1     257824 ns       257098 ns         2723
+ * BM_LVM/8/2     342546 ns       341530 ns         2059
+ * BM_LVM/8/3      72896 ns        72700 ns         9685
+ *******************************************************************/
+
+static void BM_LVM(benchmark::State& state) {
+    const size_t chMask = kChMasks[state.range(0) - 1];
+    const effect_uuid_t uuid = kEffectUuids[state.range(1)];
+    const size_t channelCount = audio_channel_count_from_out_mask(chMask);
+
+    // Initialize input buffer with deterministic pseudo-random values
+    std::minstd_rand gen(chMask);
+    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::vector<float> input(kFrameCount * channelCount);
+    for (auto& in : input) {
+        in = dis(gen);
+    }
+
+    effect_handle_t effectHandle = nullptr;
+    if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(&uuid, 1, 1, &effectHandle);
+        status != 0) {
+        ALOGE("create_effect returned an error = %d\n", status);
+        return;
+    }
+
+    effect_config_t config{};
+    config.inputCfg.samplingRate = config.outputCfg.samplingRate = kSampleRate;
+    config.inputCfg.channels = config.outputCfg.channels = chMask;
+    config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+
+    int reply = 0;
+    uint32_t replySize = sizeof(reply);
+    if (int status = (*effectHandle)
+                             ->command(effectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t),
+                                       &config, &replySize, &reply);
+        status != 0) {
+        ALOGE("command returned an error = %d\n", status);
+        return;
+    }
+
+    if (int status =
+                (*effectHandle)
+                        ->command(effectHandle, EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+        status != 0) {
+        ALOGE("Command enable call returned error %d\n", reply);
+        return;
+    }
+
+    // Run the test
+    for (auto _ : state) {
+        std::vector<float> output(kFrameCount * std::max(channelCount, kMinOutputChannelCount));
+
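+        // DoNotOptimize()/ClobberMemory() mark the buffers as observed so the
+        // compiler cannot elide the per-iteration work on them.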
+        benchmark::DoNotOptimize(input.data());
+        benchmark::DoNotOptimize(output.data());
+
+        audio_buffer_t inBuffer = {.frameCount = kFrameCount, .f32 = input.data()};
+        audio_buffer_t outBuffer = {.frameCount = kFrameCount, .f32 = output.data()};
+        (*effectHandle)->process(effectHandle, &inBuffer, &outBuffer);
+
+        benchmark::ClobberMemory();
+    }
+
+    state.SetComplexityN(state.range(0));
+
+    if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
+        ALOGE("release_effect returned an error = %d\n", status);
+        return;
+    }
+}
+
+static void LVMArgs(benchmark::internal::Benchmark* b) {
+    // TODO(b/131240940) Test single channel once effects are updated to process mono data
+    for (int i = 2; i <= kNumChMasks; i++) {
+        for (int j = 0; j < kNumEffectUuids; ++j) {
+            b->Args({i, j});
+        }
+    }
+}
+
+BENCHMARK(BM_LVM)->Apply(LVMArgs);
+
+BENCHMARK_MAIN();
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index f08caec..be60aae 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -1,5 +1,5 @@
 // music bundle wrapper
-cc_library_shared {
+cc_library {
     name: "libbundlewrapper",
 
     arch: {
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 16cd0ad..5217cf9 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,6 +1,6 @@
 // audio preprocessing wrapper
 cc_library_shared {
-    name: "libaudiopreprocessing",
+    name: "libaudiopreprocessing_legacy",
 
     vendor: true,
 
@@ -17,6 +17,7 @@
 
     cflags: [
         "-DWEBRTC_POSIX",
+        "-DWEBRTC_LEGACY",
         "-fvisibility=hidden",
         "-Wall",
         "-Werror",
@@ -27,3 +28,34 @@
         "libhardware_headers",
     ],
 }
+
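+// Updated (non-legacy) build of the preprocessing library, compiled without
+// -DWEBRTC_LEGACY and linked against the newer webrtc_audio_processing.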
+cc_library_shared {
+    name: "libaudiopreprocessing",
+    vendor: true,
+    relative_install_path: "soundfx",
+    srcs: ["PreProcessing.cpp"],
+    local_include_dirs: [
+        ".",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+        "-Wno-unused-parameter",
+    ],
+
+    shared_libs: [
+        "liblog",
+        "libutils",
+    ],
+
+    static_libs: [
+        "webrtc_audio_processing",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+        "libhardware_headers",
+        "libwebrtc_absl_headers",
+    ],
+}
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index c7afe2f..f2f74a5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -23,10 +23,15 @@
 #include <hardware/audio_effect.h>
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
+#ifndef WEBRTC_LEGACY
+#include <audio_effects/effect_agc2.h>
+#endif
 #include <audio_effects/effect_ns.h>
 #include <module_common_types.h>
 #include <audio_processing.h>
+#ifdef WEBRTC_LEGACY
 #include "speex/speex_resampler.h"
+#endif
 
 // undefine to perform multi channels API functional tests
 //#define DUAL_MIC_TEST
@@ -42,6 +47,9 @@
 enum preproc_id
 {
     PREPROC_AGC,        // Automatic Gain Control
+#ifndef WEBRTC_LEGACY
+    PREPROC_AGC2,       // Automatic Gain Control 2
+#endif
     PREPROC_AEC,        // Acoustic Echo Canceler
     PREPROC_NS,         // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -103,6 +111,10 @@
     int id;                             // audio session ID
     int io;                             // handle of input stream this session is on
     webrtc::AudioProcessing* apm;       // handle on webRTC audio processing module (APM)
+#ifndef WEBRTC_LEGACY
+    // Audio Processing module builder
+    webrtc::AudioProcessingBuilder ap_builder;
+#endif
     size_t apmFrameCount;               // buffer size for webRTC process (10 ms)
     uint32_t apmSamplingRate;           // webRTC APM sampling rate (8/16 or 32 kHz)
     size_t frameCount;                  // buffer size before input resampler ( <=> apmFrameCount)
@@ -113,25 +125,42 @@
     uint32_t enabledMsk;                // bit field containing IDs of enabled pre processors
     uint32_t processedMsk;              // bit field containing IDs of pre processors already
                                         // processed in current round
+#ifdef WEBRTC_LEGACY
     webrtc::AudioFrame *procFrame;      // audio frame passed to webRTC AMP ProcessStream()
+#else
+    // audio config structure
+    webrtc::AudioProcessing::Config config;
+    webrtc::StreamConfig inputConfig;   // input stream configuration
+    webrtc::StreamConfig outputConfig;  // output stream configuration
+#endif
     int16_t *inBuf;                     // input buffer used when resampling
     size_t inBufSize;                   // input buffer size in frames
     size_t framesIn;                    // number of frames in input buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *inResampler;   // handle on input speex resampler
+#endif
     int16_t *outBuf;                    // output buffer used when resampling
     size_t outBufSize;                  // output buffer size in frames
     size_t framesOut;                   // number of frames in output buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *outResampler;  // handle on output speex resampler
+#endif
     uint32_t revChannelCount;           // number of channels on reverse stream
     uint32_t revEnabledMsk;             // bit field containing IDs of enabled pre processors
                                         // with reverse channel
     uint32_t revProcessedMsk;           // bit field containing IDs of pre processors with reverse
                                         // channel already processed in current round
+#ifdef WEBRTC_LEGACY
     webrtc::AudioFrame *revFrame;       // audio frame passed to webRTC AMP AnalyzeReverseStream()
+#else
+    webrtc::StreamConfig revConfig;     // reverse stream configuration.
+#endif
     int16_t *revBuf;                    // reverse channel input buffer
     size_t revBufSize;                  // reverse channel input buffer size
     size_t framesRev;                   // number of frames in reverse channel input buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *revResampler;  // handle on reverse channel input speex resampler
+#endif
 };
 
 #ifdef DUAL_MIC_TEST
@@ -188,6 +217,20 @@
         "The Android Open Source Project"
 };
 
+#ifndef WEBRTC_LEGACY
+// Automatic Gain Control 2
+static const effect_descriptor_t sAgc2Descriptor = {
+        { 0xae3c653b, 0xbe18, 0x4ab8, 0x8938, { 0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac } }, // type
+        { 0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, { 0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86 } }, // uuid
+        EFFECT_CONTROL_API_VERSION,
+        (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
+        0, //FIXME indicate CPU load
+        0, //FIXME indicate memory usage
+        "Automatic Gain Control 2",
+        "The Android Open Source Project"
+};
+#endif
+
 // Acoustic Echo Cancellation
 static const effect_descriptor_t sAecDescriptor = {
         { 0x7b491460, 0x8d4d, 0x11e0, 0xbd61, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
@@ -215,6 +258,9 @@
 
 static const effect_descriptor_t *sDescriptors[PREPROC_NUM_EFFECTS] = {
         &sAgcDescriptor,
+#ifndef WEBRTC_LEGACY
+        &sAgc2Descriptor,
+#endif
         &sAecDescriptor,
         &sNsDescriptor
 };
@@ -225,6 +271,9 @@
 
 const effect_uuid_t * const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {
         FX_IID_AGC,
+#ifndef WEBRTC_LEGACY
+        FX_IID_AGC2,
+#endif
         FX_IID_AEC,
         FX_IID_NS
 };
@@ -266,19 +315,50 @@
 static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
+#ifndef WEBRTC_LEGACY
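+// With the updated APM, effects are configured by fetching the current
+// webrtc::AudioProcessing::Config, modifying it, and applying it back with
+// ApplyConfig(), rather than through per-component handles such as GainControl.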
+int  Agc2Init (preproc_effect_t *effect)
+{
+    ALOGV("Agc2Init");
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.fixed_digital.gain_db = 0.f;
+    effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+        effect->session->config.gain_controller2.kRms;
+    effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db = 2.f;
+    effect->session->apm->ApplyConfig(effect->session->config);
+    return 0;
+}
+#endif
+
 int  AgcInit (preproc_effect_t *effect)
 {
     ALOGV("AgcInit");
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     agc->set_mode(webrtc::GainControl::kFixedDigital);
     agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
     agc->set_compression_gain_db(kAgcDefaultCompGain);
     agc->enable_limiter(kAgcDefaultLimiter);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
+    effect->session->config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
+    effect->session->config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     return 0;
 }
 
+#ifndef WEBRTC_LEGACY
+int  Agc2Create(preproc_effect_t *effect)
+{
+    Agc2Init(effect);
+    return 0;
+}
+#endif
+
 int  AgcCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = effect->session->apm->gain_control();
     ALOGV("AgcCreate got agc %p", agc);
     if (agc == NULL) {
@@ -286,10 +366,93 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(agc);
+#endif
     AgcInit(effect);
     return 0;
 }
 
+#ifndef WEBRTC_LEGACY
+int Agc2GetParameter(preproc_effect_t *effect,
+                    void *pParam,
+                    uint32_t *pValueSize,
+                    void *pValue)
+{
+    int status = 0;
+    uint32_t param = *(uint32_t *)pParam;
+    agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        if (*pValueSize < sizeof(float)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        if (*pValueSize < sizeof(int32_t)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        if (*pValueSize < sizeof(float)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        if (*pValueSize < sizeof(agc2_settings_t)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+
+    default:
+        ALOGW("Agc2GetParameter() unknown param %08x", param);
+        status = -EINVAL;
+        break;
+    }
+
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        *(float *) pValue =
+                (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+        ALOGV("Agc2GetParameter() target level %f dB", *(float *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        *(uint32_t *) pValue =
+                (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
+                level_estimator);
+        ALOGV("Agc2GetParameter() level estimator %d",
+                *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        *(float *) pValue =
+                (float)(effect->session->config.gain_controller2.adaptive_digital.
+                extra_saturation_margin_db);
+        ALOGV("Agc2GetParameter() extra saturation margin %f dB", *(float *) pValue);
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        pProperties->fixedDigitalGain =
+                (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+        pProperties->level_estimator =
+                (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
+                level_estimator);
+        pProperties->extraSaturationMargin =
+                (float)(effect->session->config.gain_controller2.adaptive_digital.
+                extra_saturation_margin_db);
+        break;
+    default:
+        ALOGW("Agc2GetParameter() unknown param %d", param);
+        status = -EINVAL;
+        break;
+    }
+
+    return status;
+}
+#endif
+
 int AgcGetParameter(preproc_effect_t *effect,
                     void *pParam,
                     uint32_t *pValueSize,
@@ -298,7 +461,9 @@
     int status = 0;
     uint32_t param = *(uint32_t *)pParam;
     t_agc_settings *pProperties = (t_agc_settings *)pValue;
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+#endif
 
     switch (param) {
     case AGC_PARAM_TARGET_LEVEL:
@@ -327,6 +492,7 @@
         break;
     }
 
+#ifdef WEBRTC_LEGACY
     switch (param) {
     case AGC_PARAM_TARGET_LEVEL:
         *(int16_t *) pValue = (int16_t)(agc->target_level_dbfs() * -100);
@@ -351,12 +517,98 @@
         status = -EINVAL;
         break;
     }
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC_PARAM_TARGET_LEVEL:
+        *(int16_t *) pValue =
+                (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+        ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t *) pValue);
+        break;
+    case AGC_PARAM_COMP_GAIN:
+        *(int16_t *) pValue =
+                (int16_t)(effect->session->config.gain_controller1.compression_gain_db * 100);
+        ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t *) pValue);
+        break;
+    case AGC_PARAM_LIMITER_ENA:
+        *(bool *) pValue =
+                (bool)(effect->session->config.gain_controller1.enable_limiter);
+        ALOGV("AgcGetParameter() limiter enabled %s",
+                (*(int16_t *) pValue != 0) ? "true" : "false");
+        break;
+    case AGC_PARAM_PROPERTIES:
+        pProperties->targetLevel =
+                (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+        pProperties->compGain =
+                (int16_t)(effect->session->config.gain_controller1.compression_gain_db * 100);
+        pProperties->limiterEnabled =
+                (bool)(effect->session->config.gain_controller1.enable_limiter);
+        break;
+    default:
+        ALOGW("AgcGetParameter() unknown param %d", param);
+        status = -EINVAL;
+        break;
+    }
+#endif
     return status;
 }
 
+#ifndef WEBRTC_LEGACY
+int Agc2SetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
+{
+    int status = 0;
+    uint32_t param = *(uint32_t *)pParam;
+    float valueFloat = 0.f;
+    agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        valueFloat = (float)(*(int32_t *) pValue);
+        ALOGV("Agc2SetParameter() fixed digital gain %f dB", valueFloat);
+        effect->session->config.gain_controller2.fixed_digital.gain_db = valueFloat;
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        ALOGV("Agc2SetParameter() level estimator %d", *(webrtc::AudioProcessing::Config::
+                GainController2::LevelEstimator *) pValue);
+        effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+                (*(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        valueFloat = (float)(*(int32_t *) pValue);
+        ALOGV("Agc2SetParameter() extra saturation margin %f dB", valueFloat);
+        effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+                valueFloat;
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        ALOGV("Agc2SetParameter() properties gain %f, level %d margin %f",
+                pProperties->fixedDigitalGain,
+                pProperties->level_estimator,
+                pProperties->extraSaturationMargin);
+        effect->session->config.gain_controller2.fixed_digital.gain_db =
+                pProperties->fixedDigitalGain;
+        effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+                (webrtc::AudioProcessing::Config::GainController2::LevelEstimator)pProperties->
+                level_estimator;
+        effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+                pProperties->extraSaturationMargin;
+        break;
+    default:
+        ALOGW("Agc2SetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+        status = -EINVAL;
+        break;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+
+    ALOGV("Agc2SetParameter() done status %d", status);
+
+    return status;
+}
+#endif
+
 int AgcSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
 {
     int status = 0;
+#ifdef WEBRTC_LEGACY
     uint32_t param = *(uint32_t *)pParam;
     t_agc_settings *pProperties = (t_agc_settings *)pValue;
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
@@ -390,27 +642,95 @@
         status = -EINVAL;
         break;
     }
+#else
+    uint32_t param = *(uint32_t *)pParam;
+    t_agc_settings *pProperties = (t_agc_settings *)pValue;
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC_PARAM_TARGET_LEVEL:
+        ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t *)pValue);
+        effect->session->config.gain_controller1.target_level_dbfs =
+             (-(*(int16_t *)pValue / 100));
+        break;
+    case AGC_PARAM_COMP_GAIN:
+        ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t *)pValue);
+        effect->session->config.gain_controller1.compression_gain_db =
+             (*(int16_t *)pValue / 100);
+        break;
+    case AGC_PARAM_LIMITER_ENA:
+        ALOGV("AgcSetParameter() limiter enabled %s", *(bool *)pValue ? "true" : "false");
+        effect->session->config.gain_controller1.enable_limiter =
+             (*(bool *)pValue);
+        break;
+    case AGC_PARAM_PROPERTIES:
+        ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
+              pProperties->targetLevel,
+              pProperties->compGain,
+              pProperties->limiterEnabled);
+        effect->session->config.gain_controller1.target_level_dbfs =
+              -(pProperties->targetLevel / 100);
+        effect->session->config.gain_controller1.compression_gain_db =
+              pProperties->compGain / 100;
+        effect->session->config.gain_controller1.enable_limiter =
+              pProperties->limiterEnabled;
+        break;
+    default:
+        ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+        status = -EINVAL;
+        break;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 
     ALOGV("AgcSetParameter() done status %d", status);
 
     return status;
 }
 
+#ifndef WEBRTC_LEGACY
+void Agc2Enable(preproc_effect_t *effect)
+{
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+}
+#endif
+
 void AgcEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     ALOGV("AgcEnable agc %p", agc);
     agc->Enable(true);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
+#ifndef WEBRTC_LEGACY
+void Agc2Disable(preproc_effect_t *effect)
+{
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+}
+#endif
+
 void AgcDisable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     ALOGV("AgcDisable");
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     agc->Enable(false);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
-
 static const preproc_ops_t sAgcOps = {
         AgcCreate,
         AgcInit,
@@ -422,26 +742,48 @@
         NULL
 };
 
+#ifndef WEBRTC_LEGACY
+static const preproc_ops_t sAgc2Ops = {
+        Agc2Create,
+        Agc2Init,
+        NULL,
+        Agc2Enable,
+        Agc2Disable,
+        Agc2SetParameter,
+        Agc2GetParameter,
+        NULL
+};
+#endif
 
 //------------------------------------------------------------------------------
 // Acoustic Echo Canceler (AEC)
 //------------------------------------------------------------------------------
 
+#ifdef WEBRTC_LEGACY
 static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
         webrtc::EchoControlMobile::kEarpiece;
 static const bool kAecDefaultComfortNoise = true;
+#endif
 
 int  AecInit (preproc_effect_t *effect)
 {
     ALOGV("AecInit");
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     aec->set_routing_mode(kAecDefaultMode);
     aec->enable_comfort_noise(kAecDefaultComfortNoise);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.mobile_mode = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     return 0;
 }
 
 int  AecCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = effect->session->apm->echo_control_mobile();
     ALOGV("AecCreate got aec %p", aec);
     if (aec == NULL) {
@@ -449,6 +791,7 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(aec);
+#endif
     AecInit (effect);
     return 0;
 }
@@ -470,6 +813,14 @@
         *(uint32_t *)pValue = 1000 * effect->session->apm->stream_delay_ms();
         ALOGV("AecGetParameter() echo delay %d us", *(uint32_t *)pValue);
         break;
+#ifndef WEBRTC_LEGACY
+    case AEC_PARAM_MOBILE_MODE:
+        effect->session->config =
+            effect->session->apm->GetConfig();
+        *(uint32_t *)pValue = effect->session->config.echo_canceller.mobile_mode;
+        ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t *)pValue);
+        break;
+#endif
     default:
         ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
         status = -EINVAL;
@@ -490,6 +841,15 @@
         status = effect->session->apm->set_stream_delay_ms(value/1000);
         ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
         break;
+#ifndef WEBRTC_LEGACY
+    case AEC_PARAM_MOBILE_MODE:
+        effect->session->config =
+            effect->session->apm->GetConfig();
+        effect->session->config.echo_canceller.mobile_mode = value;
+        ALOGV("AecSetParameter() mobile mode %d us", value);
+        effect->session->apm->ApplyConfig(effect->session->config);
+        break;
+#endif
     default:
         ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
         status = -EINVAL;
@@ -500,28 +860,43 @@
 
 void AecEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     ALOGV("AecEnable aec %p", aec);
     aec->Enable(true);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 void AecDisable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     ALOGV("AecDisable");
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     aec->Enable(false);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 int AecSetDevice(preproc_effect_t *effect, uint32_t device)
 {
     ALOGV("AecSetDevice %08x", device);
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     webrtc::EchoControlMobile::RoutingMode mode = webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
+#endif
 
     if (audio_is_input_device(device)) {
         return 0;
     }
 
+#ifdef WEBRTC_LEGACY
     switch(device) {
     case AUDIO_DEVICE_OUT_EARPIECE:
         mode = webrtc::EchoControlMobile::kEarpiece;
@@ -536,6 +911,7 @@
         break;
     }
     aec->set_routing_mode(mode);
+#endif
     return 0;
 }
 
@@ -554,11 +930,17 @@
 // Noise Suppression (NS)
 //------------------------------------------------------------------------------
 
+#ifdef WEBRTC_LEGACY
 static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
+#else
+static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
+                webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+#endif
 
 int  NsInit (preproc_effect_t *effect)
 {
     ALOGV("NsInit");
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->set_level(kNsDefaultLevel);
     webrtc::Config config;
@@ -575,12 +957,20 @@
     config.Set<webrtc::Beamforming>(
             new webrtc::Beamforming(false, geometry));
     effect->session->apm->SetExtraOptions(config);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.level =
+        kNsDefaultLevel;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
 int  NsCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = effect->session->apm->noise_suppression();
     ALOGV("NsCreate got ns %p", ns);
     if (ns == NULL) {
@@ -588,6 +978,7 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(ns);
+#endif
     NsInit (effect);
     return 0;
 }
@@ -604,6 +995,7 @@
 int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
 {
     int status = 0;
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     uint32_t param = *(uint32_t *)pParam;
     uint32_t value = *(uint32_t *)pValue;
@@ -629,12 +1021,30 @@
             ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
             status = -EINVAL;
     }
+#else
+    uint32_t param = *(uint32_t *)pParam;
+    uint32_t value = *(uint32_t *)pValue;
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    switch (param) {
+        case NS_PARAM_LEVEL:
+            effect->session->config.noise_suppression.level =
+               (webrtc::AudioProcessing::Config::NoiseSuppression::Level)value;
+            ALOGV("NsSetParameter() level %d", value);
+            break;
+        default:
+            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
+            status = -EINVAL;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 
     return status;
 }
 
 void NsEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ALOGV("NsEnable ns %p", ns);
     ns->Enable(true);
@@ -644,17 +1054,30 @@
         config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
         effect->session->apm->SetExtraOptions(config);
     }
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 void NsDisable(preproc_effect_t *effect)
 {
     ALOGV("NsDisable");
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->Enable(false);
     webrtc::Config config;
     std::vector<webrtc::Point> geometry;
     config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
     effect->session->apm->SetExtraOptions(config);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 static const preproc_ops_t sNsOps = {
@@ -669,8 +1092,12 @@
 };
 
 
+
 static const preproc_ops_t *sPreProcOps[PREPROC_NUM_EFFECTS] = {
         &sAgcOps,
+#ifndef WEBRTC_LEGACY
+        &sAgc2Ops,
+#endif
         &sAecOps,
         &sNsOps
 };
@@ -812,7 +1239,9 @@
     session->id = 0;
     session->io = 0;
     session->createdMsk = 0;
+#ifdef WEBRTC_LEGACY
     session->apm = NULL;
+#endif
     for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
         status = Effect_Init(&session->effects[i], i);
     }
@@ -829,6 +1258,7 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         session->apm = webrtc::AudioProcessing::Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
@@ -850,28 +1280,53 @@
             ALOGW("Session_CreateEffect could not allocate reverse audio frame");
             goto error;
         }
+#else
+        session->apm = session->ap_builder.Create();
+        if (session->apm == NULL) {
+            ALOGW("Session_CreateEffect could not get apm engine");
+            goto error;
+        }
+#endif
         session->apmSamplingRate = kPreprocDefaultSr;
         session->apmFrameCount = (kPreprocDefaultSr) / 100;
         session->frameCount = session->apmFrameCount;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
+#ifdef WEBRTC_LEGACY
         session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
         session->procFrame->num_channels_ = kPreProcDefaultCnl;
+#else
+        session->inputConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->inputConfig.set_num_channels(kPreProcDefaultCnl);
+        session->outputConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->outputConfig.set_num_channels(kPreProcDefaultCnl);
+#endif
         session->revChannelCount = kPreProcDefaultCnl;
+#ifdef WEBRTC_LEGACY
         session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
         session->revFrame->num_channels_ = kPreProcDefaultCnl;
+#else
+        session->revConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->revConfig.set_num_channels(kPreProcDefaultCnl);
+#endif
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
+#ifdef WEBRTC_LEGACY
         session->inResampler = NULL;
+#endif
         session->inBuf = NULL;
         session->inBufSize = 0;
+#ifdef WEBRTC_LEGACY
         session->outResampler = NULL;
+#endif
         session->outBuf = NULL;
         session->outBufSize = 0;
+#ifdef WEBRTC_LEGACY
         session->revResampler = NULL;
+#endif
         session->revBuf = NULL;
         session->revBufSize = 0;
     }
@@ -885,12 +1340,17 @@
 
 error:
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         delete session->revFrame;
         session->revFrame = NULL;
         delete session->procFrame;
         session->procFrame = NULL;
         delete session->apm;
         session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
+#else
+        delete session->apm;
+        session->apm = NULL;
+#endif
     }
     return status;
 }
@@ -901,6 +1361,7 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1<<fx->procId);
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         delete session->apm;
         session->apm = NULL;
         delete session->procFrame;
@@ -919,6 +1380,10 @@
             speex_resampler_destroy(session->revResampler);
             session->revResampler = NULL;
         }
+#else
+        delete session->apm;
+        session->apm = NULL;
+#endif
         delete session->inBuf;
         session->inBuf = NULL;
         delete session->outBuf;
@@ -946,7 +1411,9 @@
 
     ALOGV("Session_SetConfig sr %d cnl %08x",
          config->inputCfg.samplingRate, config->inputCfg.channels);
+#ifdef WEBRTC_LEGACY
     int status;
+#endif
 
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
@@ -958,6 +1425,7 @@
         session->apmSamplingRate = 8000;
     }
 
+#ifdef WEBRTC_LEGACY
     const webrtc::ProcessingConfig processing_config = {
       {{static_cast<int>(session->apmSamplingRate), inCnl},
        {static_cast<int>(session->apmSamplingRate), outCnl},
@@ -967,23 +1435,41 @@
     if (status < 0) {
         return -EINVAL;
     }
+#endif
 
     session->samplingRate = config->inputCfg.samplingRate;
     session->apmFrameCount = session->apmSamplingRate / 100;
     if (session->samplingRate == session->apmSamplingRate) {
         session->frameCount = session->apmFrameCount;
     } else {
+#ifdef WEBRTC_LEGACY
         session->frameCount = (session->apmFrameCount * session->samplingRate) /
                 session->apmSamplingRate  + 1;
+#else
+        session->frameCount = (session->apmFrameCount * session->samplingRate) /
+                session->apmSamplingRate;
+#endif
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
+#ifdef WEBRTC_LEGACY
     session->procFrame->num_channels_ = inCnl;
     session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
+#else
+    session->inputConfig.set_sample_rate_hz(session->samplingRate);
+    session->inputConfig.set_num_channels(inCnl);
+    session->outputConfig.set_sample_rate_hz(session->samplingRate);
+    session->outputConfig.set_num_channels(inCnl);
+#endif
 
     session->revChannelCount = inCnl;
+#ifdef WEBRTC_LEGACY
     session->revFrame->num_channels_ = inCnl;
     session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
+#else
+    session->revConfig.set_sample_rate_hz(session->samplingRate);
+    session->revConfig.set_num_channels(inCnl);
+#endif
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -992,6 +1478,7 @@
     session->framesOut = 0;
 
 
+#ifdef WEBRTC_LEGACY
     if (session->inResampler != NULL) {
         speex_resampler_destroy(session->inResampler);
         session->inResampler = NULL;
@@ -1043,6 +1530,7 @@
             return -EINVAL;
         }
     }
+#endif
 
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
@@ -1079,6 +1567,7 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
+#ifdef WEBRTC_LEGACY
     const webrtc::ProcessingConfig processing_config = {
        {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
         {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
@@ -1088,9 +1577,12 @@
     if (status < 0) {
         return -EINVAL;
     }
+#endif
     session->revChannelCount = inCnl;
+#ifdef WEBRTC_LEGACY
     session->revFrame->num_channels_ = inCnl;
     session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
+#endif
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1114,6 +1606,7 @@
     if (enabled) {
         if(session->enabledMsk == 0) {
             session->framesIn = 0;
+#ifdef WEBRTC_LEGACY
             if (session->inResampler != NULL) {
                 speex_resampler_reset_mem(session->inResampler);
             }
@@ -1121,13 +1614,16 @@
             if (session->outResampler != NULL) {
                 speex_resampler_reset_mem(session->outResampler);
             }
+#endif
         }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
             session->framesRev = 0;
+#ifdef WEBRTC_LEGACY
             if (session->revResampler != NULL) {
                 speex_resampler_reset_mem(session->revResampler);
             }
+#endif
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1252,6 +1748,7 @@
             return 0;
         }
 
+#ifdef WEBRTC_LEGACY
         if (session->inResampler != NULL) {
             size_t fr = session->frameCount - session->framesIn;
             if (inBuffer->frameCount < fr) {
@@ -1335,6 +1832,28 @@
         session->procFrame->samples_per_channel_ = session->apmFrameCount;
 
         effect->session->apm->ProcessStream(session->procFrame);
+#else
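+        // Non-legacy path: the APM consumes interleaved int16_t data with explicit
+        // StreamConfigs, so the speex resampling stage used above is not needed.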
+        size_t fr = session->frameCount - session->framesIn;
+        if (inBuffer->frameCount < fr) {
+            fr = inBuffer->frameCount;
+        }
+        session->framesIn += fr;
+        inBuffer->frameCount = fr;
+        if (session->framesIn < session->frameCount) {
+            return 0;
+        }
+        session->framesIn = 0;
+        if (int status = effect->session->apm->ProcessStream(
+                                    (const int16_t* const)inBuffer->s16,
+                                    (const webrtc::StreamConfig)effect->session->inputConfig,
+                                    (const webrtc::StreamConfig)effect->session->outputConfig,
+                                    (int16_t* const)outBuffer->s16);
+             status != 0) {
+            ALOGE("Process Stream failed with error %d\n", status);
+            return status;
+        }
+        outBuffer->frameCount = inBuffer->frameCount;
+#endif
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
             int16_t *buf;
@@ -1350,6 +1869,7 @@
             session->outBuf = buf;
         }
 
+#ifdef WEBRTC_LEGACY
         if (session->outResampler != NULL) {
             spx_uint32_t frIn = session->apmFrameCount;
             spx_uint32_t frOut = session->frameCount;
@@ -1375,6 +1895,9 @@
             session->framesOut += session->frameCount;
         }
         size_t fr = session->framesOut;
+#else
+        fr = session->framesOut;
+#endif
         if (framesRq - framesWr < fr) {
             fr = framesRq - framesWr;
         }
@@ -1794,6 +2317,7 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
+#ifdef WEBRTC_LEGACY
         if (session->revResampler != NULL) {
             size_t fr = session->frameCount - session->framesRev;
             if (inBuffer->frameCount < fr) {
@@ -1858,6 +2382,27 @@
         }
         session->revFrame->samples_per_channel_ = session->apmFrameCount;
         effect->session->apm->AnalyzeReverseStream(session->revFrame);
+#else
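+        // Non-legacy path: the reverse stream is handed to ProcessReverseStream()
+        // with the same StreamConfig for input and output.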
+        size_t fr = session->frameCount - session->framesRev;
+        if (inBuffer->frameCount < fr) {
+            fr = inBuffer->frameCount;
+        }
+        session->framesRev += fr;
+        inBuffer->frameCount = fr;
+        if (session->framesRev < session->frameCount) {
+            return 0;
+        }
+        session->framesRev = 0;
+        if (int status = effect->session->apm->ProcessReverseStream(
+                        (const int16_t* const)inBuffer->s16,
+                        (const webrtc::StreamConfig)effect->session->revConfig,
+                        (const webrtc::StreamConfig)effect->session->revConfig,
+                        (int16_t* const)outBuffer->s16);
+             status != 0) {
+            ALOGE("Process Reverse Stream failed with error %d\n", status);
+            return status;
+        }
+#endif
         return 0;
     } else {
         return -ENODATA;
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 71f6e8f..045b0d3 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,5 +1,37 @@
 // audio preprocessing unit test
 cc_test {
+    name: "AudioPreProcessingLegacyTest",
+
+    vendor: true,
+
+    relative_install_path: "soundfx",
+
+    srcs: ["PreProcessingTest.cpp"],
+
+    shared_libs: [
+        "libaudiopreprocessing_legacy",
+        "libaudioutils",
+        "liblog",
+        "libutils",
+        "libwebrtc_audio_preprocessing",
+    ],
+
+    cflags: [
+        "-DWEBRTC_POSIX",
+        "-DWEBRTC_LEGACY",
+        "-fvisibility=default",
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+        "libhardware_headers",
+    ],
+}
+
+cc_test {
     name: "AudioPreProcessingTest",
 
     vendor: true,
@@ -13,16 +45,7 @@
         "libaudioutils",
         "liblog",
         "libutils",
-        "libwebrtc_audio_preprocessing",
     ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-    ],
-
     header_libs: [
         "libaudioeffects",
         "libhardware_headers",
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 5c81d78..3244c1f 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -14,23 +14,19 @@
  * limitations under the License.
  */
 
+#include <getopt.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <vector>
+
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
+#ifndef WEBRTC_LEGACY
+#include <audio_effects/effect_agc2.h>
+#endif
 #include <audio_effects/effect_ns.h>
-#include <audio_processing.h>
-#include <getopt.h>
-#include <hardware/audio_effect.h>
-#include <module_common_types.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <utils/Log.h>
-#include <utils/Timers.h>
-
-#include <audio_utils/channels.h>
-#include <audio_utils/primitives.h>
 #include <log/log.h>
-#include <system/audio.h>
 
 // This is the only symbol that needs to be imported
 extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
@@ -42,6 +38,9 @@
 // types of pre processing modules
 enum PreProcId {
   PREPROC_AGC,  // Automatic Gain Control
+#ifndef WEBRTC_LEGACY
+  PREPROC_AGC2,  // Automatic Gain Control 2
+#endif
   PREPROC_AEC,  // Acoustic Echo Canceler
   PREPROC_NS,   // Noise Suppressor
   PREPROC_NUM_EFFECTS
@@ -58,6 +57,12 @@
   ARG_AGC_COMP_LVL,
   ARG_AEC_DELAY,
   ARG_NS_LVL,
+#ifndef WEBRTC_LEGACY
+  ARG_AEC_MOBILE,
+  ARG_AGC2_GAIN,
+  ARG_AGC2_LVL,
+  ARG_AGC2_SAT_MGN
+#endif
 };
 
 struct preProcConfigParams_t {
@@ -66,11 +71,19 @@
   int nsLevel = 0;         // a value between 0-3
   int agcTargetLevel = 3;  // in dB
   int agcCompLevel = 9;    // in dB
+#ifndef WEBRTC_LEGACY
+  float agc2Gain = 0.f;             // in dB
+  float agc2SaturationMargin = 2.f; // in dB
+  int agc2Level = 0;                // either kRms(0) or kPeak(1)
+#endif
   int aecDelay = 0;        // in ms
 };
 
 const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
     {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // agc uuid
+#ifndef WEBRTC_LEGACY
+    {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},  // agc2 uuid
+#endif
     {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // aec uuid
     {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // ns  uuid
 };
@@ -126,14 +139,30 @@
   printf("\n           Enable Noise Suppression, default disabled");
   printf("\n     --agc");
   printf("\n           Enable Gain Control, default disabled");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --agc2");
+  printf("\n           Enable Gain Controller 2, default disabled");
+#endif
   printf("\n     --ns_lvl <ns_level>");
   printf("\n           Noise Suppression level in dB, default value 0dB");
   printf("\n     --agc_tgt_lvl <target_level>");
   printf("\n           AGC Target Level in dB, default value 3dB");
   printf("\n     --agc_comp_lvl <comp_level>");
   printf("\n           AGC Comp Level in dB, default value 9dB");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --agc2_gain <fixed_digital_gain>");
+  printf("\n           AGC Fixed Digital Gain in dB, default value 0dB");
+  printf("\n     --agc2_lvl <level_estimator>");
+  printf("\n           AGC Adaptive Digital Level Estimator, default value kRms");
+  printf("\n     --agc2_sat_mgn <saturation_margin>");
+  printf("\n           AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
+#endif
   printf("\n     --aec_delay <delay>");
   printf("\n           AEC delay value in ms, default value 0ms");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --aec_mobile");
+  printf("\n           Enable mobile mode of echo canceller, default disabled");
+#endif
   printf("\n");
 }
 
@@ -184,6 +213,9 @@
   const char *outputFile = nullptr;
   const char *farFile = nullptr;
   int effectEn[PREPROC_NUM_EFFECTS] = {0};
+#ifndef WEBRTC_LEGACY
+  int aecMobileMode = 0;
+#endif
 
   const option long_opts[] = {
       {"help", no_argument, nullptr, ARG_HELP},
@@ -194,11 +226,22 @@
       {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
       {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
       {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
+#ifndef WEBRTC_LEGACY
+      {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
+      {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
+      {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
+#endif
       {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
       {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
       {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
       {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
+#ifndef WEBRTC_LEGACY
+      {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
+#endif
       {"ns", no_argument, &effectEn[PREPROC_NS], 1},
+#ifndef WEBRTC_LEGACY
+      {"aec_mobile", no_argument, &aecMobileMode, 1},
+#endif
       {nullptr, 0, nullptr, 0},
   };
   struct preProcConfigParams_t preProcCfgParams {};
@@ -246,6 +289,20 @@
         preProcCfgParams.agcCompLevel = atoi(optarg);
         break;
       }
+#ifndef WEBRTC_LEGACY
+      case ARG_AGC2_GAIN: {
+        preProcCfgParams.agc2Gain = atof(optarg);
+        break;
+      }
+      case ARG_AGC2_LVL: {
+        preProcCfgParams.agc2Level = atoi(optarg);
+        break;
+      }
+      case ARG_AGC2_SAT_MGN: {
+        preProcCfgParams.agc2SaturationMargin = atof(optarg);
+        break;
+      }
+#endif
       case ARG_AEC_DELAY: {
         preProcCfgParams.aecDelay = atoi(optarg);
         break;
@@ -342,6 +399,31 @@
       return EXIT_FAILURE;
     }
   }
+#ifndef WEBRTC_LEGACY
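+  // Apply the AGC2 tuning parameters parsed from the command line.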
+  if (effectEn[PREPROC_AGC2]) {
+    if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
+                                           (float)preProcCfgParams.agc2Gain,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Fixed Digital Gain. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+    if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
+                                           (uint32_t)preProcCfgParams.agc2Level,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Level Estimator. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+    if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
+                                           (float)preProcCfgParams.agc2SaturationMargin,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Saturation Margin. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+  }
+#endif
   if (effectEn[PREPROC_NS]) {
     if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
                                            effectHandle[PREPROC_NS]);
@@ -350,6 +432,16 @@
       return EXIT_FAILURE;
     }
   }
+#ifndef WEBRTC_LEGACY
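+  // Mobile-mode AEC is only available in the non-legacy WebRTC build.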
+  if (effectEn[PREPROC_AEC]) {
+    if (int status = preProcSetConfigParam(AEC_PARAM_MOBILE_MODE, (uint32_t)aecMobileMode,
+                                           effectHandle[PREPROC_AEC]);
+        status != 0) {
+      ALOGE("Invalid AEC mobile mode value %d\n", status);
+      return EXIT_FAILURE;
+    }
+  }
+#endif
 
   // Process Call
   const int frameLength = (int)(preProcCfgParams.samplingFreq * kTenMilliSecVal);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 83da092..9533ae5 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -146,7 +146,9 @@
     }
 
     // Close socket before posting message to RTSPSource message handler.
-    close(mHandler->getARTSPConnection()->getSocket());
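+    // mHandler may be NULL here; guard the dereference before closing the socket.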
+    if (mHandler != NULL) {
+        close(mHandler->getARTSPConnection()->getSocket());
+    }
 
     sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c38de64..69084bf 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -637,6 +637,9 @@
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
       mDequeueOutputReplyID(0),
+      mTunneledInputWidth(0),
+      mTunneledInputHeight(0),
+      mTunneled(false),
       mHaveInputSurface(false),
       mHavePendingInputBuffers(false),
       mCpuBoostRequested(false),
@@ -2951,6 +2954,14 @@
 
             extractCSD(format);
 
+            int32_t tunneled;
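+            // Record whether this codec instance is configured for tunneled playback.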
+            if (format->findInt32("feature-tunneled-playback", &tunneled) && tunneled != 0) {
+                ALOGI("Configuring TUNNELED video playback.");
+                mTunneled = true;
+            } else {
+                mTunneled = false;
+            }
+
             mCodec->initiateConfigureComponent(format);
             break;
         }
@@ -3930,7 +3941,18 @@
     if (hasCryptoOrDescrambler() && !c2Buffer && !memory) {
         AString *errorDetailMsg;
         CHECK(msg->findPointer("errorDetailMsg", (void **)&errorDetailMsg));
-
+        // Notify mCrypto of video resolution changes
+        if (mTunneled && mCrypto != NULL) {
+            int32_t width, height;
+            if (mInputFormat->findInt32("width", &width) &&
+                mInputFormat->findInt32("height", &height) && width > 0 && height > 0) {
+                if (width != mTunneledInputWidth || height != mTunneledInputHeight) {
+                    mTunneledInputWidth = width;
+                    mTunneledInputHeight = height;
+                    mCrypto->notifyResolution(width, height);
+                }
+            }
+        }
         err = mBufferChannel->queueSecureInputBuffer(
                 buffer,
                 (mFlags & kFlagIsSecure),
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index a4836cd..4705e4a 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -418,6 +418,10 @@
 
     sp<ICrypto> mCrypto;
 
+    int32_t mTunneledInputWidth;
+    int32_t mTunneledInputHeight;
+    bool mTunneled;
+
     sp<IDescrambler> mDescrambler;
 
     List<sp<ABuffer> > mCSD;