Replace MemoryDealer in AudioFlinger

MemoryDealer can cause internal fragmentation and imposes
somewhat arbitrary memory limits on clients of AudioFlinger.

Replace it with a composable allocator that allows flexible
allocation policies, and return deallocated pages to the kernel.

Test: atest shared_memory_allocator_tests, atest AudioTrackTest, atest
AudioRecordTest

Bug: 139061005
Change-Id: Ifde23c6024b2ad11ddf1960b572a91e8eedfdb79
diff --git a/media/utils/include/mediautils/SharedMemoryAllocator.h b/media/utils/include/mediautils/SharedMemoryAllocator.h
new file mode 100644
index 0000000..cf3a662
--- /dev/null
+++ b/media/utils/include/mediautils/SharedMemoryAllocator.h
@@ -0,0 +1,470 @@
+/*
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#pragma once
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <iomanip>
+#include <limits>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <log/log_main.h>
+#include <utils/StrongPointer.h>
+
+namespace std {
+template <typename T>
+struct hash<::android::wp<T>> {
+    size_t operator()(const ::android::wp<T>& x) const {
+        return std::hash<const T*>()(x.unsafe_get());
+    }
+};
+}  // namespace std
+
+namespace android::mediautils {
+
+// An Allocation is an owning handle to a region of shared memory (and thus
+// should not be copied, in order to preserve RAII semantics).
+// To share ownership between multiple objects, a ref-counting solution such as
+// sp<> or std::shared_ptr is appropriate, so that the dtor is called exactly
+// once for a particular block of memory.
+
+using AllocationType = ::android::sp<IMemory>;
+using WeakAllocationType = ::android::wp<IMemory>;
+
+namespace shared_allocator_impl {
+constexpr inline size_t roundup(size_t size, size_t pageSize) {
+    LOG_ALWAYS_FATAL_IF(pageSize == 0 || (pageSize & (pageSize - 1)) != 0,
+                        "Page size is not a power of 2");
+    return ((size + pageSize - 1) & ~(pageSize - 1));
+}
+
+constexpr inline bool isHeapValid(const sp<IMemoryHeap>& heap) {
+    return (heap && heap->getBase() &&
+            heap->getBase() != MAP_FAILED);  // TODO if not mapped locally
+}
+
+template <typename, typename = void>
+static constexpr bool has_deallocate_all = false;
+
+template <typename T>
+static constexpr bool has_deallocate_all<
+        T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().deallocate_all()), void>,
+                            void>> = true;
+
+template <typename, typename = void>
+static constexpr bool has_owns = false;
+
+template <typename T>
+static constexpr bool
+        has_owns<T, std::enable_if_t<std::is_same_v<decltype(std::declval<T>().owns(
+                                                            std::declval<const AllocationType>())),
+                                                    bool>,
+                                     void>> = true;
+
+template <typename, typename = void>
+static constexpr bool has_dump = false;
+
+template <typename T>
+static constexpr bool has_dump<
+        T,
+        std::enable_if_t<std::is_same_v<decltype(std::declval<T>().dump()), std::string>, void>> =
+        true;
+
+}  // namespace shared_allocator_impl
+
+struct BasicAllocRequest {
+    size_t size;
+};
+struct NamedAllocRequest : public BasicAllocRequest {
+    std::string_view name;
+};
+
+// A layer of indirection is required to hold a handle to the actual block,
+// because an sp<> cannot be re-created from an object once its ref-count has
+// dropped to zero. So, we have to hold onto an extra reference here. We
+// effectively want to know when the ref-count of the object drops to one, since
+// we need to keep a reference of our own in order to pass the object to
+// interfaces requiring an sp<>.
+// TODO is there some way to avoid paying this cost?
+template <typename Allocator>
+class ScopedAllocator;
+template <typename AllocationT, typename AllocatorHandleType>
+class ScopedAllocation : public BnMemory {
+  public:
+    template <typename T>
+    friend class ScopedAllocator;
+    ScopedAllocation(const AllocationT& allocation, const AllocatorHandleType& handle)
+        : mAllocation(allocation), mHandle(handle) {}
+
+    // Defer the implementation to the underlying mAllocation
+
+    virtual sp<IMemoryHeap> getMemory(ssize_t* offset = nullptr,
+                                      size_t* size = nullptr) const override {
+        return mAllocation->getMemory(offset, size);
+    }
+
+  private:
+    ~ScopedAllocation() override { mHandle->deallocate(mAllocation); }
+
+    const AllocationT mAllocation;
+    const AllocatorHandleType mHandle;
+};
+
+// Allocations are only deallocated when going out of scope.
+// This should almost always be the outermost allocator.
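+//
+// Example usage (a sketch, for illustration only): the returned sp<> keeps the
+// block alive, and the block is handed back to the wrapped allocator when the
+// last reference to it is dropped.
+//
+//   ScopedAllocator<SnoopingAllocator<MemoryHeapBaseAllocator>> allocator;
+//   auto mem = allocator.allocate(NamedAllocRequest{{1024}, "example client"});
+//   // ... use mem like any sp<IMemory>; deallocation happens automatically
+//   // when the last reference goes away.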
+template <typename Allocator>
+class ScopedAllocator {
+  public:
+    using HandleT = std::shared_ptr<Allocator>;
+    static constexpr size_t alignment() { return Allocator::alignment(); }
+
+    explicit ScopedAllocator(const std::shared_ptr<Allocator>& allocator) : mAllocator(allocator) {}
+
+    ScopedAllocator() : mAllocator(std::make_shared<Allocator>()) {}
+
+    template <typename T>
+    auto allocate(T&& request) {
+        const auto allocation = mAllocator->allocate(std::forward<T>(request));
+        if (!allocation) {
+            return sp<ScopedAllocation<AllocationType, HandleT>>{};
+        }
+        return sp<ScopedAllocation<AllocationType, HandleT>>::make(allocation, mAllocator);
+    }
+
+    // deallocate() and deallocate_all() are not exposed: they would be implicitly
+    // unsafe, since the ScopedAllocation destructor would then double-deallocate.
+    // We could protect against this efficiently with a generation count (for
+    // deallocate_all) or inefficiently (for deallocate), but we choose not to.
+    //
+    // owns() is the only operation that is safe to (pseudo-)implement here, given
+    // the static_cast requirements.
+    template <typename Enable = bool>
+    auto owns(const sp<ScopedAllocation<AllocationType, HandleT>>& allocation) const
+            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
+        return mAllocator->owns(allocation->mAllocation);
+    }
+
+    template <typename Enable = std::string>
+    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
+        return mAllocator->dump();
+    }
+
+  private:
+    // We store a shared pointer in order to ensure that the allocator outlives
+    // its allocations (which call back into it when they are destroyed).
+    const HandleT mAllocator;
+};
+
+// A simple policy for PolicyAllocator which enforces a pool size and an allocation
+// size range.
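+//
+// For example (illustrative only), SizePolicy<16 * 4096, 4096, 4 * 4096> admits
+// (page-rounded) requests of one to four pages, as long as the total outstanding
+// size stays within sixteen pages.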
+template <size_t PoolSize, size_t MinAllocSize = 0,
+          size_t MaxAllocSize = std::numeric_limits<size_t>::max()>
+class SizePolicy {
+    static_assert(PoolSize > 0);
+
+  public:
+    template <typename T>
+    bool isValid(T&& request) const {
+        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
+        return !(request.size > kMaxAllocSize || request.size < kMinAllocSize ||
+                 mPoolSize + request.size > kPoolSize);
+    }
+
+    void allocated(const AllocationType& alloc) { mPoolSize += alloc->size(); }
+
+    void deallocated(const AllocationType& alloc) { mPoolSize -= alloc->size(); }
+
+    void deallocated_all() { mPoolSize = 0; }
+
+    static constexpr size_t kPoolSize = PoolSize;
+    static constexpr size_t kMinAllocSize = MinAllocSize;
+    static constexpr size_t kMaxAllocSize = MaxAllocSize;
+
+  private:
+    size_t mPoolSize = 0;
+};
+
+// An allocator which accepts or rejects allocation requests by a parametrized
+// policy (which can carry state).
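+//
+// Example usage (a sketch, for illustration only): a page-aligned shared memory
+// allocator capped at 1 MiB of outstanding allocations.
+//
+//   PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<1024 * 1024>> allocator;
+//   AllocationType mem = allocator.allocate(BasicAllocRequest{1000});  // rounded up to 4096
+//   if (mem != nullptr) { /* use mem->unsecurePointer() */ }
+//   allocator.deallocate(mem);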
+template <typename Allocator, typename Policy>
+class PolicyAllocator {
+  public:
+    static constexpr size_t alignment() { return Allocator::alignment(); }
+
+    PolicyAllocator(Allocator allocator, Policy policy)
+        : mAllocator(allocator), mPolicy(std::move(policy)) {}
+
+    // Default initialize the allocator and policy
+    PolicyAllocator() = default;
+
+    template <typename T>
+    AllocationType allocate(T&& request) {
+        static_assert(std::is_base_of_v<android::mediautils::BasicAllocRequest, std::decay_t<T>>);
+        request.size = shared_allocator_impl::roundup(request.size, alignment());
+        if (!mPolicy.isValid(request)) {
+            return {};
+        }
+        AllocationType val = mAllocator.allocate(std::forward<T>(request));
+        if (val == nullptr) return val;
+        mPolicy.allocated(val);
+        return val;
+    }
+
+    void deallocate(const AllocationType& allocation) {
+        if (!allocation) return;
+        mPolicy.deallocated(allocation);
+        mAllocator.deallocate(allocation);
+    }
+
+    template <typename Enable = void>
+    auto deallocate_all()
+            -> std::enable_if_t<shared_allocator_impl::has_deallocate_all<Allocator>, Enable> {
+        mAllocator.deallocate_all();
+        mPolicy.deallocated_all();
+    }
+
+    template <typename Enable = bool>
+    auto owns(const AllocationType& allocation) const
+            -> std::enable_if_t<shared_allocator_impl::has_owns<Allocator>, Enable> {
+        return mAllocator.owns(allocation);
+    }
+
+    template <typename Enable = std::string>
+    auto dump() const -> std::enable_if_t<shared_allocator_impl::has_dump<Allocator>, Enable> {
+        return mAllocator.dump();
+    }
+
+  private:
+    [[no_unique_address]] Allocator mAllocator;
+    [[no_unique_address]] Policy mPolicy;
+};
+
+// An allocator which keeps track of outstanding allocations for logging and
+// querying ownership.
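+//
+// Example usage (a sketch, for illustration only): name each allocation so that
+// dump() and owns() can report on it later.
+//
+//   SnoopingAllocator<MemoryHeapBaseAllocator> allocator{"ExamplePool"};
+//   auto mem = allocator.allocate(NamedAllocRequest{{4096}, "track 7"});
+//   ALOGD("%s", allocator.dump().c_str());  // lists "track 7" and its heap info
+//   allocator.deallocate(mem);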
+template <class Allocator>
+class SnoopingAllocator {
+  public:
+    struct AllocationData {
+        std::string name;
+        size_t allocation_number;
+    };
+    static constexpr size_t alignment() { return Allocator::alignment(); }
+
+    SnoopingAllocator(Allocator allocator, std::string_view name)
+        : mName(name), mAllocator(std::move(allocator)) {}
+
+    explicit SnoopingAllocator(std::string_view name) : mName(name), mAllocator(Allocator{}) {}
+
+    explicit SnoopingAllocator(Allocator allocator) : mAllocator(std::move(allocator)) {}
+
+    // Default construct allocator and name
+    SnoopingAllocator() = default;
+
+    template <typename T>
+    AllocationType allocate(T&& request) {
+        static_assert(std::is_base_of_v<NamedAllocRequest, std::decay_t<T>>);
+        AllocationType allocation = mAllocator.allocate(request);
+        if (allocation)
+            mAllocations.insert({WeakAllocationType{allocation},
+                                 {std::string{request.name}, mAllocationNumber++}});
+        return allocation;
+    }
+
+    void deallocate(const AllocationType& allocation) {
+        if (!allocation) return;
+        mAllocations.erase(WeakAllocationType{allocation});
+        mAllocator.deallocate(allocation);
+    }
+
+    void deallocate_all() {
+        if constexpr (shared_allocator_impl::has_deallocate_all<Allocator>) {
+            mAllocator.deallocate_all();
+        } else {
+            for (auto& [mem, value] : mAllocations) {
+                // The key is a wp<>; promote it to an sp<> before handing it to
+                // the underlying allocator (deallocate ignores null allocations).
+                mAllocator.deallocate(mem.promote());
+            }
+        }
+        mAllocations.clear();
+    }
+
+    bool owns(const AllocationType& allocation) const {
+        return (mAllocations.count(WeakAllocationType{allocation}) > 0);
+    }
+
+    std::string dump() const {
+        std::ostringstream dump;
+        dump << mName << " Allocator Dump:\n";
+        dump << std::setw(8) << "HeapID" << std::setw(8) << "Offset" << std::setw(8) << "Size"
+             << std::setw(8) << "Order"
+             << "   Name\n";
+        for (auto& [mem, value] : mAllocations) {
+            // TODO Imem size and offset
+            const AllocationType handle = mem.promote();
+            if (!handle) {
+                dump << "Invalid memory lifetime!";
+                continue;
+            }
+            const auto heap = handle->getMemory();
+            dump << std::setw(8) << heap->getHeapID() << std::setw(8) << heap->getOffset()
+                 << std::setw(8) << heap->getSize() << std::setw(8) << value.allocation_number
+                 << "   " << value.name << "\n";
+        }
+        return dump.str();
+    }
+
+    const std::unordered_map<WeakAllocationType, AllocationData>& getAllocations() {
+        return mAllocations;
+    }
+
+  private:
+    const std::string mName;
+    [[no_unique_address]] Allocator mAllocator;
+    // We don't copy the underlying information of an allocation; rather, the
+    // allocation information is put on the heap and referenced via a ref-counted
+    // handle. So, the address of the allocation information is appropriate to
+    // hash. In order for that block to be freed, the underlying allocation must
+    // be referenced by no one (and thus must have been deallocated).
+    std::unordered_map<WeakAllocationType, AllocationData> mAllocations;
+    // For debugging purposes, monotonic
+    size_t mAllocationNumber = 0;
+};
+
+// An allocator which passes a failed allocation request to a backup allocator.
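+//
+// Example usage (a sketch, for illustration only): try a small dedicated pool
+// first and spill into a second pool when the first one rejects the request.
+//
+//   using Pool = PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<16 * 4096>>;
+//   FallbackAllocator<SnoopingAllocator<Pool>, SnoopingAllocator<Pool>> allocator;
+//   auto mem = allocator.allocate(NamedAllocRequest{{4096}, "spills when full"});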
+template <class PrimaryAllocator, class SecondaryAllocator>
+class FallbackAllocator {
+  public:
+    static_assert(PrimaryAllocator::alignment() == SecondaryAllocator::alignment());
+    static_assert(shared_allocator_impl::has_owns<PrimaryAllocator>);
+
+    static constexpr size_t alignment() { return PrimaryAllocator::alignment(); }
+
+    FallbackAllocator(const PrimaryAllocator& primary, const SecondaryAllocator& secondary)
+        : mPrimary(primary), mSecondary(secondary) {}
+
+    // Default construct primary and secondary allocator
+    FallbackAllocator() = default;
+
+    template <typename T>
+    AllocationType allocate(T&& request) {
+        AllocationType allocation = mPrimary.allocate(std::forward<T>(request));
+        if (!allocation) allocation = mSecondary.allocate(std::forward<T>(request));
+        return allocation;
+    }
+
+    void deallocate(const AllocationType& allocation) {
+        if (!allocation) return;
+        if (mPrimary.owns(allocation)) {
+            mPrimary.deallocate(allocation);
+        } else {
+            mSecondary.deallocate(allocation);
+        }
+    }
+
+    template <typename Enable = void>
+    auto deallocate_all() -> std::enable_if_t<
+            shared_allocator_impl::has_deallocate_all<PrimaryAllocator> &&
+                    shared_allocator_impl::has_deallocate_all<SecondaryAllocator>,
+            Enable> {
+        mPrimary.deallocate_all();
+        mSecondary.deallocate_all();
+    }
+
+    template <typename Enable = bool>
+    auto owns(const AllocationType& allocation) const
+            -> std::enable_if_t<shared_allocator_impl::has_owns<SecondaryAllocator>, Enable> {
+        return mPrimary.owns(allocation) || mSecondary.owns(allocation);
+    }
+
+    template <typename Enable = std::string>
+    auto dump() const
+            -> std::enable_if_t<shared_allocator_impl::has_dump<PrimaryAllocator> &&
+                                        shared_allocator_impl::has_dump<SecondaryAllocator>,
+                                Enable> {
+        return std::string("Primary: \n") + mPrimary.dump() + std::string("Secondary: \n") +
+               mSecondary.dump();
+    }
+
+  private:
+    [[no_unique_address]] PrimaryAllocator mPrimary;
+    [[no_unique_address]] SecondaryAllocator mSecondary;
+};
+
+// An allocator which is backed by a shared_ptr to an allocator, so multiple
+// allocators can share the same backing allocator (and thus the same state).
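+//
+// Example usage (a sketch, for illustration only): two clients drawing from the
+// same underlying pool (and therefore the same size budget).
+//
+//   const auto pool = std::make_shared<
+//           PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<64 * 4096>>>();
+//   IndirectAllocator client1{pool};
+//   IndirectAllocator client2{pool};  // both count against the same 64 pages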
+template <typename Allocator>
+class IndirectAllocator {
+  public:
+    static constexpr size_t alignment() { return Allocator::alignment(); }
+
+    explicit IndirectAllocator(const std::shared_ptr<Allocator>& allocator)
+        : mAllocator(allocator) {}
+
+    template <typename T>
+    AllocationType allocate(T&& request) {
+        return mAllocator->allocate(std::forward<T>(request));
+    }
+
+    void deallocate(const AllocationType& allocation) {
+        if (!allocation) return;
+        mAllocator->deallocate(allocation);
+    }
+
+    // We can't implement deallocate_all/dump/owns, since we may not be the only
+    // allocator with access to the underlying allocator (which would make those
+    // operations ill-defined). If these methods are necessary, wrap this
+    // allocator with a SnoopingAllocator.
+  private:
+    const std::shared_ptr<Allocator> mAllocator;
+};
+
+// Stateless. This allocator allocates full page-aligned MemoryHeapBases (backed by
+// a shared memory mapped anonymous file) as allocations.
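+//
+// Example usage (a sketch, for illustration only):
+//
+//   MemoryHeapBaseAllocator allocator;
+//   AllocationType mem = allocator.allocate(BasicAllocRequest{100});  // one 4096-byte page
+//   // mem is an sp<IMemory> suitable for sending over binder.
+//   allocator.deallocate(mem);  // unmaps and returns the pages to the kernel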
+class MemoryHeapBaseAllocator {
+  public:
+    static constexpr size_t alignment() { return 4096; /* PAGE_SIZE */ }
+    static constexpr unsigned FLAGS = 0;  // default flags
+
+    template <typename T>
+    AllocationType allocate(T&& request) {
+        static_assert(std::is_base_of_v<BasicAllocRequest, std::decay_t<T>>);
+        auto heap =
+                sp<MemoryHeapBase>::make(shared_allocator_impl::roundup(request.size, alignment()));
+        if (!shared_allocator_impl::isHeapValid(heap)) {
+            return {};
+        }
+        return sp<MemoryBase>::make(heap, 0, heap->getSize());
+    }
+
+    // Passing a block that was not allocated by a MemoryHeapBaseAllocator is undefined.
+    void deallocate(const AllocationType& allocation) {
+        if (!allocation) return;
+        const auto heap = allocation->getMemory();
+        if (!heap) return;
+        // This causes future mapped accesses (even across process boundaries)
+        // to receive SIGBUS.
+        ftruncate(heap->getHeapID(), 0);
+        // This static cast is safe, since as long as the block was originally
+        // allocated by us, the underlying IMemoryHeap was a MemoryHeapBase
+        static_cast<MemoryHeapBase&>(*heap).dispose();
+    }
+};
+}  // namespace android::mediautils
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index 48d18b0..4fcfdcd 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -35,6 +35,7 @@
     host_supported: true,
 
     shared_libs: [
+        "libbinder",
         "liblog",
         "libutils",
     ],
@@ -212,3 +213,11 @@
         "inplace_function_tests.cpp"
     ],
 }
+
+cc_test {
+    name: "shared_memory_allocator_tests",
+    defaults: ["libmediautils_tests_defaults"],
+    srcs: [
+        "shared_memory_allocator_tests.cpp",
+    ],
+}
diff --git a/media/utils/tests/shared_memory_allocator_tests.cpp b/media/utils/tests/shared_memory_allocator_tests.cpp
new file mode 100644
index 0000000..11bc72a
--- /dev/null
+++ b/media/utils/tests/shared_memory_allocator_tests.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "shared_memory_allocator_tests"
+
+#include <gtest/gtest.h>
+#include <mediautils/SharedMemoryAllocator.h>
+#include <sys/stat.h>
+#include <utils/Log.h>
+
+using namespace android;
+using namespace android::mediautils;
+
+namespace {
+void validate_block(const AllocationType& block) {
+    ASSERT_TRUE(block != nullptr);
+    memset(block->unsecurePointer(), 10, 4096);
+    EXPECT_EQ(*(static_cast<char*>(block->unsecurePointer()) + 100), static_cast<char>(10));
+}
+
+template <size_t N = 0, bool FatalOwn = true>
+struct ValidateForwarding {
+    static constexpr size_t alignment() { return 1337; }
+
+    bool owns(const AllocationType& allocation) const {
+        if (allocation == owned) return true;
+        if constexpr (FatalOwn) {
+            LOG_ALWAYS_FATAL_IF(allocation != not_owned, "Invalid allocation passed to allocator");
+        }
+        return false;
+    }
+
+    void deallocate_all() { deallocate_all_count++; }
+    std::string dump() const { return dump_string; }
+
+    static inline size_t deallocate_all_count = 0;
+    static inline const AllocationType owned =
+            MemoryHeapBaseAllocator().allocate(BasicAllocRequest{4096});
+    static inline const AllocationType not_owned =
+            MemoryHeapBaseAllocator().allocate(BasicAllocRequest{4096});
+    static inline const std::string dump_string = std::to_string(N) + "Test Dump Forwarding";
+};
+
+}  // namespace
+static_assert(shared_allocator_impl::has_owns<MemoryHeapBaseAllocator> == false);
+static_assert(shared_allocator_impl::has_dump<MemoryHeapBaseAllocator> == false);
+static_assert(shared_allocator_impl::has_deallocate_all<MemoryHeapBaseAllocator> == false);
+static_assert(shared_allocator_impl::has_owns<SnoopingAllocator<MemoryHeapBaseAllocator>> == true);
+static_assert(shared_allocator_impl::has_dump<SnoopingAllocator<MemoryHeapBaseAllocator>> == true);
+static_assert(
+        shared_allocator_impl::has_deallocate_all<SnoopingAllocator<MemoryHeapBaseAllocator>> ==
+        true);
+static_assert(
+        shared_allocator_impl::has_owns<
+                PolicyAllocator<SnoopingAllocator<MemoryHeapBaseAllocator>, SizePolicy<4096>>> ==
+        true);
+static_assert(
+        shared_allocator_impl::has_dump<
+                PolicyAllocator<SnoopingAllocator<MemoryHeapBaseAllocator>, SizePolicy<4096>>> ==
+        true);
+static_assert(
+        shared_allocator_impl::has_deallocate_all<
+                PolicyAllocator<SnoopingAllocator<MemoryHeapBaseAllocator>, SizePolicy<4096>>> ==
+        true);
+static_assert(shared_allocator_impl::has_owns<
+                      FallbackAllocator<SnoopingAllocator<MemoryHeapBaseAllocator>,
+                                        SnoopingAllocator<MemoryHeapBaseAllocator>>> == true);
+
+TEST(shared_memory_allocator_tests, roundup) {
+    using namespace shared_allocator_impl;
+    EXPECT_EQ(roundup(1023, 1024), 1024ul);
+    EXPECT_EQ(roundup(1024, 1024), 1024ul);
+    EXPECT_EQ(roundup(1025, 1024), 2048ul);
+    EXPECT_DEATH(roundup(1023, 1023), "");
+    EXPECT_DEATH(roundup(1023, 0), "");
+}
+
+TEST(shared_memory_allocator_tests, mheapbase_allocator) {
+    MemoryHeapBaseAllocator allocator;
+    const auto memory = allocator.allocate(BasicAllocRequest{500});
+    ASSERT_TRUE(memory != nullptr);
+    const auto fd = dup(memory->getMemory()->getHeapID());
+    EXPECT_EQ(memory->size(), static_cast<unsigned>(4096));
+    EXPECT_EQ(memory->size(), memory->getMemory()->getSize());
+    validate_block(memory);
+    allocator.deallocate(memory);
+    // Ensures we have closed the fd
+    EXPECT_EQ(memory->unsecurePointer(), nullptr);
+    EXPECT_EQ(memory->getMemory()->getBase(), nullptr);
+    struct stat st;
+    const auto err = fstat(fd, &st);
+    EXPECT_EQ(err, 0);
+    // Ensure we reclaim pages (overly-zealous)
+    EXPECT_EQ(st.st_size, 0);
+}
+
+TEST(shared_memory_allocator_tests, mheapbase_allocator_independence) {
+    static_assert(MemoryHeapBaseAllocator::alignment() == 4096);
+    MemoryHeapBaseAllocator allocator;
+    const auto first_memory = allocator.allocate(BasicAllocRequest{500});
+    const auto second_memory = allocator.allocate(BasicAllocRequest{500});
+    ASSERT_TRUE(first_memory != nullptr && second_memory != nullptr);
+    EXPECT_NE(first_memory->getMemory()->getHeapID(), second_memory->getMemory()->getHeapID());
+    allocator.deallocate(first_memory);
+    validate_block(second_memory);
+    allocator.deallocate(second_memory);
+}
+
+TEST(shared_memory_allocator_tests, snooping_allocator) {
+    static_assert(SnoopingAllocator<ValidateForwarding<0>>::alignment() ==
+                  ValidateForwarding<0>::alignment());
+
+    SnoopingAllocator<MemoryHeapBaseAllocator> allocator{"allocator"};
+    const auto first_memory = allocator.allocate(NamedAllocRequest{{500}, "allocate_1"});
+    auto second_memory = first_memory;
+    {
+        const auto tmp = allocator.allocate(NamedAllocRequest{{5000}, "allocate_2"});
+        // Test copying handle around
+        second_memory = tmp;
+    }
+    ASSERT_TRUE(first_memory && second_memory);
+    EXPECT_TRUE(allocator.owns(first_memory) && allocator.owns(second_memory));
+    const auto first_allocations = allocator.getAllocations();
+    EXPECT_EQ(first_allocations.size(), 2ull);
+    for (const auto& [key, val] : allocator.getAllocations()) {
+        if (val.allocation_number == 0) {
+            EXPECT_EQ(val.name, "allocate_1");
+            EXPECT_TRUE(first_memory == key);
+        }
+        if (val.allocation_number == 1) {
+            EXPECT_EQ(val.name, "allocate_2");
+            EXPECT_TRUE(second_memory == key);
+        }
+    }
+    // TODO test dump and deallocate forwarding
+    // EXPECT_EQ(allocator.dump(), std::string{});
+    validate_block(second_memory);
+    allocator.deallocate(second_memory);
+    EXPECT_EQ(second_memory->unsecurePointer(), nullptr);
+    EXPECT_FALSE(allocator.owns(second_memory));
+    EXPECT_TRUE(allocator.owns(first_memory));
+    const auto second_allocations = allocator.getAllocations();
+    EXPECT_EQ(second_allocations.size(), 1ul);
+    for (const auto& [key, val] : second_allocations) {
+        EXPECT_EQ(val.name, "allocate_1");
+        EXPECT_TRUE(first_memory == key);
+    }
+    // EXPECT_EQ(allocator.dump(), std::string{});
+    // TODO test deallocate_all O(1)
+}
+
+// TODO generic policy test
+TEST(shared_memory_allocator_tests, size_policy_allocator_enforcement) {
+    PolicyAllocator allocator{MemoryHeapBaseAllocator{},
+                              SizePolicy<4096 * 7, 4096 * 2, 4096 * 4>{}};
+    // Violate max size
+    EXPECT_TRUE(allocator.allocate(BasicAllocRequest{4096 * 5}) == nullptr);
+    // Violate min alloc size
+    EXPECT_TRUE(allocator.allocate(BasicAllocRequest{4096}) == nullptr);
+    const auto first_memory = allocator.allocate(BasicAllocRequest{4096 * 4});
+    validate_block(first_memory);
+    // Violate pool size
+    EXPECT_TRUE(allocator.allocate(BasicAllocRequest{4096 * 4}) == nullptr);
+    const auto second_memory = allocator.allocate(BasicAllocRequest{4096 * 3});
+    validate_block(second_memory);
+    allocator.deallocate(second_memory);
+    // Check pool size update after deallocation
+    const auto new_second_memory = allocator.allocate(BasicAllocRequest{4096 * 2});
+    validate_block(new_second_memory);
+}
+
+TEST(shared_memory_allocator_tests, indirect_allocator) {
+    static_assert(IndirectAllocator<ValidateForwarding<0>>::alignment() ==
+                  ValidateForwarding<0>::alignment());
+    const auto allocator_handle = std::make_shared<SnoopingAllocator<MemoryHeapBaseAllocator>>();
+    IndirectAllocator allocator{allocator_handle};
+    const auto memory = allocator.allocate(NamedAllocRequest{{4096}, "allocation"});
+    EXPECT_TRUE(allocator_handle->owns(memory));
+    EXPECT_TRUE(allocator_handle->getAllocations().size() == 1);
+    allocator.deallocate(memory);
+    EXPECT_FALSE(allocator_handle->owns(memory));
+    EXPECT_TRUE(allocator_handle->getAllocations().size() == 0);
+}
+
+TEST(shared_memory_allocator_tests, policy_allocator_forwarding) {
+    // Test appropriate forwarding of allocator, deallocate
+    const auto primary_allocator =
+            std::make_shared<SnoopingAllocator<MemoryHeapBaseAllocator>>("allocator");
+    PolicyAllocator allocator{IndirectAllocator(primary_allocator), SizePolicy<4096>{}};
+    const auto memory = allocator.allocate(NamedAllocRequest{{4096}, "allocation"});
+    EXPECT_TRUE(primary_allocator->owns(memory));
+    const auto& allocations = primary_allocator->getAllocations();
+    EXPECT_TRUE(allocations.size() == 1);
+    allocator.deallocate(memory);
+    EXPECT_TRUE(allocations.size() == 0);
+    const auto memory2 = allocator.allocate(NamedAllocRequest{{4096}, "allocation_2"});
+    EXPECT_TRUE(allocations.size() == 1);
+    EXPECT_TRUE(primary_allocator->owns(memory2));
+    allocator.deallocate(memory2);
+    EXPECT_FALSE(primary_allocator->owns(memory2));
+    EXPECT_TRUE(allocations.size() == 0);
+    // Test appropriate forwarding of own, dump, alignment, deallocate_all
+    PolicyAllocator allocator2{ValidateForwarding<0>{}, SizePolicy<4096>{}};
+    EXPECT_TRUE(allocator2.owns(ValidateForwarding<0>::owned));
+    EXPECT_FALSE(allocator2.owns(ValidateForwarding<0>::not_owned));
+    EXPECT_TRUE(allocator2.dump().find(ValidateForwarding<0>::dump_string) != std::string::npos);
+    static_assert(decltype(allocator2)::alignment() == ValidateForwarding<0>::alignment());
+    size_t prev = ValidateForwarding<0>::deallocate_all_count;
+    allocator2.deallocate_all();
+    EXPECT_EQ(ValidateForwarding<0>::deallocate_all_count, prev + 1);
+}
+
+TEST(shared_memory_allocator_tests, snooping_allocator_nullptr) {
+    SnoopingAllocator allocator{PolicyAllocator{MemoryHeapBaseAllocator{}, SizePolicy<4096 * 2>{}}};
+    const auto memory = allocator.allocate(NamedAllocRequest{{3000}, "allocation_1"});
+    validate_block(memory);
+    ASSERT_TRUE(allocator.allocate(NamedAllocRequest{{5000}, "allocation_2"}) == nullptr);
+    const auto& allocations = allocator.getAllocations();
+    EXPECT_EQ(allocations.size(), 1ul);
+    for (const auto& [key, val] : allocations) {
+        EXPECT_EQ(val.name, "allocation_1");
+        EXPECT_EQ(val.allocation_number, 0ul);
+        EXPECT_TRUE(key == memory);
+    }
+}
+
+TEST(shared_memory_allocator_tests, fallback_allocator) {
+    // Construct Fallback Allocator
+    const auto primary_allocator = std::make_shared<
+            SnoopingAllocator<PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<4096>>>>(
+            PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<4096>>{}, "primary_allocator");
+    const auto secondary_allocator =
+            std::make_shared<SnoopingAllocator<MemoryHeapBaseAllocator>>("secondary_allocator");
+
+    FallbackAllocator fallback_allocator{SnoopingAllocator{IndirectAllocator{primary_allocator}},
+                                         SnoopingAllocator{IndirectAllocator{secondary_allocator}}};
+    static_assert(decltype(fallback_allocator)::alignment() == 4096);
+    // Basic Allocation Test
+    const auto memory = fallback_allocator.allocate(NamedAllocRequest{{3000}, "allocation_1"});
+    validate_block(memory);
+    // Correct allocator selected
+    EXPECT_TRUE(fallback_allocator.owns(memory));
+    EXPECT_TRUE(primary_allocator->owns(memory));
+    EXPECT_FALSE(secondary_allocator->owns(memory));
+    // Test fallback allocation
+    const auto memory2 = fallback_allocator.allocate(NamedAllocRequest{{3000}, "allocation_2"});
+    validate_block(memory2);
+    // Correct allocator selected
+    EXPECT_TRUE(fallback_allocator.owns(memory2));
+    EXPECT_FALSE(primary_allocator->owns(memory2));
+    EXPECT_TRUE(secondary_allocator->owns(memory2));
+    // Allocations ended up in the correct allocators
+    const auto& primary_allocations = primary_allocator->getAllocations();
+    EXPECT_TRUE(primary_allocations.size() == 1ul);
+    ASSERT_TRUE(primary_allocations.find(memory) != primary_allocations.end());
+    EXPECT_EQ(primary_allocations.find(memory)->second.name, std::string{"allocation_1"});
+    const auto& secondary_allocations = secondary_allocator->getAllocations();
+    EXPECT_TRUE(secondary_allocations.size() == 1ul);
+    ASSERT_TRUE(secondary_allocations.find(memory2) != secondary_allocations.end());
+    EXPECT_EQ(secondary_allocations.find(memory2)->second.name, std::string{"allocation_2"});
+    // Test deallocate appropriate forwarding
+    fallback_allocator.deallocate(memory);
+    EXPECT_TRUE(primary_allocator->getAllocations().size() == 0ul);
+    EXPECT_TRUE(secondary_allocator->getAllocations().size() == 1ul);
+    // Appropriate fallback after deallocation
+    const auto memory3 = fallback_allocator.allocate(NamedAllocRequest{{3000}, "allocation_3"});
+    EXPECT_TRUE(fallback_allocator.owns(memory3));
+    EXPECT_TRUE(primary_allocator->owns(memory3));
+    EXPECT_FALSE(secondary_allocator->owns(memory3));
+    EXPECT_TRUE(primary_allocator->getAllocations().size() == 1ul);
+    // Test deallocate appropriate forwarding
+    EXPECT_TRUE(secondary_allocator->getAllocations().size() == 1ul);
+    fallback_allocator.deallocate(memory2);
+    EXPECT_TRUE(secondary_allocator->getAllocations().size() == 0ul);
+    const auto memory4 = fallback_allocator.allocate(NamedAllocRequest{{3000}, "allocation_4"});
+    EXPECT_TRUE(fallback_allocator.owns(memory4));
+    EXPECT_FALSE(primary_allocator->owns(memory4));
+    EXPECT_TRUE(secondary_allocator->owns(memory4));
+    // Allocations ended up in the correct allocators
+    EXPECT_TRUE(primary_allocator->getAllocations().size() == 1ul);
+    EXPECT_TRUE(secondary_allocator->getAllocations().size() == 1ul);
+    ASSERT_TRUE(primary_allocations.find(memory3) != primary_allocations.end());
+    EXPECT_EQ(primary_allocations.find(memory3)->second.name, std::string{"allocation_3"});
+    ASSERT_TRUE(secondary_allocations.find(memory4) != secondary_allocations.end());
+    EXPECT_EQ(secondary_allocations.find(memory4)->second.name, std::string{"allocation_4"});
+}
+
+TEST(shared_memory_allocator_tests, fallback_allocator_forwarding) {
+    // Test forwarding
+    using Alloc1 = ValidateForwarding<0, false>;
+    using Alloc2 = ValidateForwarding<1, false>;
+    FallbackAllocator forward_test{Alloc1{}, Alloc2{}};
+    EXPECT_TRUE(forward_test.dump().find(Alloc1::dump_string) != std::string::npos);
+    EXPECT_TRUE(forward_test.dump().find(Alloc2::dump_string) != std::string::npos);
+    // Test owned forwarding
+    EXPECT_TRUE(forward_test.owns(Alloc1::owned));
+    EXPECT_TRUE(forward_test.owns(Alloc2::owned));
+    EXPECT_FALSE(forward_test.owns(Alloc1::not_owned));
+    EXPECT_FALSE(forward_test.owns(Alloc2::not_owned));
+    // Test alignment forwarding
+    static_assert(FallbackAllocator<Alloc1, Alloc2>::alignment() == Alloc1::alignment());
+    // Test deallocate_all forwarding
+    size_t prev1 = Alloc1::deallocate_all_count;
+    size_t prev2 = Alloc2::deallocate_all_count;
+    forward_test.deallocate_all();
+    EXPECT_EQ(prev1 + 1, Alloc1::deallocate_all_count);
+    EXPECT_EQ(prev2 + 1, Alloc2::deallocate_all_count);
+}
+
+TEST(shared_memory_allocator_tests, scoped_allocator) {
+    const auto underlying_allocator =
+            std::make_shared<SnoopingAllocator<MemoryHeapBaseAllocator>>("Allocator");
+    ScopedAllocator allocator{underlying_allocator};
+    const auto& allocations = underlying_allocator->getAllocations();
+    {
+        decltype(allocator.allocate(NamedAllocRequest{})) copy;
+        {
+            EXPECT_EQ(allocations.size(), 0ul);
+            const auto memory = allocator.allocate(NamedAllocRequest{{3000}, "allocation_1"});
+            copy = memory;
+            EXPECT_EQ(allocations.size(), 1ul);
+            EXPECT_TRUE(allocator.owns(copy));
+            EXPECT_TRUE(allocator.owns(memory));
+        }
+        EXPECT_TRUE(allocator.owns(copy));
+        EXPECT_EQ(allocations.size(), 1ul);
+        for (const auto& [key, value] : allocations) {
+            EXPECT_EQ(value.name, std::string{"allocation_1"});
+        }
+    }
+    EXPECT_EQ(allocations.size(), 0ul);
+    // Test forwarding
+    static_assert(ScopedAllocator<ValidateForwarding<0>>::alignment() ==
+                  ValidateForwarding<0>::alignment());
+    ScopedAllocator<ValidateForwarding<0>> forwarding{};
+    EXPECT_EQ(forwarding.dump(), ValidateForwarding<0>::dump_string);
+}
diff --git a/services/audioflinger/AllocatorFactory.h b/services/audioflinger/AllocatorFactory.h
new file mode 100644
index 0000000..7534607
--- /dev/null
+++ b/services/audioflinger/AllocatorFactory.h
@@ -0,0 +1,95 @@
+/*
+**
+** Copyright 2022, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <mediautils/SharedMemoryAllocator.h>
+
+#pragma once
+
+// TODO how do we appropriately restrict visibility of this header?
+// It should only be included in AudioFlinger.h.
+// We give everything internal linkage for now.
+namespace android {
+namespace AllocatorFactory {
+namespace {
+// TODO make sure these are appropriate
+constexpr inline size_t MAX_MEMORY_SIZE = 1024 * 1024 * 100;                  // 100 MiB
+constexpr inline size_t DED_SIZE = (MAX_MEMORY_SIZE * 4) / 10;                // 40 MiB
+constexpr inline size_t SHARED_SIZE = MAX_MEMORY_SIZE - DED_SIZE;             // 60 MiB
+constexpr inline size_t SHARED_SIZE_LARGE = (SHARED_SIZE * 4) / 6;            // 40 MiB
+constexpr inline size_t SHARED_SIZE_SMALL = SHARED_SIZE - SHARED_SIZE_LARGE;  // 20 MiB
+constexpr inline size_t SMALL_THRESHOLD = 1024 * 40;                          // 40 KiB
+
+inline auto getDedicated() {
+    using namespace mediautils;
+    static const auto allocator =
+            std::make_shared<PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<DED_SIZE>>>();
+    return allocator;
+}
+
+inline auto getSharedLarge() {
+    using namespace mediautils;
+    static const auto allocator = std::make_shared<
+            PolicyAllocator<MemoryHeapBaseAllocator, SizePolicy<SHARED_SIZE_LARGE>>>();
+    return allocator;
+}
+
+inline auto getSharedSmall() {
+    using namespace mediautils;
+    static const auto allocator =
+            std::make_shared<PolicyAllocator<MemoryHeapBaseAllocator,
+                                             SizePolicy<SHARED_SIZE_SMALL, 0, SMALL_THRESHOLD>>>();
+    return allocator;
+}
+
+template <typename Policy, typename Allocator>
+inline auto wrapWithPolicySnooping(Allocator allocator, std::string_view name) {
+    using namespace mediautils;
+    return SnoopingAllocator{PolicyAllocator{IndirectAllocator{allocator}, Policy{}}, name};
+}
+
+// A reasonable upper bound on how many clients we expect, and how many pieces to slice
+// the dedicated pool into.
+constexpr inline size_t CLIENT_BOUND = 32;
+// Maximum share of a shared pool that a single client can take (50%).
+constexpr inline size_t ADV_THRESHOLD_INV = 2;
+
+inline auto getClientAllocator() {
+    using namespace mediautils;
+    const auto makeDedPool = []() {
+        return wrapWithPolicySnooping<SizePolicy<DED_SIZE / CLIENT_BOUND>>(getDedicated(),
+                                                                           "Dedicated Pool");
+    };
+    const auto makeLargeShared = []() {
+        return wrapWithPolicySnooping<SizePolicy<SHARED_SIZE_LARGE / ADV_THRESHOLD_INV>>(
+                getSharedLarge(), "Large Shared");
+    };
+    const auto makeSmallShared = []() {
+        return wrapWithPolicySnooping<
+                SizePolicy<SHARED_SIZE_SMALL / ADV_THRESHOLD_INV>>(
+                getSharedSmall(), "Small Shared");
+    };
+
+    return ScopedAllocator{std::make_shared<
+            FallbackAllocator<decltype(makeDedPool()),
+                              decltype(FallbackAllocator(makeLargeShared(), makeSmallShared()))>>(
+            makeDedPool(), FallbackAllocator{makeLargeShared(), makeSmallShared()})};
+}
+
+using ClientAllocator = decltype(getClientAllocator());
+}  // namespace
+}  // namespace AllocatorFactory
+}  // namespace android
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7f0fc1f..f3cc9c1 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -736,10 +736,10 @@
     for (size_t i = 0; i < mClients.size(); ++i) {
         sp<Client> client = mClients.valueAt(i).promote();
         if (client != 0) {
-            result.appendFormat("%6d %12zu\n", client->pid(),
-                    client->heap()->getMemoryHeap()->getSize());
+          result.append("Client: %d\n", client->pid());
+          result.append(client->allocator().dump().c_str());
         }
     }
 
     result.append("Notification Clients:\n");
     result.append("   pid    uid  name\n");
@@ -2186,12 +2186,8 @@
 AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
     :   RefBase(),
         mAudioFlinger(audioFlinger),
-        mPid(pid)
-{
-    mMemoryDealer = new MemoryDealer(
-            audioFlinger->getClientSharedHeapSize(),
-            (std::string("AudioFlinger::Client(") + std::to_string(pid) + ")").c_str());
-}
+        mPid(pid),
+        mClientAllocator(AllocatorFactory::getClientAllocator()) {}
 
 // Client destructor must be called with AudioFlinger::mClientLock held
 AudioFlinger::Client::~Client()
@@ -2199,9 +2195,9 @@
     mAudioFlinger->removeClient_l(mPid);
 }
 
-sp<MemoryDealer> AudioFlinger::Client::heap() const
+AllocatorFactory::ClientAllocator& AudioFlinger::Client::allocator()
 {
-    return mMemoryDealer;
+    return mClientAllocator;
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 08594e2..7c5afce 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -75,6 +75,7 @@
 #include <media/ExtendedAudioBufferProvider.h>
 #include <media/VolumeShaper.h>
 #include <mediautils/ServiceUtilities.h>
+#include <mediautils/SharedMemoryAllocator.h>
 #include <mediautils/Synchronization.h>
 #include <mediautils/ThreadSnapshot.h>
 
@@ -94,7 +95,7 @@
 #include "NBAIO_Tee.h"
 #include "ThreadMetrics.h"
 #include "TrackMetrics.h"
-
+#include "AllocatorFactory.h"
 #include <android/os/IPowerManager.h>
 
 #include <media/nblog/NBLog.h>
@@ -499,19 +500,19 @@
 
     // --- Client ---
     class Client : public RefBase {
-    public:
-                            Client(const sp<AudioFlinger>& audioFlinger, pid_t pid);
+      public:
+        Client(const sp<AudioFlinger>& audioFlinger, pid_t pid);
         virtual             ~Client();
-        sp<MemoryDealer>    heap() const;
+        AllocatorFactory::ClientAllocator& allocator();
         pid_t               pid() const { return mPid; }
         sp<AudioFlinger>    audioFlinger() const { return mAudioFlinger; }
 
     private:
         DISALLOW_COPY_AND_ASSIGN(Client);
 
-        const sp<AudioFlinger> mAudioFlinger;
-              sp<MemoryDealer> mMemoryDealer;
+        const sp<AudioFlinger>    mAudioFlinger;
         const pid_t         mPid;
+        AllocatorFactory::ClientAllocator mClientAllocator;
     };
 
     // --- Notification Client ---
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 193e270..ff60859 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1761,7 +1761,14 @@
         return;
     }
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
-    mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
+    mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{
+            {static_cast<size_t>(EFFECT_PARAM_BUFFER_SIZE + bufOffset)},
+            std::string("Effect ID: ")
+                    .append(std::to_string(effect->id()))
+                    .append(" Session ID: ")
+                    .append(std::to_string(static_cast<int>(effect->sessionId())))
+                    .append(" \n")
+            });
     if (mCblkMemory == 0 ||
             (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
         ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 44a93c1..cfb296c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -162,11 +162,12 @@
     }
 
     if (client != 0) {
-        mCblkMemory = client->heap()->allocate(size);
+        mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
+                std::string("Track ID: ").append(std::to_string(mId))});
         if (mCblkMemory == 0 ||
                 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
-            client->heap()->dump("AudioTrack");
+            ALOGE("%s", client->allocator().dump().c_str());
             mCblkMemory.clear();
             return;
         }