Merge "Allow getSystemSelectionChannels to return specifiers" into sc-dev am: 197231f6f9

Original change: https://googleplex-android-review.googlesource.com/c/platform/hardware/interfaces/+/13562278

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: I7ffc380d69654aefba13a1dfc148bd440a7bf532
diff --git a/automotive/can/1.0/default/libnl++/Android.bp b/automotive/can/1.0/default/libnl++/Android.bp
index a69e302..9e18ba0 100644
--- a/automotive/can/1.0/default/libnl++/Android.bp
+++ b/automotive/can/1.0/default/libnl++/Android.bp
@@ -26,6 +26,7 @@
         "protocols/generic/Generic.cpp",
         "protocols/generic/GenericMessageBase.cpp",
         "protocols/generic/Unknown.cpp",
+        "protocols/generic/families/Mac80211hwsim.cpp",
         "protocols/generic/families/Nl80211.cpp",
         "protocols/route/Link.cpp",
         "protocols/route/Route.cpp",
@@ -33,6 +34,7 @@
         "protocols/MessageDefinition.cpp",
         "protocols/NetlinkProtocol.cpp",
         "protocols/all.cpp",
+        "protocols/structs.cpp",
         "Attributes.cpp",
         "MessageFactory.cpp",
         "MessageMutator.cpp",
diff --git a/automotive/can/1.0/default/libnl++/MessageMutator.cpp b/automotive/can/1.0/default/libnl++/MessageMutator.cpp
index 00b48a6..de2a2b1 100644
--- a/automotive/can/1.0/default/libnl++/MessageMutator.cpp
+++ b/automotive/can/1.0/default/libnl++/MessageMutator.cpp
@@ -19,7 +19,7 @@
 namespace android::nl {
 
 MessageMutator::MessageMutator(nlmsghdr* buffer, size_t totalLen)
-    : mConstBuffer(buffer, totalLen), mMutableBuffer(buffer) {
+    : mMutableBuffer(buffer), mTotalLen(totalLen) {
     CHECK(totalLen >= sizeof(nlmsghdr));
 }
 
@@ -27,8 +27,12 @@
     return mMutableBuffer;
 }
 
+Buffer<nlmsghdr> MessageMutator::constBuffer() const {
+    return {mMutableBuffer, mTotalLen};
+}
+
 MessageMutator::operator Buffer<nlmsghdr>() const {
-    return mConstBuffer;
+    return constBuffer();
 }
 
 uint64_t MessageMutator::read(Buffer<nlattr> attr) const {
@@ -37,7 +41,8 @@
 
 void MessageMutator::write(Buffer<nlattr> attr, uint64_t val) const {
     const auto attrData = attr.data<uint64_t>();
-    const auto offset = mConstBuffer.getOffset(attrData);
+    // TODO(b/177251183): deduplicate this code against fragment()
+    const auto offset = constBuffer().getOffset(attrData);
     CHECK(offset.has_value()) << "Trying to write attribute that's not a member of this message";
 
     const auto writeableBuffer = reinterpret_cast<uint8_t*>(mMutableBuffer) + *offset;
@@ -47,4 +52,40 @@
     memcpy(writeableBuffer, &val, std::min(sizeof(val), attrSize));
 }
 
+MessageMutator MessageMutator::fragment(Buffer<nlmsghdr> buf) const {
+    const auto offset = constBuffer().getOffset(buf);
+    CHECK(offset.has_value()) << "Trying to modify a fragment outside of buffer range";
+
+    const auto writeableBuffer = reinterpret_cast<nlmsghdr*>(uintptr_t(mMutableBuffer) + *offset);
+    const auto len = buf.getRaw().len();
+    CHECK(len <= mTotalLen - *offset);
+
+    return {writeableBuffer, len};
+}
+
+MessageMutator::iterator MessageMutator::begin() const {
+    return {*this, constBuffer().begin()};
+}
+
+MessageMutator::iterator MessageMutator::end() const {
+    return {*this, constBuffer().end()};
+}
+
+MessageMutator::iterator::iterator(const MessageMutator& container,
+                                   Buffer<nlmsghdr>::iterator current)
+    : mContainer(container), mCurrent(current) {}
+
+MessageMutator::iterator MessageMutator::iterator::operator++() {
+    ++mCurrent;
+    return *this;
+}
+
+bool MessageMutator::iterator::operator==(const iterator& other) const {
+    return other.mCurrent == mCurrent;
+}
+
+const MessageMutator MessageMutator::iterator::operator*() const {
+    return mContainer.fragment(*mCurrent);
+}
+
 }  // namespace android::nl
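
A minimal sketch of how the new fragment iterator might be used, assuming a
caller that owns a writable multi-message netlink buffer (`buffer` and
`totalLen` are illustrative, not part of this change):

    #include <libnl++/MessageMutator.h>

    void touchEveryMessage(nlmsghdr* buffer, size_t totalLen) {
        android::nl::MessageMutator mutator(buffer, totalLen);
        // Each dereferenced element is a MessageMutator scoped to a single
        // message, so read()/write() stay within that fragment.
        for (const auto msg : mutator) {
            // A real caller would locate a Buffer<nlattr> inside this
            // fragment first; write() CHECKs that the attribute actually
            // belongs to the fragment being modified.
            // msg.write(someAttr, msg.read(someAttr) + 1);
        }
    }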
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h b/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
index d759a0a..4cabb9a 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/Buffer.h
@@ -138,7 +138,7 @@
     class raw_iterator : public iterator {
       public:
         iterator operator++() {
-            this->mCurrent.mData++;  // ignore alignment
+            ++this->mCurrent.mData;  // ignore alignment
             return *this;
         }
         const T& operator*() const { return *this->mCurrent.mData; }
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/Message.h b/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
index 50b3c4b..29f397d 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/Message.h
@@ -34,7 +34,7 @@
  * a single instance can only be used by a single thread - the one owning the underlying buffer).
  */
 template <typename T>
-class Message {
+class Message : public Buffer<nlmsghdr> {
   public:
     /**
      * Validate buffer contents as a message carrying T data and create instance of parsed message.
@@ -51,7 +51,7 @@
 
         const auto attributes = buf.data<nlattr>(sizeof(T));
 
-        return Message<T>(nlHeader, dataHeader, attributes);
+        return Message<T>(buf, nlHeader, dataHeader, attributes);
     }
 
     /**
@@ -94,8 +94,9 @@
     const T* operator->() const { return &data; }
 
   private:
-    Message(const nlmsghdr& nlHeader, const T& dataHeader, Attributes attributes)
-        : header(nlHeader), data(dataHeader), attributes(attributes) {}
+    Message(Buffer<nlmsghdr> buffer, const nlmsghdr& nlHeader, const T& dataHeader,
+            Attributes attributes)
+        : Buffer<nlmsghdr>(buffer), header(nlHeader), data(dataHeader), attributes(attributes) {}
 };
 
 }  // namespace android::nl
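
A short sketch of what the new base class enables (`buf` is an illustrative
received buffer): a parsed Message<T> now converts to Buffer<nlmsghdr>, so it
can be handed straight to buffer-based APIs such as toString() from printer.h
without threading the raw buffer alongside the parsed view.

    const auto msg = android::nl::Message<genlmsghdr>::parse(buf);
    if (msg.has_value()) {
        LOG(VERBOSE) << android::nl::toString(*msg, NETLINK_GENERIC,
                                              /*printPayload=*/true);
    }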
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h b/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
index 7d495e9..baadc44 100644
--- a/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/MessageMutator.h
@@ -53,9 +53,27 @@
      */
     void write(Buffer<nlattr> attr, uint64_t val) const;
 
+    class iterator {
+      public:
+        iterator(const MessageMutator& container, Buffer<nlmsghdr>::iterator current);
+
+        iterator operator++();
+        bool operator==(const iterator& other) const;
+        const MessageMutator operator*() const;
+
+      protected:
+        const MessageMutator& mContainer;
+        Buffer<nlmsghdr>::iterator mCurrent;
+    };
+    iterator begin() const;
+    iterator end() const;
+
   private:
-    const Buffer<nlmsghdr> mConstBuffer;
     nlmsghdr* mMutableBuffer;
+    size_t mTotalLen;
+
+    Buffer<nlmsghdr> constBuffer() const;
+    MessageMutator fragment(Buffer<nlmsghdr> buf) const;
 };
 
 }  // namespace android::nl
diff --git a/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h b/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h
new file mode 100644
index 0000000..9b811f8
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/include/libnl++/generic/families/mac80211_hwsim.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+// API definitions from kernel drivers/net/wireless/mac80211_hwsim.h
+
+#define BIT(n) (1 << (n))
+
+enum hwsim_tx_control_flags {
+    HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0),
+    HWSIM_TX_CTL_NO_ACK = BIT(1),
+    HWSIM_TX_STAT_ACK = BIT(2),
+};
+
+enum {
+    HWSIM_CMD_UNSPEC,
+    HWSIM_CMD_REGISTER,
+    HWSIM_CMD_FRAME,
+    HWSIM_CMD_TX_INFO_FRAME,
+    HWSIM_CMD_NEW_RADIO,
+    HWSIM_CMD_DEL_RADIO,
+    HWSIM_CMD_GET_RADIO,
+    HWSIM_CMD_ADD_MAC_ADDR,
+    HWSIM_CMD_DEL_MAC_ADDR,
+};
+
+enum {
+    HWSIM_ATTR_UNSPEC,
+    HWSIM_ATTR_ADDR_RECEIVER,
+    HWSIM_ATTR_ADDR_TRANSMITTER,
+    HWSIM_ATTR_FRAME,
+    HWSIM_ATTR_FLAGS,
+    HWSIM_ATTR_RX_RATE,
+    HWSIM_ATTR_SIGNAL,
+    HWSIM_ATTR_TX_INFO,
+    HWSIM_ATTR_COOKIE,
+    HWSIM_ATTR_CHANNELS,
+    HWSIM_ATTR_RADIO_ID,
+    HWSIM_ATTR_REG_HINT_ALPHA2,
+    HWSIM_ATTR_REG_CUSTOM_REG,
+    HWSIM_ATTR_REG_STRICT_REG,
+    HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+    HWSIM_ATTR_USE_CHANCTX,
+    HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE,
+    HWSIM_ATTR_RADIO_NAME,
+    HWSIM_ATTR_NO_VIF,
+    HWSIM_ATTR_FREQ,
+    HWSIM_ATTR_PAD,
+    HWSIM_ATTR_TX_INFO_FLAGS,
+    HWSIM_ATTR_PERM_ADDR,
+    HWSIM_ATTR_IFTYPE_SUPPORT,
+    HWSIM_ATTR_CIPHER_SUPPORT,
+};
+
+struct hwsim_tx_rate {
+    int8_t idx;
+    uint8_t count;
+} __packed;
+static_assert(sizeof(hwsim_tx_rate) == 2);
+
+#undef BIT
diff --git a/automotive/can/1.0/default/libnl++/printer.cpp b/automotive/can/1.0/default/libnl++/printer.cpp
index f08897e..d540482 100644
--- a/automotive/can/1.0/default/libnl++/printer.cpp
+++ b/automotive/can/1.0/default/libnl++/printer.cpp
@@ -154,16 +154,19 @@
     }
 }
 
-std::string toString(const Buffer<nlmsghdr> hdr, int protocol, bool printPayload) {
-    if (!hdr.firstOk()) return "nlmsg{buffer overflow}";
+static void toStream(std::stringstream& ss, const Buffer<nlmsghdr> hdr, int protocol,
+                     bool printPayload) {
+    if (!hdr.firstOk()) {
+        ss << "nlmsg{buffer overflow}";
+        return;
+    }
 
-    std::stringstream ss;
     ss << std::setfill('0');
 
     auto protocolMaybe = protocols::get(protocol);
     if (!protocolMaybe.has_value()) {
         ss << "nlmsg{protocol=" << protocol << "}";
-        return ss.str();
+        return;
     }
     protocols::NetlinkProtocol& protocolDescr = *protocolMaybe;
 
@@ -187,7 +190,7 @@
     ss << ", crc=" << std::hex << std::setw(4) << crc16(hdr.data<uint8_t>()) << std::dec;
     ss << '}';
 
-    if (!printPayload) return ss.str();
+    if (!printPayload) return;
     ss << ' ';
 
     if (!msgDescMaybe.has_value()) {
@@ -210,6 +213,17 @@
     }
 
     ss << "}";
+}
+
+std::string toString(const Buffer<nlmsghdr> hdrs, int protocol, bool printPayload) {
+    std::stringstream ss;
+    bool first = true;
+    for (const auto hdr : hdrs) {
+        if (!first) ss << std::endl;
+        first = false;
+
+        toStream(ss, hdr, protocol, printPayload);
+    }
 
     return ss.str();
 }
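
With this change a single toString() call renders every message in a
multi-message buffer, one line per message, instead of only the first one.
A minimal usage sketch (buffer acquisition is assumed):

    std::string dumpAll(const android::nl::Buffer<nlmsghdr> rx) {
        // Newline-separated dump of all messages in the receive buffer.
        return android::nl::toString(rx, NETLINK_GENERIC, /*printPayload=*/true);
    }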
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
index 1e1ad12..478c383 100644
--- a/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/Ctrl.cpp
@@ -16,6 +16,7 @@
 
 #include "Ctrl.h"
 
+#include "families/Mac80211hwsim.h"
 #include "families/Nl80211.h"
 
 #include <libnl++/Message.h>
@@ -68,12 +69,15 @@
     const auto familyId = msg.attributes.get<uint16_t>(CTRL_ATTR_FAMILY_ID);
     const auto familyName = msg.attributes.get<std::string>(CTRL_ATTR_FAMILY_NAME);
 
-    /* For now, we support just a single family. But if you add more, please define proper
+    /* For now, we support just two families. But if you add more, please define proper
      * abstraction and not hardcode every name and class here.
      */
     if (familyName == "nl80211") {
         mFamilyRegister[familyId] = std::make_shared<families::Nl80211>(familyId);
     }
+    if (familyName == "MAC80211_HWSIM") {
+        mFamilyRegister[familyId] = std::make_shared<families::Mac80211hwsim>(familyId);
+    }
 }
 
 }  // namespace android::nl::protocols::generic
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
index b7b811b..f92d6c0 100644
--- a/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/GenericMessageBase.cpp
@@ -40,9 +40,9 @@
 
     ss << "genlmsghdr{";
     if (commandName.has_value()) {
-        ss << "cmd=" << unsigned(data.cmd);
-    } else {
         ss << "cmd=" << *commandName;
+    } else {
+        ss << "cmd=" << unsigned(data.cmd);
     }
     ss << ", version=" << unsigned(data.version);
     if (data.reserved != 0) ss << ", reserved=" << data.reserved;
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp
new file mode 100644
index 0000000..f85309e
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Mac80211hwsim.h"
+
+#include "../../structs.h"
+#include "common.h"
+
+#include <libnl++/generic/families/mac80211_hwsim.h>
+
+namespace android::nl::protocols::generic::families {
+
+using DataType = AttributeDefinition::DataType;
+using Flags = AttributeDefinition::Flags;
+
+static void hwsim_tx_rateToStream(std::stringstream& ss, const Buffer<nlattr> attr);
+
+static const FlagsMap txControlFlags{
+        {HWSIM_TX_CTL_REQ_TX_STATUS, "REQ_TX"},
+        {HWSIM_TX_CTL_NO_ACK, "NO_ACK"},
+        {HWSIM_TX_STAT_ACK, "ACK"},
+};
+
+// clang-format off
+Mac80211hwsim::Mac80211hwsim(nlmsgtype_t familyId) : GenericMessageBase(familyId, "hwsim", {
+    {HWSIM_CMD_UNSPEC, "UNSPEC"},
+    {HWSIM_CMD_REGISTER, "REGISTER"},
+    {HWSIM_CMD_FRAME, "FRAME"},
+    {HWSIM_CMD_TX_INFO_FRAME, "TX_INFO_FRAME"},
+    {HWSIM_CMD_NEW_RADIO, "NEW_RADIO"},
+    {HWSIM_CMD_DEL_RADIO, "DEL_RADIO"},
+    {HWSIM_CMD_GET_RADIO, "GET_RADIO"},
+    {HWSIM_CMD_ADD_MAC_ADDR, "ADD_MAC_ADDR"},
+    {HWSIM_CMD_DEL_MAC_ADDR, "DEL_MAC_ADDR"},
+}, {
+    {HWSIM_ATTR_UNSPEC, {"UNSPEC"}},
+    {HWSIM_ATTR_ADDR_RECEIVER, {"ADDR_RECEIVER", DataType::Struct, hwaddrToStream}},
+    {HWSIM_ATTR_ADDR_TRANSMITTER, {"ADDR_TRANSMITTER", DataType::Struct, hwaddrToStream}},
+    {HWSIM_ATTR_FRAME, {"FRAME", DataType::Raw, AttributeMap{}, Flags::Verbose}},
+    {HWSIM_ATTR_FLAGS, {"FLAGS", DataType::Struct, flagsToStream(txControlFlags)}},
+    {HWSIM_ATTR_RX_RATE, {"RX_RATE", DataType::Uint}},
+    {HWSIM_ATTR_SIGNAL, {"SIGNAL", DataType::Uint}},
+    {HWSIM_ATTR_TX_INFO, {"TX_INFO", DataType::Struct, hwsim_tx_rateToStream}},
+    {HWSIM_ATTR_COOKIE, {"COOKIE", DataType::Uint}},
+    {HWSIM_ATTR_CHANNELS, {"CHANNELS", DataType::Uint}},
+    {HWSIM_ATTR_RADIO_ID, {"RADIO_ID", DataType::Uint}},
+    {HWSIM_ATTR_REG_HINT_ALPHA2, {"REG_HINT_ALPHA2", DataType::String}},
+    {HWSIM_ATTR_REG_CUSTOM_REG, {"REG_CUSTOM_REG", DataType::Uint}},
+    {HWSIM_ATTR_REG_STRICT_REG, {"REG_STRICT_REG", DataType::Flag}},
+    {HWSIM_ATTR_SUPPORT_P2P_DEVICE, {"SUPPORT_P2P_DEVICE", DataType::Flag}},
+    {HWSIM_ATTR_USE_CHANCTX, {"USE_CHANCTX", DataType::Flag}},
+    {HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, {"DESTROY_RADIO_ON_CLOSE", DataType::Flag}},
+    {HWSIM_ATTR_RADIO_NAME, {"RADIO_NAME", DataType::String}},
+    {HWSIM_ATTR_NO_VIF, {"NO_VIF", DataType::Flag}},
+    {HWSIM_ATTR_FREQ, {"FREQ", DataType::Uint}},
+    {HWSIM_ATTR_PAD, {"PAD", DataType::Uint}},
+    {HWSIM_ATTR_TX_INFO_FLAGS, {"TX_INFO_FLAGS"}},  // hwsim_tx_rate_flag
+    {HWSIM_ATTR_PERM_ADDR, {"PERM_ADDR"}},
+    {HWSIM_ATTR_IFTYPE_SUPPORT, {"IFTYPE_SUPPORT", DataType::Uint}},  // NL80211_IFTYPE_STATION etc
+    {HWSIM_ATTR_CIPHER_SUPPORT, {"CIPHER_SUPPORT", DataType::Struct, arrayToStream<int32_t>}},
+}) {}
+// clang-format on
+
+static void hwsim_tx_rateToStream(std::stringstream& ss, const Buffer<nlattr> attr) {
+    ss << '{';
+    bool first = true;
+    for (const auto rate : attr.data<hwsim_tx_rate>().getRaw()) {
+        if (rate.idx == -1) continue;
+
+        if (!first) ss << ", ";
+        first = false;
+
+        ss << (int)rate.idx << ": " << (unsigned)rate.count;
+    }
+    ss << '}';
+}
+
+}  // namespace android::nl::protocols::generic::families
diff --git a/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h
new file mode 100644
index 0000000..c01eb93
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/generic/families/Mac80211hwsim.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../GenericMessageBase.h"
+
+namespace android::nl::protocols::generic::families {
+
+class Mac80211hwsim : public GenericMessageBase {
+  public:
+    Mac80211hwsim(nlmsgtype_t familyId);
+};
+
+}  // namespace android::nl::protocols::generic::families
diff --git a/automotive/can/1.0/default/libnl++/protocols/structs.cpp b/automotive/can/1.0/default/libnl++/protocols/structs.cpp
new file mode 100644
index 0000000..8ff71f0
--- /dev/null
+++ b/automotive/can/1.0/default/libnl++/protocols/structs.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "structs.h"
+
+#include <iomanip>
+
+namespace android::nl::protocols {
+
+AttributeDefinition::ToStream flagsToStream(FlagsMap flags) {
+    return [flags](std::stringstream& ss, const Buffer<nlattr> attr) {
+        auto val = attr.data<uint64_t>().copyFirst();
+
+        bool first = true;
+        for (const auto& [flag, name] : flags) {
+            if ((val & flag) != flag) continue;
+            val &= ~flag;
+
+            if (!first) ss << '|';
+            first = false;
+
+            ss << name;
+        }
+
+        if (val == 0) return;
+
+        if (!first) ss << '|';
+        ss << std::hex << val << std::dec;
+    };
+}
+
+void hwaddrToStream(std::stringstream& ss, const Buffer<nlattr> attr) {
+    ss << std::hex;
+    bool first = true;
+    for (const auto byte : attr.data<uint8_t>().getRaw()) {
+        if (!first) ss << ':';
+        first = false;
+
+        ss << std::setw(2) << unsigned(byte);
+    }
+    ss << std::dec;
+}
+
+}  // namespace android::nl::protocols
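
An illustrative expectation for the new flagsToStream() helper (the map below
mirrors txControlFlags from Mac80211hwsim.cpp; the attribute plumbing is
assumed):

    const android::nl::protocols::FlagsMap flags{
            {1, "REQ_TX"}, {2, "NO_ACK"}, {4, "ACK"}};
    const auto toStream = android::nl::protocols::flagsToStream(flags);
    // Applied to an attribute whose u64 payload is 0x13, toStream renders
    // "REQ_TX|NO_ACK|10": named bits first, then the unmapped 0x10
    // remainder in hex.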
diff --git a/automotive/can/1.0/default/libnl++/protocols/structs.h b/automotive/can/1.0/default/libnl++/protocols/structs.h
index 44c17b8..f3a8c44 100644
--- a/automotive/can/1.0/default/libnl++/protocols/structs.h
+++ b/automotive/can/1.0/default/libnl++/protocols/structs.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include "MessageDefinition.h"
+
 #include <sstream>
 
 namespace android::nl::protocols {
@@ -30,4 +32,9 @@
     ss << '}';
 }
 
+typedef std::map<uint64_t, std::string> FlagsMap;
+AttributeDefinition::ToStream flagsToStream(FlagsMap flags);
+
+void hwaddrToStream(std::stringstream& ss, const Buffer<nlattr> attr);
+
 }  // namespace android::nl::protocols
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index f2cbe93..8329303 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -41,7 +41,7 @@
 
     Burst(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel);
 
-    OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
+    OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure) const override;
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
index d3d933b..5d4bdbc 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
@@ -36,7 +36,7 @@
 GeneralResult<Operation> unvalidatedConvert(const hal::V1_0::Operation& operation);
 GeneralResult<Model::OperandValues> unvalidatedConvert(
         const hardware::hidl_vec<uint8_t>& operandValues);
-GeneralResult<Memory> unvalidatedConvert(const hardware::hidl_memory& memory);
+GeneralResult<SharedMemory> unvalidatedConvert(const hardware::hidl_memory& memory);
 GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model);
 GeneralResult<Request::Argument> unvalidatedConvert(
         const hal::V1_0::RequestArgument& requestArgument);
@@ -65,7 +65,7 @@
 nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation);
 nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
         const nn::Model::OperandValues& operandValues);
-nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory);
+nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory);
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
 nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool);
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index 384bd9b..971ad08 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -43,7 +43,7 @@
     CHECK(kPreparedModel != nullptr);
 }
 
-Burst::OptionalCacheHold Burst::cacheMemory(const nn::Memory& /*memory*/) const {
+Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& /*memory*/) const {
     return nullptr;
 }
 
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index fde7346..7a099cf 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -153,8 +153,8 @@
     return Model::OperandValues(operandValues.data(), operandValues.size());
 }
 
-GeneralResult<Memory> unvalidatedConvert(const hidl_memory& memory) {
-    return createSharedMemoryFromHidlMemory(memory);
+GeneralResult<SharedMemory> unvalidatedConvert(const hidl_memory& memory) {
+    return hal::utils::createSharedMemoryFromHidlMemory(memory);
 }
 
 GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model) {
@@ -346,9 +346,8 @@
     return hidl_vec<uint8_t>(operandValues.data(), operandValues.data() + operandValues.size());
 }
 
-nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory) {
-    return hidl_memory(memory.name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory.handle)),
-                       memory.size);
+nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
+    return hal::utils::createHidlMemoryFromSharedMemory(memory);
 }
 
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
@@ -392,7 +391,7 @@
 }
 
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool) {
-    return unvalidatedConvert(std::get<nn::Memory>(memoryPool));
+    return unvalidatedConvert(std::get<nn::SharedMemory>(memoryPool));
 }
 
 nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request) {
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index b47f25a..07bf7bc 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -175,7 +175,7 @@
     return V1_0::utils::unvalidatedConvert(operandValues);
 }
 
-nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
     return V1_0::utils::unvalidatedConvert(memory);
 }
 
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 062f6f7..7ae483e 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -304,7 +304,11 @@
 }
 
 GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& hidlHandle) {
-    return hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
+    if (hidlHandle.getNativeHandle() == nullptr) {
+        return nullptr;
+    }
+    auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));
+    return std::make_shared<const Handle>(std::move(handle));
 }
 
 GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
@@ -365,7 +369,7 @@
     return V1_0::utils::unvalidatedConvert(operandValues);
 }
 
-nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
     return V1_0::utils::unvalidatedConvert(memory);
 }
 
@@ -588,7 +592,10 @@
 }
 
 nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
-    return hal::utils::hidlHandleFromSharedHandle(handle);
+    if (handle == nullptr) {
+        return {};
+    }
+    return hal::utils::hidlHandleFromSharedHandle(*handle);
 }
 
 nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
index fda79c8..69e87f7 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
@@ -42,8 +42,8 @@
 
     nn::Request::MemoryDomainToken getToken() const override;
 
-    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
-    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+    nn::GeneralResult<void> copyTo(const nn::SharedMemory& dst) const override;
+    nn::GeneralResult<void> copyFrom(const nn::SharedMemory& src,
                                      const nn::Dimensions& dimensions) const override;
 
   private:
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index 74a6534..8e1cdb8 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -59,7 +59,7 @@
 GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
 
 GeneralResult<SharedHandle> convert(const hardware::hidl_handle& handle);
-GeneralResult<Memory> convert(const hardware::hidl_memory& memory);
+GeneralResult<SharedMemory> convert(const hardware::hidl_memory& memory);
 GeneralResult<std::vector<BufferRole>> convert(
         const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles);
 
@@ -100,7 +100,7 @@
 nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
 
 nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle);
-nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory);
+nn::GeneralResult<hidl_memory> convert(const nn::SharedMemory& memory);
 nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
 
 nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index 614033e..ada5265 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -61,7 +61,7 @@
     return kToken;
 }
 
-nn::GeneralResult<void> Buffer::copyTo(const nn::Memory& dst) const {
+nn::GeneralResult<void> Buffer::copyTo(const nn::SharedMemory& dst) const {
     const auto hidlDst = NN_TRY(convert(dst));
 
     const auto ret = kBuffer->copyTo(hidlDst);
@@ -71,7 +71,7 @@
     return {};
 }
 
-nn::GeneralResult<void> Buffer::copyFrom(const nn::Memory& src,
+nn::GeneralResult<void> Buffer::copyFrom(const nn::SharedMemory& src,
                                          const nn::Dimensions& dimensions) const {
     const auto hidlSrc = NN_TRY(convert(src));
     const auto hidlDimensions = hidl_vec<uint32_t>(dimensions);
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 8b7db2b..6e74a62 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -261,7 +261,7 @@
     using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
     switch (memoryPool.getDiscriminator()) {
         case Discriminator::hidlMemory:
-            return createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
+            return hal::utils::createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
         case Discriminator::token:
             return static_cast<Request::MemoryDomainToken>(memoryPool.token());
     }
@@ -352,7 +352,7 @@
     return validatedConvert(handle);
 }
 
-GeneralResult<Memory> convert(const hardware::hidl_memory& memory) {
+GeneralResult<SharedMemory> convert(const hardware::hidl_memory& memory) {
     return validatedConvert(memory);
 }
 
@@ -386,7 +386,7 @@
     return V1_2::utils::unvalidatedConvert(handle);
 }
 
-nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
     return V1_0::utils::unvalidatedConvert(memory);
 }
 
@@ -424,7 +424,7 @@
     return unvalidatedConvertVec(arguments);
 }
 
-nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::SharedMemory& memory) {
     Request::MemoryPool ret;
     ret.hidlMemory(NN_TRY(unvalidatedConvert(memory)));
     return ret;
@@ -677,7 +677,7 @@
     return validatedConvert(handle);
 }
 
-nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::SharedMemory& memory) {
     return validatedConvert(memory);
 }
 
diff --git a/neuralnetworks/TEST_MAPPING b/neuralnetworks/TEST_MAPPING
index de84624..5d168d2 100644
--- a/neuralnetworks/TEST_MAPPING
+++ b/neuralnetworks/TEST_MAPPING
@@ -60,6 +60,17 @@
           "include-filter": "-*sample_float_fast*:*sample_float_slow*:*sample_minimal*:*sample_quant*"
         }
       ]
+    },
+    {
+      "name": "VtsHalNeuralnetworksTargetTest",
+      "options": [
+        {
+          // Do not use any sample driver except sample-all in order to reduce
+          // testing time. The other sample drivers (fast-float, quant, etc.)
+          // are subsets of sample-all.
+          "include-filter": "-*sample_float_fast*:*sample_float_slow*:*sample_minimal*:*sample_quant*"
+        }
+      ]
     }
   ]
 }
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 56017da..147d401 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -21,12 +21,14 @@
     local_include_dirs: ["include/nnapi/hal/aidl/"],
     export_include_dirs: ["include"],
     static_libs: [
+        "libarect",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "libhidlbase",
         "android.hardware.neuralnetworks-V1-ndk_platform",
         "libbinder_ndk",
+        "libhidlbase",
+        "libnativewindow",
     ],
 }
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 35de5be..1b2f69c 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -79,7 +79,7 @@
 GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph);
 GeneralResult<OutputShape> unvalidatedConvert(const aidl_hal::OutputShape& outputShape);
 GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming);
-GeneralResult<Memory> unvalidatedConvert(const aidl_hal::Memory& memory);
+GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory);
 GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing);
 GeneralResult<BufferDesc> unvalidatedConvert(const aidl_hal::BufferDesc& bufferDesc);
 GeneralResult<BufferRole> unvalidatedConvert(const aidl_hal::BufferRole& bufferRole);
@@ -99,7 +99,7 @@
 
 GeneralResult<ExecutionPreference> convert(
         const aidl_hal::ExecutionPreference& executionPreference);
-GeneralResult<Memory> convert(const aidl_hal::Memory& memory);
+GeneralResult<SharedMemory> convert(const aidl_hal::Memory& memory);
 GeneralResult<Model> convert(const aidl_hal::Model& model);
 GeneralResult<Operand> convert(const aidl_hal::Operand& operand);
 GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType);
@@ -108,7 +108,7 @@
 GeneralResult<Request> convert(const aidl_hal::Request& request);
 
 GeneralResult<std::vector<Operation>> convert(const std::vector<aidl_hal::Operation>& outputShapes);
-GeneralResult<std::vector<Memory>> convert(const std::vector<aidl_hal::Memory>& memories);
+GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
 
 GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);
 
@@ -118,11 +118,11 @@
 
 namespace nn = ::android::nn;
 
-nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory& memory);
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory);
 nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
 nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus);
 
-nn::GeneralResult<Memory> convert(const nn::Memory& memory);
+nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory);
 nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
 nn::GeneralResult<std::vector<OutputShape>> convert(
         const std::vector<nn::OutputShape>& outputShapes);
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 0e93b02..db3504b 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -18,6 +18,8 @@
 
 #include <aidl/android/hardware/common/NativeHandle.h>
 #include <android-base/logging.h>
+#include <android/hardware_buffer.h>
+#include <cutils/native_handle.h>
 #include <nnapi/OperandTypes.h>
 #include <nnapi/OperationTypes.h>
 #include <nnapi/Result.h>
@@ -27,6 +29,7 @@
 #include <nnapi/Validation.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
+#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <chrono>
@@ -53,6 +56,8 @@
 namespace android::nn {
 namespace {
 
+using ::aidl::android::hardware::common::NativeHandle;
+
 constexpr auto validOperandType(nn::OperandType operandType) {
     switch (operandType) {
         case nn::OperandType::FLOAT32:
@@ -125,6 +130,61 @@
     return canonical;
 }
 
+GeneralResult<Handle> unvalidatedConvertHelper(const NativeHandle& aidlNativeHandle) {
+    std::vector<base::unique_fd> fds;
+    fds.reserve(aidlNativeHandle.fds.size());
+    for (const auto& fd : aidlNativeHandle.fds) {
+        const int dupFd = dup(fd.get());
+        if (dupFd == -1) {
+            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
+            // here?
+            return NN_ERROR() << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    return Handle{.fds = std::move(fds), .ints = aidlNativeHandle.ints};
+}
+
+struct NativeHandleDeleter {
+    void operator()(native_handle_t* handle) const {
+        if (handle) {
+            native_handle_close(handle);
+            native_handle_delete(handle);
+        }
+    }
+};
+
+using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;
+
+static nn::GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(
+        const NativeHandle& handle) {
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd.get());
+        if (dupFd == -1) {
+            return NN_ERROR() << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(handle.fds.size(), kIntMax);
+    CHECK_LE(handle.ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
+                                                         static_cast<int>(handle.ints.size()));
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "Failed to create native_handle";
+    }
+    for (size_t i = 0; i < fds.size(); ++i) {
+        nativeHandle->data[i] = fds[i].release();
+    }
+    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+    return UniqueNativeHandle(nativeHandle);
+}
+
 }  // anonymous namespace
 
 GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
@@ -316,13 +376,71 @@
     return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
 }
 
-GeneralResult<Memory> unvalidatedConvert(const aidl_hal::Memory& memory) {
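+// Rounds value up to the nearest multiple of `multiple`,
+// e.g. roundUpToMultiple(13, 4) == 16.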
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+    return (value + multiple - 1) / multiple * multiple;
+}
+
+GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
     VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
-    return Memory{
-            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
+    if (memory.size > std::numeric_limits<uint32_t>::max()) {
+        return NN_ERROR() << "Memory: size must be <= std::numeric_limits<uint32_t>::max()";
+    }
+
+    if (memory.name != "hardware_buffer_blob") {
+        return std::make_shared<const Memory>(Memory{
+                .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
+                .size = static_cast<uint32_t>(memory.size),
+                .name = memory.name,
+        });
+    }
+
+    const auto size = static_cast<uint32_t>(memory.size);
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
+    const native_handle_t* nativeHandle = handle.get();
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
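+    // (For width 13, for example, the candidate strides below come out as
+    // 13, 16, 32, 64, 128, 14, 16 and 16.)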
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
+    return std::make_shared<const Memory>(Memory{
+            .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
             .size = static_cast<uint32_t>(memory.size),
             .name = memory.name,
-    };
+    });
 }
 
 GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues) {
@@ -397,24 +511,8 @@
     return static_cast<ExecutionPreference>(executionPreference);
 }
 
-GeneralResult<SharedHandle> unvalidatedConvert(
-        const ::aidl::android::hardware::common::NativeHandle& aidlNativeHandle) {
-    std::vector<base::unique_fd> fds;
-    fds.reserve(aidlNativeHandle.fds.size());
-    for (const auto& fd : aidlNativeHandle.fds) {
-        int dupFd = dup(fd.get());
-        if (dupFd == -1) {
-            // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
-            // here?
-            return NN_ERROR() << "Failed to dup the fd";
-        }
-        fds.emplace_back(dupFd);
-    }
-
-    return std::make_shared<const Handle>(Handle{
-            .fds = std::move(fds),
-            .ints = aidlNativeHandle.ints,
-    });
+GeneralResult<SharedHandle> unvalidatedConvert(const NativeHandle& aidlNativeHandle) {
+    return std::make_shared<const Handle>(NN_TRY(unvalidatedConvertHelper(aidlNativeHandle)));
 }
 
 GeneralResult<ExecutionPreference> convert(
@@ -422,7 +520,7 @@
     return validatedConvert(executionPreference);
 }
 
-GeneralResult<Memory> convert(const aidl_hal::Memory& operand) {
+GeneralResult<SharedMemory> convert(const aidl_hal::Memory& operand) {
     return validatedConvert(operand);
 }
 
@@ -454,7 +552,7 @@
     return unvalidatedConvert(operations);
 }
 
-GeneralResult<std::vector<Memory>> convert(const std::vector<aidl_hal::Memory>& memories) {
+GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories) {
     return validatedConvert(memories);
 }
 
@@ -507,13 +605,11 @@
     return halObject;
 }
 
-}  // namespace
-
-nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
+nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::Handle& handle) {
     common::NativeHandle aidlNativeHandle;
-    aidlNativeHandle.fds.reserve(sharedHandle->fds.size());
-    for (const auto& fd : sharedHandle->fds) {
-        int dupFd = dup(fd.get());
+    aidlNativeHandle.fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd.get());
         if (dupFd == -1) {
             // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return
             // here?
@@ -521,18 +617,71 @@
         }
         aidlNativeHandle.fds.emplace_back(dupFd);
     }
-    aidlNativeHandle.ints = sharedHandle->ints;
+    aidlNativeHandle.ints = handle.ints;
     return aidlNativeHandle;
 }
 
-nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory& memory) {
-    if (memory.size > std::numeric_limits<int64_t>::max()) {
+static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
+        const native_handle_t& handle) {
+    common::NativeHandle aidlNativeHandle;
+
+    aidlNativeHandle.fds.reserve(handle.numFds);
+    for (int i = 0; i < handle.numFds; ++i) {
+        const int dupFd = dup(handle.data[i]);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        aidlNativeHandle.fds.emplace_back(dupFd);
+    }
+
+    aidlNativeHandle.ints = std::vector<int>(&handle.data[handle.numFds],
+                                             &handle.data[handle.numFds + handle.numInts]);
+
+    return aidlNativeHandle;
+}
+
+}  // namespace
+
+nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandle& sharedHandle) {
+    CHECK(sharedHandle != nullptr);
+    return unvalidatedConvert(*sharedHandle);
+}
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
+    CHECK(memory != nullptr);
+    if (memory->size > std::numeric_limits<int64_t>::max()) {
         return NN_ERROR() << "Memory size doesn't fit into int64_t.";
     }
+    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
+        return Memory{
+                .handle = NN_TRY(unvalidatedConvert(*handle)),
+                .size = static_cast<int64_t>(memory->size),
+                .name = memory->name,
+        };
+    }
+
+    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
+        CHECK_EQ(memory->size, bufferDesc.width);
+        CHECK_EQ(memory->name, "hardware_buffer_blob");
+    } else {
+        CHECK_EQ(memory->size, 0u);
+        CHECK_EQ(memory->name, "hardware_buffer");
+    }
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
+                             "returned nullptr";
+    }
+
     return Memory{
-            .handle = NN_TRY(unvalidatedConvert(memory.handle)),
-            .size = static_cast<int64_t>(memory.size),
-            .name = memory.name,
+            .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
+            .size = static_cast<int64_t>(memory->size),
+            .name = memory->name,
     };
 }
 
@@ -558,7 +707,7 @@
                        .isSufficient = outputShape.isSufficient};
 }
 
-nn::GeneralResult<Memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory) {
     return validatedConvert(memory);
 }
 
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 86d5f3f..4beb828 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -266,7 +266,7 @@
     copyTestBuffers(constCopies, operandValues.data());
 
     // Shared memory.
-    std::vector<nn::Memory> pools = {};
+    std::vector<nn::SharedMemory> pools = {};
     if (constRefSize > 0) {
         const auto pool = nn::createSharedMemory(constRefSize).value();
         pools.push_back(pool);
diff --git a/neuralnetworks/aidl/vts/functional/Utils.cpp b/neuralnetworks/aidl/vts/functional/Utils.cpp
index 14a496a..3c7f5f7 100644
--- a/neuralnetworks/aidl/vts/functional/Utils.cpp
+++ b/neuralnetworks/aidl/vts/functional/Utils.cpp
@@ -135,7 +135,8 @@
     ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
     ASSERT_NE(mAhwb, nullptr);
 
-    const auto sharedMemory = nn::createSharedMemoryFromAHWB(*mAhwb).value();
+    const auto sharedMemory =
+            nn::createSharedMemoryFromAHWB(mAhwb, /*takeOwnership=*/false).value();
     mMapping = nn::map(sharedMemory).value();
     mPtr = static_cast<uint8_t*>(std::get<void*>(mMapping.pointer));
     CHECK_NE(mPtr, nullptr);
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index 6c491ae..50295f1 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -22,10 +22,12 @@
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
+        "libarect",
         "neuralnetworks_types",
     ],
     shared_libs: [
         "libhidlbase",
+        "libnativewindow",
     ],
 }
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index fef9d9c..547f203 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -74,10 +74,12 @@
 std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
                                              const std::vector<nn::Operation>& operations);
 
-nn::GeneralResult<nn::Memory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
+nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory);
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
 
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle);
-nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle);
+nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+
 nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
         const std::vector<nn::SyncFence>& fences);
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index 95a20a8..209b663 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+
 #include <android/hidl/base/1.0/IBase.h>
 #include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
@@ -50,7 +53,8 @@
     })
 
 template <typename Type>
-nn::GeneralResult<Type> makeGeneralFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+nn::GeneralResult<Type> makeGeneralFailure(
+        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
     if (!result.has_value()) {
         return nn::error(status) << std::move(result).error();
     }
@@ -75,7 +79,8 @@
 }
 
 template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+nn::ExecutionResult<Type> makeExecutionFailure(
+        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
     return makeExecutionFailure(makeGeneralFailure(result, status));
 }
 
@@ -86,4 +91,6 @@
     } else                                                              \
         return NN_ERROR(canonical)
 
-}  // namespace android::hardware::neuralnetworks::utils
\ No newline at end of file
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
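
A minimal sketch of what the new default arguments allow (the caller, assumed
to live in the utils namespace, is illustrative): the common GENERAL_FAILURE
case no longer needs to spell out the status.

    nn::GeneralResult<int> toGeneral(nn::Result<int> result) {
        // Equivalent to makeGeneralFailure(std::move(result),
        // nn::ErrorStatus::GENERAL_FAILURE).
        return makeGeneralFailure(std::move(result));
    }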
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
index 8c04b88..0e98c2e 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
@@ -31,9 +31,9 @@
   public:
     nn::Request::MemoryDomainToken getToken() const override;
 
-    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+    nn::GeneralResult<void> copyTo(const nn::SharedMemory& dst) const override;
 
-    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+    nn::GeneralResult<void> copyFrom(const nn::SharedMemory& src,
                                      const nn::Dimensions& dimensions) const override;
 };
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index 83e60b6..996858c 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -29,7 +29,7 @@
 
 class InvalidBurst final : public nn::IBurst {
   public:
-    OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
+    OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure) const override;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
index d2c2469..c8ca6f2 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
@@ -46,9 +46,9 @@
 
     nn::Request::MemoryDomainToken getToken() const override;
 
-    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+    nn::GeneralResult<void> copyTo(const nn::SharedMemory& dst) const override;
 
-    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+    nn::GeneralResult<void> copyFrom(const nn::SharedMemory& src,
                                      const nn::Dimensions& dimensions) const override;
 
   private:
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index 0df287f..3b87330 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -44,7 +44,7 @@
     nn::SharedBurst getBurst() const;
     nn::GeneralResult<nn::SharedBurst> recover(const nn::IBurst* failingBurst) const;
 
-    OptionalCacheHold cacheMemory(const nn::Memory& memory) const override;
+    OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure) const override;
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index c04c8df..7a5035f 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -20,11 +20,14 @@
 
 #include <android-base/logging.h>
 #include <android-base/unique_fd.h>
+#include <android/hardware_buffer.h>
+#include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
+#include <vndk/hardware_buffer.h>
 
 #include <algorithm>
 #include <any>
@@ -203,13 +206,13 @@
 nn::GeneralResult<void> unflushDataFromSharedToPointer(
         const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
     if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
-        !std::holds_alternative<nn::Memory>(maybeRequestInShared->pools.back())) {
+        !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
         return {};
     }
     const auto& requestInShared = *maybeRequestInShared;
 
     // Map the memory.
-    const auto& outputMemory = std::get<nn::Memory>(requestInShared.pools.back());
+    const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
     const auto [pointer, size, context] = NN_TRY(map(outputMemory));
     const uint8_t* constantPointer =
             std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
@@ -248,44 +251,128 @@
     return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
-    if (handle == nullptr) {
-        return {};
+nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
+    if (memory == nullptr) {
+        return NN_ERROR() << "Memory must be non-empty";
+    }
+    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
+        return hidl_memory(memory->name, NN_TRY(hidlHandleFromSharedHandle(*handle)), memory->size);
     }
 
+    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
+        CHECK_EQ(memory->size, bufferDesc.width);
+        CHECK_EQ(memory->name, "hardware_buffer_blob");
+    } else {
+        CHECK_EQ(memory->size, 0u);
+        CHECK_EQ(memory->name, "hardware_buffer");
+    }
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    const hidl_handle hidlHandle(nativeHandle);  // non-owning view of the buffer's handle
+    hidl_handle handle(hidlHandle);              // movable copy to hand to hidl_memory
+
+    return hidl_memory(memory->name, std::move(handle), memory->size);
+}
+
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+    return (value + multiple - 1) / multiple * multiple;
+}
+
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
+    CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+
+    if (memory.name() != "hardware_buffer_blob") {
+        return std::make_shared<const nn::Memory>(nn::Memory{
+                .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
+                .size = static_cast<uint32_t>(memory.size()),
+                .name = memory.name(),
+        });
+    }
+
+    const auto size = memory.size();
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
+    return std::make_shared<const nn::Memory>(nn::Memory{
+            .handle = nn::HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
+            .size = static_cast<uint32_t>(memory.size()),
+            .name = memory.name(),
+    });
+}
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
     std::vector<base::unique_fd> fds;
-    fds.reserve(handle->fds.size());
-    for (const auto& fd : handle->fds) {
-        int dupFd = dup(fd);
+    fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
         fds.emplace_back(dupFd);
     }
 
-    native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(handle.fds.size(), kIntMax);
+    CHECK_LE(handle.ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
+                                                         static_cast<int>(handle.ints.size()));
     if (nativeHandle == nullptr) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
     }
     for (size_t i = 0; i < fds.size(); ++i) {
         nativeHandle->data[i] = fds[i].release();
     }
-    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
 
     hidl_handle hidlHandle;
     hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
     return hidlHandle;
 }
 
-nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
     if (handle == nullptr) {
-        return nullptr;
+        return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr";
     }
 
     std::vector<base::unique_fd> fds;
     fds.reserve(handle->numFds);
     for (int i = 0; i < handle->numFds; ++i) {
-        int dupFd = dup(handle->data[i]);
+        const int dupFd = dup(handle->data[i]);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
@@ -295,18 +382,18 @@
     std::vector<int> ints(&handle->data[handle->numFds],
                           &handle->data[handle->numFds + handle->numInts]);
 
-    return std::make_shared<const nn::Handle>(nn::Handle{
-            .fds = std::move(fds),
-            .ints = std::move(ints),
-    });
+    return nn::Handle{.fds = std::move(fds), .ints = std::move(ints)};
 }
 
 nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
         const std::vector<nn::SyncFence>& syncFences) {
     hidl_vec<hidl_handle> handles(syncFences.size());
     for (size_t i = 0; i < syncFences.size(); ++i) {
-        handles[i] =
-                NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+        const auto& handle = syncFences[i].getSharedHandle();
+        if (handle == nullptr) {
+            return NN_ERROR() << "convertSyncFences failed because sync fence is empty";
+        }
+        handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle));
     }
     return handles;
 }
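
The stride retry loop in createSharedMemoryFromHidlMemory() is the subtle part of this hunk: some gralloc implementations only accept a BLOB import whose stride is padded to their alignment, so the code tries a list of likely alignments (presumably the common ones first) until AHardwareBuffer_createFromHandle() succeeds. A worked example of the arithmetic, with roundUpToMultiple() redeclared constexpr so the values can be checked at compile time:

    constexpr uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
        return (value + multiple - 1) / multiple * multiple;
    }

    // For a 100-byte blob and an allocator that demands 32-byte-aligned strides,
    // the alignment 1 and 4 attempts fail and the alignment 32 pass succeeds:
    static_assert(roundUpToMultiple(100, 1) == 100);   // stride 100: rejected
    static_assert(roundUpToMultiple(100, 4) == 100);   // stride 100 again: rejected
    static_assert(roundUpToMultiple(100, 32) == 128);  // stride 128: accepted
    static_assert(roundUpToMultiple(128, 32) == 128);  // already aligned: unchanged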
diff --git a/neuralnetworks/utils/common/src/InvalidBuffer.cpp b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
index c6f75d7..e73001d 100644
--- a/neuralnetworks/utils/common/src/InvalidBuffer.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
@@ -30,11 +30,11 @@
     return nn::Request::MemoryDomainToken{};
 }
 
-nn::GeneralResult<void> InvalidBuffer::copyTo(const nn::Memory& /*dst*/) const {
+nn::GeneralResult<void> InvalidBuffer::copyTo(const nn::SharedMemory& /*dst*/) const {
     return NN_ERROR() << "InvalidBuffer";
 }
 
-nn::GeneralResult<void> InvalidBuffer::copyFrom(const nn::Memory& /*src*/,
+nn::GeneralResult<void> InvalidBuffer::copyFrom(const nn::SharedMemory& /*src*/,
                                                 const nn::Dimensions& /*dimensions*/) const {
     return NN_ERROR() << "InvalidBuffer";
 }
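
InvalidBuffer, like InvalidBurst below, is a null object: every method fails with a uniform GeneralResult error instead of crashing on a missing backend, so callers need no separate validity check. A hypothetical caller sketch (assuming android-base logging is available and that GeneralError exposes a `message` field):

    void tryCopy(const nn::IBuffer& buffer, const nn::SharedMemory& dst) {
        const auto result = buffer.copyTo(dst);
        if (!result.has_value()) {
            // An InvalidBuffer always lands here, with the message "InvalidBuffer".
            LOG(ERROR) << "copyTo failed: " << result.error().message;
        }
    }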
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 4ca6603..81ca18d 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -26,7 +26,8 @@
 
 namespace android::hardware::neuralnetworks::utils {
 
-InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory(const nn::Memory& /*memory*/) const {
+InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory(
+        const nn::SharedMemory& /*memory*/) const {
     return nullptr;
 }
 
diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
index 47abbe2..1904375 100644
--- a/neuralnetworks/utils/common/src/ResilientBuffer.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
@@ -99,12 +99,12 @@
     return getBuffer()->getToken();
 }
 
-nn::GeneralResult<void> ResilientBuffer::copyTo(const nn::Memory& dst) const {
+nn::GeneralResult<void> ResilientBuffer::copyTo(const nn::SharedMemory& dst) const {
     const auto fn = [&dst](const nn::IBuffer& buffer) { return buffer.copyTo(dst); };
     return protect(*this, fn);
 }
 
-nn::GeneralResult<void> ResilientBuffer::copyFrom(const nn::Memory& src,
+nn::GeneralResult<void> ResilientBuffer::copyFrom(const nn::SharedMemory& src,
                                                   const nn::Dimensions& dimensions) const {
     const auto fn = [&src, &dimensions](const nn::IBuffer& buffer) {
         return buffer.copyFrom(src, dimensions);
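
Both methods funnel through protect(), the Resilient* wrappers' retry idiom: run the call against the current buffer, and if the remote has died, recover a fresh one and retry once. A minimal sketch of that shape (the name protectSketch, public access to getBuffer()/recover(), and DEAD_OBJECT as the only retried status are assumptions; the real helper lives in this file):

    template <typename FnType>
    auto protectSketch(const ResilientBuffer& resilient, const FnType& fn) {
        auto result = fn(*resilient.getBuffer());
        // Only a dead remote justifies a retry; genuine failures are returned as-is.
        if (!result.has_value() && result.error().code == nn::ErrorStatus::DEAD_OBJECT) {
            const auto recovered = resilient.recover(resilient.getBuffer().get());
            if (recovered.has_value()) {
                result = fn(**recovered);  // retry once against the fresh buffer
            }
        }
        return result;
    }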
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 0d3cb33..5ca868b 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -94,7 +94,8 @@
     return mBurst;
 }
 
-ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory(const nn::Memory& memory) const {
+ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory(
+        const nn::SharedMemory& memory) const {
     return getBurst()->cacheMemory(memory);
 }
 
diff --git a/neuralnetworks/utils/common/test/MockBuffer.h b/neuralnetworks/utils/common/test/MockBuffer.h
index c5405fb..59d5700 100644
--- a/neuralnetworks/utils/common/test/MockBuffer.h
+++ b/neuralnetworks/utils/common/test/MockBuffer.h
@@ -27,9 +27,9 @@
 class MockBuffer final : public IBuffer {
   public:
     MOCK_METHOD(Request::MemoryDomainToken, getToken, (), (const, override));
-    MOCK_METHOD(GeneralResult<void>, copyTo, (const Memory& dst), (const, override));
-    MOCK_METHOD(GeneralResult<void>, copyFrom, (const Memory& src, const Dimensions& dimensions),
-                (const, override));
+    MOCK_METHOD(GeneralResult<void>, copyTo, (const SharedMemory& dst), (const, override));
+    MOCK_METHOD(GeneralResult<void>, copyFrom,
+                (const SharedMemory& src, const Dimensions& dimensions), (const, override));
 };
 
 }  // namespace android::nn
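
One consequence of the SharedMemory signatures for these mocks: gmock's default equality matcher on a shared_ptr compares pointer identity, so a test can match the exact descriptor it passed in rather than a by-value nn::Memory. It also explains the recurring edit in the tests that follow: a brace-initialized `{}` argument would now be a null shared pointer, so each test builds a real (if empty) descriptor. A sketch (spelling the tests' kNoError as a default-constructed GeneralResult<void>, which is an assumption):

    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
    const auto mockBuffer = std::make_shared<const nn::MockBuffer>();
    EXPECT_CALL(*mockBuffer, copyTo(memory))  // matches this exact shared_ptr
            .Times(1)
            .WillOnce(testing::Return(nn::GeneralResult<void>{}));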
diff --git a/neuralnetworks/utils/common/test/ResilientBufferTest.cpp b/neuralnetworks/utils/common/test/ResilientBufferTest.cpp
index deb9b7c..7afd020 100644
--- a/neuralnetworks/utils/common/test/ResilientBufferTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientBufferTest.cpp
@@ -15,9 +15,11 @@
  */
 
 #include <gmock/gmock.h>
+#include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/ResilientBuffer.h>
+#include <memory>
 #include <tuple>
 #include <utility>
 #include "MockBuffer.h"
@@ -113,7 +115,8 @@
     EXPECT_CALL(*mockBuffer, copyTo(_)).Times(1).WillOnce(Return(kNoError));
 
     // run test
-    const auto result = buffer->copyTo({});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyTo(memory);
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -126,7 +129,8 @@
     EXPECT_CALL(*mockBuffer, copyTo(_)).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = buffer->copyTo({});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyTo(memory);
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -140,7 +144,8 @@
     EXPECT_CALL(*mockBufferFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = buffer->copyTo({});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyTo(memory);
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -156,7 +161,8 @@
     EXPECT_CALL(*mockBufferFactory, Call()).Times(1).WillOnce(Return(recoveredMockBuffer));
 
     // run test
-    const auto result = buffer->copyTo({});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyTo(memory);
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -169,7 +175,8 @@
     EXPECT_CALL(*mockBuffer, copyFrom(_, _)).Times(1).WillOnce(Return(kNoError));
 
     // run test
-    const auto result = buffer->copyFrom({}, {});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyFrom(memory, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -182,7 +189,8 @@
     EXPECT_CALL(*mockBuffer, copyFrom(_, _)).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = buffer->copyFrom({}, {});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyFrom(memory, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -196,7 +204,8 @@
     EXPECT_CALL(*mockBufferFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = buffer->copyFrom({}, {});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyFrom(memory, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -212,7 +221,8 @@
     EXPECT_CALL(*mockBufferFactory, Call()).Times(1).WillOnce(Return(recoveredMockBuffer));
 
     // run test
-    const auto result = buffer->copyFrom({}, {});
+    const nn::SharedMemory memory = std::make_shared<const nn::Memory>();
+    const auto result = buffer->copyFrom(memory, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())