Merge "Add support and VTS test for RSA OAEP MGF1."
diff --git a/audio/7.0/IStreamOutEventCallback.hal b/audio/7.0/IStreamOutEventCallback.hal
index 52e65d3..4de767f 100644
--- a/audio/7.0/IStreamOutEventCallback.hal
+++ b/audio/7.0/IStreamOutEventCallback.hal
@@ -42,7 +42,7 @@
      * is TEST(metadata_tests, compatibility_R) [3].  The test for R compatibility for JNI
      * marshalling is android.media.cts.AudioMetadataTest#testCompatibilityR [4].
      *
-     * R (audio HAL 7.0) defined keys are as follows [2]:
+     * R (audio HAL 6.0) defined keys are as follows [2]:
      * "bitrate", int32
      * "channel-mask", int32
      * "mime", string
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index e9df02f..a994b23 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -334,6 +334,13 @@
             <instance>default</instance>
         </interface>
     </hal>
+    <hal format="aidl" optional="true">
+        <name>android.hardware.memtrack</name>
+        <interface>
+            <name>IMemtrack</name>
+            <instance>default</instance>
+        </interface>
+    </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.memtrack</name>
         <version>1.0</version>
diff --git a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
index 8077f08..50f282f 100644
--- a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
+++ b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
@@ -19,6 +19,8 @@
 #include <algorithm>
 #include <regex>
 #include <thread>
+#include <unordered_map>
+#include <utility>
 
 #include <android-base/logging.h>
 #include <android-base/properties.h>
@@ -285,6 +287,59 @@
     }
 }
 
+TEST_P(GraphicsComposerHidlTest, GetDisplayAttribute_2_4_ConfigsInAGroupDifferOnlyByVsyncPeriod) {
+    struct Resolution {
+        int32_t width, height;
+    };
+    struct Dpi {
+        int32_t x, y;
+    };
+    for (const auto& display : mDisplays) {
+        std::vector<Config> configs = mComposerClient->getDisplayConfigs(display.get());
+        std::unordered_map<int32_t, Resolution> configGroupToResolutionMap;
+        std::unordered_map<int32_t, Dpi> configGroupToDpiMap;
+        for (auto config : configs) {
+            const auto configGroup = mComposerClient->getDisplayAttribute_2_4(
+                    display.get(), config, IComposerClient::Attribute::CONFIG_GROUP);
+            const auto width = mComposerClient->getDisplayAttribute_2_4(
+                    display.get(), config, IComposerClient::Attribute::WIDTH);
+            const auto height = mComposerClient->getDisplayAttribute_2_4(
+                    display.get(), config, IComposerClient::Attribute::HEIGHT);
+            if (configGroupToResolutionMap.find(configGroup) == configGroupToResolutionMap.end()) {
+                configGroupToResolutionMap[configGroup] = {width, height};
+            }
+            EXPECT_EQ(configGroupToResolutionMap[configGroup].width, width);
+            EXPECT_EQ(configGroupToResolutionMap[configGroup].height, height);
+
+            int32_t dpiX = -1;
+            mComposerClient->getRaw()->getDisplayAttribute_2_4(
+                    display.get(), config, IComposerClient::Attribute::DPI_X,
+                    [&](const auto& tmpError, const auto& value) {
+                        if (tmpError == Error::NONE) {
+                            dpiX = value;
+                        }
+                    });
+            int32_t dpiY = -1;
+            mComposerClient->getRaw()->getDisplayAttribute_2_4(
+                    display.get(), config, IComposerClient::Attribute::DPI_Y,
+                    [&](const auto& tmpError, const auto& value) {
+                        if (tmpError == Error::NONE) {
+                            dpiY = value;
+                        }
+                    });
+            if (dpiX == -1 && dpiY == -1) {
+                continue;
+            }
+
+            if (configGroupToDpiMap.find(configGroup) == configGroupToDpiMap.end()) {
+                configGroupToDpiMap[configGroup] = {dpiX, dpiY};
+            }
+            EXPECT_EQ(configGroupToDpiMap[configGroup].x, dpiX);
+            EXPECT_EQ(configGroupToDpiMap[configGroup].y, dpiY);
+        }
+    }
+}
+
 TEST_P(GraphicsComposerHidlTest, getDisplayVsyncPeriod_BadDisplay) {
     VsyncPeriodNanos vsyncPeriodNanos;
     EXPECT_EQ(Error::BAD_DISPLAY,
diff --git a/identity/support/src/IdentityCredentialSupport.cpp b/identity/support/src/IdentityCredentialSupport.cpp
index 77b795b..093120d 100644
--- a/identity/support/src/IdentityCredentialSupport.cpp
+++ b/identity/support/src/IdentityCredentialSupport.cpp
@@ -935,18 +935,19 @@
 optional<vector<vector<uint8_t>>> createAttestation(
         const EVP_PKEY* key, const vector<uint8_t>& applicationId, const vector<uint8_t>& challenge,
         uint64_t activeTimeMilliSeconds, uint64_t expireTimeMilliSeconds, bool isTestCredential) {
-    const keymaster_cert_chain_t* attestation_chain =
-            ::keymaster::getAttestationChain(KM_ALGORITHM_EC, nullptr);
-    if (attestation_chain == nullptr) {
-        LOG(ERROR) << "Error getting attestation chain";
+    keymaster_error_t error;
+    ::keymaster::CertificateChain attestation_chain =
+            ::keymaster::getAttestationChain(KM_ALGORITHM_EC, &error);
+    if (KM_ERROR_OK != error) {
+        LOG(ERROR) << "Error getting attestation chain " << error;
         return {};
     }
     if (expireTimeMilliSeconds == 0) {
-        if (attestation_chain->entry_count < 1) {
+        if (attestation_chain.entry_count < 1) {
             LOG(ERROR) << "Expected at least one entry in attestation chain";
             return {};
         }
-        keymaster_blob_t* bcBlob = &(attestation_chain->entries[0]);
+        keymaster_blob_t* bcBlob = &(attestation_chain.entries[0]);
         const uint8_t* bcData = bcBlob->data;
         auto bc = X509_Ptr(d2i_X509(nullptr, &bcData, bcBlob->data_length));
         time_t bcNotAfter;
@@ -1015,34 +1016,30 @@
     }
     ::keymaster::AuthorizationSet hwEnforced(hwEnforcedBuilder);
 
-    keymaster_error_t error;
-    ::keymaster::CertChainPtr cert_chain_out;
-
     // Pretend to be implemented in a trusted environment just so we can pass
     // the VTS tests. Of course, this is a pretend-only game since hopefully no
     // relying party is ever going to trust our batch key and those keys above
     // it.
-    //
     ::keymaster::PureSoftKeymasterContext context(::keymaster::KmVersion::KEYMASTER_4_1,
                                                   KM_SECURITY_LEVEL_TRUSTED_ENVIRONMENT);
 
-    error = generate_attestation_from_EVP(key, swEnforced, hwEnforced, auth_set, context,
-                                          *attestation_chain, *attestation_signing_key,
-                                          &cert_chain_out);
+    ::keymaster::CertificateChain cert_chain_out = generate_attestation_from_EVP(
+            key, swEnforced, hwEnforced, auth_set, context, move(attestation_chain),
+            *attestation_signing_key, &error);
 
-    if (KM_ERROR_OK != error || !cert_chain_out) {
+    if (KM_ERROR_OK != error) {
         LOG(ERROR) << "Error generate attestation from EVP key" << error;
         return {};
     }
 
-    // translate certificate format from keymaster_cert_chain_t to vector<uint8_t>.
+    // translate certificate format from keymaster_cert_chain_t to vector<vector<uint8_t>>.
     vector<vector<uint8_t>> attestationCertificate;
-    for (int i = 0; i < cert_chain_out->entry_count; i++) {
+    for (int i = 0; i < cert_chain_out.entry_count; i++) {
         attestationCertificate.insert(
                 attestationCertificate.end(),
                 vector<uint8_t>(
-                        cert_chain_out->entries[i].data,
-                        cert_chain_out->entries[i].data + cert_chain_out->entries[i].data_length));
+                        cert_chain_out.entries[i].data,
+                        cert_chain_out.entries[i].data + cert_chain_out.entries[i].data_length));
     }
 
     return attestationCertificate;
diff --git a/memtrack/aidl/Android.bp b/memtrack/aidl/Android.bp
new file mode 100644
index 0000000..fe4d01b
--- /dev/null
+++ b/memtrack/aidl/Android.bp
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+aidl_interface {
+    name: "android.hardware.memtrack",
+    vendor_available: true,
+    srcs: ["android/hardware/memtrack/*.aidl"],
+    stability: "vintf",
+    backend: {
+        cpp: {
+            enabled: false,
+        },
+        java: {
+            enabled: false,
+        },
+        ndk: {
+            vndk: {
+                enabled: true,
+            },
+        },
+    },
+}
diff --git a/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/DeviceInfo.aidl b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/DeviceInfo.aidl
new file mode 100644
index 0000000..00abff9
--- /dev/null
+++ b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/DeviceInfo.aidl
@@ -0,0 +1,23 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.memtrack;
+@VintfStability
+parcelable DeviceInfo {
+  int id;
+  @utf8InCpp String name;
+}
diff --git a/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/IMemtrack.aidl b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/IMemtrack.aidl
new file mode 100644
index 0000000..844a1bb
--- /dev/null
+++ b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/IMemtrack.aidl
@@ -0,0 +1,23 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.memtrack;
+@VintfStability
+interface IMemtrack {
+  android.hardware.memtrack.MemtrackRecord[] getMemory(in int pid, in android.hardware.memtrack.MemtrackType type);
+  android.hardware.memtrack.DeviceInfo[] getGpuDeviceInfo();
+}
diff --git a/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackRecord.aidl b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackRecord.aidl
new file mode 100644
index 0000000..09ecefc
--- /dev/null
+++ b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackRecord.aidl
@@ -0,0 +1,32 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.memtrack;
+@VintfStability
+parcelable MemtrackRecord {
+  int flags;
+  long sizeInBytes;
+  const int FLAG_SMAPS_ACCOUNTED = 2;
+  const int FLAG_SMAPS_UNACCOUNTED = 4;
+  const int FLAG_SHARED = 8;
+  const int FLAG_SHARED_PSS = 16;
+  const int FLAG_PRIVATE = 32;
+  const int FLAG_SYSTEM = 64;
+  const int FLAG_DEDICATED = 128;
+  const int FLAG_NONSECURE = 256;
+  const int FLAG_SECURE = 512;
+}
diff --git a/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackType.aidl b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackType.aidl
new file mode 100644
index 0000000..7f3f939
--- /dev/null
+++ b/memtrack/aidl/aidl_api/android.hardware.memtrack/current/android/hardware/memtrack/MemtrackType.aidl
@@ -0,0 +1,27 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL interface (or parcelable). Do not try to
+// edit this file. It looks like you are doing that because you have modified
+// an AIDL interface in a backward-incompatible way, e.g., deleting a function
+// from an interface or a field from a parcelable and it broke the build. That
+// breakage is intended.
+//
+// You must not make backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.memtrack;
+@Backing(type="int") @VintfStability
+enum MemtrackType {
+  OTHER = 0,
+  GL = 1,
+  GRAPHICS = 2,
+  MULTIMEDIA = 3,
+  CAMERA = 4,
+  NUM_TYPES = 5,
+}
diff --git a/memtrack/aidl/android/hardware/memtrack/DeviceInfo.aidl b/memtrack/aidl/android/hardware/memtrack/DeviceInfo.aidl
new file mode 100644
index 0000000..bcba544
--- /dev/null
+++ b/memtrack/aidl/android/hardware/memtrack/DeviceInfo.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.memtrack;
+
+/*
+ * Contains the device name and the device id.
+ */
+@VintfStability
+parcelable DeviceInfo {
+    int id;
+    @utf8InCpp String name;
+}
+
diff --git a/memtrack/aidl/android/hardware/memtrack/IMemtrack.aidl b/memtrack/aidl/android/hardware/memtrack/IMemtrack.aidl
new file mode 100644
index 0000000..18587ee
--- /dev/null
+++ b/memtrack/aidl/android/hardware/memtrack/IMemtrack.aidl
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.memtrack;
+
+import android.hardware.memtrack.DeviceInfo;
+import android.hardware.memtrack.MemtrackRecord;
+import android.hardware.memtrack.MemtrackType;
+
+/**
+ * The Memory Tracker HAL is designed to return information about
+ * device-specific memory usage.
+ * The primary goal is to be able to track memory that is not
+ * trackable in any other way, for example texture memory that is allocated by
+ * a process, but not mapped into that process's address space.
+ * A secondary goal is to be able to categorize memory used by a process into
+ * GL, graphics, etc. All memory sizes must reflect real memory usage,
+ * accounting for stride, bit depth, rounding up to page size, etc.
+ *
+ * The interface constructor should perform memtrack management setup
+ * actions and must be called once before any calls to getMemory().
+ */
+@VintfStability
+interface IMemtrack {
+    /**
+     * getMemory() populates a vector of MemtrackRecord with the sizes of
+     * memory, plus the associated flags for that memory.
+     *
+     * A process collecting memory statistics will call getMemory for each
+     * combination of pid and memory type. For each memory type that it
+     * recognizes, the HAL must fill out an array of MemtrackRecord
+     * structures breaking down the statistics of that memory type as much as
+     * possible. For example,
+     * getMemory(<pid>, GL) might return:
+     * { { 4096,  ACCOUNTED | PRIVATE | SYSTEM },
+     *   { 40960, UNACCOUNTED | PRIVATE | SYSTEM },
+     *   { 8192,  ACCOUNTED | PRIVATE | DEDICATED },
+     *   { 8192,  UNACCOUNTED | PRIVATE | DEDICATED } }
+     * If the HAL cannot differentiate between SYSTEM and DEDICATED memory, it
+     * could return:
+     * { { 12288,  ACCOUNTED | PRIVATE },
+     *   { 49152,  UNACCOUNTED | PRIVATE } }
+     *
+     * Memory must not overlap between types. For example, a graphics buffer
+     * that has been mapped into the GPU as a surface must show up when
+     * GRAPHICS is requested and not when GL
+     * is requested.
+     *
+     * @param pid process for which memory information is requested
+     * @param type memory type that information is being requested about
+     * @return vector of MemtrackRecord containing memory information
+     */
+    MemtrackRecord[] getMemory(in int pid, in MemtrackType type);
+
+    /**
+     * getGpuDeviceInfo() populates DeviceInfo with the ID and name
+     * of each GPU device.
+     *
+     * For example, getGpuDeviceInfo() might return:
+     * { { 0, <gpu-device-name> },
+     *   { 1, <gpu-device-name> } }
+     *
+     * This information is used to identify GPU devices for GPU-specific
+     * memory accounting (e.g. DMA buffer usage).
+     *
+     * @return vector of DeviceInfo populated for all GPU devices.
+     */
+    DeviceInfo[] getGpuDeviceInfo();
+}
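
A minimal client sketch of the getMemory() contract described above, assuming the generated NDK backend (IMemtrack::descriptor, IMemtrack::fromBinder, and AServiceManager_waitForService also appear in the VTS test later in this change); getGlUnaccountedBytes is a hypothetical helper, not part of this patch:

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <vector>

    #include <aidl/android/hardware/memtrack/IMemtrack.h>
    #include <android/binder_manager.h>

    using aidl::android::hardware::memtrack::IMemtrack;
    using aidl::android::hardware::memtrack::MemtrackRecord;
    using aidl::android::hardware::memtrack::MemtrackType;

    // Hypothetical helper: sums the GL memory used by |pid| that is not
    // already visible in the process's smaps (FLAG_SMAPS_UNACCOUNTED).
    int64_t getGlUnaccountedBytes(int pid) {
        const std::string instance = std::string(IMemtrack::descriptor) + "/default";
        auto binder = ndk::SpAIBinder(AServiceManager_waitForService(instance.c_str()));
        std::shared_ptr<IMemtrack> memtrack = IMemtrack::fromBinder(binder);
        std::vector<MemtrackRecord> records;
        if (memtrack == nullptr || !memtrack->getMemory(pid, MemtrackType::GL, &records).isOk()) {
            return 0;
        }
        int64_t total = 0;
        for (const auto& record : records) {
            if (record.flags & MemtrackRecord::FLAG_SMAPS_UNACCOUNTED) {
                total += record.sizeInBytes;
            }
        }
        return total;
    }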
diff --git a/memtrack/aidl/android/hardware/memtrack/MemtrackRecord.aidl b/memtrack/aidl/android/hardware/memtrack/MemtrackRecord.aidl
new file mode 100644
index 0000000..dae026e
--- /dev/null
+++ b/memtrack/aidl/android/hardware/memtrack/MemtrackRecord.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.memtrack;
+
+/*
+ * Each record consists of the size of the memory used by the process and
+ * flags that indicate which memtrack flags are valid for this record.
+ */
+@VintfStability
+parcelable MemtrackRecord {
+    /* Memtrack Flags */
+    const int FLAG_SMAPS_ACCOUNTED = 1 << 1;
+    const int FLAG_SMAPS_UNACCOUNTED = 1 << 2;
+    const int FLAG_SHARED = 1 << 3;
+    const int FLAG_SHARED_PSS = 1 << 4;
+    const int FLAG_PRIVATE = 1 << 5;
+    const int FLAG_SYSTEM = 1 << 6;
+    const int FLAG_DEDICATED = 1 << 7;
+    const int FLAG_NONSECURE = 1 << 8;
+    const int FLAG_SECURE = 1 << 9;
+
+    /* Bitfield indicating all flags that are valid for this record */
+    int flags;
+
+    long sizeInBytes;
+}
+
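For reference, the decimal flag constants in the frozen snapshot earlier in this change (FLAG_SMAPS_ACCOUNTED = 2 through FLAG_SECURE = 512) are exactly these shifted values; a quick C++ sanity check:

    static_assert((1 << 1) == 2 && (1 << 4) == 16 && (1 << 9) == 512,
                  "shifted flag definitions match the frozen snapshot constants");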
diff --git a/memtrack/aidl/android/hardware/memtrack/MemtrackType.aidl b/memtrack/aidl/android/hardware/memtrack/MemtrackType.aidl
new file mode 100644
index 0000000..715c6bf
--- /dev/null
+++ b/memtrack/aidl/android/hardware/memtrack/MemtrackType.aidl
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.memtrack;
+
+/**
+ * Tags which define the usage of the memory buffers.
+ */
+@VintfStability
+@Backing(type="int")
+enum MemtrackType {
+    OTHER = 0,
+    GL = 1,
+    GRAPHICS = 2,
+    MULTIMEDIA = 3,
+    CAMERA = 4,
+    NUM_TYPES,
+}
diff --git a/memtrack/aidl/default/Android.bp b/memtrack/aidl/default/Android.bp
new file mode 100644
index 0000000..52f88c8
--- /dev/null
+++ b/memtrack/aidl/default/Android.bp
@@ -0,0 +1,30 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_binary {
+    name: "android.hardware.memtrack-service.example",
+    relative_install_path: "hw",
+    init_rc: ["memtrack-default.rc"],
+    vintf_fragments: ["memtrack-default.xml"],
+    vendor: true,
+    shared_libs: [
+        "libbase",
+        "libbinder_ndk",
+        "android.hardware.memtrack-ndk_platform",
+    ],
+    srcs: [
+        "main.cpp",
+        "Memtrack.cpp",
+    ],
+}
diff --git a/memtrack/aidl/default/Memtrack.cpp b/memtrack/aidl/default/Memtrack.cpp
new file mode 100644
index 0000000..7361719
--- /dev/null
+++ b/memtrack/aidl/default/Memtrack.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Memtrack.h"
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace memtrack {
+
+ndk::ScopedAStatus Memtrack::getMemory(int pid, MemtrackType type,
+                                       std::vector<MemtrackRecord>* _aidl_return) {
+    if (pid < 0) {
+        return ndk::ScopedAStatus(AStatus_fromExceptionCode(EX_ILLEGAL_ARGUMENT));
+    }
+    if (type < MemtrackType::OTHER || type >= MemtrackType::NUM_TYPES) {
+        return ndk::ScopedAStatus(AStatus_fromExceptionCode(EX_UNSUPPORTED_OPERATION));
+    }
+    _aidl_return->clear();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Memtrack::getGpuDeviceInfo(std::vector<DeviceInfo>* _aidl_return) {
+    _aidl_return->clear();
+    return ndk::ScopedAStatus::ok();
+}
+
+}  // namespace memtrack
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/memtrack/aidl/default/Memtrack.h b/memtrack/aidl/default/Memtrack.h
new file mode 100644
index 0000000..f2ef60e
--- /dev/null
+++ b/memtrack/aidl/default/Memtrack.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/memtrack/BnMemtrack.h>
+#include <aidl/android/hardware/memtrack/DeviceInfo.h>
+#include <aidl/android/hardware/memtrack/MemtrackRecord.h>
+#include <aidl/android/hardware/memtrack/MemtrackType.h>
+
+namespace aidl {
+namespace android {
+namespace hardware {
+namespace memtrack {
+
+class Memtrack : public BnMemtrack {
+    ndk::ScopedAStatus getMemory(int pid, MemtrackType type,
+                                 std::vector<MemtrackRecord>* _aidl_return) override;
+
+    ndk::ScopedAStatus getGpuDeviceInfo(std::vector<DeviceInfo>* _aidl_return) override;
+};
+
+}  // namespace memtrack
+}  // namespace hardware
+}  // namespace android
+}  // namespace aidl
diff --git a/memtrack/aidl/default/main.cpp b/memtrack/aidl/default/main.cpp
new file mode 100644
index 0000000..d063d2a
--- /dev/null
+++ b/memtrack/aidl/default/main.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Memtrack.h"
+
+#include <android-base/logging.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+using aidl::android::hardware::memtrack::Memtrack;
+
+int main() {
+    ABinderProcess_setThreadPoolMaxThreadCount(0);
+    std::shared_ptr<Memtrack> memtrack = ndk::SharedRefBase::make<Memtrack>();
+
+    const std::string instance = std::string() + Memtrack::descriptor + "/default";
+    binder_status_t status =
+            AServiceManager_addService(memtrack->asBinder().get(), instance.c_str());
+    CHECK(status == STATUS_OK);
+
+    ABinderProcess_joinThreadPool();
+    return EXIT_FAILURE;  // Unreachable
+}
diff --git a/memtrack/aidl/default/memtrack-default.rc b/memtrack/aidl/default/memtrack-default.rc
new file mode 100644
index 0000000..1725cd0
--- /dev/null
+++ b/memtrack/aidl/default/memtrack-default.rc
@@ -0,0 +1,4 @@
+service vendor.memtrack-default /vendor/bin/hw/android.hardware.memtrack-service.example
+    class hal
+    user nobody
+    group system
diff --git a/memtrack/aidl/default/memtrack-default.xml b/memtrack/aidl/default/memtrack-default.xml
new file mode 100644
index 0000000..3e3e0f6
--- /dev/null
+++ b/memtrack/aidl/default/memtrack-default.xml
@@ -0,0 +1,7 @@
+<manifest version="1.0" type="device">
+    <hal format="aidl">
+        <name>android.hardware.memtrack</name>
+        <fqname>IMemtrack/default</fqname>
+    </hal>
+</manifest>
+
diff --git a/memtrack/aidl/vts/Android.bp b/memtrack/aidl/vts/Android.bp
new file mode 100644
index 0000000..ea36677
--- /dev/null
+++ b/memtrack/aidl/vts/Android.bp
@@ -0,0 +1,17 @@
+cc_test {
+    name: "VtsHalMemtrackTargetTest",
+    defaults: [
+        "VtsHalTargetTestDefaults",
+        "use_libaidlvintf_gtest_helper_static",
+    ],
+    srcs: ["VtsHalMemtrackTargetTest.cpp"],
+    shared_libs: [
+        "libbinder_ndk",
+    ],
+    static_libs: [
+        "android.hardware.memtrack-unstable-ndk_platform",
+    ],
+    test_suites: [
+        "vts-core",
+    ],
+}
diff --git a/memtrack/aidl/vts/VtsHalMemtrackTargetTest.cpp b/memtrack/aidl/vts/VtsHalMemtrackTargetTest.cpp
new file mode 100644
index 0000000..4d33101
--- /dev/null
+++ b/memtrack/aidl/vts/VtsHalMemtrackTargetTest.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <aidl/Gtest.h>
+#include <aidl/Vintf.h>
+
+#include <aidl/android/hardware/memtrack/DeviceInfo.h>
+#include <aidl/android/hardware/memtrack/IMemtrack.h>
+#include <aidl/android/hardware/memtrack/MemtrackType.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+using aidl::android::hardware::memtrack::DeviceInfo;
+using aidl::android::hardware::memtrack::IMemtrack;
+using aidl::android::hardware::memtrack::MemtrackRecord;
+using aidl::android::hardware::memtrack::MemtrackType;
+
+class MemtrackAidlTest : public testing::TestWithParam<std::string> {
+  public:
+    virtual void SetUp() override {
+        const auto instance = GetParam();
+        ASSERT_TRUE(AServiceManager_isDeclared(instance.c_str()));
+        auto memtrackBinder = ndk::SpAIBinder(AServiceManager_waitForService(instance.c_str()));
+        memtrack_ = IMemtrack::fromBinder(memtrackBinder);
+        ASSERT_NE(memtrack_, nullptr);
+    }
+
+    std::shared_ptr<IMemtrack> memtrack_;
+};
+
+TEST_P(MemtrackAidlTest, GetMemoryInvalidPid) {
+    int pid = -1;
+    MemtrackType type = MemtrackType::OTHER;
+    std::vector<MemtrackRecord> records;
+
+    auto status = memtrack_->getMemory(pid, type, &records);
+
+    EXPECT_EQ(status.getExceptionCode(), EX_ILLEGAL_ARGUMENT);
+}
+
+TEST_P(MemtrackAidlTest, GetMemoryInvalidType) {
+    int pid = 1;
+    MemtrackType type = MemtrackType::NUM_TYPES;
+    std::vector<MemtrackRecord> records;
+
+    auto status = memtrack_->getMemory(pid, type, &records);
+
+    EXPECT_EQ(status.getExceptionCode(), EX_UNSUPPORTED_OPERATION);
+}
+
+TEST_P(MemtrackAidlTest, GetMemory) {
+    int pid = 1;
+    MemtrackType type = MemtrackType::OTHER;
+    std::vector<MemtrackRecord> records;
+
+    auto status = memtrack_->getMemory(pid, type, &records);
+
+    EXPECT_TRUE(status.isOk());
+}
+
+TEST_P(MemtrackAidlTest, GetGpuDeviceInfo) {
+    std::vector<DeviceInfo> device_info;
+
+    auto status = memtrack_->getGpuDeviceInfo(&device_info);
+
+    EXPECT_TRUE(status.isOk());
+}
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(MemtrackAidlTest);
+INSTANTIATE_TEST_SUITE_P(PerInstance, MemtrackAidlTest,
+                         testing::ValuesIn(android::getAidlHalInstanceNames(IMemtrack::descriptor)),
+                         android::PrintInstanceNameToString);
+
+int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    ABinderProcess_setThreadPoolMaxThreadCount(1);
+    ABinderProcess_startThreadPool();
+    return RUN_ALL_TESTS();
+}
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
index 65b75e5..3b32e1d 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
@@ -27,8 +27,31 @@
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate
+// nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with an empty output shape vector and no timing information. On failure, this function
+// returns with the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -41,11 +64,10 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -57,8 +79,6 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index ee103ba..db3b2ad 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -32,8 +32,12 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Class that adapts V1_0::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 31f366d..2de1828 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -29,8 +29,12 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+// Class that adapts V1_0::IPreparedModel to nn::IPreparedModel.
 class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
@@ -44,13 +48,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp
index b1259c3..ea3ea56 100644
--- a/neuralnetworks/1.0/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp
@@ -27,69 +27,62 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <utility>
 
-namespace android::hardware::neuralnetworks::V1_0::utils {
-namespace {
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    return supportedOperations;
 }
 
-}  // namespace
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status) {
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    return {};
+}
 
 Return<void> PreparedModelCallback::notify(ErrorStatus status,
                                            const sp<IPreparedModel>& preparedModel) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(ErrorStatus status) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(executionCallback(status));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
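
HANDLE_HAL_STATUS comes from the <nnapi/hal/HandleError.h> header added to the includes above. As an assumption about its rough shape (not the exact definition): on a non-NONE HAL status it converts to the canonical nn::ErrorStatus and early-returns an error builder that the caller can stream a message into, which is why call sites above can write HANDLE_HAL_STATUS(status) << "...":

    // Assumed shape only; see nnapi/hal/HandleError.h for the real macro.
    #define HANDLE_HAL_STATUS(status)                                       \
        if (const auto canonical = ::android::nn::convert(status).value_or( \
                    ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
            canonical == ::android::nn::ErrorStatus::NONE) {                \
        } else                                                              \
            return NN_ERROR(canonical)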
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 285c515..93bd81a 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -31,6 +31,7 @@
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
 
 #include <functional>
 #include <memory>
@@ -38,27 +39,27 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_0::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
@@ -74,7 +75,7 @@
                << "V1_0::utils::Device::create must have non-null device";
     }
 
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -131,27 +132,12 @@
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -170,10 +156,7 @@
 
     const auto ret = kDevice->prepareModel(hidlModel, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
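
The getCapabilitiesFrom() and getSupportedOperations() rewrites above rely on hal::utils::CallbackValue from <nnapi/hal/TransferValue.h>, which adapts a plain conversion function into a HIDL result callback. A minimal sketch of the pattern under that assumption, with toy types standing in for the HIDL and canonical NN types (the real helper is more general):

    #include <optional>
    #include <utility>

    enum class Status { NONE, GENERAL_FAILURE };
    using Result = std::optional<int>;  // stand-in for nn::GeneralResult<...>

    // Conversion function, analogous to capabilitiesCallback above.
    Result capabilitiesToResult(Status status, int capabilities) {
        if (status != Status::NONE) return std::nullopt;
        return capabilities;
    }

    // Stores the converter, applies it when invoked as the HIDL callback,
    // and hands the converted value back through take().
    class CallbackValue {
      public:
        using Converter = Result (*)(Status, int);
        explicit CallbackValue(Converter fn) : mFn(fn) {}

        void operator()(Status status, int capabilities) {
            mResult = mFn(status, capabilities);
        }

        Result take() { return std::exchange(mResult, std::nullopt); }

      private:
        Converter mFn;
        Result mResult;
    };

    // Usage mirrors getCapabilitiesFrom() above:
    //   auto cb = CallbackValue(capabilitiesToResult);
    //   device->getCapabilities(cb);  // runtime invokes cb(status, caps)
    //   return cb.take();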
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 46dd3f8..c0c22fb 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -34,13 +34,15 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
         sp<V1_0::IPreparedModel> preparedModel) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
@@ -55,7 +57,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
@@ -68,10 +70,7 @@
 
     const auto ret = kPreparedModel->execute(hidlRequest, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
 
     auto result = NN_TRY(cb->get());
     NN_TRY(hal::utils::makeExecutionFailure(
@@ -81,11 +80,12 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
 }
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
index f646462..5d0769f 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
@@ -51,6 +51,10 @@
 nn::GeneralResult<Model> convert(const nn::Model& model);
 nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
+
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_CONVERSIONS_H
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index c1e95fe1a..5e224b5 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -32,8 +32,12 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_1::utils {
 
+// Class that adapts V1_1::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index 359f68a..b47f25a 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -275,4 +275,16 @@
     return validatedConvert(executionPreference);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_0::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
+    return V1_0::utils::convert(request);
+}
+
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
+    return V1_0::utils::convert(status);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index f73d3f8..3197ef4 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -39,27 +39,27 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_1::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_1::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_1::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_1(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
@@ -75,7 +75,7 @@
                << "V1_1::utils::Device::create must have non-null device";
     }
 
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
@@ -132,28 +132,12 @@
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](V1_0::ErrorStatus status,
-                                const hidl_vec<bool>& supportedOperations) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations_1_1 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations_1_1 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -173,10 +157,7 @@
 
     const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
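
hal::utils::CallbackValue is what removes the repeated "uninitialized result plus capturing lambda" boilerplate visible in the deleted lines above. A simplified sketch of the idea follows; the real helper lives in nnapi/hal/TransferValue.h, and the details below are assumptions, not its actual definition:

    #include <functional>
    #include <optional>
    #include <utility>

    // Sketch: wrap a plain function, record fn(args...) when invoked as the
    // HIDL callback, then hand the recorded value back exactly once via take().
    template <typename ReturnType, typename... ArgTypes>
    class CallbackValueSketch {
      public:
        explicit CallbackValueSketch(std::function<ReturnType(ArgTypes...)> fn)
            : mFunction(std::move(fn)) {}

        // Called by the HIDL proxy during the synchronous transaction.
        void operator()(ArgTypes... args) { mValue.emplace(mFunction(args...)); }

        // Valid only after the HIDL call has returned successfully.
        ReturnType take() { return std::move(mValue).value(); }

      private:
        std::function<ReturnType(ArgTypes...)> mFunction;
        std::optional<ReturnType> mValue;
    };

    // Deduction guide so `CallbackValueSketch(capabilitiesCallback)` works
    // directly from a function pointer, as the call sites above rely on.
    template <typename ReturnType, typename... ArgTypes>
    CallbackValueSketch(ReturnType (*)(ArgTypes...))
            -> CallbackValueSketch<ReturnType, ArgTypes...>;
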
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
index bc7d92a..ba3c1ba 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -31,8 +31,24 @@
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with the output shapes and the timing information. On failure, this function returns with
+// the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -48,11 +64,10 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute_1_2 asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -66,8 +81,6 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
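Exposing prepareModelCallback and executionCallback as free functions means one validation-and-conversion routine serves both the asynchronous callback classes above and the synchronous call sites. A hedged usage sketch; the surrounding function is hypothetical, but the calls mirror PreparedModel::executeSynchronously later in this change:

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> runSync(
            const sp<IPreparedModel>& preparedModel, const V1_0::Request& request,
            MeasureTiming measure) {
        // The free function becomes the body of a one-shot callback value.
        auto cb = hal::utils::CallbackValue(executionCallback);
        const auto ret = preparedModel->executeSynchronously(request, measure, cb);
        HANDLE_TRANSPORT_FAILURE(ret);
        return cb.take();
    }
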
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index 5dcbc0b..6fd1337 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -97,6 +97,12 @@
 nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
 nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference);
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_CONVERSIONS_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index a68830d..b4bef5e 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -32,14 +32,29 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device);
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device);
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device);
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+// Retrieves the version string from the provided device object. On failure, this function returns
+// with the appropriate nn::GeneralError.
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device);
+
+// Retrieves the device type from the provided device object. On failure, this function returns with
+// the appropriate nn::GeneralError.
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device);
+
+// Retrieves the extensions supported by the provided device object. On failure, this function
+// returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device);
+
+// Retrieves the number of model cache files and data cache files needed by the provided device
+// object. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device);
 
+// Class that adapts V1_2::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
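Each renamed getter is a synchronous HIDL query returning nn::GeneralResult, so a creation path can chain them with NN_TRY and bail out on the first failure. A hypothetical bring-up sketch:

    // Hypothetical sketch: a failure in any query propagates out via NN_TRY.
    nn::GeneralResult<void> queryStaticProperties(V1_2::IDevice* device) {
        const auto versionString = NN_TRY(getVersionStringFrom(device));
        const auto deviceType = NN_TRY(getDeviceTypeFrom(device));
        const auto extensions = NN_TRY(getSupportedExtensionsFrom(device));
        const auto [numModelCache, numDataCache] =
                NN_TRY(getNumberOfCacheFilesNeededFrom(device));
        // ... cache the retrieved values on the adapter object ...
        return {};
    }
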
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 65e1e8a..6a56a82 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -30,28 +30,32 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+// Class that adapts V1_2::IPreparedModel to nn::IPreparedModel.
 class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
   public:
     static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
-            sp<V1_2::IPreparedModel> preparedModel);
+            sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously);
 
-    PreparedModel(PrivateConstructorTag tag, sp<V1_2::IPreparedModel> preparedModel,
-                  hal::utils::DeathHandler deathHandler);
+    PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
+                  sp<V1_2::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
@@ -61,6 +65,7 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
             const V1_0::Request& request, MeasureTiming measure) const;
 
+    const bool kExecuteSynchronously;
     const sp<V1_2::IPreparedModel> kPreparedModel;
     const hal::utils::DeathHandler kDeathHandler;
 };
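
The executeSynchronously flag is latched at construction, so a given PreparedModel instance always dispatches down a single path. A sketch of wiring it up, with a hypothetical wrapper name:

    // Hypothetical wrapper: the bool chosen here decides, once and for all,
    // whether execute() calls executeSynchronously() or executeAsynchronously().
    nn::GeneralResult<nn::SharedPreparedModel> wrapPreparedModel(
            sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously) {
        return NN_TRY(PreparedModel::create(std::move(preparedModel), executeSynchronously));
    }
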
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 70149a2..c289fc8 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -30,6 +30,8 @@
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+using CacheToken = hidl_array<uint8_t, static_cast<size_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
 constexpr auto kDefaultMeasureTiming = MeasureTiming::NO;
 constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
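
The new CacheToken alias fixes the token width to Constant::BYTE_SIZE_OF_CACHE_TOKEN at compile time, which is what permits the explicit CacheToken{token} construction used in Device.cpp below. A minimal sketch, assuming the canonical nn::CacheToken is a std::array of the same byte size:

    // Sketch: explicit widening of the canonical token into the HIDL array type.
    CacheToken makeHidlToken(const nn::CacheToken& token) {
        static_assert(sizeof(nn::CacheToken) == sizeof(CacheToken));
        return CacheToken{token};
    }
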
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index 39f88c2..fefa122 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -27,6 +27,7 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
 #include <nnapi/hal/CommonUtils.h>
@@ -36,107 +37,79 @@
 
 #include <utility>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<V1_0::IPreparedModel>& preparedModel) {
-    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
-}
-
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                      const Timing& timing) {
     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+}  // namespace
+
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        auto canonicalOutputShapes =
+                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << toString(status);
+    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     return hal::utils::makeExecutionFailure(
             convertExecutionGeneralResultsHelper(outputShapes, timing));
 }
 
-}  // namespace
-
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                                const sp<IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(V1_0::utils::executionCallback(status));
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                            const hidl_vec<OutputShape>& outputShapes,
                                            const Timing& timing) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
-    }
+    mData.put(executionCallback(status, outputShapes, timing));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
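
The notable case in executionCallback above is OUTPUT_INSUFFICIENT_SIZE: it is a failure that still carries a payload, so it bypasses HANDLE_HAL_STATUS and packs the driver-reported shapes into the error itself. A hedged caller-side sketch of why that matters; the error field names follow the canonical nn::ExecutionError as understood here:

    void handleExecutionResult(
            const nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>&
                    result) {
        if (!result.has_value() &&
            result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
            // The shapes survive inside the error, letting the caller grow its
            // output buffers and retry instead of failing outright.
            const std::vector<nn::OutputShape>& shapes = result.error().outputShapes;
            (void)shapes;
        }
    }
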
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index f11474f..062f6f7 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -26,6 +26,7 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 
@@ -43,7 +44,9 @@
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
+using HalDuration = std::chrono::duration<uint64_t, std::micro>;
 constexpr auto kVersion = android::nn::Version::ANDROID_Q;
+constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
 
 }  // namespace
 
@@ -270,7 +273,18 @@
 }
 
 GeneralResult<Timing> unvalidatedConvert(const hal::V1_2::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr uint64_t kMaxTiming = std::chrono::floor<HalDuration>(Duration::max()).count();
+    constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration {
+        if (halTiming == kNoTiming) {
+            return {};
+        }
+        if (halTiming > kMaxTiming) {
+            return Duration::max();
+        }
+        return HalDuration{halTiming};
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension) {
@@ -547,7 +561,14 @@
 }
 
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t {
+        if (!canonicalTiming.has_value()) {
+            return kNoTiming;
+        }
+        return std::chrono::ceil<HalDuration>(*canonicalTiming).count();
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
@@ -602,4 +623,21 @@
     return validatedConvert(outputShapes);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_1::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
+    return V1_1::utils::convert(request);
+}
+
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
+    return V1_1::utils::convert(status);
+}
+
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference) {
+    return V1_1::utils::convert(executionPreference);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
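
The timing conversions above treat UINT64_MAX as a "no measurement" sentinel and are deliberately asymmetric: HAL-to-canonical clamps oversized values to Duration::max(), while canonical-to-HAL rounds up with std::chrono::ceil so a sub-microsecond measurement never collapses to zero. A worked sketch of the round trip, with the sentinel redeclared locally since kNoTiming above is file-local:

    constexpr uint64_t kNoTimingSentinel = std::numeric_limits<uint64_t>::max();

    void timingRoundTripSketch() {
        const V1_2::Timing hal = {.timeOnDevice = 5, .timeInDriver = kNoTimingSentinel};
        (void)hal;
        // unvalidatedConvert(hal) yields Timing{.timeOnDevice = 5us,
        //                                       .timeInDriver = std::nullopt}.
        // Converting that back yields {5, UINT64_MAX} unchanged, while a 1ns
        // canonical duration would round *up* to 1 (not down to 0) microsecond.
    }
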
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index 0061065..9fe0de2 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -41,112 +41,108 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<std::string> versionStringCallback(V1_0::ErrorStatus status,
+                                                     const hidl_string& versionString) {
+    HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status);
+    return versionString;
+}
+
+nn::GeneralResult<nn::DeviceType> deviceTypeCallback(V1_0::ErrorStatus status,
+                                                     DeviceType deviceType) {
+    HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status);
+    return nn::convert(deviceType);
+}
+
+nn::GeneralResult<std::vector<nn::Extension>> supportedExtensionsCallback(
+        V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
+    HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status);
+    return nn::convert(extensions);
+}
+
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> numberOfCacheFilesNeededCallback(
+        V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+    HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status);
+    if (numModelCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater "
+                             "than allowed max ("
+                          << numModelCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    if (numDataCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numDataCache files greater "
+                             "than allowed max ("
+                          << numDataCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    return std::make_pair(numModelCache, numDataCache);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_2(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::string> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                            << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status);
-        } else {
-            result = versionString;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(versionStringCallback);
 
     const auto ret = device->getVersionString(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::DeviceType> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                               << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status);
-        } else {
-            result = nn::convert(deviceType);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(deviceTypeCallback);
 
     const auto ret = device->getType(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::vector<nn::Extension>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status);
-        } else {
-            result = nn::convert(extensions);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedExtensionsCallback);
 
     const auto ret = device->getSupportedExtensions(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::pair<uint32_t, uint32_t>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache,
-                              uint32_t numDataCache) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getNumberOfCacheFilesNeeded failed with " << toString(status);
-        } else {
-            result = std::make_pair(numModelCache, numDataCache);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(numberOfCacheFilesNeededCallback);
 
     const auto ret = device->getNumberOfCacheFilesNeeded(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
@@ -160,11 +156,11 @@
                << "V1_2::utils::Device::create must have non-null device";
     }
 
-    auto versionString = NN_TRY(initVersionString(device.get()));
-    const auto deviceType = NN_TRY(initDeviceType(device.get()));
-    auto extensions = NN_TRY(initExtensions(device.get()));
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
-    const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get()));
+    auto versionString = NN_TRY(getVersionStringFrom(device.get()));
+    const auto deviceType = NN_TRY(getDeviceTypeFrom(device.get()));
+    auto extensions = NN_TRY(getSupportedExtensionsFrom(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
+    const auto numberOfCacheFilesNeeded = NN_TRY(getNumberOfCacheFilesNeededFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(
@@ -229,28 +225,12 @@
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](V1_0::ErrorStatus status,
-                                const hidl_vec<bool>& supportedOperations) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations_1_2 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations_1_2 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -263,10 +243,10 @@
             NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
-    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPreference = NN_TRY(convert(preference));
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -274,10 +254,7 @@
     const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
                                                hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -287,17 +264,14 @@
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
 
     const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
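
numberOfCacheFilesNeededCallback adds validation the old lambda lacked: counts above nn::kMaxNumberOfCacheFiles are rejected up front. The callbacks sit in an anonymous namespace, so this sketch imagines them visible purely for illustration:

    // Illustration only: a well-behaved driver report passes through...
    void cacheFilesValidationSketch() {
        CHECK(numberOfCacheFilesNeededCallback(V1_0::ErrorStatus::NONE, 1, 1).has_value());
        // ...while an out-of-range count becomes a GeneralError rather than a
        // bogus pair that would poison later compilation-cache handling.
        CHECK(!numberOfCacheFilesNeededCallback(V1_0::ErrorStatus::NONE,
                                                nn::kMaxNumberOfCacheFiles + 1, 0)
                       .has_value());
    }
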
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index dad9a7e..6d00082 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -37,55 +37,37 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_2::utils {
-namespace {
-
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
-}  // namespace
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_2::IPreparedModel> preparedModel) {
+        sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }
 
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_2::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_2::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
-                              const Timing& timing) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);
 
     const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -95,9 +77,8 @@
 
     const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
+    if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
@@ -106,51 +87,38 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
 
-    const auto hidlRequest =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared)));
+    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
 
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
+                                        : executeAsynchronously(hidlRequest, hidlMeasure);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));
 
-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
 
-    // Run asynchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure);
-    }
-
-    // Flush output buffers if execution was successful.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
-    sp<V1_0::IPreparedModel> resource = kPreparedModel;
+    sp<V1_2::IPreparedModel> resource = kPreparedModel;
     return resource;
 }
 
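The rewritten execute() leans on NN_TRY for control flow: a failed execution early-returns before the unflush step, which is what made the old "if result.has_value()" chain redundant. A hedged sketch of the NN_TRY idiom; the real macro lives in nnapi/Result.h, this expansion is simplified, and the error-wrapping type is an assumption:

    // Sketch: evaluate a Result-like expression, early-return its error on
    // failure, otherwise yield the unwrapped value (GNU statement expression).
    #define NN_TRY_SKETCH(expr)                                              \
        ({                                                                   \
            auto nnTrySketchTemp = (expr);                                   \
            if (!nnTrySketchTemp.has_value()) {                              \
                return base::unexpected(std::move(nnTrySketchTemp).error()); \
            }                                                                \
            std::move(nnTrySketchTemp).value();                              \
        })
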
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
index 637179d..fda79c8 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
@@ -24,8 +24,12 @@
 #include <nnapi/Types.h>
 #include <memory>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Class that adapts V1_3::IBuffer to nn::IBuffer.
 class Buffer final : public nn::IBuffer {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
index d46b111..643172e 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -34,8 +34,31 @@
 #include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Converts the results of IDevice::getSupportedOperations* to the NN canonical format. On success,
+// this function returns with the supported operations as indicated by a driver. On failure, this
+// function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations);
+
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_R. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with the output shapes and the timing information. On failure, this function returns with
+// the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -52,11 +75,10 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute_1_3 asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -73,8 +95,6 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
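The 1.3 callback classes fan each notify overload out to the free function of the matching HAL version, so per-version validation lives in exactly one place; in sketch form:

    // notify(status, sp<V1_0::IPreparedModel>)     -> V1_0::utils::prepareModelCallback(...)
    // notify_1_2(status, sp<V1_2::IPreparedModel>) -> V1_2::utils::prepareModelCallback(...)
    // notify_1_3(status, sp<IPreparedModel>)       -> prepareModelCallback(...) above
    // ...and likewise notify/notify_1_2/notify_1_3 on ExecutionCallback funnel
    // into the executionCallback of the corresponding version.
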
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index 9653a05..74a6534 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -44,7 +44,7 @@
         const hal::V1_3::Request::MemoryPool& memoryPool);
 GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const hal::V1_3::OptionalTimePoint& optionalTimePoint);
-GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
+GeneralResult<OptionalDuration> unvalidatedConvert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
 GeneralResult<ErrorStatus> unvalidatedConvert(const hal::V1_3::ErrorStatus& errorStatus);
 
@@ -54,7 +54,7 @@
 GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
 GeneralResult<Request> convert(const hal::V1_3::Request& request);
 GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
-GeneralResult<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
 GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
 
@@ -86,7 +86,7 @@
 nn::GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const nn::OptionalTimePoint& optionalTimePoint);
 nn::GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
+        const nn::OptionalDuration& optionalTimeoutDuration);
 nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus);
 
 nn::GeneralResult<Priority> convert(const nn::Priority& priority);
@@ -96,13 +96,24 @@
 nn::GeneralResult<Request> convert(const nn::Request& request);
 nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
 nn::GeneralResult<OptionalTimeoutDuration> convert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
+        const nn::OptionalDuration& optionalTimeoutDuration);
 nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
 
 nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle);
 nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory);
 nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference);
+nn::GeneralResult<hidl_vec<V1_2::Extension>> convert(const std::vector<nn::Extension>& extensions);
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
+nn::GeneralResult<hidl_vec<V1_2::OutputShape>> convert(
+        const std::vector<nn::OutputShape>& outputShapes);
+nn::GeneralResult<V1_2::DeviceType> convert(const nn::DeviceType& deviceType);
+nn::GeneralResult<V1_2::MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
+nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing);
+
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index 0f5234b..84f606a 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -32,8 +32,12 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Class that adapts V1_3::IDevice to nn::IDevice.
 class Device final : public nn::IDevice {
     struct PrivateConstructorTag {};
 
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index e0d69dd..664d87a 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -29,28 +29,32 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+// Class that adapts V1_3::IPreparedModel to nn::IPreparedModel.
 class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
   public:
     static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
-            sp<V1_3::IPreparedModel> preparedModel);
+            sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously);
 
-    PreparedModel(PrivateConstructorTag tag, sp<V1_3::IPreparedModel> preparedModel,
-                  hal::utils::DeathHandler deathHandler);
+    PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
+                  sp<V1_3::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
@@ -62,6 +66,7 @@
             const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
             const OptionalTimeoutDuration& loopTimeoutDuration) const;
 
+    const bool kExecuteSynchronously;
     const sp<V1_3::IPreparedModel> kPreparedModel;
     const hal::utils::DeathHandler kDeathHandler;
 };
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index ffdeccd..614033e 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -33,17 +33,18 @@
 #include <memory>
 #include <utility>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
 nn::GeneralResult<std::shared_ptr<const Buffer>> Buffer::create(
         sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token) {
     if (buffer == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_3::utils::Buffer::create must have non-null buffer";
+        return NN_ERROR() << "V1_3::utils::Buffer::create must have non-null buffer";
     }
     if (token == static_cast<nn::Request::MemoryDomainToken>(0)) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_3::utils::Buffer::create must have non-zero token";
+        return NN_ERROR() << "V1_3::utils::Buffer::create must have non-zero token";
     }
 
     return std::make_shared<const Buffer>(PrivateConstructorTag{}, std::move(buffer), token);
@@ -65,10 +66,7 @@
 
     const auto ret = kBuffer->copyTo(hidlDst);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);
 
     return {};
 }
@@ -80,10 +78,7 @@
 
     const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status);
 
     return {};
 }
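
HANDLE_HAL_STATUS, used throughout this change, is what lets a status check read as a single streamable statement. A simplified sketch of the expansion; the real macro is in nnapi/hal/HandleError.h, and the details below are best-effort assumptions:

    // Sketch: on a non-NONE status, canonicalize it and early-return an
    // NN_ERROR; the caller's trailing `<< "..."` binds onto that error stream.
    #define HANDLE_HAL_STATUS_SKETCH(status)                                \
        if (const auto canonical = ::android::nn::convert(status).value_or( \
                    ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
            canonical == ::android::nn::ErrorStatus::NONE) {                \
        } else                                                              \
            return NN_ERROR(canonical)
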
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
index e3c6074..af76e6a 100644
--- a/neuralnetworks/1.3/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -30,6 +30,7 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.2/Callbacks.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/PreparedModel.h>
 #include <nnapi/hal/CommonUtils.h>
@@ -39,139 +40,99 @@
 
 #include <utility>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<V1_0::IPreparedModel>& preparedModel) {
-    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<V1_2::IPreparedModel>& preparedModel) {
-    return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
-}
-
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
                                      const V1_2::Timing& timing) {
     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionGeneralResults(const hidl_vec<V1_2::OutputShape>& outputShapes,
-                               const V1_2::Timing& timing) {
+}  // namespace
+
+nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
+        ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    return supportedOperations;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing) {
+    if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        auto canonicalOutputShapes =
+                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << toString(status);
+    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     return hal::utils::makeExecutionFailure(
             convertExecutionGeneralResultsHelper(outputShapes, timing));
 }
 
-}  // namespace
-
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                                const sp<V1_2::IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(V1_2::utils::prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
                                                const sp<IPreparedModel>& preparedModel) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(V1_0::utils::executionCallback(status));
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                            const hidl_vec<V1_2::OutputShape>& outputShapes,
                                            const V1_2::Timing& timing) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
-    }
+    mData.put(V1_2::utils::executionCallback(status, outputShapes, timing));
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
                                            const hidl_vec<V1_2::OutputShape>& outputShapes,
                                            const V1_2::Timing& timing) {
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
-    }
+    mData.put(executionCallback(status, outputShapes, timing));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 949dd0d..8b7db2b 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -272,47 +272,26 @@
 
 GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
-    constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count();
-    const auto makeTimePoint = [](uint64_t count) -> GeneralResult<OptionalTimePoint> {
-        if (count > kTimePointMaxCount) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimePoint because the count exceeds "
-                      "the max";
-        }
-        const auto nanoseconds = std::chrono::nanoseconds{count};
-        return TimePoint{nanoseconds};
-    };
-
     using Discriminator = hal::V1_3::OptionalTimePoint::hidl_discriminator;
     switch (optionalTimePoint.getDiscriminator()) {
         case Discriminator::none:
-            return std::nullopt;
+            return {};
         case Discriminator::nanosecondsSinceEpoch:
-            return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch());
+            return TimePoint{Duration{optionalTimePoint.nanosecondsSinceEpoch()}};
     }
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "Invalid OptionalTimePoint discriminator "
            << underlyingType(optionalTimePoint.getDiscriminator());
 }
 
-GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
+GeneralResult<OptionalDuration> unvalidatedConvert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
-    constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count();
-    const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult<OptionalTimeoutDuration> {
-        if (count > kTimeoutDurationMaxCount) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimeoutDuration because the count "
-                      "exceeds the max";
-        }
-        return TimeoutDuration{count};
-    };
-
     using Discriminator = hal::V1_3::OptionalTimeoutDuration::hidl_discriminator;
     switch (optionalTimeoutDuration.getDiscriminator()) {
         case Discriminator::none:
-            return std::nullopt;
+            return {};
         case Discriminator::nanoseconds:
-            return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds());
+            return Duration(optionalTimeoutDuration.nanoseconds());
     }
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "Invalid OptionalTimeoutDuration discriminator "
@@ -360,7 +339,7 @@
     return validatedConvert(optionalTimePoint);
 }
 
-GeneralResult<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
     return validatedConvert(optionalTimeoutDuration);
 }
@@ -629,27 +608,16 @@
     OptionalTimePoint ret;
     if (optionalTimePoint.has_value()) {
         const auto count = optionalTimePoint.value().time_since_epoch().count();
-        if (count < 0) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimePoint because time since epoch "
-                      "count is "
-                      "negative";
-        }
         ret.nanosecondsSinceEpoch(count);
     }
     return ret;
 }
 
 nn::GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
+        const nn::OptionalDuration& optionalTimeoutDuration) {
     OptionalTimeoutDuration ret;
     if (optionalTimeoutDuration.has_value()) {
         const auto count = optionalTimeoutDuration.value().count();
-        if (count < 0) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimeoutDuration because count is "
-                      "negative";
-        }
         ret.nanoseconds(count);
     }
     return ret;
@@ -697,7 +665,7 @@
 }
 
 nn::GeneralResult<OptionalTimeoutDuration> convert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
+        const nn::OptionalDuration& optionalTimeoutDuration) {
     return validatedConvert(optionalTimeoutDuration);
 }
 
@@ -717,4 +685,38 @@
     return validatedConvert(bufferRoles);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_2::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference) {
+    return V1_2::utils::convert(executionPreference);
+}
+
+nn::GeneralResult<hidl_vec<V1_2::Extension>> convert(const std::vector<nn::Extension>& extensions) {
+    return V1_2::utils::convert(extensions);
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles) {
+    return V1_2::utils::convert(handles);
+}
+
+nn::GeneralResult<hidl_vec<V1_2::OutputShape>> convert(
+        const std::vector<nn::OutputShape>& outputShapes) {
+    return V1_2::utils::convert(outputShapes);
+}
+
+nn::GeneralResult<V1_2::DeviceType> convert(const nn::DeviceType& deviceType) {
+    return V1_2::utils::convert(deviceType);
+}
+
+nn::GeneralResult<V1_2::MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
+    return V1_2::utils::convert(measureTiming);
+}
+
+nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing) {
+    return V1_2::utils::convert(timing);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 82837ba..d710b85 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -36,6 +36,7 @@
 #include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/Device.h>
+#include <nnapi/hal/1.2/Utils.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 #include <nnapi/hal/ProtectCallback.h>
@@ -47,6 +48,9 @@
 #include <string>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and on protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {
 
@@ -66,29 +70,27 @@
     return hidlPreparedModels;
 }
 
-nn::GeneralResult<nn::SharedBuffer> convert(
-        nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
-    return NN_TRY(std::move(result));
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
 }
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_3::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_3::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_3 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_3(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
+}
+
+nn::GeneralResult<nn::SharedBuffer> allocationCallback(ErrorStatus status,
+                                                       const sp<IBuffer>& buffer, uint32_t token) {
+    HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status);
+    return Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token));
 }
 
 }  // namespace
@@ -104,12 +106,12 @@
                << "V1_3::utils::Device::create must have non-null device";
     }
 
-    auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
-    const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
-    auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    auto versionString = NN_TRY(V1_2::utils::getVersionStringFrom(device.get()));
+    const auto deviceType = NN_TRY(V1_2::utils::getDeviceTypeFrom(device.get()));
+    auto extensions = NN_TRY(V1_2::utils::getSupportedExtensionsFrom(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
     const auto numberOfCacheFilesNeeded =
-            NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));
+            NN_TRY(V1_2::utils::getNumberOfCacheFilesNeededFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(
@@ -174,27 +176,12 @@
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "IDevice::getSupportedOperations_1_3 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -207,12 +194,12 @@
             NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
-    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPreference = NN_TRY(convert(preference));
     const auto hidlPriority = NN_TRY(convert(priority));
     const auto hidlDeadline = NN_TRY(convert(deadline));
-    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
-    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = V1_2::utils::CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -221,10 +208,7 @@
             kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                       hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -233,9 +217,9 @@
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlDeadline = NN_TRY(convert(deadline));
-    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
-    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = V1_2::utils::CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -243,10 +227,7 @@
     const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
                                                         hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
@@ -260,27 +241,13 @@
     const auto hidlInputRoles = NN_TRY(convert(inputRoles));
     const auto hidlOutputRoles = NN_TRY(convert(outputRoles));
 
-    nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
-        } else if (buffer == nullptr) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
-        } else if (token == 0) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
-        } else {
-            result = convert(
-                    Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
-        }
-    };
+    auto cb = hal::utils::CallbackValue(allocationCallback);
 
     const auto ret =
             kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 49b9b0b..7b4b7ba 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -39,28 +39,23 @@
 #include <utility>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and on protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::V1_3::utils {
 namespace {
 
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
-        const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+        ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }
 
-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-convertExecuteFencedResults(const hidl_handle& syncFence,
-                            const sp<IFencedExecutionCallback>& callback) {
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
+        ErrorStatus status, const hidl_handle& syncFence,
+        const sp<IFencedExecutionCallback>& callback) {
+    HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
         auto sharedHandle = NN_TRY(nn::convert(syncFence));
@@ -75,23 +70,12 @@
     // Create callback which can be used to retrieve the execution error status and timings.
     nn::ExecuteFencedInfoCallback resultCallback =
             [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
-        nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
-                NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-        auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
-                            const V1_2::Timing& timingFenced) {
-            if (status != ErrorStatus::NONE) {
-                const auto canonical =
-                        nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-                result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
-            } else {
-                result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
-            }
-        };
+        auto cb = hal::utils::CallbackValue(convertFencedExecutionCallbackResults);
 
         const auto ret = callback->getExecutionInfo(cb);
         HANDLE_TRANSPORT_FAILURE(ret);
 
-        return result;
+        return cb.take();
     };
 
     return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
@@ -100,42 +84,34 @@
 }  // namespace
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_3::IPreparedModel> preparedModel) {
+        sp<V1_3::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }
 
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_3::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
                                     const OptionalTimePoint& deadline,
                                     const OptionalTimeoutDuration& loopTimeoutDuration) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
-                              const V1_2::Timing& timing) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);
 
     const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
                                                               loopTimeoutDuration, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -148,9 +124,8 @@
     const auto ret =
             kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
+    if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
@@ -159,49 +134,36 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& deadline,
-        const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
+    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
     const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
     const auto hidlLoopTimeoutDuration =
             NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
 
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously
+                          ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                 hidlLoopTimeoutDuration)
+                          : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                                  hidlLoopTimeoutDuration);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));
 
-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                      hidlLoopTimeoutDuration);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
 
-    // Run asymchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
-                                       hidlLoopTimeoutDuration);
-    }
-
-    // Flush output buffers if suxcessful execution.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
 PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
                              nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-                             const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-                             const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+                             const nn::OptionalDuration& loopTimeoutDuration,
+                             const nn::OptionalDuration& timeoutDurationAfterFence) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared =
@@ -209,28 +171,18 @@
 
     const auto hidlRequest = NN_TRY(convert(requestInShared));
     const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
-    const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
+    const auto hidlMeasure = NN_TRY(convert(measure));
     const auto hidlDeadline = NN_TRY(convert(deadline));
     const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
     const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
 
-    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
-                        const sp<IFencedExecutionCallback>& callback) {
-        if (status != ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
-        } else {
-            result = convertExecuteFencedResults(syncFence, callback);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
 
     const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
                                                    hidlDeadline, hidlLoopTimeoutDuration,
                                                    hidlTimeoutDurationAfterFence, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
-    auto [syncFence, callback] = NN_TRY(std::move(result));
+    auto [syncFence, callback] = NN_TRY(cb.take());
 
     // If executeFenced required the request memory to be moved into shared memory, block here until
     // the fenced execution has completed and flush the memory back.
diff --git a/neuralnetworks/utils/README.md b/neuralnetworks/utils/README.md
index 0dee103..45ca0b4 100644
--- a/neuralnetworks/utils/README.md
+++ b/neuralnetworks/utils/README.md
@@ -1,11 +1,11 @@
 # NNAPI Conversions
 
 `convert` fails if either the source type or the destination type is invalid, and it yields a valid
-object if the conversion succeeds. For example, let's say that an enumeration in the current
-version has fewer possible values than the "same" canonical enumeration, such as `OperationType`.
-The new value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid
-existing value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN
-HAL 1.0) is valid. This can be seen in the following model conversions:
+object if the conversion succeeds. For example, let's say that an enumeration in the current version
+has fewer possible values than the "same" canonical enumeration, such as `OperationType`. The new
+value of `HARD_SWISH` (introduced in Android R / NN HAL 1.3) does not map to any valid existing
+value in `OperationType`, but an older value of `ADD` (introduced in Android OC-MR1 / NN HAL 1.0) is
+valid. This can be seen in the following model conversions:
 
 ```cpp
 // Unsuccessful conversion
@@ -48,3 +48,50 @@
 `unvalidatedConvert` functions operate on types that are either used in a HIDL method call directly
 (i.e., not as a nested class) or used in a subsequent version of the NN HAL. Prefer using `convert`
 over `unvalidatedConvert`.
+
+# HIDL Interface Lifetimes across Processes
+
+Some notes about HIDL interface objects and lifetimes across processes:
+
+All HIDL interface objects inherit from `IBase`, which itself inherits from `::android::RefBase`. As
+such, all HIDL interface objects are reference counted and must be owned through `::android::sp` (or
+referenced through `::android::wp`). Allocating `RefBase` objects on the stack will log errors and
+may result in crashes, and deleting a `RefBase` object through other means (e.g., `delete`,
+`free`, or RAII cleanup through `std::unique_ptr` or some equivalent) will result in double-free
+and/or use-after-free undefined behavior.
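+
+A minimal sketch of these ownership rules (using `V1_3::IDevice` purely as an example interface;
+`getService` is the generated HIDL service lookup):
+
+```cpp
+// Correct: reference-counted ownership through ::android::sp.
+sp<V1_3::IDevice> device = V1_3::IDevice::getService();
+
+// Incorrect: allocating a RefBase-derived object on the stack logs errors and may crash.
+// V1_3::IDevice deviceOnStack;
+
+// Incorrect: deleting through another mechanism leads to double-free / use-after-free.
+// delete device.get();
+```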
+
+HIDL/Binder manages the reference count of HIDL interface objects automatically across processes. If
+a process that references (but did not create) the HIDL interface object dies, HIDL/Binder ensures
+any reference count it held is properly released. (Caveat: HIDL/Binder may behave strangely with
+`::android::wp` references.)
+
+If the process which created the HIDL interface object dies, any call on this object from another
+process will result in a HIDL transport error with the code `DEAD_OBJECT`.
+
+# Protecting Asynchronous Calls across HIDL
+
+Some notes about asynchronous calls across HIDL:
+
+For synchronous calls across HIDL, if an error occurs after the function was called but before it
+returns, HIDL will return a transport error. For example, if the message cannot be delivered to the
+server process or if the server process dies before returning a result, HIDL will return from the
+function with the appropriate transport error in the `Return<>` object, which can be queried with
+`Return<>::isOk()`, `Return<>::isDeadObject()`, `Return<>::description()`, etc.
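+
+For example, a caller might translate a transport error as follows (a sketch; the utility code in
+this directory wraps this pattern in the `HANDLE_TRANSPORT_FAILURE` macro):
+
+```cpp
+const auto ret = device->getCapabilities_1_3(cb);
+if (!ret.isOk()) {
+    // Map a dead server process to DEAD_OBJECT; everything else is a general failure.
+    const auto code = ret.isDeadObject() ? nn::ErrorStatus::DEAD_OBJECT
+                                         : nn::ErrorStatus::GENERAL_FAILURE;
+    return NN_ERROR(code) << ret.description();
+}
+```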
+
+However, HIDL offers no such error management in the case of asynchronous calls. By default, if the
+client launches an asynchronous task and the server fails to return a result through the callback,
+the client will be left waiting indefinitely for a result it will never receive.
+
+In the NNAPI, `IDevice::prepareModel*` and `IPreparedModel::execute*` (but not
+`IPreparedModel::executeSynchronously*`) are asynchronous calls across HIDL. Specifically, these
+asynchronous functions are called with a HIDL interface callback object (`IPreparedModelCallback`
+for `IDevice::prepareModel*` and `IExecutionCallback` for `IPreparedModel::execute*`) and are
+expected to return quickly; the results are returned at a later time through these callback
+objects.
+
+To protect against the case when the server dies after the asynchronous task was called successfully
+but before the results could be returned, HIDL provides an object called a `hidl_death_recipient`,
+which can be used to detect when an interface object (and more generally, the server process) has
+died. nnapi/hal/ProtectCallback.h's `DeathHandler` uses `hidl_death_recipient`s to detect when the
+driver process has died, and `DeathHandler` will unblock any thread waiting on the results of an
+`IProtectedCallback` callback object that may otherwise not be signaled. In order for this to work,
+the `IProtectedCallback` object must have been registered via `DeathHandler::protectCallback()`.
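+
+A sketch of the resulting pattern, mirroring `Device::prepareModel` in the V1_3 utils:
+
+```cpp
+const auto cb = sp<PreparedModelCallback>::make();
+// Protect the callback for the duration of the call; if the driver process dies, DeathHandler
+// unblocks any thread waiting on the callback's result.
+const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+const auto ret = kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
+                                           hidlModelCache, hidlDataCache, hidlToken, cb);
+const auto status = HANDLE_TRANSPORT_FAILURE(ret);
+HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+
+return cb->get();  // Blocks until notify*() or notifyAsDeadObject() is called.
+```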
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 43bb0c6..b3989e5 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -44,9 +44,18 @@
 bool hasNoPointerData(const nn::Model& model);
 bool hasNoPointerData(const nn::Request& request);
 
-// Relocate pointer-based data to shared memory.
+// Relocate pointer-based data to shared memory. If `model` has no Operand::LifeTime::POINTER data,
+// the function returns with a reference to `model`. If `model` has Operand::LifeTime::POINTER data,
+// the model is copied to `maybeModelInSharedOut` with the POINTER data relocated to a memory pool,
+// and the function returns with a reference to `*maybeModelInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
         const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+
+// Relocate pointer-based data to shared memory. If `request` has no
+// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
+// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
+// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
+// returns with a reference to `*maybeRequestInSharedOut`.
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
         const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
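+
+// Example usage of the flush/unflush pair (a sketch of the pattern used by the callers):
+//   std::optional<nn::Request> maybeRequestInShared;
+//   const nn::Request& requestInShared =
+//           NN_TRY(flushDataFromPointerToShared(&request, &maybeRequestInShared));
+//   ... send requestInShared across HIDL and wait for the execution to finish ...
+//   NN_TRY(unflushDataFromSharedToPointer(request, maybeRequestInShared));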
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index 78b2a12..95a20a8 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -79,4 +79,11 @@
     return makeExecutionFailure(makeGeneralFailure(result, status));
 }
 
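+// If the HAL status is NONE, HANDLE_HAL_STATUS expands to a no-op; otherwise it converts the
+// status to the canonical error code and returns an NN_ERROR from the enclosing function. The
+// trailing `else return NN_ERROR(canonical)` is deliberately left open so that the invocation can
+// stream additional context, e.g.:
+//   HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);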
+#define HANDLE_HAL_STATUS(status)                                       \
+    if (const auto canonical = ::android::nn::convert(status).value_or( \
+                ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
+        canonical == ::android::nn::ErrorStatus::NONE) {                \
+    } else                                                              \
+        return NN_ERROR(canonical)
+
 }  // namespace android::hardware::neuralnetworks::utils
\ No newline at end of file
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index 4b32b4e..985cddb 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -32,13 +32,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 };
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
index 85bd613..c921885 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
@@ -28,6 +28,9 @@
 #include <mutex>
 #include <vector>
 
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and on protecting asynchronous calls across HIDL.
+
 namespace android::hardware::neuralnetworks::utils {
 
 class IProtectedCallback {
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
index 996ec1e..9d5e3e6 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
@@ -34,7 +34,7 @@
     struct PrivateConstructorTag {};
 
   public:
-    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>(bool blocking)>;
+    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>()>;
 
     static nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> create(Factory makeBuffer);
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4bfed6c..84ae799 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -46,8 +46,8 @@
                              nn::Capabilities capabilities, nn::SharedDevice device);
 
     nn::SharedDevice getDevice() const EXCLUDES(mMutex);
-    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const
-            EXCLUDES(mMutex);
+    nn::GeneralResult<nn::SharedDevice> recover(const nn::IDevice* failingDevice,
+                                                bool blocking) const EXCLUDES(mMutex);
 
     const std::string& getName() const override;
     const std::string& getVersionString() const override;
@@ -81,17 +81,14 @@
   private:
     bool isValidInternal() const EXCLUDES(mMutex);
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
-            bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
-            nn::Priority priority, nn::OptionalTimePoint deadline,
-            const std::vector<nn::SharedHandle>& modelCache,
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
             const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
-            bool blocking, nn::OptionalTimePoint deadline,
-            const std::vector<nn::SharedHandle>& modelCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
             const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedBuffer> allocateInternal(
-            bool blocking, const nn::BufferDesc& desc,
-            const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
             const std::vector<nn::BufferRole>& inputRoles,
             const std::vector<nn::BufferRole>& outputRoles) const;
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index c2940d1..faae673 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -34,7 +34,7 @@
     struct PrivateConstructorTag {};
 
   public:
-    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>(bool blocking)>;
+    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>()>;
 
     static nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> create(
             Factory makePreparedModel);
@@ -49,13 +49,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
index 7103c6b..6679afe 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
@@ -17,19 +17,60 @@
 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
 
+#include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
 
 #include <condition_variable>
+#include <functional>
 #include <mutex>
 #include <optional>
+#include <type_traits>
 
 namespace android::hardware::neuralnetworks::utils {
 
-// This class is thread safe.
+// This class adapts a function pointer and offers two affordances:
+// 1) The object can generate a callback (via the implicit conversion operator) that, when
+//    invoked, stores its result inside the `CallbackValue`.
+// 2) The stored result can later be retrieved with `take`.
+//
+// This class is thread compatible.
+template <typename ReturnType, typename... ArgTypes>
+class CallbackValue final {
+  public:
+    using FunctionType = std::add_pointer_t<ReturnType(ArgTypes...)>;
+    using CallbackType = std::function<void(ArgTypes...)>;
+
+    explicit CallbackValue(FunctionType fn);
+
+    // Creates a callback that forwards its arguments to `mFunction` and stores the result in
+    // `mReturnValue`.
+    /*implicit*/ operator CallbackType();  // NOLINT(google-explicit-constructor)
+
+    // Take the result of calling `mFunction`.
+    // Precondition: mReturnValue.has_value()
+    // Postcondition: !mReturnValue.has_value()
+    [[nodiscard]] ReturnType take();
+
+  private:
+    std::optional<ReturnType> mReturnValue;
+    FunctionType mFunction;
+};
+
+// Deduction guide for CallbackValue when constructed with a function pointer.
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue(ReturnType (*)(ArgTypes...))->CallbackValue<ReturnType, ArgTypes...>;
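+
+// Example usage (a sketch mirroring how the V1_3 utils retrieve device capabilities):
+//   auto cb = CallbackValue(capabilitiesCallback);
+//   const auto ret = device->getCapabilities_1_3(cb);
+//   HANDLE_TRANSPORT_FAILURE(ret);
+//   return cb.take();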
+
+// Thread-safe container to pass a value between threads.
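+//
+// Example usage (a sketch based on the callback classes in this change):
+//   TransferValue<Data> result;              // `Data` is any movable type.
+//   ... callback thread: result.put(std::move(value)); ...
+//   Data value = result.take();              // Blocks until a value is available.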
 template <typename Type>
 class TransferValue final {
   public:
+    // Put the value in `TransferValue`. If `TransferValue` already has a value, this function is a
+    // no-op.
     void put(Type object) const;
+
+    // Take the value stored in `TransferValue`. If no value is available, this function will block
+    // until the value becomes available.
+    // Postcondition: !mObject.has_value()
     [[nodiscard]] Type take() const;
 
   private:
@@ -38,7 +79,23 @@
     mutable std::optional<Type> mObject GUARDED_BY(mMutex);
 };
 
-// template implementation
+// template implementations
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::CallbackValue(FunctionType fn) : mFunction(fn) {}
+
+template <typename ReturnType, typename... ArgTypes>
+CallbackValue<ReturnType, ArgTypes...>::operator CallbackType() {
+    return [this](ArgTypes... args) { mReturnValue = mFunction(args...); };
+}
+
+template <typename ReturnType, typename... ArgTypes>
+ReturnType CallbackValue<ReturnType, ArgTypes...>::take() {
+    CHECK(mReturnValue.has_value());
+    std::optional<ReturnType> object;
+    std::swap(object, mReturnValue);
+    return std::move(object).value();
+}
 
 template <typename Type>
 void TransferValue<Type>::put(Type object) const {
@@ -56,6 +113,7 @@
     std::unique_lock lock(mMutex);
     base::ScopedLockAssertion lockAssertion(mMutex);
     mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); });
+    CHECK(mObject.has_value());
     std::optional<Type> object;
     std::swap(object, mObject);
     return std::move(object).value();
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9ae7a63..a46f4ac 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -29,7 +29,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
                               const nn::OptionalTimePoint& /*deadline*/,
-                              const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+                              const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
@@ -37,8 +37,8 @@
 InvalidPreparedModel::executeFenced(
         const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
         nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
index 984295b..cf5496a 100644
--- a/neuralnetworks/utils/common/src/ResilientBuffer.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
@@ -36,7 +36,7 @@
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "utils::ResilientBuffer::create must have non-empty makeBuffer";
     }
-    auto buffer = NN_TRY(makeBuffer(/*blocking=*/true));
+    auto buffer = NN_TRY(makeBuffer());
     CHECK(buffer != nullptr);
     return std::make_shared<const ResilientBuffer>(PrivateConstructorTag{}, std::move(makeBuffer),
                                                    std::move(buffer));
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 2f83c5c..6ad3fad 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -49,7 +49,17 @@
         return result;
     }
 
-    device = resilientDevice.recover(device.get(), blocking);
+    // Attempt recovery and return if it fails.
+    auto maybeDevice = resilientDevice.recover(device.get(), blocking);
+    if (!maybeDevice.has_value()) {
+        const auto& [resultErrorMessage, resultErrorCode] = result.error();
+        const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeDevice.error();
+        return nn::error(resultErrorCode)
+               << resultErrorMessage << ", and failed to recover dead device with error "
+               << recoveryErrorCode << ": " << recoveryErrorMessage;
+    }
+    device = std::move(maybeDevice).value();
+
     return fn(*device);
 }
 
@@ -94,7 +104,8 @@
     return mDevice;
 }
 
-nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const {
+nn::GeneralResult<nn::SharedDevice> ResilientDevice::recover(const nn::IDevice* failingDevice,
+                                                             bool blocking) const {
     std::lock_guard guard(mMutex);
 
     // Another caller updated the failing device.
@@ -102,13 +113,7 @@
         return mDevice;
     }
 
-    auto maybeDevice = kMakeDevice(blocking);
-    if (!maybeDevice.has_value()) {
-        const auto& [message, code] = maybeDevice.error();
-        LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message;
-        return mDevice;
-    }
-    auto device = std::move(maybeDevice).value();
+    auto device = NN_TRY(kMakeDevice(blocking));
 
     // If recovered device has different metadata than what is cached (i.e., because it was
     // updated), mark the device as invalid and preserve the cached data.
@@ -176,11 +181,11 @@
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
-    ResilientPreparedModel::Factory makePreparedModel =
-            [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
-             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
-        return device->prepareModelInternal(blocking, model, preference, priority, deadline,
-                                            modelCache, dataCache, token);
+    ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model,
+                                                         preference, priority, deadline, modelCache,
+                                                         dataCache, token] {
+        return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
+                                            dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 }
@@ -189,11 +194,9 @@
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
-    ResilientPreparedModel::Factory makePreparedModel =
-            [device = std::move(self), deadline, modelCache, dataCache,
-             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
-        return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache,
-                                                     token);
+    ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), deadline,
+                                                         modelCache, dataCache, token] {
+        return device->prepareModelFromCacheInternal(deadline, modelCache, dataCache, token);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 }
@@ -203,10 +206,9 @@
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
     auto self = shared_from_this();
-    ResilientBuffer::Factory makeBuffer =
-            [device = std::move(self), desc, preparedModels, inputRoles,
-             outputRoles](bool blocking) -> nn::GeneralResult<nn::SharedBuffer> {
-        return device->allocateInternal(blocking, desc, preparedModels, inputRoles, outputRoles);
+    ResilientBuffer::Factory makeBuffer = [device = std::move(self), desc, preparedModels,
+                                           inputRoles, outputRoles] {
+        return device->allocateInternal(desc, preparedModels, inputRoles, outputRoles);
     };
     return ResilientBuffer::create(std::move(makeBuffer));
 }
@@ -217,9 +219,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
-        bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
-        nn::Priority priority, nn::OptionalTimePoint deadline,
-        const std::vector<nn::SharedHandle>& modelCache,
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
@@ -229,12 +230,11 @@
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
                                    token);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
-        bool blocking, nn::OptionalTimePoint deadline,
-        const std::vector<nn::SharedHandle>& modelCache,
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
@@ -242,12 +242,11 @@
     const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }
 
 nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
-        bool blocking, const nn::BufferDesc& desc,
-        const std::vector<nn::SharedPreparedModel>& preparedModels,
+        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
     if (!isValidInternal()) {
@@ -256,7 +255,7 @@
     const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
         return device.allocate(desc, preparedModels, inputRoles, outputRoles);
     };
-    return protect(*this, fn, blocking);
+    return protect(*this, fn, /*blocking=*/false);
 }
 
 }  // namespace android::hardware::neuralnetworks::utils
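
Taken together, the ResilientDevice.cpp hunks are a behavior change as well as a signature cleanup: protect() is now invoked with /*blocking=*/false at every call site, so recovering a crashed driver during prepareModel, prepareModelFromCache, or allocate never waits on the restarted service. An illustrative stand-in for the retry pattern involved; the names here are hypothetical, and the real protect() is defined earlier in this file, outside the diff:

    // Hypothetical helper, not the real protect(): run an operation once, and
    // on failure attempt a single recovery followed by a single retry. The
    // blocking flag, now hardwired to false at the call sites above, would
    // control whether recovery waits for the restarted service to come back.
    template <typename Fn, typename Recover>
    auto retryOnceAfterRecovery(Fn&& fn, Recover&& recover, bool blocking)
            -> decltype(fn()) {
        auto result = fn();
        if (!result.has_value() && recover(blocking)) {
            result = fn();  // one retry against the recovered device
        }
        return result;
    }
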
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 1c9ecba..b8acee1 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -36,7 +36,7 @@
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel";
     }
-    auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true));
+    auto preparedModel = NN_TRY(makePreparedModel());
     CHECK(preparedModel != nullptr);
     return std::make_shared<ResilientPreparedModel>(
             PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));
@@ -64,16 +64,17 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
                                 const nn::OptionalTimePoint& deadline,
-                                const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+                                const nn::OptionalDuration& loopTimeoutDuration) const {
     return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-ResilientPreparedModel::executeFenced(
-        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
-        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-        const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-        const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+ResilientPreparedModel::executeFenced(const nn::Request& request,
+                                      const std::vector<nn::SyncFence>& waitFor,
+                                      nn::MeasureTiming measure,
+                                      const nn::OptionalTimePoint& deadline,
+                                      const nn::OptionalDuration& loopTimeoutDuration,
+                                      const nn::OptionalDuration& timeoutDurationAfterFence) const {
     return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
                                              loopTimeoutDuration, timeoutDurationAfterFence);
 }
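
The execute() and executeFenced() hunks above track a rename in the canonical NNAPI types from nn::OptionalTimeoutDuration to nn::OptionalDuration. As a sketch of the shape such an alias conventionally takes; the authoritative definitions live in the nnapi Types.h headers, not in this diff:

    #include <chrono>
    #include <optional>

    // Sketch assuming the usual std::chrono-based shape; consult the nnapi
    // Types.h headers for the authoritative definitions.
    using Duration = std::chrono::nanoseconds;
    using OptionalDuration = std::optional<Duration>;
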
diff --git a/security/keymint/aidl/vts/functional/AndroidTest.xml b/security/keymint/aidl/vts/functional/AndroidTest.xml
index 43e7a8a..de543f1 100644
--- a/security/keymint/aidl/vts/functional/AndroidTest.xml
+++ b/security/keymint/aidl/vts/functional/AndroidTest.xml
@@ -13,7 +13,7 @@
      See the License for the specific language governing permissions and
      limitations under the License.
 -->
-<configuration description="Runs VtsAidlKeyMintV1_0TargetTest.">
+<configuration description="Runs VtsAidlKeyMintTargetTest.">
     <option name="test-suite-tag" value="apct" />
     <option name="test-suite-tag" value="apct-native" />
 
@@ -22,12 +22,13 @@
 
     <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
         <option name="cleanup" value="true" />
-        <option name="push" value="VtsAidlKeyMintV1_0TargetTest->/data/local/tmp/VtsAidlKeyMintV1_0TargetTest" />
+        <option name="push"
+                value="VtsAidlKeyMintTargetTest->/data/local/tmp/VtsAidlKeyMintTargetTest" />
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.GTest" >
         <option name="native-test-device-path" value="/data/local/tmp" />
-        <option name="module-name" value="VtsAidlKeyMintV1_0TargetTest" />
+        <option name="module-name" value="VtsAidlKeyMintTargetTest" />
         <option name="native-test-timeout" value="900000"/>
     </test>
 </configuration>