Merge "Implement keymint V1 aidl interfaces, service module, and vts tests."
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index 44eccd3..2099dc0 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -1209,7 +1209,12 @@
             return notify;
         }
 
-        if (physicalCameraMetadata.size() != request->expectedPhysicalResults.size()) {
+        // Physical device results are only expected in the final partial
+        // result notification.
+        bool expectPhysicalResults = !(request->usePartialResult &&
+                (results.partialResult < request->numPartialResults));
+        if (expectPhysicalResults &&
+                (physicalCameraMetadata.size() != request->expectedPhysicalResults.size())) {
             ALOGE("%s: Frame %d: Returned physical metadata count %zu "
                     "must be equal to expected count %zu", __func__, frameNumber,
                     physicalCameraMetadata.size(), request->expectedPhysicalResults.size());
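The check above encodes the camera HAL contract that physical-camera metadata accompanies only the final partial result. A minimal standalone sketch of the predicate, with `FakeRequest` as a simplified stand-in for the test fixture's per-request state:

```cpp
// Minimal standalone sketch of the predicate added above. FakeRequest is a
// simplified stand-in for the test fixture's per-request state.
#include <cassert>

struct FakeRequest {
    bool usePartialResult;
    unsigned numPartialResults;
};

bool expectPhysicalResults(const FakeRequest& request, unsigned partialResult) {
    // Physical metadata is only checked once the final partial result arrives.
    return !(request.usePartialResult && partialResult < request.numPartialResults);
}

int main() {
    const FakeRequest partial{/*usePartialResult=*/true, /*numPartialResults=*/3};
    assert(!expectPhysicalResults(partial, 1));  // intermediate partial: skip check
    assert(expectPhysicalResults(partial, 3));   // final partial: run the check
    const FakeRequest whole{/*usePartialResult=*/false, /*numPartialResults=*/1};
    assert(expectPhysicalResults(whole, 1));     // non-partial result: always check
    return 0;
}
```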
diff --git a/neuralnetworks/1.0/utils/Android.bp b/neuralnetworks/1.0/utils/Android.bp
index 57a052f..4d61fc0 100644
--- a/neuralnetworks/1.0/utils/Android.bp
+++ b/neuralnetworks/1.0/utils/Android.bp
@@ -20,6 +20,7 @@
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.0/"],
     export_include_dirs: ["include"],
+    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
new file mode 100644
index 0000000..65b75e5
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H
+
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+class PreparedModelCallback final : public IPreparedModelCallback,
+                                    public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::GeneralResult<nn::SharedPreparedModel>;
+
+    Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
+
+    Return<void> notify(ErrorStatus status) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_CALLBACKS_H
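These callbacks turn the one-shot asynchronous HIDL `notify()` into a blocking `get()` by handing the result through `hal::utils::TransferValue`. A self-contained sketch of that handshake, using `std::promise` as a stand-in for `TransferValue` and a plain string for the result type:

```cpp
// Self-contained sketch of the notify()/get() handshake these callbacks
// implement; std::promise stands in for hal::utils::TransferValue.
#include <future>
#include <iostream>
#include <string>
#include <thread>

class SketchCallback {
  public:
    // Called once, from the driver's binder thread.
    void notify(std::string result) { mPromise.set_value(std::move(result)); }
    // Called once, from the caller's thread; blocks until notify() runs.
    std::string get() { return mPromise.get_future().get(); }

  private:
    std::promise<std::string> mPromise;
};

int main() {
    SketchCallback cb;
    std::thread driver([&cb] { cb.notify("prepared"); });  // async HAL side
    std::cout << cb.get() << '\n';                         // sync caller side
    driver.join();
    return 0;
}
```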
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
index 8ad98cb..fb77cb2 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
@@ -24,42 +24,44 @@
 
 namespace android::nn {
 
-Result<OperandType> convert(const hal::V1_0::OperandType& operandType);
-Result<OperationType> convert(const hal::V1_0::OperationType& operationType);
-Result<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime);
-Result<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus);
-Result<Capabilities::PerformanceInfo> convert(const hal::V1_0::PerformanceInfo& performanceInfo);
-Result<Capabilities> convert(const hal::V1_0::Capabilities& capabilities);
-Result<DataLocation> convert(const hal::V1_0::DataLocation& location);
-Result<Operand> convert(const hal::V1_0::Operand& operand);
-Result<Operation> convert(const hal::V1_0::Operation& operation);
-Result<Model::OperandValues> convert(const hardware::hidl_vec<uint8_t>& operandValues);
-Result<Memory> convert(const hardware::hidl_memory& memory);
-Result<Model> convert(const hal::V1_0::Model& model);
-Result<Request::Argument> convert(const hal::V1_0::RequestArgument& requestArgument);
-Result<Request> convert(const hal::V1_0::Request& request);
-Result<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status);
+GeneralResult<OperandType> convert(const hal::V1_0::OperandType& operandType);
+GeneralResult<OperationType> convert(const hal::V1_0::OperationType& operationType);
+GeneralResult<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime);
+GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus);
+GeneralResult<Capabilities::PerformanceInfo> convert(
+        const hal::V1_0::PerformanceInfo& performanceInfo);
+GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities);
+GeneralResult<DataLocation> convert(const hal::V1_0::DataLocation& location);
+GeneralResult<Operand> convert(const hal::V1_0::Operand& operand);
+GeneralResult<Operation> convert(const hal::V1_0::Operation& operation);
+GeneralResult<Model::OperandValues> convert(const hardware::hidl_vec<uint8_t>& operandValues);
+GeneralResult<Memory> convert(const hardware::hidl_memory& memory);
+GeneralResult<Model> convert(const hal::V1_0::Model& model);
+GeneralResult<Request::Argument> convert(const hal::V1_0::RequestArgument& requestArgument);
+GeneralResult<Request> convert(const hal::V1_0::Request& request);
+GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status);
 
 }  // namespace android::nn
 
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType);
-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime);
-nn::Result<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
-nn::Result<PerformanceInfo> convert(const nn::Capabilities::PerformanceInfo& performanceInfo);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<DataLocation> convert(const nn::DataLocation& location);
-nn::Result<Operand> convert(const nn::Operand& operand);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues);
-nn::Result<hidl_memory> convert(const nn::Memory& memory);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<RequestArgument> convert(const nn::Request::Argument& requestArgument);
-nn::Result<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool);
-nn::Result<Request> convert(const nn::Request& request);
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& status);
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime);
+nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<PerformanceInfo> convert(
+        const nn::Capabilities::PerformanceInfo& performanceInfo);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<DataLocation> convert(const nn::DataLocation& location);
+nn::GeneralResult<Operand> convert(const nn::Operand& operand);
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues);
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<RequestArgument> convert(const nn::Request::Argument& requestArgument);
+nn::GeneralResult<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool);
+nn::GeneralResult<Request> convert(const nn::Request& request);
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status);
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
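The switch from `Result` to `GeneralResult` attaches a canonical error status to every conversion failure instead of a bare message, so callers can propagate a typed error through `NN_TRY`. A minimal sketch of that value-or-typed-error shape; `SketchError`, `SketchResult`, and the enums are simplified stand-ins for the real types:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

// Error with a status code attached, standing in for nn::GeneralError.
struct SketchError {
    std::string message;
    int32_t status;  // stand-in for nn::ErrorStatus
};

// Value-or-error, standing in for nn::GeneralResult<T>.
template <typename T>
using SketchResult = std::variant<T, SketchError>;

enum class HalLifeTime : int32_t { CONSTANT = 0, POINTER = 1 };
enum class CanonicalLifeTime : int32_t { CONSTANT = 0 };

SketchResult<CanonicalLifeTime> convert(HalLifeTime lifetime) {
    if (lifetime == HalLifeTime::POINTER) {
        // An INVALID_ARGUMENT-style failure rather than a bare message.
        return SketchError{"pointer-based memory cannot cross IPC", /*status=*/4};
    }
    return static_cast<CanonicalLifeTime>(lifetime);
}

int main() {
    auto bad = convert(HalLifeTime::POINTER);
    if (bad.index() == 1) {
        const auto& e = std::get<1>(bad);
        std::cout << "status " << e.status << ": " << e.message << '\n';
    }
    return 0;
}
```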
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
new file mode 100644
index 0000000..4403a57
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H
+
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+class Device final : public nn::IDevice {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
+                                                                   sp<V1_0::IDevice> device);
+
+    Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities,
+           sp<V1_0::IDevice> device, hal::utils::DeathHandler deathHandler);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString = "UNKNOWN";
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const sp<V1_0::IDevice> kDevice;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_DEVICE_H
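`Device` uses the private-tag idiom: `create()` performs the fallible setup and is the only code that can name `PrivateConstructorTag`, yet the constructor stays public so `std::make_shared` can reach it. A self-contained sketch of the idiom with a hypothetical `Gadget` class:

```cpp
// Self-contained sketch of the private-tag idiom: only Gadget's own members
// can name PrivateConstructorTag, so the public constructor is unreachable
// from outside even though std::make_shared can call it.
#include <iostream>
#include <memory>
#include <string>

class Gadget {
    struct PrivateConstructorTag {};  // private: outsiders cannot name it

  public:
    static std::shared_ptr<const Gadget> create(std::string name) {
        // Fallible setup (validation, capability queries, ...) happens here,
        // before the object is ever constructed.
        return std::make_shared<const Gadget>(PrivateConstructorTag{}, std::move(name));
    }

    Gadget(PrivateConstructorTag /*tag*/, std::string name) : mName(std::move(name)) {}

    const std::string& name() const { return mName; }

  private:
    const std::string mName;
};

int main() {
    const auto gadget = Gadget::create("example");
    std::cout << gadget->name() << '\n';
    // Gadget g(Gadget::PrivateConstructorTag{}, "x");  // would not compile
    return 0;
}
```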
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
new file mode 100644
index 0000000..31f366d
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+class PreparedModel final : public nn::IPreparedModel {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
+            sp<V1_0::IPreparedModel> preparedModel);
+
+    PreparedModel(PrivateConstructorTag tag, sp<V1_0::IPreparedModel> preparedModel,
+                  hal::utils::DeathHandler deathHandler);
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+
+  private:
+    const sp<V1_0::IPreparedModel> kPreparedModel;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PREPARED_MODEL_H
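`getUnderlyingResource()` returns `std::any` so a caller that knows the concrete HAL version can recover the wrapped interface without `nn::IPreparedModel` exposing HIDL types. A sketch of the recovery side, modeling `sp<>` with `std::shared_ptr` and a hypothetical `FakeHidlPreparedModel`:

```cpp
// Sketch of recovering the version-specific handle from getUnderlyingResource().
// std::shared_ptr models sp<>, and FakeHidlPreparedModel is hypothetical.
#include <any>
#include <iostream>
#include <memory>

struct FakeHidlPreparedModel {};  // stand-in for V1_0::IPreparedModel

std::any getUnderlyingResource() {
    return std::make_shared<FakeHidlPreparedModel>();
}

int main() {
    std::any resource = getUnderlyingResource();
    // The pointer form of any_cast yields nullptr on a type mismatch
    // instead of throwing std::bad_any_cast.
    if (const auto* handle =
                std::any_cast<std::shared_ptr<FakeHidlPreparedModel>>(&resource)) {
        std::cout << "recovered handle, use_count=" << handle->use_count() << '\n';
    }
    return 0;
}
```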
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h
new file mode 100644
index 0000000..11fbb9e
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Service.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <string>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_SERVICE_H
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
index ec8da06..baa2b95 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 
@@ -31,10 +32,14 @@
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return {};
 }
@@ -51,9 +56,14 @@
 template <typename Type>
 decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
     auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return canonical;
 }
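The hand-unrolled `NN_TRY` above is needed because `validate()` returns `nn::Result<void>` while `nn::convert()` now returns `nn::GeneralResult`, a different error type, so the failure has to be re-wrapped manually. A simplified sketch of that cross-type propagation (all types here are stand-ins):

```cpp
// Simplified sketch of propagating a failure across two different result
// types, which is why NN_TRY cannot be used directly above.
#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

struct SketchError {
    std::string message;
    int32_t status;
};
template <typename T>
using GeneralResult = std::variant<T, SketchError>;  // convert() returns this
template <typename T>
using PlainResult = std::variant<T, std::string>;    // validate() returns this

GeneralResult<int> convert(int halObject) {
    if (halObject < 0) return SketchError{"negative operand count", /*status=*/1};
    return halObject;
}

PlainResult<std::monostate> validate(int halObject) {
    auto maybeCanonical = convert(halObject);
    if (maybeCanonical.index() == 1) {
        // Re-wrap by hand: keep the message, drop the status code.
        return std::get<1>(maybeCanonical).message;
    }
    // The version check against kVersion would follow here.
    return std::monostate{};
}

int main() {
    auto result = validate(-1);
    if (result.index() == 1) std::cout << std::get<1>(result) << '\n';
    return 0;
}
```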
diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp
new file mode 100644
index 0000000..f286bcc
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Callbacks.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+#include <utility>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+namespace {
+
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<IPreparedModel>& preparedModel) {
+    return NN_TRY(utils::PreparedModel::create(preparedModel));
+}
+
+}  // namespace
+
+Return<void> PreparedModelCallback::notify(ErrorStatus status,
+                                           const sp<IPreparedModel>& preparedModel) {
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+void PreparedModelCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+PreparedModelCallback::Data PreparedModelCallback::get() {
+    return mData.take();
+}
+
+void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
+    mData.put(std::move(result));
+}
+
+// ExecutionCallback methods begin here
+
+Return<void> ExecutionCallback::notify(ErrorStatus status) {
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal({});
+    }
+    return Void();
+}
+
+void ExecutionCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+ExecutionCallback::Data ExecutionCallback::get() {
+    return mData.take();
+}
+
+void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
+    mData.put(std::move(result));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index 4a58f3b..f301065 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -52,7 +52,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     std::vector<ConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -63,30 +63,31 @@
 
 }  // anonymous namespace
 
-Result<OperandType> convert(const hal::V1_0::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_0::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-Result<OperationType> convert(const hal::V1_0::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_0::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-Result<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime) {
+GeneralResult<Operand::LifeTime> convert(const hal::V1_0::OperandLifeTime& lifetime) {
     return static_cast<Operand::LifeTime>(lifetime);
 }
 
-Result<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus) {
+GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus) {
     return static_cast<DeviceStatus>(deviceStatus);
 }
 
-Result<Capabilities::PerformanceInfo> convert(const hal::V1_0::PerformanceInfo& performanceInfo) {
+GeneralResult<Capabilities::PerformanceInfo> convert(
+        const hal::V1_0::PerformanceInfo& performanceInfo) {
     return Capabilities::PerformanceInfo{
             .execTime = performanceInfo.execTime,
             .powerUsage = performanceInfo.powerUsage,
     };
 }
 
-Result<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities) {
     const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance));
     const auto float32Performance = NN_TRY(convert(capabilities.float32Performance));
 
@@ -100,7 +101,7 @@
     };
 }
 
-Result<DataLocation> convert(const hal::V1_0::DataLocation& location) {
+GeneralResult<DataLocation> convert(const hal::V1_0::DataLocation& location) {
     return DataLocation{
             .poolIndex = location.poolIndex,
             .offset = location.offset,
@@ -108,7 +109,7 @@
     };
 }
 
-Result<Operand> convert(const hal::V1_0::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_0::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -119,7 +120,7 @@
     };
 }
 
-Result<Operation> convert(const hal::V1_0::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_0::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -127,15 +128,15 @@
     };
 }
 
-Result<Model::OperandValues> convert(const hidl_vec<uint8_t>& operandValues) {
+GeneralResult<Model::OperandValues> convert(const hidl_vec<uint8_t>& operandValues) {
     return Model::OperandValues(operandValues.data(), operandValues.size());
 }
 
-Result<Memory> convert(const hidl_memory& memory) {
+GeneralResult<Memory> convert(const hidl_memory& memory) {
     return createSharedMemoryFromHidlMemory(memory);
 }
 
-Result<Model> convert(const hal::V1_0::Model& model) {
+GeneralResult<Model> convert(const hal::V1_0::Model& model) {
     auto operations = NN_TRY(convert(model.operations));
 
     // Verify number of consumers.
@@ -144,9 +145,9 @@
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << model.operands[i].numberOfConsumers;
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
         }
     }
 
@@ -164,7 +165,7 @@
     };
 }
 
-Result<Request::Argument> convert(const hal::V1_0::RequestArgument& argument) {
+GeneralResult<Request::Argument> convert(const hal::V1_0::RequestArgument& argument) {
     const auto lifetime = argument.hasNoValue ? Request::Argument::LifeTime::NO_VALUE
                                               : Request::Argument::LifeTime::POOL;
     return Request::Argument{
@@ -174,7 +175,7 @@
     };
 }
 
-Result<Request> convert(const hal::V1_0::Request& request) {
+GeneralResult<Request> convert(const hal::V1_0::Request& request) {
     auto memories = NN_TRY(convert(request.pools));
     std::vector<Request::MemoryPool> pools;
     pools.reserve(memories.size());
@@ -187,7 +188,7 @@
     };
 }
 
-Result<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
+GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status) {
     switch (status) {
         case hal::V1_0::ErrorStatus::NONE:
         case hal::V1_0::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -196,7 +197,8 @@
         case hal::V1_0::ErrorStatus::INVALID_ARGUMENT:
             return static_cast<ErrorStatus>(status);
     }
-    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+    return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+           << "Invalid ErrorStatus " << underlyingType(status);
 }
 
 }  // namespace android::nn
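The `ErrorStatus` conversion above uses an exhaustive `switch` so that any value outside the declared enumerators, which can arrive over IPC from a misbehaving service, maps to a typed `GENERAL_FAILURE` rather than being cast through unchecked. A standalone sketch of that range-check pattern, with hypothetical enums:

```cpp
// Standalone sketch of the enum-range check used by the ErrorStatus
// conversions: known values cast through, anything else becomes a typed
// GENERAL_FAILURE instead of an unchecked static_cast.
#include <cstdint>
#include <iostream>

enum class WireStatus : int32_t { NONE = 0, DEVICE_UNAVAILABLE = 1, GENERAL_FAILURE = 2 };
enum class CanonicalStatus : int32_t { NONE = 0, DEVICE_UNAVAILABLE = 1, GENERAL_FAILURE = 2 };

CanonicalStatus convertStatus(WireStatus status) {
    switch (status) {
        case WireStatus::NONE:
        case WireStatus::DEVICE_UNAVAILABLE:
        case WireStatus::GENERAL_FAILURE:
            return static_cast<CanonicalStatus>(status);
    }
    // A value outside the declared enumerators (possible over IPC) maps to
    // a general failure instead of propagating garbage.
    return CanonicalStatus::GENERAL_FAILURE;
}

int main() {
    std::cout << static_cast<int32_t>(convertStatus(static_cast<WireStatus>(99))) << '\n';  // 2
    return 0;
}
```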
@@ -208,7 +210,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(utils::convert(arguments[i]));
@@ -218,33 +220,35 @@
 
 }  // anonymous namespace
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
     if (lifetime == nn::Operand::LifeTime::POINTER) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
     return static_cast<OperandLifeTime>(lifetime);
 }
 
-nn::Result<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
     return static_cast<DeviceStatus>(deviceStatus);
 }
 
-nn::Result<PerformanceInfo> convert(const nn::Capabilities::PerformanceInfo& performanceInfo) {
+nn::GeneralResult<PerformanceInfo> convert(
+        const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return PerformanceInfo{
             .execTime = performanceInfo.execTime,
             .powerUsage = performanceInfo.powerUsage,
     };
 }
 
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     return Capabilities{
             .float32Performance = NN_TRY(convert(
                     capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
@@ -253,7 +257,7 @@
     };
 }
 
-nn::Result<DataLocation> convert(const nn::DataLocation& location) {
+nn::GeneralResult<DataLocation> convert(const nn::DataLocation& location) {
     return DataLocation{
             .poolIndex = location.poolIndex,
             .offset = location.offset,
@@ -261,7 +265,7 @@
     };
 }
 
-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -273,7 +277,7 @@
     };
 }
 
-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -281,20 +285,21 @@
     };
 }
 
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return hidl_vec<uint8_t>(operandValues.data(), operandValues.data() + operandValues.size());
 }
 
-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
     // Copy memory to force the native_handle_t to be copied.
     auto copiedMemory = hidlMemory;
     return copiedMemory;
 }
 
-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Mdoel cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
 
     auto operands = NN_TRY(convert(model.main.operands));
@@ -317,9 +322,10 @@
     };
 }
 
-nn::Result<RequestArgument> convert(const nn::Request::Argument& requestArgument) {
+nn::GeneralResult<RequestArgument> convert(const nn::Request::Argument& requestArgument) {
     if (requestArgument.lifetime == nn::Request::Argument::LifeTime::POINTER) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }
     const bool hasNoValue = requestArgument.lifetime == nn::Request::Argument::LifeTime::NO_VALUE;
     return RequestArgument{
@@ -329,13 +335,14 @@
     };
 }
 
-nn::Result<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool) {
+nn::GeneralResult<hidl_memory> convert(const nn::Request::MemoryPool& memoryPool) {
     return convert(std::get<nn::Memory>(memoryPool));
 }
 
-nn::Result<Request> convert(const nn::Request& request) {
+nn::GeneralResult<Request> convert(const nn::Request& request) {
     if (!hal::utils::hasNoPointerData(request)) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }
 
     return Request{
@@ -345,7 +352,7 @@
     };
 }
 
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& status) {
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status) {
     switch (status) {
         case nn::ErrorStatus::NONE:
         case nn::ErrorStatus::DEVICE_UNAVAILABLE:
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
new file mode 100644
index 0000000..8292f17
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+namespace {
+
+nn::GeneralResult<nn::Capabilities> initCapabilities(V1_0::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                 << "uninitialized";
+    const auto cb = [&result](ErrorStatus status, const Capabilities& capabilities) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getCapabilities failed with " << toString(status);
+        } else {
+            result = validatedConvertToCanonical(capabilities);
+        }
+    };
+
+    const auto ret = device->getCapabilities(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
+                                                                sp<V1_0::IDevice> device) {
+    if (name.empty()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_0::utils::Device::create must have non-empty name";
+    }
+    if (device == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_0::utils::Device::create must have non-null device";
+    }
+
+    auto capabilities = NN_TRY(initCapabilities(device.get()));
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
+    return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
+                                          std::move(capabilities), std::move(device),
+                                          std::move(deathHandler));
+}
+
+Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities,
+               sp<V1_0::IDevice> device, hal::utils::DeathHandler deathHandler)
+    : kName(std::move(name)),
+      kCapabilities(std::move(capabilities)),
+      kDevice(std::move(device)),
+      kDeathHandler(std::move(deathHandler)) {}
+
+const std::string& Device::getName() const {
+    return kName;
+}
+
+const std::string& Device::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version Device::getFeatureLevel() const {
+    return nn::Version::ANDROID_OC_MR1;
+}
+
+nn::DeviceType Device::getType() const {
+    return nn::DeviceType::OTHER;
+}
+
+const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& Device::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
+    return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0);
+}
+
+nn::GeneralResult<void> Device::wait() const {
+    const auto ret = kDevice->ping();
+    return hal::utils::handleTransportError(ret);
+}
+
+nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+
+    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                  << "uninitialized";
+    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical)
+                     << "getSupportedOperations failed with " << toString(status);
+        } else if (supportedOperations.size() != model.main.operations.size()) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                     << "getSupportedOperations returned vector of size "
+                     << supportedOperations.size() << " but expected "
+                     << model.main.operations.size();
+        } else {
+            result = supportedOperations;
+        }
+    };
+
+    const auto ret = kDevice->getSupportedOperations(hidlModel, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
+        const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
+        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+
+    const auto cb = sp<PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kDevice->prepareModel(hidlModel, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
+        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
+}
+
+nn::GeneralResult<nn::SharedBuffer> Device::allocate(
+        const nn::BufferDesc& /*desc*/,
+        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
+        const std::vector<nn::BufferRole>& /*inputRoles*/,
+        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IDevice::allocate not supported on 1.0 HAL service";
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
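`initCapabilities()` and `getSupportedOperations()` above follow the synchronous HIDL callback pattern: the result is seeded with an error sentinel, the lambda overwrites it before the HAL call returns, and the `Return<void>` is then checked for transport errors. A self-contained sketch of that shape, with `Result` and `getCapabilities` as stand-ins:

```cpp
// Self-contained sketch of the synchronous HIDL callback pattern: the
// result starts as an error and is overwritten by the callback, which the
// callee invokes before returning.
#include <functional>
#include <iostream>
#include <string>
#include <variant>

template <typename T>
using Result = std::variant<T, std::string>;

// Stand-in for a HIDL method that invokes its callback before returning.
void getCapabilities(const std::function<void(bool ok, int caps)>& cb) {
    cb(/*ok=*/true, /*caps=*/42);
}

Result<int> initCapabilities() {
    Result<int> result = std::string("uninitialized");  // overwritten by cb
    getCapabilities([&result](bool ok, int caps) {
        if (ok) result = caps;
        else result = std::string("getCapabilities failed");
    });
    // In the real code, the Return<void> from the HIDL call is checked for
    // transport errors here before `result` is trusted.
    return result;
}

int main() {
    auto r = initCapabilities();
    if (r.index() == 0) std::cout << "capabilities: " << std::get<0>(r) << '\n';
    return 0;
}
```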
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
new file mode 100644
index 0000000..11ccbe3
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
+        sp<V1_0::IPreparedModel> preparedModel) {
+    if (preparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_0::utils::PreparedModel::create must have non-null preparedModel";
+    }
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+                                                 std::move(deathHandler));
+}
+
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_0::IPreparedModel> preparedModel,
+                             hal::utils::DeathHandler deathHandler)
+    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
+        const nn::Request& request, nn::MeasureTiming /*measure*/,
+        const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+
+    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+
+    const auto cb = sp<ExecutionCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kPreparedModel->execute(hidlRequest, cb);
+    const auto status =
+            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "execute failed with " << toString(status);
+    }
+
+    auto result = NN_TRY(cb->get());
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
+
+    return result;
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
+}
+
+std::any PreparedModel::getUnderlyingResource() const {
+    sp<V1_0::IPreparedModel> resource = kPreparedModel;
+    return resource;
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Service.cpp b/neuralnetworks/1.0/utils/src/Service.cpp
new file mode 100644
index 0000000..ec28b1d
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/Service.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientDevice.h>
+#include <string>
+#include "Device.h"
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
+    hal::utils::ResilientDevice::Factory makeDevice =
+            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
+        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
+        if (service == nullptr) {
+            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
+        }
+        return Device::create(name, std::move(service));
+    };
+
+    return hal::utils::ResilientDevice::create(std::move(makeDevice));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_0::utils
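`getDevice()` hands `ResilientDevice` a factory rather than a device so the connection can be re-established after a `DEAD_OBJECT` error, with `blocking` selecting `getService` versus `tryGetService`. A standalone sketch of that factory shape; `FakeDevice`, `SharedDevice`, and `makeResilient` are stand-ins, and the reconnection logic itself is only summarized in a comment:

```cpp
// Standalone sketch of the factory shape handed to ResilientDevice above.
// The real ResilientDevice stores the factory and re-invokes it on DEAD_OBJECT.
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct FakeDevice {
    std::string name;
};
using SharedDevice = std::shared_ptr<const FakeDevice>;
using Factory = std::function<SharedDevice(bool blocking)>;

SharedDevice makeResilient(const Factory& factory) {
    // The initial connection is blocking; reconnection attempts after a
    // dead service would call factory(false) (tryGetService semantics).
    return factory(/*blocking=*/true);
}

int main() {
    const Factory makeDevice = [](bool blocking) -> SharedDevice {
        // The real factory picks IDevice::getService(name) when blocking,
        // IDevice::tryGetService(name) otherwise, then wraps it in a Device.
        (void)blocking;
        return std::make_shared<const FakeDevice>(FakeDevice{"example-driver"});
    };
    const auto device = makeResilient(makeDevice);
    std::cout << "connected to " << device->name << '\n';
    return 0;
}
```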
diff --git a/neuralnetworks/1.1/utils/Android.bp b/neuralnetworks/1.1/utils/Android.bp
index 85a32c5..909575b 100644
--- a/neuralnetworks/1.1/utils/Android.bp
+++ b/neuralnetworks/1.1/utils/Android.bp
@@ -20,6 +20,7 @@
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.1/"],
     export_include_dirs: ["include"],
+    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
index d0c5397..16ddd53 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Conversions.h
@@ -24,21 +24,22 @@
 
 namespace android::nn {
 
-Result<OperationType> convert(const hal::V1_1::OperationType& operationType);
-Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities);
-Result<Operation> convert(const hal::V1_1::Operation& operation);
-Result<Model> convert(const hal::V1_1::Model& model);
-Result<ExecutionPreference> convert(const hal::V1_1::ExecutionPreference& executionPreference);
+GeneralResult<OperationType> convert(const hal::V1_1::OperationType& operationType);
+GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities);
+GeneralResult<Operation> convert(const hal::V1_1::Operation& operation);
+GeneralResult<Model> convert(const hal::V1_1::Model& model);
+GeneralResult<ExecutionPreference> convert(
+        const hal::V1_1::ExecutionPreference& executionPreference);
 
 }  // namespace android::nn
 
 namespace android::hardware::neuralnetworks::V1_1::utils {
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
 
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
new file mode 100644
index 0000000..f55ac6c
--- /dev/null
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H
+
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_1::utils {
+
+class Device final : public nn::IDevice {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
+                                                                   sp<V1_1::IDevice> device);
+
+    Device(PrivateConstructorTag tag, std::string name, nn::Capabilities capabilities,
+           sp<V1_1::IDevice> device, hal::utils::DeathHandler deathHandler);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString = "UNKNOWN";
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const sp<V1_1::IDevice> kDevice;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_1::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_DEVICE_H
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h
new file mode 100644
index 0000000..a3ad3cf
--- /dev/null
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Service.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <string>
+
+namespace android::hardware::neuralnetworks::V1_1::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);
+
+}  // namespace android::hardware::neuralnetworks::V1_1::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_SERVICE_H
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
index 6f9aa60..0fee628 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.1/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
@@ -33,10 +34,14 @@
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return {};
 }
@@ -53,9 +58,14 @@
 template <typename Type>
 decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
     auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return canonical;
 }
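
Note: the rewritten validate() above can no longer use NN_TRY on the conversion because nn::convert now yields a GeneralResult whose error payload (status code plus message) differs from the plain nn::Result<void> being returned, so the error must be unwrapped and re-streamed by hand. A standalone sketch of that shape with a hand-rolled result type (nothing below is the real nn::Result or nn::GeneralResult):

// Sketch: propagating an error across two result types with different
// error payloads, mirroring validate()'s manual has_value()/error() dance.
#include <iostream>
#include <string>
#include <utility>
#include <variant>

template <typename T, typename E>
class Result {
  public:
    Result(T value) : mState(std::move(value)) {}
    static Result error(E e) { return Result(std::move(e), 0); }

    bool has_value() const { return std::holds_alternative<T>(mState); }
    const T& value() const { return std::get<T>(mState); }
    const E& error() const { return std::get<E>(mState); }

  private:
    Result(E e, int) : mState(std::move(e)) {}
    std::variant<T, E> mState;
};

struct RichError { int status; std::string message; };  // like GeneralResult's error

Result<int, RichError> convert(int halValue) {
    if (halValue < 0) return Result<int, RichError>::error({1, "negative input"});
    return halValue;
}

// The outer function only carries a string error, so the rich error's
// message must be copied over by hand instead of via an NN_TRY-style macro.
Result<int, std::string> validate(int halValue) {
    const auto maybeCanonical = convert(halValue);
    if (!maybeCanonical.has_value()) {
        return Result<int, std::string>::error(maybeCanonical.error().message);
    }
    return maybeCanonical.value();
}

int main() {
    const auto result = validate(-1);
    std::cout << (result.has_value() ? "ok" : result.error()) << "\n";
    return 0;
}
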
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index 7fee16b..ffe0752 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -42,7 +42,7 @@
 using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-Result<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<convertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     std::vector<convertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -53,11 +53,11 @@
 
 }  // anonymous namespace
 
-Result<OperationType> convert(const hal::V1_1::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_1::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-Result<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
     const auto quantized8Performance = NN_TRY(convert(capabilities.quantized8Performance));
     const auto float32Performance = NN_TRY(convert(capabilities.float32Performance));
     const auto relaxedFloat32toFloat16Performance =
@@ -73,7 +73,7 @@
     };
 }
 
-Result<Operation> convert(const hal::V1_1::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_1::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -81,7 +81,7 @@
     };
 }
 
-Result<Model> convert(const hal::V1_1::Model& model) {
+GeneralResult<Model> convert(const hal::V1_1::Model& model) {
     auto operations = NN_TRY(convert(model.operations));
 
     // Verify number of consumers.
@@ -90,9 +90,9 @@
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << model.operands[i].numberOfConsumers;
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
         }
     }
 
@@ -111,7 +111,8 @@
     };
 }
 
-Result<ExecutionPreference> convert(const hal::V1_1::ExecutionPreference& executionPreference) {
+GeneralResult<ExecutionPreference> convert(
+        const hal::V1_1::ExecutionPreference& executionPreference) {
     return static_cast<ExecutionPreference>(executionPreference);
 }
 
@@ -122,20 +123,20 @@
 
 using utils::convert;
 
-nn::Result<V1_0::PerformanceInfo> convert(
+nn::GeneralResult<V1_0::PerformanceInfo> convert(
         const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return V1_0::utils::convert(performanceInfo);
 }
 
-nn::Result<V1_0::Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<V1_0::Operand> convert(const nn::Operand& operand) {
     return V1_0::utils::convert(operand);
 }
 
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return V1_0::utils::convert(operandValues);
 }
 
-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     return V1_0::utils::convert(memory);
 }
 
@@ -143,7 +144,7 @@
 using convertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::Result<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<convertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     hidl_vec<convertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(convert(arguments[i]));
@@ -153,11 +154,11 @@
 
 }  // anonymous namespace
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     return Capabilities{
             .float32Performance = NN_TRY(convert(
                     capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32))),
@@ -168,7 +169,7 @@
     };
 }
 
-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -176,9 +177,10 @@
     };
 }
 
-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Mdoel cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
 
     auto operands = NN_TRY(convert(model.main.operands));
@@ -202,7 +204,7 @@
     };
 }
 
-nn::Result<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
+nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
     return static_cast<ExecutionPreference>(executionPreference);
 }
 
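
Note: every conversion above moves from nn::Result to nn::GeneralResult, and each NN_ERROR now names a canonical error status instead of carrying only text. A standalone sketch of a streaming error builder in that spirit (hand-rolled; the real NN_ERROR macro and GeneralResult differ in detail):

// Sketch of a streaming error builder carrying a status code plus message,
// in the spirit of NN_ERROR(nn::ErrorStatus::...) << "text".
#include <iostream>
#include <sstream>
#include <string>

enum class ErrorStatus { NONE, GENERAL_FAILURE, INVALID_ARGUMENT };

struct GeneralError {
    ErrorStatus status;
    std::string message;
};

class ErrorBuilder {
  public:
    explicit ErrorBuilder(ErrorStatus status) : mStatus(status) {}

    template <typename T>
    ErrorBuilder& operator<<(const T& part) {
        mStream << part;  // accumulate the message piecewise
        return *this;
    }

    // Implicitly collapse into the error payload at the return statement.
    operator GeneralError() const { return {mStatus, mStream.str()}; }

  private:
    ErrorStatus mStatus;
    std::ostringstream mStream;
};

GeneralError checkConsumers(int expected, int found, int operandIndex) {
    if (expected != found) {
        return ErrorBuilder(ErrorStatus::GENERAL_FAILURE)
               << "Invalid numberOfConsumers for operand " << operandIndex
               << ", expected " << expected << " but found " << found;
    }
    return {ErrorStatus::NONE, ""};
}

int main() {
    const GeneralError error = checkConsumers(2, 3, 7);
    std::cout << static_cast<int>(error.status) << ": " << error.message << "\n";
    return 0;
}
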
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
new file mode 100644
index 0000000..03b0d6e
--- /dev/null
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_1::utils {
+namespace {
+
+nn::GeneralResult<nn::Capabilities> initCapabilities(V1_1::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                 << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getCapabilities_1_1 failed with " << toString(status);
+        } else {
+            result = validatedConvertToCanonical(capabilities);
+        }
+    };
+
+    const auto ret = device->getCapabilities_1_1(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
+                                                                sp<V1_1::IDevice> device) {
+    if (name.empty()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_1::utils::Device::create must have non-empty name";
+    }
+    if (device == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_1::utils::Device::create must have non-null device";
+    }
+
+    auto capabilities = NN_TRY(initCapabilities(device.get()));
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
+    return std::make_shared<const Device>(PrivateConstructorTag{}, std::move(name),
+                                          std::move(capabilities), std::move(device),
+                                          std::move(deathHandler));
+}
+
+Device::Device(PrivateConstructorTag /*tag*/, std::string name, nn::Capabilities capabilities,
+               sp<V1_1::IDevice> device, hal::utils::DeathHandler deathHandler)
+    : kName(std::move(name)),
+      kCapabilities(std::move(capabilities)),
+      kDevice(std::move(device)),
+      kDeathHandler(std::move(deathHandler)) {}
+
+const std::string& Device::getName() const {
+    return kName;
+}
+
+const std::string& Device::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version Device::getFeatureLevel() const {
+    return nn::Version::ANDROID_P;
+}
+
+nn::DeviceType Device::getType() const {
+    return nn::DeviceType::UNKNOWN;
+}
+
+const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& Device::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
+    return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0);
+}
+
+nn::GeneralResult<void> Device::wait() const {
+    const auto ret = kDevice->ping();
+    return hal::utils::handleTransportError(ret);
+}
+
+nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+
+    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                  << "uninitialized";
+    auto cb = [&result, &model](V1_0::ErrorStatus status,
+                                const hidl_vec<bool>& supportedOperations) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical)
+                     << "getSupportedOperations_1_1 failed with " << toString(status);
+        } else if (supportedOperations.size() != model.main.operations.size()) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                     << "getSupportedOperations_1_1 returned vector of size "
+                     << supportedOperations.size() << " but expected "
+                     << model.main.operations.size();
+        } else {
+            result = supportedOperations;
+        }
+    };
+
+    const auto ret = kDevice->getSupportedOperations_1_1(hidlModel, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
+        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+    const auto hidlPreference = NN_TRY(convert(preference));
+
+    const auto cb = sp<V1_0::utils::PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModel failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
+        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IDevice::prepareModelFromCache not supported on 1.1 HAL service";
+}
+
+nn::GeneralResult<nn::SharedBuffer> Device::allocate(
+        const nn::BufferDesc& /*desc*/,
+        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
+        const std::vector<nn::BufferRole>& /*inputRoles*/,
+        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IDevice::allocate not supported on 1.1 HAL service";
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_1::utils
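
Note: initCapabilities() and getSupportedOperations() above share one shape: seed a result with an "uninitialized" error, let the HIDL callback overwrite it through a by-reference capture, then check the transport return before handing the result back. A standalone sketch of that pattern, where queryViaCallback() is an invented stand-in for a HIDL call such as getCapabilities_1_1():

// Sketch of turning a callback-style API into a synchronous result.
#include <functional>
#include <iostream>
#include <string>

struct Result {
    bool ok;
    std::string payloadOrError;
};

// Stand-in for e.g. device->getCapabilities_1_1(cb): invokes the callback
// exactly once, possibly reporting an error status.
void queryViaCallback(bool fail, const std::function<void(int, const std::string&)>& cb) {
    if (fail) {
        cb(/*status=*/1, "");
    } else {
        cb(/*status=*/0, "capabilities-blob");
    }
}

Result getCapabilitiesSync(bool fail) {
    // Seed with an error so a callback that never fires is still reported.
    Result result{false, "uninitialized"};
    const auto cb = [&result](int status, const std::string& payload) {
        if (status != 0) {
            result = {false, "query failed with status " + std::to_string(status)};
        } else {
            result = {true, payload};
        }
    };
    queryViaCallback(fail, cb);  // the real code also checks the transport Return<>
    return result;
}

int main() {
    std::cout << getCapabilitiesSync(false).payloadOrError << "\n";
    std::cout << getCapabilitiesSync(true).payloadOrError << "\n";
    return 0;
}
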
diff --git a/neuralnetworks/1.1/utils/src/Service.cpp b/neuralnetworks/1.1/utils/src/Service.cpp
new file mode 100644
index 0000000..e2d3240
--- /dev/null
+++ b/neuralnetworks/1.1/utils/src/Service.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientDevice.h>
+#include <string>
+#include "Device.h"
+
+namespace android::hardware::neuralnetworks::V1_1::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
+    hal::utils::ResilientDevice::Factory makeDevice =
+            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
+        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
+        if (service == nullptr) {
+            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
+        }
+        return Device::create(name, std::move(service));
+    };
+
+    return hal::utils::ResilientDevice::create(std::move(makeDevice));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_1::utils
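
Note: getDevice() above hands ResilientDevice a factory lambda instead of a live device, so the service can be re-fetched (blocking or not) after a death notification. A standalone sketch of that factory shape follows; getServiceBlocking()/tryGetService() are illustrative stand-ins for IDevice::getService()/IDevice::tryGetService(), and ResilientDevice itself is not reproduced:

// Sketch of the recreate-on-demand factory handed to ResilientDevice.
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct Device {
    std::string name;
};

std::shared_ptr<Device> getServiceBlocking(const std::string& name) {
    return std::make_shared<Device>(Device{name});  // pretend we waited for it
}

std::shared_ptr<Device> tryGetService(const std::string& /*name*/) {
    return nullptr;  // pretend the service is not up yet
}

using DeviceFactory = std::function<std::shared_ptr<Device>(bool blocking)>;

DeviceFactory makeDeviceFactory(const std::string& name) {
    // Capture the name by value so the factory can outlive the caller and
    // be re-invoked after the remote service dies.
    return [name](bool blocking) -> std::shared_ptr<Device> {
        auto service = blocking ? getServiceBlocking(name) : tryGetService(name);
        if (service == nullptr) {
            std::cerr << (blocking ? "getService" : "tryGetService")
                      << " returned nullptr\n";
        }
        return service;
    };
}

int main() {
    const auto factory = makeDeviceFactory("example-device");
    const auto eager = factory(/*blocking=*/false);  // may be nullptr
    const auto device = factory(/*blocking=*/true);  // waits until available
    std::cout << (eager ? "eager hit\n" : "eager miss\n") << device->name << "\n";
    return 0;
}
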
diff --git a/neuralnetworks/1.2/utils/Android.bp b/neuralnetworks/1.2/utils/Android.bp
index a1dd3d0..22e8659 100644
--- a/neuralnetworks/1.2/utils/Android.bp
+++ b/neuralnetworks/1.2/utils/Android.bp
@@ -20,6 +20,7 @@
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.2/"],
     export_include_dirs: ["include"],
+    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
new file mode 100644
index 0000000..bc7d92a
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H
+
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+class PreparedModelCallback final : public IPreparedModelCallback,
+                                    public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::GeneralResult<nn::SharedPreparedModel>;
+
+    Return<void> notify(V1_0::ErrorStatus status,
+                        const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const sp<IPreparedModel>& preparedModel) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
+
+    Return<void> notify(V1_0::ErrorStatus status) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                            const Timing& timing) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_CALLBACKS_H
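
Note: both callbacks above funnel their payload through hal::utils::TransferValue, a one-shot handoff: the binder thread put()s exactly once and the caller's get() take()s, blocking until the value arrives. A standalone sketch with the same put()/take() contract, built on std::promise purely for illustration (the real TransferValue may be implemented differently):

// Sketch of a one-shot put()/take() handoff like hal::utils::TransferValue.
#include <future>
#include <iostream>
#include <string>
#include <thread>
#include <utility>

template <typename T>
class TransferValue {
  public:
    TransferValue() : mFuture(mPromise.get_future()) {}

    // Must be called exactly once, e.g. from the HIDL callback thread.
    void put(T value) { mPromise.set_value(std::move(value)); }

    // Blocks until put() has run, then consumes the value.
    T take() { return mFuture.get(); }

  private:
    std::promise<T> mPromise;
    std::future<T> mFuture;
};

int main() {
    TransferValue<std::string> transfer;

    // Simulate the asynchronous notify()/notify_1_2() arriving later.
    std::thread callbackThread([&transfer] { transfer.put("prepared-model"); });

    std::cout << transfer.take() << "\n";  // blocks until the thread delivers
    callbackThread.join();
    return 0;
}
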
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index 81bf792..e6de011 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -24,62 +24,64 @@
 
 namespace android::nn {
 
-Result<OperandType> convert(const hal::V1_2::OperandType& operandType);
-Result<OperationType> convert(const hal::V1_2::OperationType& operationType);
-Result<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
-Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<OperandType> convert(const hal::V1_2::OperandType& operandType);
+GeneralResult<OperationType> convert(const hal::V1_2::OperationType& operationType);
+GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
+GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
+GeneralResult<Capabilities::OperandPerformance> convert(
         const hal::V1_2::Capabilities::OperandPerformance& operandPerformance);
-Result<Operation> convert(const hal::V1_2::Operation& operation);
-Result<Operand::SymmPerChannelQuantParams> convert(
+GeneralResult<Operation> convert(const hal::V1_2::Operation& operation);
+GeneralResult<Operand::SymmPerChannelQuantParams> convert(
         const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams);
-Result<Operand> convert(const hal::V1_2::Operand& operand);
-Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams);
-Result<Model> convert(const hal::V1_2::Model& model);
-Result<Model::ExtensionNameAndPrefix> convert(
+GeneralResult<Operand> convert(const hal::V1_2::Operand& operand);
+GeneralResult<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams);
+GeneralResult<Model> convert(const hal::V1_2::Model& model);
+GeneralResult<Model::ExtensionNameAndPrefix> convert(
         const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
-Result<OutputShape> convert(const hal::V1_2::OutputShape& outputShape);
-Result<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming);
-Result<Timing> convert(const hal::V1_2::Timing& timing);
-Result<Extension> convert(const hal::V1_2::Extension& extension);
-Result<Extension::OperandTypeInformation> convert(
+GeneralResult<OutputShape> convert(const hal::V1_2::OutputShape& outputShape);
+GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming);
+GeneralResult<Timing> convert(const hal::V1_2::Timing& timing);
+GeneralResult<Extension> convert(const hal::V1_2::Extension& extension);
+GeneralResult<Extension::OperandTypeInformation> convert(
         const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation);
-Result<NativeHandle> convert(const hardware::hidl_handle& handle);
+GeneralResult<NativeHandle> convert(const hardware::hidl_handle& handle);
 
-Result<std::vector<Extension>> convert(const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
-Result<std::vector<NativeHandle>> convert(const hardware::hidl_vec<hardware::hidl_handle>& handles);
-Result<std::vector<OutputShape>> convert(
+GeneralResult<std::vector<Extension>> convert(
+        const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
+GeneralResult<std::vector<NativeHandle>> convert(
+        const hardware::hidl_vec<hardware::hidl_handle>& handles);
+GeneralResult<std::vector<OutputShape>> convert(
         const hardware::hidl_vec<hal::V1_2::OutputShape>& outputShapes);
 
 }  // namespace android::nn
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType);
-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<DeviceType> convert(const nn::DeviceType& deviceType);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
         const nn::Capabilities::OperandPerformance& operandPerformance);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<SymmPerChannelQuantParams> convert(
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<SymmPerChannelQuantParams> convert(
         const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams);
-nn::Result<Operand> convert(const nn::Operand& operand);
-nn::Result<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<Model::ExtensionNameAndPrefix> convert(
+nn::GeneralResult<Operand> convert(const nn::Operand& operand);
+nn::GeneralResult<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<Model::ExtensionNameAndPrefix> convert(
         const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
-nn::Result<OutputShape> convert(const nn::OutputShape& outputShape);
-nn::Result<MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
-nn::Result<Timing> convert(const nn::Timing& timing);
-nn::Result<Extension> convert(const nn::Extension& extension);
-nn::Result<Extension::OperandTypeInformation> convert(
+nn::GeneralResult<OutputShape> convert(const nn::OutputShape& outputShape);
+nn::GeneralResult<MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
+nn::GeneralResult<Timing> convert(const nn::Timing& timing);
+nn::GeneralResult<Extension> convert(const nn::Extension& extension);
+nn::GeneralResult<Extension::OperandTypeInformation> convert(
         const nn::Extension::OperandTypeInformation& operandTypeInformation);
-nn::Result<hidl_handle> convert(const nn::NativeHandle& handle);
+nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle);
 
-nn::Result<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
-nn::Result<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
-nn::Result<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
+nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
+nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
new file mode 100644
index 0000000..eb317b1
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H
+
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device);
+nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device);
+nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device);
+nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device);
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+        V1_2::IDevice* device);
+
+class Device final : public nn::IDevice {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
+                                                                   sp<V1_2::IDevice> device);
+
+    Device(PrivateConstructorTag tag, std::string name, std::string versionString,
+           nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
+           nn::Capabilities capabilities, std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
+           sp<V1_2::IDevice> device, hal::utils::DeathHandler deathHandler);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString;
+    const nn::DeviceType kDeviceType;
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
+    const sp<V1_2::IDevice> kDevice;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_DEVICE_H
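
Note: unlike its 1.1 counterpart, the 1.2 Device above snapshots the version string, device type, extensions, capabilities, and cache-file counts once via the init*() helpers and stores them in const members, so the getters never touch binder again. A standalone sketch of that eager-snapshot pattern (the two init functions below are stand-ins for the real queries):

// Sketch of snapshotting immutable remote properties once at construction.
#include <iostream>
#include <string>
#include <utility>

std::string initVersionString() { return "1.2"; }  // stand-in for one binder call
std::pair<unsigned, unsigned> initCacheFilesNeeded() { return {1, 2}; }

class Device {
  public:
    Device() : kVersionString(initVersionString()),
               kNumberOfCacheFilesNeeded(initCacheFilesNeeded()) {}

    // Getters are now pure accessors: no IPC, no failure paths.
    const std::string& getVersionString() const { return kVersionString; }
    std::pair<unsigned, unsigned> getNumberOfCacheFilesNeeded() const {
        return kNumberOfCacheFilesNeeded;
    }

  private:
    const std::string kVersionString;
    const std::pair<unsigned, unsigned> kNumberOfCacheFilesNeeded;
};

int main() {
    const Device device;
    std::cout << device.getVersionString() << " "
              << device.getNumberOfCacheFilesNeeded().first << "\n";
    return 0;
}
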
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
new file mode 100644
index 0000000..65e1e8a
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H
+
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+class PreparedModel final : public nn::IPreparedModel {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
+            sp<V1_2::IPreparedModel> preparedModel);
+
+    PreparedModel(PrivateConstructorTag tag, sp<V1_2::IPreparedModel> preparedModel,
+                  hal::utils::DeathHandler deathHandler);
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+
+  private:
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
+            const V1_0::Request& request, MeasureTiming measure) const;
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
+            const V1_0::Request& request, MeasureTiming measure) const;
+
+    const sp<V1_2::IPreparedModel> kPreparedModel;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_PREPARED_MODEL_H
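
Note: the header above keeps both a synchronous and an asynchronous private execution helper. A plausible dispatch between them is sketched below; the selection policy shown here is an assumption for illustration, not taken from this patch, and the two run functions are stand-ins for the real HIDL calls:

// Sketch of preferring a synchronous execution path with an asynchronous
// fallback, echoing PreparedModel's two private helpers.
#include <functional>
#include <iostream>
#include <optional>
#include <string>

using ExecuteFn = std::function<std::optional<std::string>()>;

std::string execute(bool syncSupported, const ExecuteFn& runSync, const ExecuteFn& runAsync) {
    if (syncSupported) {
        // One binder round trip: results come back on the same call.
        if (const auto result = runSync()) return *result;
    }
    // Otherwise launch with a callback object and block on its take().
    if (const auto result = runAsync()) return *result;
    return "execution failed";
}

int main() {
    const ExecuteFn sync = [] { return std::optional<std::string>("sync result"); };
    const ExecuteFn async = [] { return std::optional<std::string>("async result"); };
    std::cout << execute(true, sync, async) << "\n";
    std::cout << execute(false, sync, async) << "\n";
    return 0;
}
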
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h
new file mode 100644
index 0000000..44f004f
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Service.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <string>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_SERVICE_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index b1c2f1a..a9a6bae 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
@@ -38,10 +39,14 @@
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return {};
 }
@@ -58,9 +63,14 @@
 template <typename Type>
 decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
     auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return canonical;
 }
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
new file mode 100644
index 0000000..cb739f0
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Callbacks.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+#include <utility>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace {
+
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<V1_0::IPreparedModel>& preparedModel) {
+    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<IPreparedModel>& preparedModel) {
+    return NN_TRY(utils::PreparedModel::create(preparedModel));
+}
+
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
+                                     const Timing& timing) {
+    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
+                          NN_TRY(validatedConvertToCanonical(timing)));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    return hal::utils::makeExecutionFailure(
+            convertExecutionGeneralResultsHelper(outputShapes, timing));
+}
+
+}  // namespace
+
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
+                                               const sp<IPreparedModel>& preparedModel) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+void PreparedModelCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+PreparedModelCallback::Data PreparedModelCallback::get() {
+    return mData.take();
+}
+
+void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
+    mData.put(std::move(result));
+}
+
+// ExecutionCallback methods begin here
+
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal({});
+    }
+    return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
+    }
+    return Void();
+}
+
+void ExecutionCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+ExecutionCallback::Data ExecutionCallback::get() {
+    return mData.take();
+}
+
+void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
+    mData.put(std::move(result));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
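
Note: a line that recurs throughout the callbacks above is validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE): if even the error code itself fails to convert, the failure is still surfaced, just degraded to the most generic canonical status. A standalone sketch of that degradation with hand-rolled enums (illustrative only):

// Sketch of degrading an unconvertible HAL error code to GENERAL_FAILURE.
#include <iostream>
#include <optional>

enum class HalStatus { NONE = 0, DEVICE_UNAVAILABLE = 1, BOGUS = 99 };
enum class CanonicalStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE };

std::optional<CanonicalStatus> convert(HalStatus status) {
    switch (status) {
        case HalStatus::NONE: return CanonicalStatus::NONE;
        case HalStatus::DEVICE_UNAVAILABLE: return CanonicalStatus::DEVICE_UNAVAILABLE;
        default: return std::nullopt;  // unknown code: conversion itself fails
    }
}

int main() {
    // Even when the status cannot be converted, the failure is still
    // reported, just with the most generic canonical code.
    const auto canonical =
            convert(HalStatus::BOGUS).value_or(CanonicalStatus::GENERAL_FAILURE);
    std::cout << static_cast<int>(canonical) << "\n";  // prints 2
    return 0;
}
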
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index fed314b..378719a 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -26,6 +26,7 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <functional>
@@ -78,7 +79,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
     std::vector<ConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -88,25 +89,25 @@
 }
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     return convertVec(arguments);
 }
 
 }  // anonymous namespace
 
-Result<OperandType> convert(const hal::V1_2::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_2::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-Result<OperationType> convert(const hal::V1_2::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_2::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-Result<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
+GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
     return static_cast<DeviceType>(deviceType);
 }
 
-Result<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
@@ -114,7 +115,7 @@
                 return !maybeType.has_value() ? false : validOperandType(maybeType.value());
             });
     if (!validOperandTypes) {
-        return NN_ERROR()
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                << "Invalid OperandType when converting OperandPerformance in Capabilities";
     }
 
@@ -124,8 +125,9 @@
             NN_TRY(convert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
     auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));
 
-    auto table =
-            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+    auto table = NN_TRY(hal::utils::makeGeneralFailure(
+            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
+            nn::ErrorStatus::GENERAL_FAILURE));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
@@ -134,7 +136,7 @@
     };
 }
 
-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<Capabilities::OperandPerformance> convert(
         const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -142,7 +144,7 @@
     };
 }
 
-Result<Operation> convert(const hal::V1_2::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_2::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -150,7 +152,7 @@
     };
 }
 
-Result<Operand::SymmPerChannelQuantParams> convert(
+GeneralResult<Operand::SymmPerChannelQuantParams> convert(
         const hal::V1_2::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
     return Operand::SymmPerChannelQuantParams{
             .scales = symmPerChannelQuantParams.scales,
@@ -158,7 +160,7 @@
     };
 }
 
-Result<Operand> convert(const hal::V1_2::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_2::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -170,7 +172,7 @@
     };
 }
 
-Result<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams) {
+GeneralResult<Operand::ExtraParams> convert(const hal::V1_2::Operand::ExtraParams& extraParams) {
     using Discriminator = hal::V1_2::Operand::ExtraParams::hidl_discriminator;
     switch (extraParams.getDiscriminator()) {
         case Discriminator::none:
@@ -180,11 +182,12 @@
         case Discriminator::extension:
             return extraParams.extension();
     }
-    return NN_ERROR() << "Unrecognized Operand::ExtraParams discriminator: "
-                      << underlyingType(extraParams.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Unrecognized Operand::ExtraParams discriminator: "
+           << underlyingType(extraParams.getDiscriminator());
 }
 
-Result<Model> convert(const hal::V1_2::Model& model) {
+GeneralResult<Model> convert(const hal::V1_2::Model& model) {
     auto operations = NN_TRY(convert(model.operations));
 
     // Verify number of consumers.
@@ -193,9 +196,9 @@
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << model.operands[i].numberOfConsumers;
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
         }
     }
 
@@ -215,7 +218,7 @@
     };
 }
 
-Result<Model::ExtensionNameAndPrefix> convert(
+GeneralResult<Model::ExtensionNameAndPrefix> convert(
         const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return Model::ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
@@ -223,29 +226,29 @@
     };
 }
 
-Result<OutputShape> convert(const hal::V1_2::OutputShape& outputShape) {
+GeneralResult<OutputShape> convert(const hal::V1_2::OutputShape& outputShape) {
     return OutputShape{
             .dimensions = outputShape.dimensions,
             .isSufficient = outputShape.isSufficient,
     };
 }
 
-Result<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
+GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {
     return static_cast<MeasureTiming>(measureTiming);
 }
 
-Result<Timing> convert(const hal::V1_2::Timing& timing) {
+GeneralResult<Timing> convert(const hal::V1_2::Timing& timing) {
     return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
 }
 
-Result<Extension> convert(const hal::V1_2::Extension& extension) {
+GeneralResult<Extension> convert(const hal::V1_2::Extension& extension) {
     return Extension{
             .name = extension.name,
             .operandTypes = NN_TRY(convert(extension.operandTypes)),
     };
 }
 
-Result<Extension::OperandTypeInformation> convert(
+GeneralResult<Extension::OperandTypeInformation> convert(
         const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation) {
     return Extension::OperandTypeInformation{
             .type = operandTypeInformation.type,
@@ -254,20 +257,21 @@
     };
 }
 
-Result<NativeHandle> convert(const hidl_handle& handle) {
+GeneralResult<NativeHandle> convert(const hidl_handle& handle) {
     auto* cloned = native_handle_clone(handle.getNativeHandle());
     return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
 }
 
-Result<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
+GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
     return convertVec(extensions);
 }
 
-Result<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
+GeneralResult<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
     return convertVec(handles);
 }
 
-Result<std::vector<OutputShape>> convert(const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
+GeneralResult<std::vector<OutputShape>> convert(
+        const hidl_vec<hal::V1_2::OutputShape>& outputShapes) {
     return convertVec(outputShapes);
 }
 
@@ -278,24 +282,24 @@
 
 using utils::convert;
 
-nn::Result<V1_0::OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
+nn::GeneralResult<V1_0::OperandLifeTime> convert(const nn::Operand::LifeTime& lifetime) {
     return V1_0::utils::convert(lifetime);
 }
 
-nn::Result<V1_0::PerformanceInfo> convert(
+nn::GeneralResult<V1_0::PerformanceInfo> convert(
         const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return V1_0::utils::convert(performanceInfo);
 }
 
-nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& location) {
+nn::GeneralResult<V1_0::DataLocation> convert(const nn::DataLocation& location) {
     return V1_0::utils::convert(location);
 }
 
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return V1_0::utils::convert(operandValues);
 }
 
-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     return V1_0::utils::convert(memory);
 }
 
@@ -303,7 +307,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
     hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(convert(arguments[i]));
@@ -312,22 +316,23 @@
 }
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     return convertVec(arguments);
 }
 
-nn::Result<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
+nn::GeneralResult<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
     return Operand::ExtraParams{};
 }
 
-nn::Result<Operand::ExtraParams> makeExtraParams(
+nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
         const nn::Operand::SymmPerChannelQuantParams& channelQuant) {
     Operand::ExtraParams ret;
     ret.channelQuant(NN_TRY(convert(channelQuant)));
     return ret;
 }
 
-nn::Result<Operand::ExtraParams> makeExtraParams(const nn::Operand::ExtensionParams& extension) {
+nn::GeneralResult<Operand::ExtraParams> makeExtraParams(
+        const nn::Operand::ExtensionParams& extension) {
     Operand::ExtraParams ret;
     ret.extension(extension);
     return ret;
@@ -335,28 +340,29 @@
 
 }  // anonymous namespace
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-nn::Result<DeviceType> convert(const nn::DeviceType& deviceType) {
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
     switch (deviceType) {
         case nn::DeviceType::UNKNOWN:
-            return NN_ERROR() << "Invalid DeviceType UNKNOWN";
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid DeviceType UNKNOWN";
         case nn::DeviceType::OTHER:
         case nn::DeviceType::CPU:
         case nn::DeviceType::GPU:
         case nn::DeviceType::ACCELERATOR:
             return static_cast<DeviceType>(deviceType);
     }
-    return NN_ERROR() << "Invalid DeviceType " << underlyingType(deviceType);
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid DeviceType " << underlyingType(deviceType);
 }
 
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
     operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
     std::copy_if(capabilities.operandPerformance.asVector().begin(),
@@ -375,7 +381,7 @@
     };
 }
 
-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
         const nn::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -383,7 +389,7 @@
     };
 }
 
-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -391,7 +397,7 @@
     };
 }
 
-nn::Result<SymmPerChannelQuantParams> convert(
+nn::GeneralResult<SymmPerChannelQuantParams> convert(
         const nn::Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) {
     return SymmPerChannelQuantParams{
             .scales = symmPerChannelQuantParams.scales,
@@ -399,7 +405,7 @@
     };
 }
 
-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -412,13 +418,14 @@
     };
 }
 
-nn::Result<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
+nn::GeneralResult<Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
     return std::visit([](const auto& x) { return makeExtraParams(x); }, extraParams);
 }
 
-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
 
     auto operands = NN_TRY(convert(model.main.operands));
@@ -443,7 +450,7 @@
     };
 }
 
-nn::Result<Model::ExtensionNameAndPrefix> convert(
+nn::GeneralResult<Model::ExtensionNameAndPrefix> convert(
         const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return Model::ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
@@ -451,27 +458,27 @@
     };
 }
 
-nn::Result<OutputShape> convert(const nn::OutputShape& outputShape) {
+nn::GeneralResult<OutputShape> convert(const nn::OutputShape& outputShape) {
     return OutputShape{.dimensions = outputShape.dimensions,
                        .isSufficient = outputShape.isSufficient};
 }
 
-nn::Result<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
+nn::GeneralResult<MeasureTiming> convert(const nn::MeasureTiming& measureTiming) {
     return static_cast<MeasureTiming>(measureTiming);
 }
 
-nn::Result<Timing> convert(const nn::Timing& timing) {
+nn::GeneralResult<Timing> convert(const nn::Timing& timing) {
     return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
 }
 
-nn::Result<Extension> convert(const nn::Extension& extension) {
+nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
     return Extension{
             .name = extension.name,
             .operandTypes = NN_TRY(convert(extension.operandTypes)),
     };
 }
 
-nn::Result<Extension::OperandTypeInformation> convert(
+nn::GeneralResult<Extension::OperandTypeInformation> convert(
         const nn::Extension::OperandTypeInformation& operandTypeInformation) {
     return Extension::OperandTypeInformation{
             .type = operandTypeInformation.type,
@@ -480,22 +487,22 @@
     };
 }
 
-nn::Result<hidl_handle> convert(const nn::NativeHandle& handle) {
+nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle) {
     const auto hidlHandle = hidl_handle(handle->handle());
     // Copy memory to force the native_handle_t to be copied.
     auto copiedHandle = hidlHandle;
     return copiedHandle;
 }
 
-nn::Result<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
+nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
     return convertVec(extensions);
 }
 
-nn::Result<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
     return convertVec(handles);
 }
 
-nn::Result<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes) {
+nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes) {
     return convertVec(outputShapes);
 }
 
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
new file mode 100644
index 0000000..ca236f1
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
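+// Each init* helper below follows the same pattern: seed |result| with a
+// GENERAL_FAILURE "uninitialized" error, let the HIDL callback overwrite it on
+// success, and surface transport failures via handleTransportError.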
+nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<std::string> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                            << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status);
+        } else {
+            result = versionString;
+        }
+    };
+
+    const auto ret = device->getVersionString(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<nn::DeviceType> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                               << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status);
+        } else {
+            result = nn::convert(deviceType);
+        }
+    };
+
+    const auto ret = device->getType(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<std::vector<nn::Extension>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status);
+        } else {
+            result = nn::convert(extensions);
+        }
+    };
+
+    const auto ret = device->getSupportedExtensions(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                 << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status);
+        } else {
+            result = validatedConvertToCanonical(capabilities);
+        }
+    };
+
+    const auto ret = device->getCapabilities_1_2(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+        V1_2::IDevice* device) {
+    CHECK(device != nullptr);
+
+    nn::GeneralResult<std::pair<uint32_t, uint32_t>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache,
+                              uint32_t numDataCache) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical)
+                     << "getNumberOfCacheFilesNeeded failed with " << toString(status);
+        } else {
+            result = std::make_pair(numModelCache, numDataCache);
+        }
+    };
+
+    const auto ret = device->getNumberOfCacheFilesNeeded(cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
+                                                                sp<V1_2::IDevice> device) {
+    if (name.empty()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_2::utils::Device::create must have non-empty name";
+    }
+    if (device == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_2::utils::Device::create must have non-null device";
+    }
+
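+    // Eagerly query the static device properties once so the getters below can
+    // return cached values without further IPC.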
+    auto versionString = NN_TRY(initVersionString(device.get()));
+    const auto deviceType = NN_TRY(initDeviceType(device.get()));
+    auto extensions = NN_TRY(initExtensions(device.get()));
+    auto capabilities = NN_TRY(initCapabilities(device.get()));
+    const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get()));
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
+    return std::make_shared<const Device>(
+            PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
+            std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
+            std::move(device), std::move(deathHandler));
+}
+
+Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
+               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
+               nn::Capabilities capabilities,
+               std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, sp<V1_2::IDevice> device,
+               hal::utils::DeathHandler deathHandler)
+    : kName(std::move(name)),
+      kVersionString(std::move(versionString)),
+      kDeviceType(deviceType),
+      kExtensions(std::move(extensions)),
+      kCapabilities(std::move(capabilities)),
+      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded),
+      kDevice(std::move(device)),
+      kDeathHandler(std::move(deathHandler)) {}
+
+const std::string& Device::getName() const {
+    return kName;
+}
+
+const std::string& Device::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version Device::getFeatureLevel() const {
+    return nn::Version::ANDROID_Q;
+}
+
+nn::DeviceType Device::getType() const {
+    return kDeviceType;
+}
+
+const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& Device::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
+    return kNumberOfCacheFilesNeeded;
+}
+
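+// A successful ping() round trip is used as a liveness check for the service;
+// transport failures are mapped to errors by handleTransportError.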
+nn::GeneralResult<void> Device::wait() const {
+    const auto ret = kDevice->ping();
+    return hal::utils::handleTransportError(ret);
+}
+
+nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+
+    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                  << "uninitialized";
+    auto cb = [&result, &model](V1_0::ErrorStatus status,
+                                const hidl_vec<bool>& supportedOperations) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical)
+                     << "getSupportedOperations_1_2 failed with " << toString(status);
+        } else if (supportedOperations.size() != model.main.operations.size()) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                     << "getSupportedOperations_1_2 returned vector of size "
+                     << supportedOperations.size() << " but expected "
+                     << model.main.operations.size();
+        } else {
+            result = supportedOperations;
+        }
+    };
+
+    const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = token;
+
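+    // Results are delivered through PreparedModelCallback; protectCallback
+    // arranges for the callback to be notified if the driver process dies.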
+    const auto cb = sp<PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
+                                               hidlDataCache, hidlToken, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    const auto hidlModelCache = NN_TRY(convert(modelCache));
+    const auto hidlDataCache = NN_TRY(convert(dataCache));
+    const auto hidlToken = token;
+
+    const auto cb = sp<PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedBuffer> Device::allocate(
+        const nn::BufferDesc& /*desc*/,
+        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
+        const std::vector<nn::BufferRole>& /*inputRoles*/,
+        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IDevice::allocate not supported on 1.2 HAL service";
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
new file mode 100644
index 0000000..ff9db21
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace {
+
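+// Converts the HIDL output shapes and timing to canonical types, mapping any
+// conversion failure onto an execution failure via makeExecutionFailure.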
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionResultsHelper(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
+                          NN_TRY(validatedConvertToCanonical(timing)));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
+        const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
+        sp<V1_2::IPreparedModel> preparedModel) {
+    if (preparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
+    }
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+                                                 std::move(deathHandler));
+}
+
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_2::IPreparedModel> preparedModel,
+                             hal::utils::DeathHandler deathHandler)
+    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+
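+// Seeds |result| with an error, lets the synchronous-execution callback
+// overwrite it, and checks the transport status of the call itself.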
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                              const Timing& timing) {
+        if (status != V1_0::ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
+        } else {
+            result = convertExecutionResults(outputShapes, timing);
+        }
+    };
+
+    const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
+    NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+
+    return result;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming measure) const {
+    const auto cb = sp<ExecutionCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
+    const auto status =
+            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "execute failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+
+    const auto hidlRequest =
+            NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared)));
+    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const bool preferSynchronous = true;
+
+    // Execute synchronously if allowed.
+    if (preferSynchronous) {
+        result = executeSynchronously(hidlRequest, hidlMeasure);
+    }
+
+    // Run asynchronous execution if execution has not already completed.
+    if (!result.has_value()) {
+        result = executeAsynchronously(hidlRequest, hidlMeasure);
+    }
+
+    // Flush output buffers if execution was successful.
+    if (result.has_value()) {
+        NN_TRY(hal::utils::makeExecutionFailure(
+                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
+    }
+
+    return result;
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
+}
+
+std::any PreparedModel::getUnderlyingResource() const {
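+    // Return the HIDL prepared model as its V1_0 base interface, type-erased in
+    // std::any, for callers that need direct access to the underlying object.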
+    sp<V1_0::IPreparedModel> resource = kPreparedModel;
+    return resource;
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/Service.cpp b/neuralnetworks/1.2/utils/src/Service.cpp
new file mode 100644
index 0000000..110188f
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/Service.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientDevice.h>
+#include <string>
+#include "Device.h"
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
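+    // ResilientDevice keeps this factory so the underlying device can be
+    // re-created through it if needed; |blocking| selects between getService
+    // (which waits for the service) and tryGetService.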
+    hal::utils::ResilientDevice::Factory makeDevice =
+            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
+        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
+        if (service == nullptr) {
+            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
+        }
+        return Device::create(name, std::move(service));
+    };
+
+    return hal::utils::ResilientDevice::create(std::move(makeDevice));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.3/utils/Android.bp b/neuralnetworks/1.3/utils/Android.bp
index 279b250..d5d897d 100644
--- a/neuralnetworks/1.3/utils/Android.bp
+++ b/neuralnetworks/1.3/utils/Android.bp
@@ -20,6 +20,7 @@
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/1.3/"],
     export_include_dirs: ["include"],
+    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
new file mode 100644
index 0000000..637179d
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Buffer.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H
+
+#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <memory>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class Buffer final : public nn::IBuffer {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const Buffer>> create(
+            sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token);
+
+    Buffer(PrivateConstructorTag tag, sp<V1_3::IBuffer> buffer,
+           nn::Request::MemoryDomainToken token);
+
+    nn::Request::MemoryDomainToken getToken() const override;
+
+    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+                                     const nn::Dimensions& dimensions) const override;
+
+  private:
+    const sp<V1_3::IBuffer> kBuffer;
+    const nn::Request::MemoryDomainToken kToken;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_BUFFER_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
new file mode 100644
index 0000000..d46b111
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H
+
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class PreparedModelCallback final : public IPreparedModelCallback,
+                                    public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::GeneralResult<nn::SharedPreparedModel>;
+
+    Return<void> notify(V1_0::ErrorStatus status,
+                        const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_3(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
+  public:
+    using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
+
+    Return<void> notify(V1_0::ErrorStatus status) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+    Return<void> notify_1_3(ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+
+    void notifyAsDeadObject() override;
+
+    Data get();
+
+  private:
+    void notifyInternal(Data result);
+
+    hal::utils::TransferValue<Data> mData;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_CALLBACKS_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index 43987a9..64aa96e 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -25,54 +25,54 @@
 
 namespace android::nn {
 
-Result<OperandType> convert(const hal::V1_3::OperandType& operandType);
-Result<OperationType> convert(const hal::V1_3::OperationType& operationType);
-Result<Priority> convert(const hal::V1_3::Priority& priority);
-Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities);
-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<OperandType> convert(const hal::V1_3::OperandType& operandType);
+GeneralResult<OperationType> convert(const hal::V1_3::OperationType& operationType);
+GeneralResult<Priority> convert(const hal::V1_3::Priority& priority);
+GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities);
+GeneralResult<Capabilities::OperandPerformance> convert(
         const hal::V1_3::Capabilities::OperandPerformance& operandPerformance);
-Result<Operation> convert(const hal::V1_3::Operation& operation);
-Result<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime);
-Result<Operand> convert(const hal::V1_3::Operand& operand);
-Result<Model> convert(const hal::V1_3::Model& model);
-Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph);
-Result<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
-Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole);
-Result<Request> convert(const hal::V1_3::Request& request);
-Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool);
-Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
-Result<OptionalTimeoutDuration> convert(
+GeneralResult<Operation> convert(const hal::V1_3::Operation& operation);
+GeneralResult<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime);
+GeneralResult<Operand> convert(const hal::V1_3::Operand& operand);
+GeneralResult<Model> convert(const hal::V1_3::Model& model);
+GeneralResult<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph);
+GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
+GeneralResult<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole);
+GeneralResult<Request> convert(const hal::V1_3::Request& request);
+GeneralResult<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool);
+GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
+GeneralResult<OptionalTimeoutDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
-Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
+GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
 
-Result<std::vector<BufferRole>> convert(
+GeneralResult<std::vector<BufferRole>> convert(
         const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles);
 
 }  // namespace android::nn
 
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType);
-nn::Result<OperationType> convert(const nn::OperationType& operationType);
-nn::Result<Priority> convert(const nn::Priority& priority);
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities);
-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType);
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType);
+nn::GeneralResult<Priority> convert(const nn::Priority& priority);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
         const nn::Capabilities::OperandPerformance& operandPerformance);
-nn::Result<Operation> convert(const nn::Operation& operation);
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime);
-nn::Result<Operand> convert(const nn::Operand& operand);
-nn::Result<Model> convert(const nn::Model& model);
-nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph);
-nn::Result<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
-nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole);
-nn::Result<Request> convert(const nn::Request& request);
-nn::Result<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool);
-nn::Result<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
-nn::Result<OptionalTimeoutDuration> convert(
+nn::GeneralResult<Operation> convert(const nn::Operation& operation);
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime);
+nn::GeneralResult<Operand> convert(const nn::Operand& operand);
+nn::GeneralResult<Model> convert(const nn::Model& model);
+nn::GeneralResult<Subgraph> convert(const nn::Model::Subgraph& subgraph);
+nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
+nn::GeneralResult<BufferRole> convert(const nn::BufferRole& bufferRole);
+nn::GeneralResult<Request> convert(const nn::Request& request);
+nn::GeneralResult<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool);
+nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
+nn::GeneralResult<OptionalTimeoutDuration> convert(
         const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
 
-nn::Result<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
+nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
new file mode 100644
index 0000000..2f6c46a
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H
+
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class Device final : public nn::IDevice {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const Device>> create(std::string name,
+                                                                   sp<V1_3::IDevice> device);
+
+    Device(PrivateConstructorTag tag, std::string name, std::string versionString,
+           nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
+           nn::Capabilities capabilities, std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
+           sp<V1_3::IDevice> device, hal::utils::DeathHandler deathHandler);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString;
+    const nn::DeviceType kDeviceType;
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
+    const sp<V1_3::IDevice> kDevice;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_DEVICE_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
new file mode 100644
index 0000000..e0d69dd
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H
+
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class PreparedModel final : public nn::IPreparedModel {
+    struct PrivateConstructorTag {};
+
+  public:
+    static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
+            sp<V1_3::IPreparedModel> preparedModel);
+
+    PreparedModel(PrivateConstructorTag tag, sp<V1_3::IPreparedModel> preparedModel,
+                  hal::utils::DeathHandler deathHandler);
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+
+  private:
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
+            const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+            const OptionalTimeoutDuration& loopTimeoutDuration) const;
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
+            const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+            const OptionalTimeoutDuration& loopTimeoutDuration) const;
+
+    const sp<V1_3::IPreparedModel> kPreparedModel;
+    const hal::utils::DeathHandler kDeathHandler;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_PREPARED_MODEL_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h
new file mode 100644
index 0000000..2bc3257
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Service.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <string>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name);
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_SERVICE_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
index f8c975d..e61859d 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
@@ -35,10 +36,14 @@
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
-    const auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeCanonical = nn::convert(halObject);
+    if (!maybeCanonical.has_value()) {
+        return nn::error() << maybeCanonical.error().message;
+    }
+    const auto version = NN_TRY(nn::validate(maybeCanonical.value()));
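+    // nn::validate returns the minimum HAL version required by the object.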
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return {};
 }
@@ -55,9 +60,14 @@
 template <typename Type>
 decltype(nn::convert(std::declval<Type>())) validatedConvertToCanonical(const Type& halObject) {
     auto canonical = NN_TRY(nn::convert(halObject));
-    const auto version = NN_TRY(nn::validate(canonical));
+    const auto maybeVersion = nn::validate(canonical);
+    if (!maybeVersion.has_value()) {
+        return nn::error() << maybeVersion.error();
+    }
+    const auto version = maybeVersion.value();
     if (version > utils::kVersion) {
-        return NN_ERROR() << "";
+        return NN_ERROR() << "Insufficient version: " << version << " vs required "
+                          << utils::kVersion;
     }
     return canonical;
 }
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
new file mode 100644
index 0000000..f3fe9b5
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/HandleError.h>
+
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <memory>
+#include <utility>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+nn::GeneralResult<std::shared_ptr<const Buffer>> Buffer::create(
+        sp<V1_3::IBuffer> buffer, nn::Request::MemoryDomainToken token) {
+    if (buffer == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_3::utils::Buffer::create must have non-null buffer";
+    }
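+    // A value-initialized (zero) token is not a valid MemoryDomainToken.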
+    if (token == static_cast<nn::Request::MemoryDomainToken>(0)) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_3::utils::Buffer::create must have non-zero token";
+    }
+
+    return std::make_shared<const Buffer>(PrivateConstructorTag{}, std::move(buffer), token);
+}
+
+Buffer::Buffer(PrivateConstructorTag /*tag*/, sp<V1_3::IBuffer> buffer,
+               nn::Request::MemoryDomainToken token)
+    : kBuffer(std::move(buffer)), kToken(token) {
+    CHECK(kBuffer != nullptr);
+    CHECK(kToken != static_cast<nn::Request::MemoryDomainToken>(0));
+}
+
+nn::Request::MemoryDomainToken Buffer::getToken() const {
+    return kToken;
+}
+
+nn::GeneralResult<void> Buffer::copyTo(const nn::Memory& dst) const {
+    const auto hidlDst = NN_TRY(V1_0::utils::convert(dst));
+
+    const auto ret = kBuffer->copyTo(hidlDst);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "IBuffer::copyTo failed with " << toString(status);
+    }
+
+    return {};
+}
+
+nn::GeneralResult<void> Buffer::copyFrom(const nn::Memory& src,
+                                         const nn::Dimensions& dimensions) const {
+    const auto hidlSrc = NN_TRY(V1_0::utils::convert(src));
+    const auto hidlDimensions = hidl_vec<uint32_t>(dimensions);
+
+    const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "IBuffer::copyFrom failed with " << toString(status);
+    }
+
+    return {};
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
new file mode 100644
index 0000000..ff81275
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Callbacks.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/PreparedModel.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/TransferValue.h>
+
+#include <utility>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+namespace {
+
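+// Each overload below wraps a HIDL prepared model of the corresponding HAL version in its
+// version-specific utils::PreparedModel adapter, returned as a canonical
+// nn::SharedPreparedModel.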
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<V1_0::IPreparedModel>& preparedModel) {
+    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<V1_2::IPreparedModel>& preparedModel) {
+    return NN_TRY(V1_2::utils::PreparedModel::create(preparedModel));
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
+        const sp<IPreparedModel>& preparedModel) {
+    return NN_TRY(utils::PreparedModel::create(preparedModel));
+}
+
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
+                                     const V1_2::Timing& timing) {
+    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
+                          NN_TRY(validatedConvertToCanonical(timing)));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionGeneralResults(const hidl_vec<V1_2::OutputShape>& outputShapes,
+                               const V1_2::Timing& timing) {
+    return hal::utils::makeExecutionFailure(
+            convertExecutionGeneralResultsHelper(outputShapes, timing));
+}
+
+}  // namespace
+
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_3(ErrorStatus status,
+                                               const sp<IPreparedModel>& preparedModel) {
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
+    } else if (preparedModel == nullptr) {
+        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                       << "Returned preparedModel is nullptr");
+    } else {
+        notifyInternal(convertPreparedModel(preparedModel));
+    }
+    return Void();
+}
+
+void PreparedModelCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+PreparedModelCallback::Data PreparedModelCallback::get() {
+    return mData.take();
+}
+
+void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
+    mData.put(std::move(result));
+}
+
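+// Note: mData is a hal::utils::TransferValue, so get() waits until a value has been supplied via
+// notifyInternal, i.e., until the driver invokes one of the notify* methods or the callback is
+// notified as a dead object.
+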
+// ExecutionCallback methods begin here
+
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal({});
+    }
+    return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
+                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
+                                           const V1_2::Timing& timing) {
+    if (status != V1_0::ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
+    }
+    return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_3(ErrorStatus status,
+                                           const hidl_vec<V1_2::OutputShape>& outputShapes,
+                                           const V1_2::Timing& timing) {
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
+    } else {
+        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
+    }
+    return Void();
+}
+
+void ExecutionCallback::notifyAsDeadObject() {
+    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+}
+
+ExecutionCallback::Data ExecutionCallback::get() {
+    return mData.take();
+}
+
+void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
+    mData.put(std::move(result));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 4c54e3b..0dc0785 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -27,6 +27,7 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <chrono>
@@ -79,7 +80,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
     std::vector<ConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -89,25 +90,25 @@
 }
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     return convertVec(arguments);
 }
 
 }  // anonymous namespace
 
-Result<OperandType> convert(const hal::V1_3::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_3::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-Result<OperationType> convert(const hal::V1_3::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_3::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-Result<Priority> convert(const hal::V1_3::Priority& priority) {
+GeneralResult<Priority> convert(const hal::V1_3::Priority& priority) {
     return static_cast<Priority>(priority);
 }
 
-Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
@@ -115,13 +116,14 @@
                 return !maybeType.has_value() ? false : validOperandType(maybeType.value());
             });
     if (!validOperandTypes) {
-        return NN_ERROR()
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                << "Invalid OperandType when converting OperandPerformance in Capabilities";
     }
 
     auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));
-    auto table =
-            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+    auto table = NN_TRY(hal::utils::makeGeneralFailure(
+            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
+            nn::ErrorStatus::GENERAL_FAILURE));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar =
@@ -134,7 +136,7 @@
     };
 }
 
-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<Capabilities::OperandPerformance> convert(
         const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -142,7 +144,7 @@
     };
 }
 
-Result<Operation> convert(const hal::V1_3::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_3::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -150,11 +152,11 @@
     };
 }
 
-Result<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
+GeneralResult<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
     return static_cast<Operand::LifeTime>(operandLifeTime);
 }
 
-Result<Operand> convert(const hal::V1_3::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_3::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -166,7 +168,7 @@
     };
 }
 
-Result<Model> convert(const hal::V1_3::Model& model) {
+GeneralResult<Model> convert(const hal::V1_3::Model& model) {
     return Model{
             .main = NN_TRY(convert(model.main)),
             .referenced = NN_TRY(convert(model.referenced)),
@@ -177,7 +179,7 @@
     };
 }
 
-Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
+GeneralResult<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
     auto operations = NN_TRY(convert(subgraph.operations));
 
     // Verify number of consumers.
@@ -186,9 +188,10 @@
     CHECK(subgraph.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < subgraph.operands.size(); ++i) {
         if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << subgraph.operands[i].numberOfConsumers;
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found "
+                   << subgraph.operands[i].numberOfConsumers;
         }
     }
 
@@ -200,11 +203,11 @@
     };
 }
 
-Result<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
+GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
     return BufferDesc{.dimensions = bufferDesc.dimensions};
 }
 
-Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
+GeneralResult<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
@@ -212,7 +215,7 @@
     };
 }
 
-Result<Request> convert(const hal::V1_3::Request& request) {
+GeneralResult<Request> convert(const hal::V1_3::Request& request) {
     return Request{
             .inputs = NN_TRY(convert(request.inputs)),
             .outputs = NN_TRY(convert(request.outputs)),
@@ -220,7 +223,7 @@
     };
 }
 
-Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
+GeneralResult<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
     using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
     switch (memoryPool.getDiscriminator()) {
         case Discriminator::hidlMemory:
@@ -228,15 +231,16 @@
         case Discriminator::token:
             return static_cast<Request::MemoryDomainToken>(memoryPool.token());
     }
-    return NN_ERROR() << "Invalid Request::MemoryPool discriminator "
-                      << underlyingType(memoryPool.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid Request::MemoryPool discriminator "
+           << underlyingType(memoryPool.getDiscriminator());
 }
 
-Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
+GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
     constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count();
-    const auto makeTimePoint = [](uint64_t count) -> Result<OptionalTimePoint> {
+    const auto makeTimePoint = [](uint64_t count) -> GeneralResult<OptionalTimePoint> {
         if (count > kTimePointMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimePoint because the count exceeds the max";
         }
         const auto nanoseconds = std::chrono::nanoseconds{count};
@@ -250,16 +254,17 @@
         case Discriminator::nanosecondsSinceEpoch:
             return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch());
     }
-    return NN_ERROR() << "Invalid OptionalTimePoint discriminator "
-                      << underlyingType(optionalTimePoint.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimePoint discriminator "
+           << underlyingType(optionalTimePoint.getDiscriminator());
 }
 
-Result<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalTimeoutDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
     constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count();
-    const auto makeTimeoutDuration = [](uint64_t count) -> Result<OptionalTimeoutDuration> {
+    const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult<OptionalTimeoutDuration> {
         if (count > kTimeoutDurationMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimeoutDuration because the count exceeds the max";
         }
         return TimeoutDuration{count};
@@ -272,11 +277,12 @@
         case Discriminator::nanoseconds:
             return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds());
     }
-    return NN_ERROR() << "Invalid OptionalTimeoutDuration discriminator "
-                      << underlyingType(optionalTimeoutDuration.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimeoutDuration discriminator "
+           << underlyingType(optionalTimeoutDuration.getDiscriminator());
 }
 
-Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
+GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
     switch (status) {
         case hal::V1_3::ErrorStatus::NONE:
         case hal::V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -289,10 +295,11 @@
         case hal::V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
             return static_cast<ErrorStatus>(status);
     }
-    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid ErrorStatus " << underlyingType(status);
 }
 
-Result<std::vector<BufferRole>> convert(
+GeneralResult<std::vector<BufferRole>> convert(
         const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles) {
     return convertVec(bufferRoles);
 }
@@ -304,32 +311,32 @@
 
 using utils::convert;
 
-nn::Result<V1_0::PerformanceInfo> convert(
+nn::GeneralResult<V1_0::PerformanceInfo> convert(
         const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return V1_0::utils::convert(performanceInfo);
 }
 
-nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
+nn::GeneralResult<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
     return V1_0::utils::convert(dataLocation);
 }
 
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return V1_0::utils::convert(operandValues);
 }
 
-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     return V1_0::utils::convert(memory);
 }
 
-nn::Result<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
+nn::GeneralResult<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
     return V1_0::utils::convert(argument);
 }
 
-nn::Result<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
+nn::GeneralResult<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
     return V1_2::utils::convert(extraParams);
 }
 
-nn::Result<V1_2::Model::ExtensionNameAndPrefix> convert(
+nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> convert(
         const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return V1_2::utils::convert(extensionNameAndPrefix);
 }
@@ -338,7 +345,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
     hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(convert(arguments[i]));
@@ -347,42 +354,41 @@
 }
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     return convertVec(arguments);
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
     Request::MemoryPool ret;
     ret.hidlMemory(NN_TRY(convert(memory)));
     return ret;
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
     Request::MemoryPool ret;
     ret.token(underlyingType(token));
     return ret;
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(
-        const std::shared_ptr<const nn::IBuffer>& /*buffer*/) {
-    return NN_ERROR() << "Unable to make memory pool from IBuffer";
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::SharedBuffer& /*buffer*/) {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to make memory pool from IBuffer";
 }
 
 }  // anonymous namespace
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-nn::Result<Priority> convert(const nn::Priority& priority) {
+nn::GeneralResult<Priority> convert(const nn::Priority& priority) {
     return static_cast<Priority>(priority);
 }
 
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
     operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
     std::copy_if(capabilities.operandPerformance.asVector().begin(),
@@ -403,7 +409,7 @@
     };
 }
 
-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
         const nn::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -411,7 +417,7 @@
     };
 }
 
-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -419,14 +425,15 @@
     };
 }
 
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
     if (operandLifeTime == nn::Operand::LifeTime::POINTER) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
     return static_cast<OperandLifeTime>(operandLifeTime);
 }
 
-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -439,9 +446,10 @@
     };
 }
 
-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
 
     return Model{
@@ -454,7 +462,7 @@
     };
 }
 
-nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
+nn::GeneralResult<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
     auto operands = NN_TRY(convert(subgraph.operands));
 
     // Update number of consumers.
@@ -473,11 +481,11 @@
     };
 }
 
-nn::Result<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
+nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
     return BufferDesc{.dimensions = bufferDesc.dimensions};
 }
 
-nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole) {
+nn::GeneralResult<BufferRole> convert(const nn::BufferRole& bufferRole) {
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
@@ -485,9 +493,10 @@
     };
 }
 
-nn::Result<Request> convert(const nn::Request& request) {
+nn::GeneralResult<Request> convert(const nn::Request& request) {
     if (!hal::utils::hasNoPointerData(request)) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }
 
     return Request{
@@ -497,30 +506,31 @@
     };
 }
 
-nn::Result<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
+nn::GeneralResult<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
     return std::visit([](const auto& o) { return makeMemoryPool(o); }, memoryPool);
 }
 
-nn::Result<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
+nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
     OptionalTimePoint ret;
     if (optionalTimePoint.has_value()) {
         const auto count = optionalTimePoint.value().time_since_epoch().count();
         if (count < 0) {
-            return NN_ERROR() << "Unable to convert OptionalTimePoint because time since epoch "
-                                 "count is negative";
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Unable to convert OptionalTimePoint because time since epoch count is "
+                      "negative";
         }
         ret.nanosecondsSinceEpoch(count);
     }
     return ret;
 }
 
-nn::Result<OptionalTimeoutDuration> convert(
+nn::GeneralResult<OptionalTimeoutDuration> convert(
         const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
     OptionalTimeoutDuration ret;
     if (optionalTimeoutDuration.has_value()) {
         const auto count = optionalTimeoutDuration.value().count();
         if (count < 0) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimeoutDuration because count is negative";
         }
         ret.nanoseconds(count);
@@ -528,7 +538,7 @@
     return ret;
 }
 
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
     switch (errorStatus) {
         case nn::ErrorStatus::NONE:
         case nn::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -545,7 +555,7 @@
     }
 }
 
-nn::Result<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
+nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
     return convertVec(bufferRoles);
 }
 
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
new file mode 100644
index 0000000..c215f39
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Buffer.h"
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/Device.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <any>
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+namespace {
+
+nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(
+        const std::vector<nn::SharedPreparedModel>& preparedModels) {
+    hidl_vec<sp<IPreparedModel>> hidlPreparedModels(preparedModels.size());
+    for (size_t i = 0; i < preparedModels.size(); ++i) {
+        std::any underlyingResource = preparedModels[i]->getUnderlyingResource();
+        if (const auto* hidlPreparedModel =
+                    std::any_cast<sp<IPreparedModel>>(&underlyingResource)) {
+            hidlPreparedModels[i] = *hidlPreparedModel;
+        } else {
+            return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+                   << "Unable to convert from nn::IPreparedModel to V1_3::IPreparedModel";
+        }
+    }
+    return hidlPreparedModels;
+}
+
+nn::GeneralResult<nn::SharedBuffer> convert(
+        nn::GeneralResult<std::shared_ptr<const Buffer>> result) {
+    return NN_TRY(std::move(result));
+}
+
+}  // namespace
+
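+// Caches device metadata (version string, device type, extensions, capabilities, and cache-file
+// counts) at construction time via the V1_2 initialization helpers. Illustrative usage (names
+// hypothetical):
+//     auto device = NN_TRY(Device::create("example-driver", hidlDevice));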
+nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
+                                                                sp<V1_3::IDevice> device) {
+    if (name.empty()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_3::utils::Device::create must have non-empty name";
+    }
+    if (device == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_3::utils::Device::create must have non-null device";
+    }
+
+    auto versionString = NN_TRY(V1_2::utils::initVersionString(device.get()));
+    const auto deviceType = NN_TRY(V1_2::utils::initDeviceType(device.get()));
+    auto extensions = NN_TRY(V1_2::utils::initExtensions(device.get()));
+    auto capabilities = NN_TRY(V1_2::utils::initCapabilities(device.get()));
+    const auto numberOfCacheFilesNeeded =
+            NN_TRY(V1_2::utils::initNumberOfCacheFilesNeeded(device.get()));
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
+    return std::make_shared<const Device>(
+            PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
+            std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
+            std::move(device), std::move(deathHandler));
+}
+
+Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
+               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
+               nn::Capabilities capabilities,
+               std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, sp<V1_3::IDevice> device,
+               hal::utils::DeathHandler deathHandler)
+    : kName(std::move(name)),
+      kVersionString(std::move(versionString)),
+      kDeviceType(deviceType),
+      kExtensions(std::move(extensions)),
+      kCapabilities(std::move(capabilities)),
+      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded),
+      kDevice(std::move(device)),
+      kDeathHandler(std::move(deathHandler)) {}
+
+const std::string& Device::getName() const {
+    return kName;
+}
+
+const std::string& Device::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version Device::getFeatureLevel() const {
+    return nn::Version::ANDROID_R;
+}
+
+nn::DeviceType Device::getType() const {
+    return kDeviceType;
+}
+
+const std::vector<nn::Extension>& Device::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& Device::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const {
+    return kNumberOfCacheFilesNeeded;
+}
+
+nn::GeneralResult<void> Device::wait() const {
+    const auto ret = kDevice->ping();
+    return hal::utils::handleTransportError(ret);
+}
+
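+// Synchronously queries IDevice::getSupportedOperations_1_3. The HIDL callback runs before the
+// call returns, so capturing `result` by reference and reading it afterwards is safe.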
+nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Model& model) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+
+    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                  << "uninitialized";
+    auto cb = [&result, &model](ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical)
+                     << "IDevice::getSupportedOperations_1_3 failed with " << toString(status);
+        } else if (supportedOperations.size() != model.main.operations.size()) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                     << "IDevice::getSupportedOperations_1_3 returned vector of size "
+                     << supportedOperations.size() << " but expected "
+                     << model.main.operations.size();
+        } else {
+            result = supportedOperations;
+        }
+    };
+
+    const auto ret = kDevice->getSupportedOperations_1_3(hidlModel, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
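+// Dispatches to IDevice::prepareModel_1_3 and blocks on the PreparedModelCallback until the
+// driver reports success or failure.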
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    // Ensure that model is ready for IPC.
+    std::optional<nn::Model> maybeModelInShared;
+    const nn::Model& modelInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
+
+    const auto hidlModel = NN_TRY(convert(modelInShared));
+    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPriority = NN_TRY(convert(priority));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
+    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
+    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
+    const auto hidlToken = token;
+
+    const auto cb = sp<PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret =
+            kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
+                                      hidlModelCache, hidlDataCache, hidlToken, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModel_1_3 failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
+        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    const auto hidlDeadline = NN_TRY(convert(deadline));
+    const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
+    const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
+    const auto hidlToken = token;
+
+    const auto cb = sp<PreparedModelCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
+                                                        hidlToken, cb);
+    const auto status = NN_TRY(hal::utils::handleTransportError(ret));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "prepareModelFromCache_1_3 failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
+nn::GeneralResult<nn::SharedBuffer> Device::allocate(
+        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+        const std::vector<nn::BufferRole>& inputRoles,
+        const std::vector<nn::BufferRole>& outputRoles) const {
+    const auto hidlDesc = NN_TRY(convert(desc));
+    const auto hidlPreparedModels = NN_TRY(convert(preparedModels));
+    const auto hidlInputRoles = NN_TRY(convert(inputRoles));
+    const auto hidlOutputRoles = NN_TRY(convert(outputRoles));
+
+    nn::GeneralResult<nn::SharedBuffer> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                                                 << "uninitialized";
+    auto cb = [&result](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "IDevice::allocate failed with " << toString(status);
+        } else if (buffer == nullptr) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned buffer is nullptr";
+        } else if (token == 0) {
+            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Returned token is invalid (0)";
+        } else {
+            result = convert(
+                    Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token)));
+        }
+    };
+
+    const auto ret =
+            kDevice->allocate(hidlDesc, hidlPreparedModels, hidlInputRoles, hidlOutputRoles, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+
+    return result;
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
new file mode 100644
index 0000000..df9b280
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+namespace {
+
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+convertExecutionResultsHelper(const hidl_vec<V1_2::OutputShape>& outputShapes,
+                              const V1_2::Timing& timing) {
+    return std::make_pair(NN_TRY(validatedConvertToCanonical(outputShapes)),
+                          NN_TRY(validatedConvertToCanonical(timing)));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
+        const hidl_vec<V1_2::OutputShape>& outputShapes, const V1_2::Timing& timing) {
+    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& syncFences) {
+    hidl_vec<hidl_handle> handles(syncFences.size());
+    for (size_t i = 0; i < syncFences.size(); ++i) {
+        handles[i] = NN_TRY(V1_2::utils::convert(syncFences[i].getHandle()));
+    }
+    return handles;
+}
+
+nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
+        const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
+    return std::make_pair(NN_TRY(validatedConvertToCanonical(timingLaunched)),
+                          NN_TRY(validatedConvertToCanonical(timingFenced)));
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+convertExecuteFencedResults(const hidl_handle& syncFence,
+                            const sp<IFencedExecutionCallback>& callback) {
+    auto resultSyncFence = nn::SyncFence::createAsSignaled();
+    if (syncFence.getNativeHandle() != nullptr) {
+        auto nativeHandle = NN_TRY(validatedConvertToCanonical(syncFence));
+        resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure(
+                nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::GENERAL_FAILURE));
+    }
+
+    if (callback == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
+    }
+
+    // Create callback which can be used to retrieve the execution error status and timings.
+    nn::ExecuteFencedInfoCallback resultCallback =
+            [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
+        nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> result =
+                NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+        auto cb = [&result](ErrorStatus status, const V1_2::Timing& timingLaunched,
+                            const V1_2::Timing& timingFenced) {
+            if (status != ErrorStatus::NONE) {
+                const auto canonical = validatedConvertToCanonical(status).value_or(
+                        nn::ErrorStatus::GENERAL_FAILURE);
+                result = NN_ERROR(canonical) << "getExecutionInfo failed with " << toString(status);
+            } else {
+                result = convertFencedExecutionCallbackResults(timingLaunched, timingFenced);
+            }
+        };
+
+        const auto ret = callback->getExecutionInfo(cb);
+        NN_TRY(hal::utils::handleTransportError(ret));
+
+        return result;
+    };
+
+    return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
+        sp<V1_3::IPreparedModel> preparedModel) {
+    if (preparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "V1_3::utils::PreparedModel::create must have non-null preparedModel";
+    }
+
+    auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+                                                 std::move(deathHandler));
+}
+
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_3::IPreparedModel> preparedModel,
+                             hal::utils::DeathHandler deathHandler)
+    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeSynchronously(const Request& request, V1_2::MeasureTiming measure,
+                                    const OptionalTimePoint& deadline,
+                                    const OptionalTimeoutDuration& loopTimeoutDuration) const {
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const auto cb = [&result](ErrorStatus status, const hidl_vec<V1_2::OutputShape>& outputShapes,
+                              const V1_2::Timing& timing) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
+        } else {
+            result = convertExecutionResults(outputShapes, timing);
+        }
+    };
+
+    const auto ret = kPreparedModel->executeSynchronously_1_3(request, measure, deadline,
+                                                              loopTimeoutDuration, cb);
+    NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+
+    return result;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming measure,
+                                     const OptionalTimePoint& deadline,
+                                     const OptionalTimeoutDuration& loopTimeoutDuration) const {
+    const auto cb = sp<ExecutionCallback>::make();
+    const auto scoped = kDeathHandler.protectCallback(cb.get());
+
+    const auto ret =
+            kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
+    const auto status =
+            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::handleTransportError(ret)));
+    if (status != ErrorStatus::NONE) {
+        const auto canonical =
+                validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+        return NN_ERROR(canonical) << "executeAsynchronously failed with " << toString(status);
+    }
+
+    return cb->get();
+}
+
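+// Attempts a synchronous execution first, falling back to the asynchronous execute_1_3 path if
+// the synchronous attempt did not produce a result.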
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+
+    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+    const auto hidlMeasure =
+            NN_TRY(hal::utils::makeExecutionFailure(V1_2::utils::convert(measure)));
+    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto hidlLoopTimeoutDuration =
+            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    const bool preferSynchronous = true;
+
+    // Execute synchronously if allowed.
+    if (preferSynchronous) {
+        result = executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                      hidlLoopTimeoutDuration);
+    }
+
+    // Run asynchronous execution if the execution has not already completed.
+    if (!result.has_value()) {
+        result = executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
+                                       hidlLoopTimeoutDuration);
+    }
+
+    // Flush output buffers if the execution was successful.
+    if (result.has_value()) {
+        NN_TRY(hal::utils::makeExecutionFailure(
+                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
+    }
+
+    return result;
+}
+
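+// Launches a fenced execution and returns the resulting sync fence together with a callback for
+// querying the launch/fence timing once the execution completes.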
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+                             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+                             const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+                             const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    const nn::Request& requestInShared =
+            NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
+
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
+    const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
+    const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+    const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> result =
+            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
+    auto cb = [&result](ErrorStatus status, const hidl_handle& syncFence,
+                        const sp<IFencedExecutionCallback>& callback) {
+        if (status != ErrorStatus::NONE) {
+            const auto canonical =
+                    validatedConvertToCanonical(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
+            result = NN_ERROR(canonical) << "executeFenced failed with " << toString(status);
+        } else {
+            result = convertExecuteFencedResults(syncFence, callback);
+        }
+    };
+
+    const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
+                                                   hidlDeadline, hidlLoopTimeoutDuration,
+                                                   hidlTimeoutDurationAfterFence, cb);
+    NN_TRY(hal::utils::handleTransportError(ret));
+    auto [syncFence, callback] = NN_TRY(std::move(result));
+
+    // If executeFenced required the request memory to be moved into shared memory, block here
+    // until the fenced execution has completed, then flush the shared memory back.
+    if (maybeRequestInShared.has_value()) {
+        const auto state = syncFence.syncWait({});
+        if (state != nn::SyncFence::FenceState::SIGNALED) {
+            return NN_ERROR() << "syncWait failed with " << state;
+        }
+        NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
+    }
+
+    return std::make_pair(std::move(syncFence), std::move(callback));
+}
+
+std::any PreparedModel::getUnderlyingResource() const {
+    sp<V1_3::IPreparedModel> resource = kPreparedModel;
+    return resource;
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Service.cpp b/neuralnetworks/1.3/utils/src/Service.cpp
new file mode 100644
index 0000000..62887fb
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Service.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientDevice.h>
+#include <string>
+#include "Device.h"
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
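+// Creates a canonical nn::SharedDevice for the named HIDL service. The device is wrapped in a
+// hal::utils::ResilientDevice, which uses the factory below to re-fetch the service if the
+// driver process dies. Illustrative usage (service name hypothetical):
+//     nn::GeneralResult<nn::SharedDevice> device = getDevice("example-driver");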
+nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& name) {
+    hal::utils::ResilientDevice::Factory makeDevice =
+            [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
+        auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
+        if (service == nullptr) {
+            return NN_ERROR() << (blocking ? "getService" : "tryGetService") << " returned nullptr";
+        }
+        return Device::create(name, std::move(service));
+    };
+
+    return hal::utils::ResilientDevice::create(std::move(makeDevice));
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index b61dc97..21562cf 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -20,6 +20,7 @@
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal"],
     export_include_dirs: ["include"],
+    cflags: ["-Wthread-safety"],
     static_libs: [
         "neuralnetworks_types",
     ],
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 8c01368..254a3d4 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -19,6 +19,8 @@
 
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <functional>
+#include <optional>
 #include <vector>
 
 // Shorthand
@@ -42,14 +43,16 @@
 bool hasNoPointerData(const nn::Request& request);
 
 // Relocate pointer-based data to shared memory.
-nn::Result<nn::Model> flushDataFromPointerToShared(const nn::Model& model);
-nn::Result<nn::Request> flushDataFromPointerToShared(const nn::Request& request);
+nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
+        const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
 
 // Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
 // `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
 // Request object back to the output pointer-based memory in the original Request object.
-nn::Result<void> unflushDataFromSharedToPointer(const nn::Request& request,
-                                                const nn::Request& requestInShared);
+nn::GeneralResult<void> unflushDataFromSharedToPointer(
+        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);
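+
+// Usage sketch (illustrative): flush an argument Request into shared memory before a
+// driver call, then copy the outputs back afterwards:
+//
+//     std::optional<nn::Request> maybeRequestInShared;
+//     const nn::Request& requestInShared =
+//             NN_TRY(flushDataFromPointerToShared(&request, &maybeRequestInShared));
+//     // ... pass `requestInShared` to the driver ...
+//     NN_TRY(unflushDataFromSharedToPointer(request, maybeRequestInShared));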
 
 std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
                                              const std::vector<nn::Operation>& operations);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
new file mode 100644
index 0000000..e4046b5
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+
+#include <android/hidl/base/1.0/IBase.h>
+#include <hidl/HidlSupport.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+namespace android::hardware::neuralnetworks::utils {
+
+template <typename Type>
+nn::GeneralResult<Type> handleTransportError(const hardware::Return<Type>& ret) {
+    if (ret.isDeadObject()) {
+        return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
+               << "Return<>::isDeadObject returned true: " << ret.description();
+    }
+    if (!ret.isOk()) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Return<>::isOk returned false: " << ret.description();
+    }
+    return ret;
+}
+
+template <>
+inline nn::GeneralResult<void> handleTransportError(const hardware::Return<void>& ret) {
+    if (ret.isDeadObject()) {
+        return NN_ERROR(nn::ErrorStatus::DEAD_OBJECT)
+               << "Return<>::isDeadObject returned true: " << ret.description();
+    }
+    if (!ret.isOk()) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Return<>::isOk returned false: " << ret.description();
+    }
+    return {};
+}
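+
+// Usage sketch: convert a HIDL transport result into an nn::GeneralResult, as done for
+// linkToDeath in ProtectCallback.cpp:
+//
+//     const auto ret = object->linkToDeath(deathRecipient, /*cookie=*/0);
+//     const bool success = NN_TRY(handleTransportError(ret));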
+
+template <typename Type>
+nn::GeneralResult<Type> makeGeneralFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+    if (!result.has_value()) {
+        return nn::error(status) << std::move(result).error();
+    }
+    return std::move(result).value();
+}
+
+template <>
+inline nn::GeneralResult<void> makeGeneralFailure(nn::Result<void> result, nn::ErrorStatus status) {
+    if (!result.has_value()) {
+        return nn::error(status) << std::move(result).error();
+    }
+    return {};
+}
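+
+// Usage sketch (with a hypothetical `convertFromHidl` helper returning
+// nn::Result<nn::Model>): attach an ErrorStatus so the error can propagate through
+// GeneralResult-returning code:
+//
+//     nn::GeneralResult<nn::Model> model =
+//             makeGeneralFailure(convertFromHidl(hidlModel), nn::ErrorStatus::GENERAL_FAILURE);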
+
+template <typename Type>
+nn::ExecutionResult<Type> makeExecutionFailure(nn::Result<Type> result, nn::ErrorStatus status) {
+    if (!result.has_value()) {
+        return nn::error(status) << std::move(result).error();
+    }
+    return std::move(result).value();
+}
+
+template <>
+inline nn::ExecutionResult<void> makeExecutionFailure(nn::Result<void> result,
+                                                      nn::ErrorStatus status) {
+    if (!result.has_value()) {
+        return nn::error(status) << std::move(result).error();
+    }
+    return {};
+}
+
+template <typename Type>
+nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
+    if (!result.has_value()) {
+        const auto [message, status] = std::move(result).error();
+        return nn::error(status) << message;
+    }
+    return std::move(result).value();
+}
+
+template <>
+inline nn::ExecutionResult<void> makeExecutionFailure(nn::GeneralResult<void> result) {
+    if (!result.has_value()) {
+        const auto [message, status] = std::move(result).error();
+        return nn::error(status) << message;
+    }
+    return {};
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
new file mode 100644
index 0000000..85bd613
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
+
+#include <android-base/scopeguard.h>
+#include <android-base/thread_annotations.h>
+#include <android/hidl/base/1.0/IBase.h>
+#include <hidl/HidlSupport.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <mutex>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class IProtectedCallback {
+  public:
+    /**
+     * Marks this object as a dead object.
+     */
+    virtual void notifyAsDeadObject() = 0;
+
+    // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers.
+    // E.g., std::unique_ptr<IProtectedCallback>.
+    virtual ~IProtectedCallback() = default;
+
+  protected:
+    // Protect the non-destructor special member functions to prevent object slicing.
+    IProtectedCallback() = default;
+    IProtectedCallback(const IProtectedCallback&) = default;
+    IProtectedCallback(IProtectedCallback&&) noexcept = default;
+    IProtectedCallback& operator=(const IProtectedCallback&) = default;
+    IProtectedCallback& operator=(IProtectedCallback&&) noexcept = default;
+};
+
+// Thread safe class
+class DeathRecipient final : public hidl_death_recipient {
+  public:
+    void serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) override;
+    // Precondition: `killable` must be non-null.
+    void add(IProtectedCallback* killable) const;
+    // Precondition: `killable` must be non-null.
+    void remove(IProtectedCallback* killable) const;
+
+  private:
+    mutable std::mutex mMutex;
+    mutable std::vector<IProtectedCallback*> mObjects GUARDED_BY(mMutex);
+};
+
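+// Usage sketch (illustrative; `callback` is any object implementing IProtectedCallback):
+//
+//     auto deathHandler = NN_TRY(DeathHandler::create(base));
+//     const auto guard = deathHandler.protectCallback(callback.get());
+//     // If `base`'s process dies while `guard` is alive, callback->notifyAsDeadObject()
+//     // is invoked; destroying `guard` unregisters the callback.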
+class DeathHandler final {
+  public:
+    static nn::GeneralResult<DeathHandler> create(sp<hidl::base::V1_0::IBase> object);
+
+    DeathHandler(const DeathHandler&) = delete;
+    DeathHandler(DeathHandler&&) noexcept = default;
+    DeathHandler& operator=(const DeathHandler&) = delete;
+    DeathHandler& operator=(DeathHandler&&) noexcept = delete;
+    ~DeathHandler();
+
+    using Cleanup = std::function<void()>;
+    // Precondition: `killable` must be non-null.
+    [[nodiscard]] base::ScopeGuard<Cleanup> protectCallback(IProtectedCallback* killable) const;
+
+  private:
+    DeathHandler(sp<hidl::base::V1_0::IBase> object, sp<DeathRecipient> deathRecipient);
+
+    sp<hidl::base::V1_0::IBase> kObject;
+    sp<DeathRecipient> kDeathRecipient;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
new file mode 100644
index 0000000..996ec1e
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBuffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class ResilientBuffer final : public nn::IBuffer {
+    struct PrivateConstructorTag {};
+
+  public:
+    using Factory = std::function<nn::GeneralResult<nn::SharedBuffer>(bool blocking)>;
+
+    static nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> create(Factory makeBuffer);
+
+    explicit ResilientBuffer(PrivateConstructorTag tag, Factory makeBuffer,
+                             nn::SharedBuffer buffer);
+
+    nn::SharedBuffer getBuffer() const;
+    nn::SharedBuffer recover(const nn::IBuffer* failingBuffer, bool blocking) const;
+
+    nn::Request::MemoryDomainToken getToken() const override;
+
+    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+
+    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+                                     const nn::Dimensions& dimensions) const override;
+
+  private:
+    const Factory kMakeBuffer;
+    mutable std::mutex mMutex;
+    mutable nn::SharedBuffer mBuffer GUARDED_BY(mMutex);
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_BUFFER_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
new file mode 100644
index 0000000..4f1afb9
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
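+// Illustrative construction sketch (mirroring V1_3::utils::getDevice in this change):
+// the factory re-fetches the HAL service so the device can be remade after a transport
+// death:
+//
+//     ResilientDevice::Factory makeDevice =
+//             [name](bool blocking) -> nn::GeneralResult<nn::SharedDevice> {
+//         auto service = blocking ? IDevice::getService(name) : IDevice::tryGetService(name);
+//         if (service == nullptr) {
+//             return NN_ERROR() << "getService returned nullptr";
+//         }
+//         return Device::create(name, std::move(service));
+//     };
+//     auto device = NN_TRY(ResilientDevice::create(std::move(makeDevice)));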
+class ResilientDevice final : public nn::IDevice,
+                              public std::enable_shared_from_this<ResilientDevice> {
+    struct PrivateConstructorTag {};
+
+  public:
+    using Factory = std::function<nn::GeneralResult<nn::SharedDevice>(bool blocking)>;
+
+    static nn::GeneralResult<std::shared_ptr<const ResilientDevice>> create(Factory makeDevice);
+
+    explicit ResilientDevice(PrivateConstructorTag tag, Factory makeDevice, std::string name,
+                             std::string versionString, std::vector<nn::Extension> extensions,
+                             nn::Capabilities capabilities, nn::SharedDevice device);
+
+    nn::SharedDevice getDevice() const;
+    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const;
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
+            bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
+            nn::Priority priority, nn::OptionalTimePoint deadline,
+            const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
+            bool blocking, nn::OptionalTimePoint deadline,
+            const std::vector<nn::NativeHandle>& modelCache,
+            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+    nn::GeneralResult<nn::SharedBuffer> allocateInternal(
+            bool blocking, const nn::BufferDesc& desc,
+            const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const;
+
+    const Factory kMakeDevice;
+    const std::string kName;
+    const std::string kVersionString;
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    mutable std::mutex mMutex;
+    mutable nn::SharedDevice mDevice GUARDED_BY(mMutex);
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_DEVICE_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
new file mode 100644
index 0000000..c2940d1
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class ResilientPreparedModel final : public nn::IPreparedModel {
+    struct PrivateConstructorTag {};
+
+  public:
+    using Factory = std::function<nn::GeneralResult<nn::SharedPreparedModel>(bool blocking)>;
+
+    static nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> create(
+            Factory makePreparedModel);
+
+    explicit ResilientPreparedModel(PrivateConstructorTag tag, Factory makePreparedModel,
+                                    nn::SharedPreparedModel preparedModel);
+
+    nn::SharedPreparedModel getPreparedModel() const;
+    nn::SharedPreparedModel recover(const nn::IPreparedModel* failingPreparedModel,
+                                    bool blocking) const;
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+
+  private:
+    const Factory kMakePreparedModel;
+    mutable std::mutex mMutex;
+    mutable nn::SharedPreparedModel mPreparedModel GUARDED_BY(mMutex);
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_PREPARED_MODEL_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
new file mode 100644
index 0000000..7103c6b
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/TransferValue.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
+
+#include <android-base/thread_annotations.h>
+
+#include <condition_variable>
+#include <mutex>
+#include <optional>
+
+namespace android::hardware::neuralnetworks::utils {
+
+// This class is thread safe.
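+//
+// Minimal usage sketch (illustrative): one thread publishes a value that another
+// thread blocks on:
+//
+//     TransferValue<int> transfer;
+//     std::thread producer([&transfer] { transfer.put(42); });
+//     const int value = transfer.take();  // Blocks until put() has been called.
+//     producer.join();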
+template <typename Type>
+class TransferValue final {
+  public:
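+    // Stores `object` for retrieval by `take`. If a value is already present, the new
+    // value is discarded.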
+    void put(Type object) const;
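+    // Blocks until a value is available, then consumes and returns it.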
+    [[nodiscard]] Type take() const;
+
+  private:
+    mutable std::mutex mMutex;
+    mutable std::condition_variable mCondition;
+    mutable std::optional<Type> mObject GUARDED_BY(mMutex);
+};
+
+// template implementation
+
+template <typename Type>
+void TransferValue<Type>::put(Type object) const {
+    {
+        std::lock_guard guard(mMutex);
+        // Immediately return if value already exists.
+        if (mObject.has_value()) return;
+        mObject.emplace(std::move(object));
+    }
+    mCondition.notify_all();
+}
+
+template <typename Type>
+Type TransferValue<Type>::take() const {
+    std::unique_lock lock(mMutex);
+    base::ScopedLockAssertion lockAssertion(mMutex);
+    mCondition.wait(lock, [this]() REQUIRES(mMutex) { return mObject.has_value(); });
+    std::optional<Type> object;
+    std::swap(object, mObject);
+    return std::move(object).value();
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_TRANSFER_VALUE_H
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 667189b..2565972 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -16,6 +16,8 @@
 
 #include "CommonUtils.h"
 
+#include "HandleError.h"
+
 #include <android-base/logging.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
@@ -25,6 +27,7 @@
 
 #include <algorithm>
 #include <any>
+#include <functional>
 #include <optional>
 #include <variant>
 #include <vector>
@@ -111,8 +114,18 @@
     return hasNoPointerData(request.inputs) && hasNoPointerData(request.outputs);
 }
 
-nn::Result<nn::Model> flushDataFromPointerToShared(const nn::Model& model) {
-    auto modelInShared = model;
+nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
+        const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut) {
+    CHECK(model != nullptr);
+    CHECK(maybeModelInSharedOut != nullptr);
+
+    if (hasNoPointerData(*model)) {
+        return *model;
+    }
+
+    // Make a copy of the model in order to make modifications. The modified model is returned to
+    // the caller through `maybeModelInSharedOut` if the function succeeds.
+    nn::Model modelInShared = *model;
 
     nn::ConstantMemoryBuilder memoryBuilder(modelInShared.pools.size());
     copyPointersToSharedMemory(&modelInShared.main, &memoryBuilder);
@@ -126,11 +139,22 @@
         modelInShared.pools.push_back(std::move(memory));
     }
 
-    return modelInShared;
+    *maybeModelInSharedOut = modelInShared;
+    return **maybeModelInSharedOut;
 }
 
-nn::Result<nn::Request> flushDataFromPointerToShared(const nn::Request& request) {
-    auto requestInShared = request;
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+    CHECK(request != nullptr);
+    CHECK(maybeRequestInSharedOut != nullptr);
+
+    if (hasNoPointerData(*request)) {
+        return *request;
+    }
+
+    // Make a copy of the request in order to make modifications. The modified request is returned
+    // to the caller through `maybeRequestInSharedOut` if the function succeeds.
+    nn::Request requestInShared = *request;
 
     // Change input pointers to shared memory.
     nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
@@ -171,15 +195,17 @@
         requestInShared.pools.push_back(std::move(memory));
     }
 
-    return requestInShared;
+    *maybeRequestInSharedOut = requestInShared;
+    return **maybeRequestInSharedOut;
 }
 
-nn::Result<void> unflushDataFromSharedToPointer(const nn::Request& request,
-                                                const nn::Request& requestInShared) {
-    if (requestInShared.pools.empty() ||
-        !std::holds_alternative<nn::Memory>(requestInShared.pools.back())) {
+nn::GeneralResult<void> unflushDataFromSharedToPointer(
+        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
+    if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
+        !std::holds_alternative<nn::Memory>(maybeRequestInShared->pools.back())) {
         return {};
     }
+    const auto& requestInShared = *maybeRequestInShared;
 
     // Map the memory.
     const auto& outputMemory = std::get<nn::Memory>(requestInShared.pools.back());
diff --git a/neuralnetworks/utils/common/src/ProtectCallback.cpp b/neuralnetworks/utils/common/src/ProtectCallback.cpp
new file mode 100644
index 0000000..1d9a307
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ProtectCallback.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ProtectCallback.h"
+
+#include <android-base/logging.h>
+#include <android-base/scopeguard.h>
+#include <android-base/thread_annotations.h>
+#include <android/hidl/base/1.0/IBase.h>
+#include <hidl/HidlSupport.h>
+#include <nnapi/Result.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <algorithm>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+void DeathRecipient::serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) {
+    std::lock_guard guard(mMutex);
+    std::for_each(mObjects.begin(), mObjects.end(),
+                  [](IProtectedCallback* killable) { killable->notifyAsDeadObject(); });
+}
+
+void DeathRecipient::add(IProtectedCallback* killable) const {
+    CHECK(killable != nullptr);
+    std::lock_guard guard(mMutex);
+    mObjects.push_back(killable);
+}
+
+void DeathRecipient::remove(IProtectedCallback* killable) const {
+    CHECK(killable != nullptr);
+    std::lock_guard guard(mMutex);
+    const auto removedIter = std::remove(mObjects.begin(), mObjects.end(), killable);
+    mObjects.erase(removedIter, mObjects.end());
+}
+
+nn::GeneralResult<DeathHandler> DeathHandler::create(sp<hidl::base::V1_0::IBase> object) {
+    if (object == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::DeathHandler::create must have non-null object";
+    }
+    auto deathRecipient = sp<DeathRecipient>::make();
+
+    const auto ret = object->linkToDeath(deathRecipient, /*cookie=*/0);
+    const bool success = NN_TRY(handleTransportError(ret));
+    if (!success) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "IBase::linkToDeath returned false";
+    }
+
+    return DeathHandler(std::move(object), std::move(deathRecipient));
+}
+
+DeathHandler::DeathHandler(sp<hidl::base::V1_0::IBase> object, sp<DeathRecipient> deathRecipient)
+    : kObject(std::move(object)), kDeathRecipient(std::move(deathRecipient)) {
+    CHECK(kObject != nullptr);
+    CHECK(kDeathRecipient != nullptr);
+}
+
+DeathHandler::~DeathHandler() {
+    if (kObject != nullptr && kDeathRecipient != nullptr) {
+        const auto ret = kObject->unlinkToDeath(kDeathRecipient);
+        const auto maybeSuccess = handleTransportError(ret);
+        if (!maybeSuccess.has_value()) {
+            LOG(ERROR) << maybeSuccess.error().message;
+        } else if (!maybeSuccess.value()) {
+            LOG(ERROR) << "IBase::unlinkToDeath returned false";
+        }
+    }
+}
+
+[[nodiscard]] base::ScopeGuard<DeathHandler::Cleanup> DeathHandler::protectCallback(
+        IProtectedCallback* killable) const {
+    CHECK(killable != nullptr);
+    kDeathRecipient->add(killable);
+    return base::make_scope_guard(
+            [deathRecipient = kDeathRecipient, killable] { deathRecipient->remove(killable); });
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientBuffer.cpp b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
new file mode 100644
index 0000000..984295b
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientBuffer.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientBuffer.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::GeneralResult<std::shared_ptr<const ResilientBuffer>> ResilientBuffer::create(
+        Factory makeBuffer) {
+    if (makeBuffer == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientBuffer::create must have non-empty makeBuffer";
+    }
+    auto buffer = NN_TRY(makeBuffer(/*blocking=*/true));
+    CHECK(buffer != nullptr);
+    return std::make_shared<const ResilientBuffer>(PrivateConstructorTag{}, std::move(makeBuffer),
+                                                   std::move(buffer));
+}
+
+ResilientBuffer::ResilientBuffer(PrivateConstructorTag /*tag*/, Factory makeBuffer,
+                                 nn::SharedBuffer buffer)
+    : kMakeBuffer(std::move(makeBuffer)), mBuffer(std::move(buffer)) {
+    CHECK(kMakeBuffer != nullptr);
+    CHECK(mBuffer != nullptr);
+}
+
+nn::SharedBuffer ResilientBuffer::getBuffer() const {
+    std::lock_guard guard(mMutex);
+    return mBuffer;
+}
+nn::SharedBuffer ResilientBuffer::recover(const nn::IBuffer* /*failingBuffer*/,
+                                          bool /*blocking*/) const {
+    std::lock_guard guard(mMutex);
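+    // Recovery is currently a no-op for buffers: the existing buffer is returned
+    // without attempting to remake it.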
+    return mBuffer;
+}
+
+nn::Request::MemoryDomainToken ResilientBuffer::getToken() const {
+    return getBuffer()->getToken();
+}
+
+nn::GeneralResult<void> ResilientBuffer::copyTo(const nn::Memory& dst) const {
+    return getBuffer()->copyTo(dst);
+}
+
+nn::GeneralResult<void> ResilientBuffer::copyFrom(const nn::Memory& src,
+                                                  const nn::Dimensions& dimensions) const {
+    return getBuffer()->copyFrom(src, dimensions);
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
new file mode 100644
index 0000000..95662d9
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientDevice.h"
+
+#include "ResilientBuffer.h"
+#include "ResilientPreparedModel.h"
+
+#include <android-base/logging.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
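+// Invokes `fn` on the ResilientDevice's current underlying device. If the call fails
+// with DEAD_OBJECT, recovers the device once (per `blocking`) and retries the call.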
+template <typename FnType>
+auto protect(const ResilientDevice& resilientDevice, const FnType& fn, bool blocking)
+        -> decltype(fn(*resilientDevice.getDevice())) {
+    auto device = resilientDevice.getDevice();
+    auto result = fn(*device);
+
+    // Immediately return if device is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+        return result;
+    }
+
+    device = resilientDevice.recover(device.get(), blocking);
+    return fn(*device);
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientDevice>> ResilientDevice::create(
+        Factory makeDevice) {
+    if (makeDevice == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientDevice::create must have non-empty makeDevice";
+    }
+    auto device = NN_TRY(makeDevice(/*blocking=*/true));
+    CHECK(device != nullptr);
+
+    auto name = device->getName();
+    auto versionString = device->getVersionString();
+    auto extensions = device->getSupportedExtensions();
+    auto capabilities = device->getCapabilities();
+
+    return std::make_shared<ResilientDevice>(PrivateConstructorTag{}, std::move(makeDevice),
+                                             std::move(name), std::move(versionString),
+                                             std::move(extensions), std::move(capabilities),
+                                             std::move(device));
+}
+
+ResilientDevice::ResilientDevice(PrivateConstructorTag /*tag*/, Factory makeDevice,
+                                 std::string name, std::string versionString,
+                                 std::vector<nn::Extension> extensions,
+                                 nn::Capabilities capabilities, nn::SharedDevice device)
+    : kMakeDevice(std::move(makeDevice)),
+      kName(std::move(name)),
+      kVersionString(std::move(versionString)),
+      kExtensions(std::move(extensions)),
+      kCapabilities(std::move(capabilities)),
+      mDevice(std::move(device)) {
+    CHECK(kMakeDevice != nullptr);
+    CHECK(mDevice != nullptr);
+}
+
+nn::SharedDevice ResilientDevice::getDevice() const {
+    std::lock_guard guard(mMutex);
+    return mDevice;
+}
+
+nn::SharedDevice ResilientDevice::recover(const nn::IDevice* failingDevice, bool blocking) const {
+    std::lock_guard guard(mMutex);
+
+    // Another caller updated the failing device.
+    if (mDevice.get() != failingDevice) {
+        return mDevice;
+    }
+
+    auto maybeDevice = kMakeDevice(blocking);
+    if (!maybeDevice.has_value()) {
+        const auto& [message, code] = maybeDevice.error();
+        LOG(ERROR) << "Failed to recover dead device with error " << code << ": " << message;
+        return mDevice;
+    }
+    auto device = std::move(maybeDevice).value();
+
+    // TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an
+    // invalid/"null" IDevice object that always fails.
+    CHECK_EQ(kName, device->getName());
+    CHECK_EQ(kVersionString, device->getVersionString());
+    CHECK(kExtensions == device->getSupportedExtensions());
+    CHECK_EQ(kCapabilities, device->getCapabilities());
+
+    mDevice = std::move(device);
+    return mDevice;
+}
+
+const std::string& ResilientDevice::getName() const {
+    return kName;
+}
+
+const std::string& ResilientDevice::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version ResilientDevice::getFeatureLevel() const {
+    return getDevice()->getFeatureLevel();
+}
+
+nn::DeviceType ResilientDevice::getType() const {
+    return getDevice()->getType();
+}
+
+const std::vector<nn::Extension>& ResilientDevice::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& ResilientDevice::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> ResilientDevice::getNumberOfCacheFilesNeeded() const {
+    return getDevice()->getNumberOfCacheFilesNeeded();
+}
+
+nn::GeneralResult<void> ResilientDevice::wait() const {
+    const auto fn = [](const nn::IDevice& device) { return device.wait(); };
+    return protect(*this, fn, /*blocking=*/true);
+}
+
+nn::GeneralResult<std::vector<bool>> ResilientDevice::getSupportedOperations(
+        const nn::Model& model) const {
+    const auto fn = [&model](const nn::IDevice& device) {
+        return device.getSupportedOperations(model);
+    };
+    return protect(*this, fn, /*blocking=*/false);
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
+        const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    auto self = shared_from_this();
+    ResilientPreparedModel::Factory makePreparedModel =
+            [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
+             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
+        return device->prepareModelInternal(blocking, model, preference, priority, deadline,
+                                            modelCache, dataCache, token);
+    };
+    return ResilientPreparedModel::create(std::move(makePreparedModel));
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
+        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    auto self = shared_from_this();
+    ResilientPreparedModel::Factory makePreparedModel =
+            [device = std::move(self), deadline, modelCache, dataCache,
+             token](bool blocking) -> nn::GeneralResult<nn::SharedPreparedModel> {
+        return device->prepareModelFromCacheInternal(blocking, deadline, modelCache, dataCache,
+                                                     token);
+    };
+    return ResilientPreparedModel::create(std::move(makePreparedModel));
+}
+
+nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocate(
+        const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+        const std::vector<nn::BufferRole>& inputRoles,
+        const std::vector<nn::BufferRole>& outputRoles) const {
+    auto self = shared_from_this();
+    ResilientBuffer::Factory makeBuffer =
+            [device = std::move(self), desc, preparedModels, inputRoles,
+             outputRoles](bool blocking) -> nn::GeneralResult<nn::SharedBuffer> {
+        return device->allocateInternal(blocking, desc, preparedModels, inputRoles, outputRoles);
+    };
+    return ResilientBuffer::create(std::move(makeBuffer));
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
+        bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
+        nn::Priority priority, nn::OptionalTimePoint deadline,
+        const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
+                     token](const nn::IDevice& device) {
+        return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
+                                   token);
+    };
+    return protect(*this, fn, blocking);
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
+        bool blocking, nn::OptionalTimePoint deadline,
+        const std::vector<nn::NativeHandle>& modelCache,
+        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+    const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
+        return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
+    };
+    return protect(*this, fn, blocking);
+}
+
+nn::GeneralResult<nn::SharedBuffer> ResilientDevice::allocateInternal(
+        bool blocking, const nn::BufferDesc& desc,
+        const std::vector<nn::SharedPreparedModel>& preparedModels,
+        const std::vector<nn::BufferRole>& inputRoles,
+        const std::vector<nn::BufferRole>& outputRoles) const {
+    const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
+        return device.allocate(desc, preparedModels, inputRoles, outputRoles);
+    };
+    return protect(*this, fn, blocking);
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
new file mode 100644
index 0000000..1c9ecba
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientPreparedModel.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::GeneralResult<std::shared_ptr<const ResilientPreparedModel>> ResilientPreparedModel::create(
+        Factory makePreparedModel) {
+    if (makePreparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientPreparedModel::create must have non-empty makePreparedModel";
+    }
+    auto preparedModel = NN_TRY(makePreparedModel(/*blocking=*/true));
+    CHECK(preparedModel != nullptr);
+    return std::make_shared<ResilientPreparedModel>(
+            PrivateConstructorTag{}, std::move(makePreparedModel), std::move(preparedModel));
+}
+
+ResilientPreparedModel::ResilientPreparedModel(PrivateConstructorTag /*tag*/,
+                                               Factory makePreparedModel,
+                                               nn::SharedPreparedModel preparedModel)
+    : kMakePreparedModel(std::move(makePreparedModel)), mPreparedModel(std::move(preparedModel)) {
+    CHECK(kMakePreparedModel != nullptr);
+    CHECK(mPreparedModel != nullptr);
+}
+
+nn::SharedPreparedModel ResilientPreparedModel::getPreparedModel() const {
+    std::lock_guard guard(mMutex);
+    return mPreparedModel;
+}
+
+nn::SharedPreparedModel ResilientPreparedModel::recover(
+        const nn::IPreparedModel* /*failingPreparedModel*/, bool /*blocking*/) const {
+    std::lock_guard guard(mMutex);
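+    // Recovery is currently a no-op for prepared models: the existing prepared model
+    // is returned without attempting to remake it.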
+    return mPreparedModel;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
+                                const nn::OptionalTimePoint& deadline,
+                                const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+    return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientPreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+        const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+    return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
+                                             loopTimeoutDuration, timeoutDurationAfterFence);
+}
+
+std::any ResilientPreparedModel::getUnderlyingResource() const {
+    return getPreparedModel()->getUnderlyingResource();
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
new file mode 100644
index 0000000..87d27c7
--- /dev/null
+++ b/neuralnetworks/utils/service/Android.bp
@@ -0,0 +1,36 @@
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_static {
+    name: "neuralnetworks_utils_hal_service",
+    defaults: ["neuralnetworks_utils_defaults"],
+    srcs: ["src/*"],
+    local_include_dirs: ["include/nnapi/hal"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "neuralnetworks_types",
+        "neuralnetworks_utils_hal_1_0",
+        "neuralnetworks_utils_hal_1_1",
+        "neuralnetworks_utils_hal_1_2",
+        "neuralnetworks_utils_hal_1_3",
+    ],
+    shared_libs: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.2",
+        "android.hardware.neuralnetworks@1.3",
+    ],
+}
diff --git a/neuralnetworks/utils/service/include/nnapi/hal/Service.h b/neuralnetworks/utils/service/include/nnapi/hal/Service.h
new file mode 100644
index 0000000..e339627
--- /dev/null
+++ b/neuralnetworks/utils/service/include/nnapi/hal/Service.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
+
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+#include <memory>
+#include <vector>
+
+namespace android::nn::hal {
+
+std::vector<nn::SharedDevice> getDevices();
+
+}  // namespace android::nn::hal
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_SERVICE_H
diff --git a/neuralnetworks/utils/service/src/Service.cpp b/neuralnetworks/utils/service/src/Service.cpp
new file mode 100644
index 0000000..a59549d
--- /dev/null
+++ b/neuralnetworks/utils/service/src/Service.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Service.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Service.h>
+#include <nnapi/hal/1.1/Service.h>
+#include <nnapi/hal/1.2/Service.h>
+#include <nnapi/hal/1.3/Service.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <unordered_set>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::service {
+namespace {
+
+using getDeviceFn = std::add_pointer_t<nn::GeneralResult<nn::SharedDevice>(const std::string&)>;
+
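+// Collects a device for every HAL instance of `descriptor`, skipping names already in
+// `registeredDevices` (i.e., instances already found under a newer interface version).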
+void getDevicesForVersion(const std::string& descriptor, getDeviceFn getDevice,
+                          std::vector<nn::SharedDevice>* devices,
+                          std::unordered_set<std::string>* registeredDevices) {
+    CHECK(devices != nullptr);
+    CHECK(registeredDevices != nullptr);
+
+    const auto names = getAllHalInstanceNames(descriptor);
+    for (const auto& name : names) {
+        if (const auto [it, unregistered] = registeredDevices->insert(name); unregistered) {
+            auto maybeDevice = getDevice(name);
+            if (maybeDevice.has_value()) {
+                auto device = std::move(maybeDevice).value();
+                CHECK(device != nullptr);
+                devices->push_back(std::move(device));
+            } else {
+                LOG(ERROR) << "getDevice(" << name << ") failed with " << maybeDevice.error().code
+                           << ": " << maybeDevice.error().message;
+            }
+        }
+    }
+}
+
+std::vector<nn::SharedDevice> getDevices() {
+    std::vector<nn::SharedDevice> devices;
+    std::unordered_set<std::string> registeredDevices;
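+
+    // Query the newest HAL version first so that a service registered under multiple
+    // versions is added only once, through its most recent interface.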
+
+    getDevicesForVersion(V1_3::IDevice::descriptor, &V1_3::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_2::IDevice::descriptor, &V1_2::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_1::IDevice::descriptor, &V1_1::utils::getDevice, &devices,
+                         &registeredDevices);
+    getDevicesForVersion(V1_0::IDevice::descriptor, &V1_0::utils::getDevice, &devices,
+                         &registeredDevices);
+
+    return devices;
+}
+
+}  // namespace
+}  // namespace android::hardware::neuralnetworks::service
+
+namespace android::nn::hal {
+
+std::vector<nn::SharedDevice> getDevices() {
+    return hardware::neuralnetworks::service::getDevices();
+}
+
+}  // namespace android::nn::hal
diff --git a/radio/1.6/IRadio.hal b/radio/1.6/IRadio.hal
index 747b2f2..a093dee 100644
--- a/radio/1.6/IRadio.hal
+++ b/radio/1.6/IRadio.hal
@@ -231,9 +231,9 @@
      * 3. Disable NR dual connectivity and force secondary cell to be released
      * {NrDualConnectivityState:DISABLE_IMMEDIATE}
 
-     * Response callback is IRadioResponse.enableNRDualConnectivityResponse()
+     * Response callback is IRadioResponse.setNRDualConnectivityStateResponse()
      */
-    oneway enableNrDualConnectivity(int32_t serial,
+    oneway setNrDualConnectivityState(int32_t serial,
             NrDualConnectivityState nrDualConnectivityState);
 
     /**
diff --git a/radio/1.6/IRadioIndication.hal b/radio/1.6/IRadioIndication.hal
index c135090..f195c0e 100644
--- a/radio/1.6/IRadioIndication.hal
+++ b/radio/1.6/IRadioIndication.hal
@@ -18,6 +18,8 @@
 
 import @1.0::RadioIndicationType;
 import @1.5::IRadioIndication;
+import @1.6::SetupDataCallResult;
+import @1.6::LinkCapacityEstimate;
 
 /**
  * Interface declaring unsolicited radio indications.
@@ -53,4 +55,16 @@
      * @param apn Apn to unthrottle
      */
     oneway unthrottleApn(RadioIndicationType type, string apn);
+
+    /**
+     * Indicates current link capacity estimate.
+     * This replaces @1.2::IRadioIndication.currentLinkCapacityEstimate().
+     * This indication is sent whenever the reporting criteria, as set by
+     * @1.2::IRadio.setLinkCapacityReportingCriteria(), are met and the indication is not
+     * suppressed by @1.2::IRadio.setIndicationFilter_1_2().
+     *
+     * @param type Type of radio indication
+     * @param lce LinkCapacityEstimate
+     */
+    oneway currentLinkCapacityEstimate_1_6(RadioIndicationType type, LinkCapacityEstimate lce);
 };
diff --git a/radio/1.6/IRadioResponse.hal b/radio/1.6/IRadioResponse.hal
index 523185e..0379e00 100644
--- a/radio/1.6/IRadioResponse.hal
+++ b/radio/1.6/IRadioResponse.hal
@@ -225,7 +225,7 @@
      *   RadioError:RADIO_NOT_AVAILABLE
      *   RadioError:INTERNAL_ERR
      */
-    oneway enableNrDualConnectivityResponse(RadioResponseInfo info);
+    oneway setNrDualConnectivityStateResponse(RadioResponseInfo info);
 
     /**
      * @param info Response info struct containing response type, serial no. and error
diff --git a/radio/1.6/types.hal b/radio/1.6/types.hal
index a98cd1f..32da295 100644
--- a/radio/1.6/types.hal
+++ b/radio/1.6/types.hal
@@ -284,3 +284,47 @@
      */
     DISABLE_IMMEDIATE = 3,
 };
+
+/**
+ * Overwritten from @1.2::LinkCapacityEstimate to update LinkCapacityEstimate to 1.6 version.
+ */
+struct LinkCapacityEstimate {
+
+   /**
+    * Estimated downlink capacity in kbps. In case of a dual connected network,
+    * this includes capacity of both primary and secondary. This bandwidth estimate shall be
+    * the estimated maximum sustainable link bandwidth (as would be measured
+    * at the Upper PDCP or SNDCP SAP). If the DL Aggregate Maximum Bit Rate is known,
+    * this value shall not exceed the DL-AMBR for the Internet PDN connection.
+    * This must be filled with -1 if network is not connected.
+    */
+   uint32_t downlinkCapacityKbps;
+
+   /**
+    * Estimated uplink capacity in kbps. In case of a dual connected network,
+    * this includes capacity of both primary and secondary. This bandwidth estimate shall be the
+    * estimated maximum sustainable link bandwidth (as would be measured at the
+    * Upper PDCP or SNDCP SAP). If the UL Aggregate Maximum Bit Rate is known,
+    * this value shall not exceed the UL-AMBR for the Internet PDN connection.
+    * This must be filled with 0 if the network is not connected.
+    */
+   uint32_t uplinkCapacityKbps;
+
+   /**
+    * Estimated downlink capacity of the secondary carrier in a dual connected NR mode
+    * in kbps. This bandwidth estimate shall be the estimated maximum sustainable link
+    * bandwidth (as would be measured at the Upper PDCP or SNDCP SAP). This is valid
+    * only if the device is connected to both primary and secondary in dual connected
+    * mode. This must be filled with 0 if the secondary is not connected.
+    */
+   uint32_t secondaryDownlinkCapacityKbps;
+
+   /**
+    * Estimated uplink capacity of the secondary carrier in a dual connected NR mode in
+    * kbps. This bandwidth estimate shall be the estimated maximum sustainable link
+    * bandwidth (as would be measured at the Upper PDCP or SNDCP SAP). This is valid
+    * only if the device is connected to both primary and secondary in dual connected
+    * mode. This must be filled with 0 if the secondary is not connected.
+    */
+   uint32_t secondaryUplinkCapacityKbps;
+};
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index 01236c6..6547611 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -260,13 +260,13 @@
 }
 
 /*
- * Test IRadio.enableNrDualConnectivity() for the response returned.
+ * Test IRadio.setNrDualConnectivityState() for the response returned.
  */
-TEST_P(RadioHidlTest_v1_6, enableNrDualConnectivity) {
+TEST_P(RadioHidlTest_v1_6, setNrDualConnectivityState) {
     serial = GetRandomSerialNumber();
 
     Return<void> res =
-            radio_v1_6->enableNrDualConnectivity(serial, NrDualConnectivityState::DISABLE);
+            radio_v1_6->setNrDualConnectivityState(serial, NrDualConnectivityState::DISABLE);
     ASSERT_OK(res);
 
     EXPECT_EQ(std::cv_status::no_timeout, wait());
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
index f061c60..6189be6 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
+++ b/radio/1.6/vts/functional/radio_hidl_hal_utils_v1_6.h
@@ -773,7 +773,7 @@
             const ::android::hardware::radio::V1_6::RadioResponseInfo& info,
             const SendSmsResult& sms);
 
-    Return<void> enableNrDualConnectivityResponse(
+    Return<void> setNrDualConnectivityStateResponse(
             const ::android::hardware::radio::V1_6::RadioResponseInfo& info);
     Return<void> isNrDualConnectivityEnabledResponse(
             const ::android::hardware::radio::V1_6::RadioResponseInfo& info, bool isEnabled);
@@ -865,6 +865,10 @@
             RadioIndicationType type,
             const ::android::hardware::radio::V1_2::LinkCapacityEstimate& lce);
 
+    Return<void> currentLinkCapacityEstimate_1_6(
+            RadioIndicationType type,
+            const ::android::hardware::radio::V1_6::LinkCapacityEstimate& lce);
+
     Return<void> currentPhysicalChannelConfigs(
             RadioIndicationType type,
             const ::android::hardware::hidl_vec<
diff --git a/radio/1.6/vts/functional/radio_indication.cpp b/radio/1.6/vts/functional/radio_indication.cpp
index b353c82..afde291 100644
--- a/radio/1.6/vts/functional/radio_indication.cpp
+++ b/radio/1.6/vts/functional/radio_indication.cpp
@@ -124,6 +124,12 @@
     return Void();
 }
 
+Return<void> RadioIndication_v1_6::currentLinkCapacityEstimate_1_6(
+        RadioIndicationType /*type*/,
+        const ::android::hardware::radio::V1_6::LinkCapacityEstimate& /*lce*/) {
+    return Void();
+}
+
 Return<void> RadioIndication_v1_6::currentPhysicalChannelConfigs(
         RadioIndicationType /*type*/,
         const ::android::hardware::hidl_vec<
diff --git a/radio/1.6/vts/functional/radio_response.cpp b/radio/1.6/vts/functional/radio_response.cpp
index dc4f57d..18cda6a 100644
--- a/radio/1.6/vts/functional/radio_response.cpp
+++ b/radio/1.6/vts/functional/radio_response.cpp
@@ -1055,7 +1055,7 @@
     parent_v1_6.notify(info.serial);
     return Void();
 }
-Return<void> RadioResponse_v1_6::enableNrDualConnectivityResponse(
+Return<void> RadioResponse_v1_6::setNrDualConnectivityStateResponse(
         const ::android::hardware::radio::V1_6::RadioResponseInfo& info) {
     rspInfo = info;
     parent_v1_6.notify(info.serial);
diff --git a/sensors/common/default/2.X/Sensor.cpp b/sensors/common/default/2.X/Sensor.cpp
index 1841dff..870980f 100644
--- a/sensors/common/default/2.X/Sensor.cpp
+++ b/sensors/common/default/2.X/Sensor.cpp
@@ -57,11 +57,11 @@
     return mSensorInfo;
 }
 
-void Sensor::batch(int32_t samplingPeriodNs) {
-    if (samplingPeriodNs < mSensorInfo.minDelay * 1000) {
-        samplingPeriodNs = mSensorInfo.minDelay * 1000;
-    } else if (samplingPeriodNs > mSensorInfo.maxDelay * 1000) {
-        samplingPeriodNs = mSensorInfo.maxDelay * 1000;
+void Sensor::batch(int64_t samplingPeriodNs) {
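+    // Widen the math to 64 bits: the int32_t min/max delays are in
+    // microseconds, and multiplying by 1000 can overflow 32 bits for
+    // periods longer than ~2.1 seconds.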
+    if (samplingPeriodNs < mSensorInfo.minDelay * 1000ll) {
+        samplingPeriodNs = mSensorInfo.minDelay * 1000ll;
+    } else if (samplingPeriodNs > mSensorInfo.maxDelay * 1000ll) {
+        samplingPeriodNs = mSensorInfo.maxDelay * 1000ll;
     }
 
     if (mSamplingPeriodNs != samplingPeriodNs) {
@@ -133,6 +133,11 @@
 }
 
 std::vector<Event> Sensor::readEvents() {
+    // For an accelerometer sensor type, default the z-direction
+    // value to -9.8 m/s^2, approximating one standard gravity at rest.
+    float zValue = (mSensorInfo.type == SensorType::ACCELEROMETER)
+        ? -9.8f : 0.0f;
+
     std::vector<Event> events;
     Event event;
     event.sensorHandle = mSensorInfo.sensorHandle;
@@ -140,7 +145,7 @@
     event.timestamp = ::android::elapsedRealtimeNano();
     event.u.vec3.x = 0;
     event.u.vec3.y = 0;
-    event.u.vec3.z = 0;
+    event.u.vec3.z = zValue;
     event.u.vec3.status = SensorStatus::ACCURACY_HIGH;
     events.push_back(event);
     return events;
@@ -330,25 +335,6 @@
     mSensorInfo.flags = static_cast<uint32_t>(SensorFlagBits::ON_CHANGE_MODE);
 };
 
-DeviceTempSensor::DeviceTempSensor(int32_t sensorHandle, ISensorsEventCallback* callback)
-    : OnChangeSensor(callback) {
-    mSensorInfo.sensorHandle = sensorHandle;
-    mSensorInfo.name = "Device Temp Sensor";
-    mSensorInfo.vendor = "Vendor String";
-    mSensorInfo.version = 1;
-    mSensorInfo.type = SensorType::TEMPERATURE;
-    mSensorInfo.typeAsString = "";
-    mSensorInfo.maxRange = 80.0f;
-    mSensorInfo.resolution = 0.01f;
-    mSensorInfo.power = 0.001f;
-    mSensorInfo.minDelay = 40 * 1000;  // microseconds
-    mSensorInfo.maxDelay = kDefaultMaxDelayUs;
-    mSensorInfo.fifoReservedEventCount = 0;
-    mSensorInfo.fifoMaxEventCount = 0;
-    mSensorInfo.requiredPermission = "";
-    mSensorInfo.flags = static_cast<uint32_t>(SensorFlagBits::ON_CHANGE_MODE);
-}
-
 RelativeHumiditySensor::RelativeHumiditySensor(int32_t sensorHandle,
                                                ISensorsEventCallback* callback)
     : OnChangeSensor(callback) {
diff --git a/sensors/common/default/2.X/Sensor.h b/sensors/common/default/2.X/Sensor.h
index 2f8a143..a792797 100644
--- a/sensors/common/default/2.X/Sensor.h
+++ b/sensors/common/default/2.X/Sensor.h
@@ -32,7 +32,7 @@
 namespace V2_X {
 namespace implementation {
 
-static constexpr float kDefaultMaxDelayUs = 10 * 1000 * 1000;
+static constexpr int32_t kDefaultMaxDelayUs = 10 * 1000 * 1000;
 
 class ISensorsEventCallback {
   public:
@@ -54,7 +54,7 @@
     virtual ~Sensor();
 
     const SensorInfo& getSensorInfo() const;
-    void batch(int32_t samplingPeriodNs);
+    void batch(int64_t samplingPeriodNs);
     virtual void activate(bool enable);
     Result flush();
 
@@ -113,11 +113,6 @@
     AmbientTempSensor(int32_t sensorHandle, ISensorsEventCallback* callback);
 };
 
-class DeviceTempSensor : public OnChangeSensor {
-  public:
-    DeviceTempSensor(int32_t sensorHandle, ISensorsEventCallback* callback);
-};
-
 class PressureSensor : public Sensor {
   public:
     PressureSensor(int32_t sensorHandle, ISensorsEventCallback* callback);
diff --git a/sensors/common/default/2.X/Sensors.h b/sensors/common/default/2.X/Sensors.h
index ee8240d..8969c0f 100644
--- a/sensors/common/default/2.X/Sensors.h
+++ b/sensors/common/default/2.X/Sensors.h
@@ -64,7 +64,6 @@
         AddSensor<AccelSensor>();
         AddSensor<GyroSensor>();
         AddSensor<AmbientTempSensor>();
-        AddSensor<DeviceTempSensor>();
         AddSensor<PressureSensor>();
         AddSensor<MagnetometerSensor>();
         AddSensor<LightSensor>();
diff --git a/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.cpp b/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.cpp
index 1efd971..69debb6 100644
--- a/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.cpp
+++ b/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.cpp
@@ -71,9 +71,10 @@
     return mSensorInfo;
 }
 
-void Sensor::batch(int32_t samplingPeriodNs) {
-    samplingPeriodNs =
-            std::clamp(samplingPeriodNs, mSensorInfo.minDelay * 1000, mSensorInfo.maxDelay * 1000);
+void Sensor::batch(int64_t samplingPeriodNs) {
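+    // Clamp to the sensor's supported range, casting the int32_t delays to
+    // int64_t before the microsecond-to-nanosecond conversion so the
+    // multiplication cannot overflow.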
+    samplingPeriodNs = std::clamp(samplingPeriodNs,
+                                  static_cast<int64_t>(mSensorInfo.minDelay) * 1000,
+                                  static_cast<int64_t>(mSensorInfo.maxDelay) * 1000);
 
     if (mSamplingPeriodNs != samplingPeriodNs) {
         mSamplingPeriodNs = samplingPeriodNs;
@@ -323,17 +324,6 @@
     mSensorInfo.minDelay = 40 * 1000;  // microseconds
 }
 
-DeviceTempSensor::DeviceTempSensor(int32_t sensorHandle, ISensorsEventCallback* callback)
-    : ContinuousSensor(sensorHandle, callback) {
-    mSensorInfo.name = "Device Temp Sensor";
-    mSensorInfo.type = SensorType::TEMPERATURE;
-    mSensorInfo.typeAsString = SENSOR_STRING_TYPE_TEMPERATURE;
-    mSensorInfo.maxRange = 80.0f;
-    mSensorInfo.resolution = 0.01f;
-    mSensorInfo.power = 0.001f;
-    mSensorInfo.minDelay = 40 * 1000;  // microseconds
-}
-
 RelativeHumiditySensor::RelativeHumiditySensor(int32_t sensorHandle,
                                                ISensorsEventCallback* callback)
     : OnChangeSensor(sensorHandle, callback) {
diff --git a/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.h b/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.h
index 5cf9f83..08c8647 100644
--- a/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.h
+++ b/sensors/common/default/2.X/multihal/tests/fake_subhal/Sensor.h
@@ -49,7 +49,7 @@
     virtual ~Sensor();
 
     const SensorInfo& getSensorInfo() const;
-    void batch(int32_t samplingPeriodNs);
+    void batch(int64_t samplingPeriodNs);
     virtual void activate(bool enable);
     Result flush();
 
@@ -114,11 +114,6 @@
     std::vector<Event> readEvents() override;
 };
 
-class DeviceTempSensor : public ContinuousSensor {
-  public:
-    DeviceTempSensor(int32_t sensorHandle, ISensorsEventCallback* callback);
-};
-
 class PressureSensor : public ContinuousSensor {
   public:
     PressureSensor(int32_t sensorHandle, ISensorsEventCallback* callback);
diff --git a/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h b/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
index 1a78e84..353563c 100644
--- a/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
+++ b/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
@@ -206,7 +206,6 @@
         ISensorsSubHalBase::AddSensor<GyroSensor>();
         ISensorsSubHalBase::AddSensor<MagnetometerSensor>();
         ISensorsSubHalBase::AddSensor<PressureSensor>();
-        ISensorsSubHalBase::AddSensor<DeviceTempSensor>();
     }
 };
 
@@ -231,7 +230,6 @@
         ISensorsSubHalBase::AddSensor<GyroSensor>();
         ISensorsSubHalBase::AddSensor<MagnetometerSensor>();
         ISensorsSubHalBase::AddSensor<PressureSensor>();
-        ISensorsSubHalBase::AddSensor<DeviceTempSensor>();
         ISensorsSubHalBase::AddSensor<AmbientTempSensor>();
         ISensorsSubHalBase::AddSensor<LightSensor>();
         ISensorsSubHalBase::AddSensor<ProximitySensor>();
diff --git a/sensors/common/vts/2_X/VtsHalSensorsV2_XTargetTest.h b/sensors/common/vts/2_X/VtsHalSensorsV2_XTargetTest.h
index e674ddb..f857827 100644
--- a/sensors/common/vts/2_X/VtsHalSensorsV2_XTargetTest.h
+++ b/sensors/common/vts/2_X/VtsHalSensorsV2_XTargetTest.h
@@ -845,7 +845,11 @@
         std::shared_ptr<SensorsTestSharedMemory<SensorTypeVersion, EventType>> mem,
         int32_t* directChannelHandle, bool supportsSharedMemType, bool supportsAnyDirectChannel) {
     char* buffer = mem->getBuffer();
-    memset(buffer, 0xff, mem->getSize());
+    size_t size = mem->getSize();
+
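+    // Only write the test pattern when this shared memory type is supported;
+    // otherwise the buffer may not be backed by a usable allocation.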
+    if (supportsSharedMemType) {
+        memset(buffer, 0xff, size);
+    }
 
     registerDirectChannel(mem->getSharedMemInfo(), [&](Result result, int32_t channelHandle) {
         if (supportsSharedMemType) {
diff --git a/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h b/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
index 03bec87..a8e1996 100644
--- a/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
+++ b/sensors/common/vts/utils/include/sensors-vts-utils/SensorsHidlTestBase.h
@@ -109,7 +109,6 @@
         case SensorTypeT::MAGNETIC_FIELD:
         case SensorTypeT::ORIENTATION:
         case SensorTypeT::PRESSURE:
-        case SensorTypeT::TEMPERATURE:
         case SensorTypeT::GRAVITY:
         case SensorTypeT::LINEAR_ACCELERATION:
         case SensorTypeT::ROTATION_VECTOR:
@@ -145,6 +144,10 @@
         case SensorTypeT::DYNAMIC_SENSOR_META:
             return SensorFlagBits::SPECIAL_REPORTING_MODE;
 
+        case SensorTypeT::TEMPERATURE:
+            ALOGW("Device temperature sensor is deprecated, ignoring for test");
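+            // (SensorFlagBits)-1 mirrors the "no expected reporting mode"
+            // sentinel returned by the default branch below.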
+            return (SensorFlagBits)-1;
+
         default:
             ALOGW("Type %d is not implemented in expectedReportModeForType", (int)type);
             return (SensorFlagBits)-1;
@@ -334,7 +337,7 @@
         usleep(500000);  // sleep 0.5 sec to wait for change rate to happen
         events1 = collectEvents(collectionTimeoutUs, minNEvent, getEnvironment());
 
-        // second collection, without stop sensor
+        // second collection, without stopping the sensor
         ASSERT_EQ(batch(handle, secondCollectionPeriod, batchingPeriodInNs), Result::OK);
 
         usleep(500000);  // sleep 0.5 sec to wait for change rate to happen
diff --git a/tetheroffload/control/1.0/vts/functional/Android.bp b/tetheroffload/control/1.0/vts/functional/Android.bp
index c51dd8b..c397df4 100644
--- a/tetheroffload/control/1.0/vts/functional/Android.bp
+++ b/tetheroffload/control/1.0/vts/functional/Android.bp
@@ -15,10 +15,18 @@
 cc_test {
     name: "VtsHalTetheroffloadControlV1_0TargetTest",
     defaults: ["VtsHalTargetTestDefaults"],
-    srcs: ["VtsHalTetheroffloadControlV1_0TargetTest.cpp"],
+    local_include_dirs: ["include"],
+    srcs: [
+        "VtsHalTetheroffloadControlV1_0TargetTest.cpp",
+        "OffloadControlTestBase.cpp",
+        "OffloadControlTestUtils.cpp",
+    ],
     static_libs: [
         "android.hardware.tetheroffload.config@1.0",
         "android.hardware.tetheroffload.control@1.0",
     ],
-    test_suites: ["general-tests", "vts"],
+    test_suites: [
+        "general-tests",
+        "vts",
+    ],
 }
diff --git a/tetheroffload/control/1.0/vts/functional/OffloadControlTestBase.cpp b/tetheroffload/control/1.0/vts/functional/OffloadControlTestBase.cpp
new file mode 100644
index 0000000..bd0dad7
--- /dev/null
+++ b/tetheroffload/control/1.0/vts/functional/OffloadControlTestBase.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <OffloadControlTestBase.h>
+
+void OffloadControlTestBase::TearDown() {
+    // For good measure, try stopOffload() once more during teardown. Tests
+    // reach this point at different stages of the HAL call cycle, so the
+    // return code cannot be predicted; ignore it.
+    stopOffload(ExpectBoolean::Ignored);
+}
+
+// The IOffloadConfig HAL is tested more thoroughly elsewhere. Here the class
+// just sets everything up correctly and verifies basic readiness.
+void OffloadControlTestBase::setupConfigHal() {
+    config = IOffloadConfig::getService(std::get<0>(GetParam()));
+    ASSERT_NE(nullptr, config.get()) << "Could not get HIDL instance";
+
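+    // setHandles() takes two conntrack netlink handles: the first subscribed
+    // to NEW/DESTROY events, the second to UPDATE/DESTROY events.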
+    unique_fd fd1(conntrackSocket(NF_NETLINK_CONNTRACK_NEW | NF_NETLINK_CONNTRACK_DESTROY));
+    if (fd1.get() < 0) {
+        ALOGE("Unable to create conntrack handles: %d/%s", errno, strerror(errno));
+        FAIL();
+    }
+    native_handle_t* const nativeHandle1 = native_handle_create(1, 0);
+    nativeHandle1->data[0] = fd1.release();
+    hidl_handle h1;
+    h1.setTo(nativeHandle1, true);
+
+    unique_fd fd2(conntrackSocket(NF_NETLINK_CONNTRACK_UPDATE | NF_NETLINK_CONNTRACK_DESTROY));
+    if (fd2.get() < 0) {
+        ALOGE("Unable to create conntrack handles: %d/%s", errno, strerror(errno));
+        FAIL();
+    }
+    native_handle_t* const nativeHandle2 = native_handle_create(1, 0);
+    nativeHandle2->data[0] = fd2.release();
+    hidl_handle h2;
+    h2.setTo(nativeHandle2, true);
+
+    const Return<void> ret = config->setHandles(h1, h2, ASSERT_TRUE_CALLBACK);
+    ASSERT_TRUE(ret.isOk());
+}
+
+void OffloadControlTestBase::prepareControlHal() {
+    control = createControl(std::get<1>(GetParam()));
+    ASSERT_NE(nullptr, control.get()) << "Could not get HIDL instance";
+
+    control_cb = new TetheringOffloadCallback();
+    ASSERT_NE(nullptr, control_cb.get()) << "Could not create offload callback";
+}
+
+void OffloadControlTestBase::initOffload(const bool expected_result) {
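+    // expected_result covers both paths: true for the first initOffload() of
+    // a cycle, false for any repeated attempt before stopOffload().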
+    auto init_cb = [&](bool success, std::string errMsg) {
+        std::string msg = StringPrintf("Unexpectedly %s to init offload: %s",
+                                       success ? "succeeded" : "failed", errMsg.c_str());
+        ASSERT_EQ(expected_result, success) << msg;
+    };
+    const Return<void> ret = control->initOffload(control_cb, init_cb);
+    ASSERT_TRUE(ret.isOk());
+}
+
+void OffloadControlTestBase::setupControlHal() {
+    prepareControlHal();
+    initOffload(true);
+}
+
+void OffloadControlTestBase::stopOffload(const ExpectBoolean value) {
+    auto cb = [&](bool success, const hidl_string& errMsg) {
+        switch (value) {
+            case ExpectBoolean::False:
+                ASSERT_EQ(false, success) << "Unexpectedly able to stop offload: " << errMsg;
+                break;
+            case ExpectBoolean::True:
+                ASSERT_EQ(true, success) << "Unexpectedly failed to stop offload: " << errMsg;
+                break;
+            case ExpectBoolean::Ignored:
+                break;
+        }
+    };
+    const Return<void> ret = control->stopOffload(cb);
+    ASSERT_TRUE(ret.isOk());
+}
diff --git a/tetheroffload/control/1.0/vts/functional/OffloadControlTestUtils.cpp b/tetheroffload/control/1.0/vts/functional/OffloadControlTestUtils.cpp
new file mode 100644
index 0000000..c784fe1
--- /dev/null
+++ b/tetheroffload/control/1.0/vts/functional/OffloadControlTestUtils.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <OffloadControlTestUtils.h>
+#include <android-base/unique_fd.h>
+
+using android::base::unique_fd;
+
+inline const sockaddr* asSockaddr(const sockaddr_nl* nladdr) {
+    return reinterpret_cast<const sockaddr*>(nladdr);
+}
+
+int conntrackSocket(unsigned groups) {
+    unique_fd s(socket(AF_NETLINK, SOCK_DGRAM, NETLINK_NETFILTER));
+    if (s.get() < 0) {
+        return -errno;
+    }
+
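+    // nl_pid = 0 lets the kernel assign the netlink port id; nl_groups
+    // subscribes the socket to the requested conntrack multicast groups.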
+    const struct sockaddr_nl bind_addr = {
+            .nl_family = AF_NETLINK,
+            .nl_pad = 0,
+            .nl_pid = 0,
+            .nl_groups = groups,
+    };
+    if (::bind(s.get(), asSockaddr(&bind_addr), sizeof(bind_addr)) < 0) {
+        return -errno;
+    }
+
+    const struct sockaddr_nl kernel_addr = {
+            .nl_family = AF_NETLINK,
+            .nl_pad = 0,
+            .nl_pid = 0,
+            .nl_groups = groups,
+    };
+    if (connect(s.get(), asSockaddr(&kernel_addr), sizeof(kernel_addr)) != 0) {
+        return -errno;
+    }
+
+    return s.release();
+}
\ No newline at end of file
diff --git a/tetheroffload/control/1.0/vts/functional/VtsHalTetheroffloadControlV1_0TargetTest.cpp b/tetheroffload/control/1.0/vts/functional/VtsHalTetheroffloadControlV1_0TargetTest.cpp
index d3a7020..ad4ef12 100644
--- a/tetheroffload/control/1.0/vts/functional/VtsHalTetheroffloadControlV1_0TargetTest.cpp
+++ b/tetheroffload/control/1.0/vts/functional/VtsHalTetheroffloadControlV1_0TargetTest.cpp
@@ -16,215 +16,24 @@
 
 #define LOG_TAG "VtsOffloadControlV1_0TargetTest"
 
-#include <VtsHalHidlTargetCallbackBase.h>
+#include <OffloadControlTestV1_0.h>
 #include <android-base/stringprintf.h>
-#include <android-base/unique_fd.h>
-#include <android/hardware/tetheroffload/config/1.0/IOffloadConfig.h>
-#include <android/hardware/tetheroffload/control/1.0/IOffloadControl.h>
-#include <android/hardware/tetheroffload/control/1.0/types.h>
 #include <gtest/gtest.h>
 #include <hidl/GtestPrinter.h>
 #include <hidl/ServiceManagement.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netlink.h>
-#include <log/log.h>
 #include <net/if.h>
 #include <sys/socket.h>
-#include <unistd.h>
-#include <set>
 
 using android::base::StringPrintf;
-using android::base::unique_fd;
-using android::hardware::hidl_handle;
-using android::hardware::hidl_string;
-using android::hardware::hidl_vec;
 using android::hardware::Return;
 using android::hardware::tetheroffload::config::V1_0::IOffloadConfig;
 using android::hardware::tetheroffload::control::V1_0::IOffloadControl;
-using android::hardware::tetheroffload::control::V1_0::IPv4AddrPortPair;
-using android::hardware::tetheroffload::control::V1_0::ITetheringOffloadCallback;
-using android::hardware::tetheroffload::control::V1_0::OffloadCallbackEvent;
-using android::hardware::tetheroffload::control::V1_0::NatTimeoutUpdate;
-using android::hardware::tetheroffload::control::V1_0::NetworkProtocol;
 using android::hardware::Void;
-using android::sp;
-
-enum class ExpectBoolean {
-    Ignored = -1,
-    False = 0,
-    True = 1,
-};
 
 constexpr const char* TEST_IFACE = "rmnet_data0";
 
-// We use #defines here so as to get local lamba captures and error message line numbers
-#define ASSERT_TRUE_CALLBACK                                                    \
-    [&](bool success, std::string errMsg) {                                     \
-        std::string msg = StringPrintf("unexpected error: %s", errMsg.c_str()); \
-        ASSERT_TRUE(success) << msg;                                            \
-    }
-
-#define ASSERT_FALSE_CALLBACK                                                 \
-    [&](bool success, std::string errMsg) {                                   \
-        std::string msg = StringPrintf("expected error: %s", errMsg.c_str()); \
-        ASSERT_FALSE(success) << msg;                                         \
-    }
-
-#define ASSERT_ZERO_BYTES_CALLBACK            \
-    [&](uint64_t rxBytes, uint64_t txBytes) { \
-        EXPECT_EQ(0ULL, rxBytes);             \
-        EXPECT_EQ(0ULL, txBytes);             \
-    }
-
-inline const sockaddr* asSockaddr(const sockaddr_nl* nladdr) {
-    return reinterpret_cast<const sockaddr*>(nladdr);
-}
-
-int conntrackSocket(unsigned groups) {
-    unique_fd s(socket(AF_NETLINK, SOCK_DGRAM, NETLINK_NETFILTER));
-    if (s.get() < 0) {
-        return -errno;
-    }
-
-    const struct sockaddr_nl bind_addr = {
-        .nl_family = AF_NETLINK, .nl_pad = 0, .nl_pid = 0, .nl_groups = groups,
-    };
-    if (::bind(s.get(), asSockaddr(&bind_addr), sizeof(bind_addr)) < 0) {
-        return -errno;
-    }
-
-    const struct sockaddr_nl kernel_addr = {
-        .nl_family = AF_NETLINK, .nl_pad = 0, .nl_pid = 0, .nl_groups = groups,
-    };
-    if (connect(s.get(), asSockaddr(&kernel_addr), sizeof(kernel_addr)) != 0) {
-        return -errno;
-    }
-
-    return s.release();
-}
-
-constexpr char kCallbackOnEvent[] = "onEvent";
-constexpr char kCallbackUpdateTimeout[] = "updateTimeout";
-
-class TetheringOffloadCallbackArgs {
-   public:
-    OffloadCallbackEvent last_event;
-    NatTimeoutUpdate last_params;
-};
-
-class OffloadControlHidlTestBase
-    : public testing::TestWithParam<std::tuple<std::string, std::string>> {
-   public:
-    virtual void SetUp() override {
-        setupConfigHal();
-        prepareControlHal();
-    }
-
-    virtual void TearDown() override {
-        // For good measure, we should try stopOffload() once more. Since we
-        // don't know where we are in HAL call test cycle we don't know what
-        // return code to actually expect, so we just ignore it.
-        stopOffload(ExpectBoolean::Ignored);
-    }
-
-    // The IOffloadConfig HAL is tested more thoroughly elsewhere. He we just
-    // setup everything correctly and verify basic readiness.
-    void setupConfigHal() {
-        config = IOffloadConfig::getService(std::get<0>(GetParam()));
-        ASSERT_NE(nullptr, config.get()) << "Could not get HIDL instance";
-
-        unique_fd fd1(conntrackSocket(NF_NETLINK_CONNTRACK_NEW | NF_NETLINK_CONNTRACK_DESTROY));
-        if (fd1.get() < 0) {
-            ALOGE("Unable to create conntrack handles: %d/%s", errno, strerror(errno));
-            FAIL();
-        }
-        native_handle_t* const nativeHandle1 = native_handle_create(1, 0);
-        nativeHandle1->data[0] = fd1.release();
-        hidl_handle h1;
-        h1.setTo(nativeHandle1, true);
-
-        unique_fd fd2(conntrackSocket(NF_NETLINK_CONNTRACK_UPDATE | NF_NETLINK_CONNTRACK_DESTROY));
-        if (fd2.get() < 0) {
-            ALOGE("Unable to create conntrack handles: %d/%s", errno, strerror(errno));
-            FAIL();
-        }
-        native_handle_t* const nativeHandle2 = native_handle_create(1, 0);
-        nativeHandle2->data[0] = fd2.release();
-        hidl_handle h2;
-        h2.setTo(nativeHandle2, true);
-
-        const Return<void> ret = config->setHandles(h1, h2, ASSERT_TRUE_CALLBACK);
-        ASSERT_TRUE(ret.isOk());
-    }
-
-    void prepareControlHal() {
-        control = IOffloadControl::getService(std::get<1>(GetParam()));
-        ASSERT_NE(nullptr, control.get()) << "Could not get HIDL instance";
-
-        control_cb = new TetheringOffloadCallback();
-        ASSERT_NE(nullptr, control_cb.get()) << "Could not get get offload callback";
-    }
-
-    void initOffload(const bool expected_result) {
-        auto init_cb = [&](bool success, std::string errMsg) {
-            std::string msg = StringPrintf("Unexpectedly %s to init offload: %s",
-                                           success ? "succeeded" : "failed", errMsg.c_str());
-            ASSERT_EQ(expected_result, success) << msg;
-        };
-        const Return<void> ret = control->initOffload(control_cb, init_cb);
-        ASSERT_TRUE(ret.isOk());
-    }
-
-    void setupControlHal() {
-        prepareControlHal();
-        initOffload(true);
-    }
-
-    void stopOffload(const ExpectBoolean value) {
-        auto cb = [&](bool success, const hidl_string& errMsg) {
-            switch (value) {
-                case ExpectBoolean::False:
-                    ASSERT_EQ(false, success) << "Unexpectedly able to stop offload: " << errMsg;
-                    break;
-                case ExpectBoolean::True:
-                    ASSERT_EQ(true, success) << "Unexpectedly failed to stop offload: " << errMsg;
-                    break;
-                case ExpectBoolean::Ignored:
-                    break;
-            }
-        };
-        const Return<void> ret = control->stopOffload(cb);
-        ASSERT_TRUE(ret.isOk());
-    }
-
-    // Callback class for both events and NAT timeout updates.
-    class TetheringOffloadCallback
-        : public testing::VtsHalHidlTargetCallbackBase<TetheringOffloadCallbackArgs>,
-          public ITetheringOffloadCallback {
-       public:
-        TetheringOffloadCallback() = default;
-        virtual ~TetheringOffloadCallback() = default;
-
-        Return<void> onEvent(OffloadCallbackEvent event) override {
-            const TetheringOffloadCallbackArgs args{.last_event = event};
-            NotifyFromCallback(kCallbackOnEvent, args);
-            return Void();
-        };
-
-        Return<void> updateTimeout(const NatTimeoutUpdate& params) override {
-            const TetheringOffloadCallbackArgs args{.last_params = params};
-            NotifyFromCallback(kCallbackUpdateTimeout, args);
-            return Void();
-        };
-    };
-
-    sp<IOffloadConfig> config;
-    sp<IOffloadControl> control;
-    sp<TetheringOffloadCallback> control_cb;
-};
-
 // Call initOffload() multiple times. Check that non-first initOffload() calls return false.
-TEST_P(OffloadControlHidlTestBase, AdditionalInitsWithoutStopReturnFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, AdditionalInitsWithoutStopReturnFalse) {
     initOffload(true);
     initOffload(false);
     initOffload(false);
@@ -232,7 +41,7 @@
 }
 
 // Check that calling stopOffload() without first having called initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, MultipleStopsWithoutInitReturnFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, MultipleStopsWithoutInitReturnFalse) {
     stopOffload(ExpectBoolean::False);
     stopOffload(ExpectBoolean::False);
     stopOffload(ExpectBoolean::False);
@@ -251,7 +60,7 @@
 }
 
 // Check that calling stopOffload() after a complete init/stop cycle returns false.
-TEST_P(OffloadControlHidlTestBase, AdditionalStopsWithInitReturnFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, AdditionalStopsWithInitReturnFalse) {
     initOffload(true);
     // Call setUpstreamParameters() so that "offload" can be reasonably said
     // to be both requested and operational.
@@ -273,7 +82,7 @@
 }
 
 // Check that calling setLocalPrefixes() without first having called initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, SetLocalPrefixesWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, SetLocalPrefixesWithoutInitReturnsFalse) {
     const vector<hidl_string> prefixes{hidl_string("2001:db8::/64")};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_FALSE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
@@ -281,14 +90,14 @@
 
 // Check that calling getForwardedStats() without first having called initOffload()
 // returns zero bytes statistics.
-TEST_P(OffloadControlHidlTestBase, GetForwardedStatsWithoutInitReturnsZeroValues) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, GetForwardedStatsWithoutInitReturnsZeroValues) {
     const hidl_string upstream(TEST_IFACE);
     const Return<void> ret = control->getForwardedStats(upstream, ASSERT_ZERO_BYTES_CALLBACK);
     EXPECT_TRUE(ret.isOk());
 }
 
 // Check that calling setDataLimit() without first having called initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, SetDataLimitWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, SetDataLimitWithoutInitReturnsFalse) {
     const hidl_string upstream(TEST_IFACE);
     const uint64_t limit = 5000ULL;
     const Return<void> ret = control->setDataLimit(upstream, limit, ASSERT_FALSE_CALLBACK);
@@ -297,7 +106,7 @@
 
 // Check that calling setUpstreamParameters() without first having called initOffload()
 // returns false.
-TEST_P(OffloadControlHidlTestBase, SetUpstreamParametersWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, SetUpstreamParametersWithoutInitReturnsFalse) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("192.0.2.0/24");
     const hidl_string v4Gw("192.0.2.1");
@@ -309,7 +118,7 @@
 
 // Check that calling addDownstream() with an IPv4 prefix without first having called
 // initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, AddIPv4DownstreamWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, AddIPv4DownstreamWithoutInitReturnsFalse) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string prefix("192.0.2.0/24");
     const Return<void> ret = control->addDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
@@ -318,7 +127,7 @@
 
 // Check that calling addDownstream() with an IPv6 prefix without first having called
 // initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, AddIPv6DownstreamWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, AddIPv6DownstreamWithoutInitReturnsFalse) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string prefix("2001:db8::/64");
     const Return<void> ret = control->addDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
@@ -327,7 +136,7 @@
 
 // Check that calling removeDownstream() with an IPv4 prefix without first having called
 // initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, RemoveIPv4DownstreamWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, RemoveIPv4DownstreamWithoutInitReturnsFalse) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string prefix("192.0.2.0/24");
     const Return<void> ret = control->removeDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
@@ -336,48 +145,33 @@
 
 // Check that calling removeDownstream() with an IPv6 prefix without first having called
 // initOffload() returns false.
-TEST_P(OffloadControlHidlTestBase, RemoveIPv6DownstreamWithoutInitReturnsFalse) {
+TEST_P(OffloadControlTestV1_0_HalNotStarted, RemoveIPv6DownstreamWithoutInitReturnsFalse) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string prefix("2001:db8::/64");
     const Return<void> ret = control->removeDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
 }
 
-class OffloadControlHidlTest : public OffloadControlHidlTestBase {
-   public:
-    virtual void SetUp() override {
-        setupConfigHal();
-        setupControlHal();
-    }
-
-    virtual void TearDown() override {
-        // For good measure, we should try stopOffload() once more. Since we
-        // don't know where we are in HAL call test cycle we don't know what
-        // return code to actually expect, so we just ignore it.
-        stopOffload(ExpectBoolean::Ignored);
-    }
-};
-
 /*
  * Tests for IOffloadControl::setLocalPrefixes().
  */
 
 // Test setLocalPrefixes() accepts an IPv4 address.
-TEST_P(OffloadControlHidlTest, SetLocalPrefixesIPv4AddressOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetLocalPrefixesIPv4AddressOk) {
     const vector<hidl_string> prefixes{hidl_string("192.0.2.1")};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_TRUE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
 }
 
 // Test setLocalPrefixes() accepts an IPv6 address.
-TEST_P(OffloadControlHidlTest, SetLocalPrefixesIPv6AddressOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetLocalPrefixesIPv6AddressOk) {
     const vector<hidl_string> prefixes{hidl_string("fe80::1")};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_TRUE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
 }
 
 // Test setLocalPrefixes() accepts both IPv4 and IPv6 prefixes.
-TEST_P(OffloadControlHidlTest, SetLocalPrefixesIPv4v6PrefixesOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetLocalPrefixesIPv4v6PrefixesOk) {
     const vector<hidl_string> prefixes{hidl_string("192.0.2.0/24"), hidl_string("fe80::/64")};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_TRUE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
@@ -386,14 +180,14 @@
 // Test that setLocalPrefixes() fails given empty input. There is always
 // a non-empty set of local prefixes; when all networking interfaces are down
 // we still apply {127.0.0.0/8, ::1/128, fe80::/64} here.
-TEST_P(OffloadControlHidlTest, SetLocalPrefixesEmptyFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetLocalPrefixesEmptyFails) {
     const vector<hidl_string> prefixes{};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_FALSE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
 }
 
 // Test setLocalPrefixes() fails on incorrectly formed input strings.
-TEST_P(OffloadControlHidlTest, SetLocalPrefixesInvalidFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetLocalPrefixesInvalidFails) {
     const vector<hidl_string> prefixes{hidl_string("192.0.2.0/24"), hidl_string("invalid")};
     const Return<void> ret = control->setLocalPrefixes(prefixes, ASSERT_FALSE_CALLBACK);
     EXPECT_TRUE(ret.isOk());
@@ -404,7 +198,7 @@
  */
 
 // Test that getForwardedStats() for a non-existent upstream yields zero bytes statistics.
-TEST_P(OffloadControlHidlTest, GetForwardedStatsInvalidUpstreamIface) {
+TEST_P(OffloadControlTestV1_0_HalStarted, GetForwardedStatsInvalidUpstreamIface) {
     const hidl_string upstream("invalid");
     const Return<void> ret = control->getForwardedStats(upstream, ASSERT_ZERO_BYTES_CALLBACK);
     EXPECT_TRUE(ret.isOk());
@@ -412,7 +206,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, GetForwardedStatsDummyIface) {
+TEST_P(OffloadControlTestV1_0_HalStarted, GetForwardedStatsDummyIface) {
     const hidl_string upstream(TEST_IFACE);
     const Return<void> ret = control->getForwardedStats(upstream, ASSERT_ZERO_BYTES_CALLBACK);
     EXPECT_TRUE(ret.isOk());
@@ -423,7 +217,7 @@
  */
 
 // Test that setDataLimit() for an empty interface name fails.
-TEST_P(OffloadControlHidlTest, SetDataLimitEmptyUpstreamIfaceFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetDataLimitEmptyUpstreamIfaceFails) {
     const hidl_string upstream("");
     const uint64_t limit = 5000ULL;
     const Return<void> ret = control->setDataLimit(upstream, limit, ASSERT_FALSE_CALLBACK);
@@ -432,7 +226,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetDataLimitNonZeroOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetDataLimitNonZeroOk) {
     const hidl_string upstream(TEST_IFACE);
     const uint64_t limit = 5000ULL;
     const Return<void> ret = control->setDataLimit(upstream, limit, ASSERT_TRUE_CALLBACK);
@@ -441,7 +235,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetDataLimitZeroOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetDataLimitZeroOk) {
     const hidl_string upstream(TEST_IFACE);
     const uint64_t limit = 0ULL;
     const Return<void> ret = control->setDataLimit(upstream, limit, ASSERT_TRUE_CALLBACK);
@@ -454,7 +248,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersIPv6OnlyOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersIPv6OnlyOk) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("");
     const hidl_string v4Gw("");
@@ -466,7 +260,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersAlternateIPv6OnlyOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersAlternateIPv6OnlyOk) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr;
     const hidl_string v4Gw;
@@ -478,7 +272,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersIPv4OnlyOk) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersIPv4OnlyOk) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("192.0.2.2");
     const hidl_string v4Gw("192.0.2.1");
@@ -490,7 +284,7 @@
 
 // TEST_IFACE is presumed to exist on the device and be up. No packets
 // are ever actually caused to be forwarded.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersIPv4v6Ok) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersIPv4v6Ok) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("192.0.2.2");
     const hidl_string v4Gw("192.0.2.1");
@@ -501,7 +295,7 @@
 }
 
 // Test that setUpstreamParameters() fails when all parameters are empty.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersEmptyFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersEmptyFails) {
     const hidl_string iface("");
     const hidl_string v4Addr("");
     const hidl_string v4Gw("");
@@ -512,7 +306,7 @@
 }
 
 // Test that setUpstreamParameters() fails when given empty or non-existent interface names.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersBogusIfaceFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersBogusIfaceFails) {
     const hidl_string v4Addr("192.0.2.2");
     const hidl_string v4Gw("192.0.2.1");
     const vector<hidl_string> v6Gws{hidl_string("fe80::db8:1")};
@@ -526,7 +320,7 @@
 }
 
 // Test that setUpstreamParameters() fails when given unparseable IPv4 addresses.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersInvalidIPv4AddrFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersInvalidIPv4AddrFails) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Gw("192.0.2.1");
     const vector<hidl_string> v6Gws{hidl_string("fe80::db8:1")};
@@ -540,7 +334,7 @@
 }
 
 // Test that setUpstreamParameters() fails when given unparseable IPv4 gateways.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersInvalidIPv4GatewayFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersInvalidIPv4GatewayFails) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("192.0.2.2");
     const vector<hidl_string> v6Gws{hidl_string("fe80::db8:1")};
@@ -554,7 +348,7 @@
 }
 
 // Test that setUpstreamParameters() fails when given unparseable IPv6 gateways.
-TEST_P(OffloadControlHidlTest, SetUpstreamParametersBadIPv6GatewaysFail) {
+TEST_P(OffloadControlTestV1_0_HalStarted, SetUpstreamParametersBadIPv6GatewaysFail) {
     const hidl_string iface(TEST_IFACE);
     const hidl_string v4Addr("192.0.2.2");
     const hidl_string v4Gw("192.0.2.1");
@@ -572,7 +366,7 @@
  */
 
 // Test addDownstream() works given an IPv4 prefix.
-TEST_P(OffloadControlHidlTest, AddDownstreamIPv4) {
+TEST_P(OffloadControlTestV1_0_HalStarted, AddDownstreamIPv4) {
     const hidl_string iface("dummy0");
     const hidl_string prefix("192.0.2.0/24");
     const Return<void> ret = control->addDownstream(iface, prefix, ASSERT_TRUE_CALLBACK);
@@ -580,7 +374,7 @@
 }
 
 // Test addDownstream() works given an IPv6 prefix.
-TEST_P(OffloadControlHidlTest, AddDownstreamIPv6) {
+TEST_P(OffloadControlTestV1_0_HalStarted, AddDownstreamIPv6) {
     const hidl_string iface("dummy0");
     const hidl_string prefix("2001:db8::/64");
     const Return<void> ret = control->addDownstream(iface, prefix, ASSERT_TRUE_CALLBACK);
@@ -588,7 +382,7 @@
 }
 
 // Test addDownstream() fails given all empty parameters.
-TEST_P(OffloadControlHidlTest, AddDownstreamEmptyFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, AddDownstreamEmptyFails) {
     const hidl_string iface("");
     const hidl_string prefix("");
     const Return<void> ret = control->addDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
@@ -596,7 +390,7 @@
 }
 
 // Test addDownstream() fails given empty or non-existent interface names.
-TEST_P(OffloadControlHidlTest, AddDownstreamInvalidIfaceFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, AddDownstreamInvalidIfaceFails) {
     const hidl_string prefix("192.0.2.0/24");
     for (const auto& bogus : {"", "invalid"}) {
         SCOPED_TRACE(StringPrintf("iface='%s'", bogus));
@@ -607,7 +401,7 @@
 }
 
 // Test addDownstream() fails given unparseable prefix arguments.
-TEST_P(OffloadControlHidlTest, AddDownstreamBogusPrefixFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, AddDownstreamBogusPrefixFails) {
     const hidl_string iface("dummy0");
     for (const auto& bogus : {"", "192.0.2/24", "2001:db8/64"}) {
         SCOPED_TRACE(StringPrintf("prefix='%s'", bogus));
@@ -622,7 +416,7 @@
  */
 
 // Test removeDownstream() works given an IPv4 prefix.
-TEST_P(OffloadControlHidlTest, RemoveDownstreamIPv4) {
+TEST_P(OffloadControlTestV1_0_HalStarted, RemoveDownstreamIPv4) {
     const hidl_string iface("dummy0");
     const hidl_string prefix("192.0.2.0/24");
     // First add the downstream, otherwise removeDownstream logic can reasonably
@@ -634,7 +428,7 @@
 }
 
 // Test removeDownstream() works given an IPv6 prefix.
-TEST_P(OffloadControlHidlTest, RemoveDownstreamIPv6) {
+TEST_P(OffloadControlTestV1_0_HalStarted, RemoveDownstreamIPv6) {
     const hidl_string iface("dummy0");
     const hidl_string prefix("2001:db8::/64");
     // First add the downstream, otherwise removeDownstream logic can reasonably
@@ -646,7 +440,7 @@
 }
 
 // Test removeDownstream() fails given all empty parameters.
-TEST_P(OffloadControlHidlTest, RemoveDownstreamEmptyFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, RemoveDownstreamEmptyFails) {
     const hidl_string iface("");
     const hidl_string prefix("");
     const Return<void> ret = control->removeDownstream(iface, prefix, ASSERT_FALSE_CALLBACK);
@@ -654,7 +448,7 @@
 }
 
 // Test removeDownstream() fails given empty or non-existent interface names.
-TEST_P(OffloadControlHidlTest, RemoveDownstreamBogusIfaceFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, RemoveDownstreamBogusIfaceFails) {
     const hidl_string prefix("192.0.2.0/24");
     for (const auto& bogus : {"", "invalid"}) {
         SCOPED_TRACE(StringPrintf("iface='%s'", bogus));
@@ -665,7 +459,7 @@
 }
 
 // Test removeDownstream() fails given unparseable prefix arguments.
-TEST_P(OffloadControlHidlTest, RemoveDownstreamBogusPrefixFails) {
+TEST_P(OffloadControlTestV1_0_HalStarted, RemoveDownstreamBogusPrefixFails) {
     const hidl_string iface("dummy0");
     for (const auto& bogus : {"", "192.0.2/24", "2001:db8/64"}) {
         SCOPED_TRACE(StringPrintf("prefix='%s'", bogus));
@@ -677,21 +471,18 @@
 
-GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OffloadControlHidlTestBase);
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OffloadControlTestV1_0_HalNotStarted);
 INSTANTIATE_TEST_CASE_P(
-    PerInstance, OffloadControlHidlTestBase,
-    testing::Combine(
-        testing::ValuesIn(
-            android::hardware::getAllHalInstanceNames(IOffloadConfig::descriptor)),
-        testing::ValuesIn(
-            android::hardware::getAllHalInstanceNames(IOffloadControl::descriptor))),
-    android::hardware::PrintInstanceTupleNameToString<>);
+        PerInstance, OffloadControlTestV1_0_HalNotStarted,
+        testing::Combine(testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+                                 IOffloadConfig::descriptor)),
+                         testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+                                 IOffloadControl::descriptor))),
+        android::hardware::PrintInstanceTupleNameToString<>);
 
-GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OffloadControlHidlTest);
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(OffloadControlTestV1_0_HalStarted);
 INSTANTIATE_TEST_CASE_P(
-    PerInstance, OffloadControlHidlTest,
-    testing::Combine(
-        testing::ValuesIn(
-            android::hardware::getAllHalInstanceNames(IOffloadConfig::descriptor)),
-        testing::ValuesIn(
-            android::hardware::getAllHalInstanceNames(IOffloadControl::descriptor))),
-    android::hardware::PrintInstanceTupleNameToString<>);
-
+        PerInstance, OffloadControlTestV1_0_HalStarted,
+        testing::Combine(testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+                                 IOffloadConfig::descriptor)),
+                         testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+                                 IOffloadControl::descriptor))),
+        android::hardware::PrintInstanceTupleNameToString<>);
diff --git a/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestBase.h b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestBase.h
new file mode 100644
index 0000000..004019a
--- /dev/null
+++ b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestBase.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <OffloadControlTestUtils.h>
+#include <VtsHalHidlTargetCallbackBase.h>
+#include <android-base/stringprintf.h>
+#include <android-base/unique_fd.h>
+#include <android/hardware/tetheroffload/config/1.0/IOffloadConfig.h>
+#include <android/hardware/tetheroffload/control/1.0/IOffloadControl.h>
+#include <android/hardware/tetheroffload/control/1.0/types.h>
+#include <gtest/gtest.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <log/log.h>
+
+using android::sp;
+using android::base::StringPrintf;
+using android::base::unique_fd;
+using android::hardware::hidl_handle;
+using android::hardware::hidl_string;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::tetheroffload::config::V1_0::IOffloadConfig;
+using android::hardware::tetheroffload::control::V1_0::IOffloadControl;
+using android::hardware::tetheroffload::control::V1_0::ITetheringOffloadCallback;
+using android::hardware::tetheroffload::control::V1_0::NatTimeoutUpdate;
+using android::hardware::tetheroffload::control::V1_0::OffloadCallbackEvent;
+
+constexpr char kCallbackOnEvent[] = "onEvent";
+constexpr char kCallbackUpdateTimeout[] = "updateTimeout";
+
+enum class ExpectBoolean {
+    Ignored = -1,
+    False = 0,
+    True = 1,
+};
+
+class TetheringOffloadCallbackArgs {
+  public:
+    OffloadCallbackEvent last_event;
+    NatTimeoutUpdate last_params;
+};
+
+class OffloadControlTestBase : public testing::TestWithParam<std::tuple<std::string, std::string>> {
+  public:
+    virtual void SetUp() = 0;
+
+    virtual void TearDown();
+
+    // Called once during setup to retrieve the correct version of the
+    // IOffloadControl object.
+    virtual sp<IOffloadControl> createControl(const std::string& serviceName) = 0;
+
+    // The IOffloadConfig HAL is tested more thoroughly elsewhere. Here the
+    // class just sets everything up correctly and verifies basic readiness.
+    void setupConfigHal();
+
+    void prepareControlHal();
+
+    void initOffload(const bool expected_result);
+
+    void setupControlHal();
+
+    void stopOffload(const ExpectBoolean value);
+
+    // Callback class for both events and NAT timeout updates.
+    class TetheringOffloadCallback
+        : public testing::VtsHalHidlTargetCallbackBase<TetheringOffloadCallbackArgs>,
+          public ITetheringOffloadCallback {
+      public:
+        TetheringOffloadCallback() = default;
+        virtual ~TetheringOffloadCallback() = default;
+
+        Return<void> onEvent(OffloadCallbackEvent event) override {
+            const TetheringOffloadCallbackArgs args{.last_event = event};
+            NotifyFromCallback(kCallbackOnEvent, args);
+            return Void();
+        };
+
+        Return<void> updateTimeout(const NatTimeoutUpdate& params) override {
+            const TetheringOffloadCallbackArgs args{.last_params = params};
+            NotifyFromCallback(kCallbackUpdateTimeout, args);
+            return Void();
+        };
+    };
+
+    sp<IOffloadConfig> config;
+    sp<IOffloadControl> control;
+    sp<TetheringOffloadCallback> control_cb;
+};
\ No newline at end of file
diff --git a/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestUtils.h b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestUtils.h
new file mode 100644
index 0000000..f9e5783
--- /dev/null
+++ b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestUtils.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <linux/netlink.h>
+#include <sys/socket.h>
+
+// We use #defines here so as to get local lambda captures and error message line numbers
+#define ASSERT_TRUE_CALLBACK                                    \
+    [&](bool success, std::string errMsg) {                     \
+        ASSERT_TRUE(success) << "unexpected error: " << errMsg; \
+    }
+
+#define ASSERT_FALSE_CALLBACK \
+    [&](bool success, std::string errMsg) { ASSERT_FALSE(success) << "expected error: " << errMsg; }
+
+#define ASSERT_ZERO_BYTES_CALLBACK            \
+    [&](uint64_t rxBytes, uint64_t txBytes) { \
+        EXPECT_EQ(0ULL, rxBytes);             \
+        EXPECT_EQ(0ULL, txBytes);             \
+    }
+
+inline const sockaddr* asSockaddr(const sockaddr_nl* nladdr);
+
+int conntrackSocket(unsigned groups);
\ No newline at end of file
diff --git a/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestV1_0.h b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestV1_0.h
new file mode 100644
index 0000000..7492f8a
--- /dev/null
+++ b/tetheroffload/control/1.0/vts/functional/include/OffloadControlTestV1_0.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <OffloadControlTestBase.h>
+
+class OffloadControlTestV1_0_HalNotStarted : public OffloadControlTestBase {
+  public:
+    virtual void SetUp() override {
+        setupConfigHal();
+        // Create the tethering offload control object without calling initOffload() on it.
+        prepareControlHal();
+    }
+
+    virtual sp<IOffloadControl> createControl(const std::string& serviceName) override {
+        return IOffloadControl::getService(serviceName);
+    }
+};
+
+class OffloadControlTestV1_0_HalStarted : public OffloadControlTestV1_0_HalNotStarted {
+  public:
+    virtual void SetUp() override {
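+        // In addition to the config HAL, fully start the control HAL via
+        // setupControlHal(), which also calls initOffload().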
+        setupConfigHal();
+        setupControlHal();
+    }
+};
diff --git a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test.cpp b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test.cpp
index 76d12d7..6ad4290 100644
--- a/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test.cpp
+++ b/wifi/supplicant/1.1/vts/functional/supplicant_hidl_test.cpp
@@ -75,7 +75,7 @@
  * AddP2pInterface
  */
 TEST_P(SupplicantHidlTest, AddP2pInterface) {
-    if (isP2pOn_) return;
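+    // Skip this test when P2P is not enabled on the device.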
+    if (!isP2pOn_) return;
     ISupplicant::IfaceInfo iface_info;
     iface_info.name = getP2pIfaceName();
     iface_info.type = IfaceType::P2P;
@@ -115,7 +115,7 @@
  * RemoveP2pInterface
  */
 TEST_P(SupplicantHidlTest, RemoveP2pInterface) {
-    if (isP2pOn_) return;
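+    // Skip this test when P2P is not enabled on the device.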
+    if (!isP2pOn_) return;
     ISupplicant::IfaceInfo iface_info;
     iface_info.name = getP2pIfaceName();
     iface_info.type = IfaceType::P2P;