Clean up NN callback error handling

This CL introduces a new class template, CallbackValue, to handle HIDL
"return value" callbacks in a terser and more readable way.

This CL also introduces a new macro, HANDLE_HAL_STATUS, which returns
early from the current function when the HAL status indicates an error
and lets the caller append a more descriptive error message.
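
One way such a macro can be written (a sketch, not necessarily the
exact implementation): convert the HAL status to the canonical
nn::ErrorStatus, fall through when it is NONE, and otherwise return
early with an NN_ERROR that the caller can stream more context onto:

    // Sketch only: relies on nn::convert(status) and NN_ERROR from
    // the existing NN utils.
    #define HANDLE_HAL_STATUS(status)                                       \
        if (const auto canonical = ::android::nn::convert(status).value_or( \
                    ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
            canonical == ::android::nn::ErrorStatus::NONE) {                \
        } else                                                              \
            return NN_ERROR(canonical)

    // Usage, as in this CL:
    //     HANDLE_HAL_STATUS(status) << "model preparation failed with "
    //                               << toString(status);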

Finally, this CL changes the behavior of synchronous execution. Prior
to this CL, IPreparedModel fell back to asynchronous execution when
synchronous execution was allowed but failed. With this change, such a
failure is returned directly instead.
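
Concretely, the new control flow in PreparedModel::execute (taken from
the diff below) is:

    // kExecuteSynchronously is fixed when the PreparedModel is created.
    auto result = kExecuteSynchronously
                          ? executeSynchronously(hidlRequest, hidlMeasure)
                          : executeAsynchronously(hidlRequest, hidlMeasure);
    auto [outputShapes, timing] = NN_TRY(std::move(result));  // fail fast, no fallback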

Bug: 173084343
Test: mma
Change-Id: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
Merged-In: I62714a932e71dfc77401bbcb9eaaaf3d94fb9707
(cherry picked from commit 98ed9baf5de85599847b2b2f53585243c3b7b776)
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
index 1162bc3..ba3c1ba 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -36,6 +36,19 @@
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+// Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
+// function returns with a non-null nn::SharedPreparedModel with a feature level of
+// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel);
+
+// Converts the results of IDevice::execute* to the NN canonical format. On success, this function
+// returns with the output shapes and the timing information. On failure, this function returns with
+// the appropriate nn::ExecutionError.
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing);
+
+// A HIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
 class PreparedModelCallback final : public IPreparedModelCallback,
                                     public hal::utils::IProtectedCallback {
   public:
@@ -51,11 +64,10 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
+// A HIDL callback class to receive the results of IDevice::execute_1_2 asynchronously.
 class ExecutionCallback final : public IExecutionCallback, public hal::utils::IProtectedCallback {
   public:
     using Data = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
@@ -69,8 +81,6 @@
     Data get();
 
   private:
-    void notifyInternal(Data result);
-
     hal::utils::TransferValue<Data> mData;
 };
 
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index 5dcbc0b..6fd1337 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -97,6 +97,12 @@
 nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
 nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference);
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_CONVERSIONS_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index 79c3b04..b4bef5e 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -37,10 +37,21 @@
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device);
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device);
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device);
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+// Retrieves the version string from the provided device object. On failure, this function returns
+// with the appropriate nn::GeneralError.
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device);
+
+// Retrieves the device type from the provided device object. On failure, this function returns with
+// the appropriate nn::GeneralError.
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device);
+
+// Retrieves the extensions supported by the provided device object. On failure, this function
+// returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device);
+
+// Retrieves the number of model cache files and data cache files needed by the provided device
+// object. On failure, this function returns with the appropriate nn::GeneralError.
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device);
 
 // Class that adapts V1_2::IDevice to nn::IDevice.
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 8ed5ca7..6a56a82 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -41,10 +41,10 @@
 
   public:
     static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
-            sp<V1_2::IPreparedModel> preparedModel);
+            sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously);
 
-    PreparedModel(PrivateConstructorTag tag, sp<V1_2::IPreparedModel> preparedModel,
-                  hal::utils::DeathHandler deathHandler);
+    PreparedModel(PrivateConstructorTag tag, bool executeSynchronously,
+                  sp<V1_2::IPreparedModel> preparedModel, hal::utils::DeathHandler deathHandler);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
@@ -65,6 +65,7 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeAsynchronously(
             const V1_0::Request& request, MeasureTiming measure) const;
 
+    const bool kExecuteSynchronously;
     const sp<V1_2::IPreparedModel> kPreparedModel;
     const hal::utils::DeathHandler kDeathHandler;
 };
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 70149a2..c289fc8 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -30,6 +30,8 @@
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
+using CacheToken = hidl_array<uint8_t, static_cast<size_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
 constexpr auto kDefaultMesaureTiming = MeasureTiming::NO;
 constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index ab3e0ca..fefa122 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -27,6 +27,7 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Callbacks.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
 #include <nnapi/hal/CommonUtils.h>
@@ -42,104 +43,73 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<V1_0::IPreparedModel>& preparedModel) {
-    return NN_TRY(V1_0::utils::PreparedModel::create(preparedModel));
-}
-
-nn::GeneralResult<nn::SharedPreparedModel> convertPreparedModel(
-        const sp<IPreparedModel>& preparedModel) {
-    return NN_TRY(utils::PreparedModel::create(preparedModel));
-}
-
 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
                                      const Timing& timing) {
     return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionGeneralResults(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+}  // namespace
+
+nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
+        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
+        V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+    if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        auto canonicalOutputShapes =
+                nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << toString(status);
+    }
+    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     return hal::utils::makeExecutionFailure(
             convertExecutionGeneralResultsHelper(outputShapes, timing));
 }
 
-}  // namespace
-
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const sp<V1_0::IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(V1_0::utils::prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
                                                const sp<IPreparedModel>& preparedModel) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "preparedModel failed with " << toString(status));
-    } else if (preparedModel == nullptr) {
-        notifyInternal(NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                       << "Returned preparedModel is nullptr");
-    } else {
-        notifyInternal(convertPreparedModel(preparedModel));
-    }
+    mData.put(prepareModelCallback(status, preparedModel));
     return Void();
 }
 
 void PreparedModelCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 PreparedModelCallback::Data PreparedModelCallback::get() {
     return mData.take();
 }
 
-void PreparedModelCallback::notifyInternal(PreparedModelCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 // ExecutionCallback methods begin here
 
 Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal({});
-    }
+    mData.put(V1_0::utils::executionCallback(status));
     return Void();
 }
 
 Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
                                            const hidl_vec<OutputShape>& outputShapes,
                                            const Timing& timing) {
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        notifyInternal(NN_ERROR(canonical) << "execute failed with " << toString(status));
-    } else {
-        notifyInternal(convertExecutionGeneralResults(outputShapes, timing));
-    }
+    mData.put(executionCallback(status, outputShapes, timing));
     return Void();
 }
 
 void ExecutionCallback::notifyAsDeadObject() {
-    notifyInternal(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
+    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
 }
 
 ExecutionCallback::Data ExecutionCallback::get() {
     return mData.take();
 }
 
-void ExecutionCallback::notifyInternal(ExecutionCallback::Data result) {
-    mData.put(std::move(result));
-}
-
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 3790d1f..062f6f7 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -26,6 +26,7 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 
@@ -622,4 +623,21 @@
     return validatedConvert(outputShapes);
 }
 
+nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
+    return V1_1::utils::convert(deviceStatus);
+}
+
+nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
+    return V1_1::utils::convert(request);
+}
+
+nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
+    return V1_1::utils::convert(status);
+}
+
+nn::GeneralResult<V1_1::ExecutionPreference> convert(
+        const nn::ExecutionPreference& executionPreference) {
+    return V1_1::utils::convert(executionPreference);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index 6cca841..9fe0de2 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -47,109 +47,102 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-nn::GeneralResult<nn::Capabilities> initCapabilities(V1_2::IDevice* device) {
+nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
+                                                         const Capabilities& capabilities) {
+    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    return nn::convert(capabilities);
+}
+
+nn::GeneralResult<std::string> versionStringCallback(V1_0::ErrorStatus status,
+                                                     const hidl_string& versionString) {
+    HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status);
+    return versionString;
+}
+
+nn::GeneralResult<nn::DeviceType> deviceTypeCallback(V1_0::ErrorStatus status,
+                                                     DeviceType deviceType) {
+    HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status);
+    return nn::convert(deviceType);
+}
+
+nn::GeneralResult<std::vector<nn::Extension>> supportedExtensionsCallback(
+        V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
+    HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status);
+    return nn::convert(extensions);
+}
+
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> numberOfCacheFilesNeededCallback(
+        V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+    HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status);
+    if (numModelCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater "
+                             "than allowed max ("
+                          << numModelCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    if (numDataCache > nn::kMaxNumberOfCacheFiles) {
+        return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numDataCache files greater "
+                             "than allowed max ("
+                          << numDataCache << " vs " << nn::kMaxNumberOfCacheFiles << ")";
+    }
+    return std::make_pair(numModelCache, numDataCache);
+}
+
+nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::Capabilities> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                 << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const Capabilities& capabilities) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getCapabilities_1_2 failed with " << toString(status);
-        } else {
-            result = nn::convert(capabilities);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(capabilitiesCallback);
 
     const auto ret = device->getCapabilities_1_2(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 }  // namespace
 
-nn::GeneralResult<std::string> initVersionString(V1_2::IDevice* device) {
+nn::GeneralResult<std::string> getVersionStringFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::string> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                            << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_string& versionString) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getVersionString failed with " << toString(status);
-        } else {
-            result = versionString;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(versionStringCallback);
 
     const auto ret = device->getVersionString(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<nn::DeviceType> initDeviceType(V1_2::IDevice* device) {
+nn::GeneralResult<nn::DeviceType> getDeviceTypeFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<nn::DeviceType> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                               << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, DeviceType deviceType) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getDeviceType failed with " << toString(status);
-        } else {
-            result = nn::convert(deviceType);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(deviceTypeCallback);
 
     const auto ret = device->getType(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::vector<nn::Extension>> initExtensions(V1_2::IDevice* device) {
+nn::GeneralResult<std::vector<nn::Extension>> getSupportedExtensionsFrom(V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::vector<nn::Extension>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "getExtensions failed with " << toString(status);
-        } else {
-            result = nn::convert(extensions);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(supportedExtensionsCallback);
 
     const auto ret = device->getSupportedExtensions(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
-nn::GeneralResult<std::pair<uint32_t, uint32_t>> initNumberOfCacheFilesNeeded(
+nn::GeneralResult<std::pair<uint32_t, uint32_t>> getNumberOfCacheFilesNeededFrom(
         V1_2::IDevice* device) {
     CHECK(device != nullptr);
 
-    nn::GeneralResult<std::pair<uint32_t, uint32_t>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, uint32_t numModelCache,
-                              uint32_t numDataCache) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getNumberOfCacheFilesNeeded failed with " << toString(status);
-        } else {
-            result = std::make_pair(numModelCache, numDataCache);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(numberOfCacheFilesNeededCallback);
 
     const auto ret = device->getNumberOfCacheFilesNeeded(cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<std::shared_ptr<const Device>> Device::create(std::string name,
@@ -163,11 +156,11 @@
                << "V1_2::utils::Device::create must have non-null device";
     }
 
-    auto versionString = NN_TRY(initVersionString(device.get()));
-    const auto deviceType = NN_TRY(initDeviceType(device.get()));
-    auto extensions = NN_TRY(initExtensions(device.get()));
-    auto capabilities = NN_TRY(initCapabilities(device.get()));
-    const auto numberOfCacheFilesNeeded = NN_TRY(initNumberOfCacheFilesNeeded(device.get()));
+    auto versionString = NN_TRY(getVersionStringFrom(device.get()));
+    const auto deviceType = NN_TRY(getDeviceTypeFrom(device.get()));
+    auto extensions = NN_TRY(getSupportedExtensionsFrom(device.get()));
+    auto capabilities = NN_TRY(getCapabilitiesFrom(device.get()));
+    const auto numberOfCacheFilesNeeded = NN_TRY(getNumberOfCacheFilesNeededFrom(device.get()));
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(device));
     return std::make_shared<const Device>(
@@ -232,28 +225,12 @@
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
 
-    nn::GeneralResult<std::vector<bool>> result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                                                  << "uninitialized";
-    auto cb = [&result, &model](V1_0::ErrorStatus status,
-                                const hidl_vec<bool>& supportedOperations) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical)
-                     << "getSupportedOperations_1_2 failed with " << toString(status);
-        } else if (supportedOperations.size() != model.main.operations.size()) {
-            result = NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                     << "getSupportedOperations_1_2 returned vector of size "
-                     << supportedOperations.size() << " but expected "
-                     << model.main.operations.size();
-        } else {
-            result = supportedOperations;
-        }
-    };
+    auto cb = hal::utils::CallbackValue(V1_0::utils::supportedOperationsCallback);
 
     const auto ret = kDevice->getSupportedOperations_1_2(hidlModel, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
@@ -266,10 +243,10 @@
             NN_TRY(hal::utils::flushDataFromPointerToShared(&model, &maybeModelInShared));
 
     const auto hidlModel = NN_TRY(convert(modelInShared));
-    const auto hidlPreference = NN_TRY(V1_1::utils::convert(preference));
+    const auto hidlPreference = NN_TRY(convert(preference));
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -277,10 +254,7 @@
     const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
                                                hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModel_1_2 failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -290,17 +264,14 @@
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
-    const auto hidlToken = token;
+    const auto hidlToken = CacheToken{token};
 
     const auto cb = sp<PreparedModelCallback>::make();
     const auto scoped = kDeathHandler.protectCallback(cb.get());
 
     const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "prepareModelFromCache failed with " << toString(status);
-    }
+    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index b422ced..6d00082 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -41,54 +41,33 @@
 // lifetimes across processes and for protecting asynchronous calls across HIDL.
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
-namespace {
-
-nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-convertExecutionResultsHelper(const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
-}
-
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
-        const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
-    return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
-}
-
-}  // namespace
 
 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        sp<V1_2::IPreparedModel> preparedModel) {
+        sp<V1_2::IPreparedModel> preparedModel, bool executeSynchronously) {
     if (preparedModel == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
-               << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
+        return NN_ERROR() << "V1_2::utils::PreparedModel::create must have non-null preparedModel";
     }
 
     auto deathHandler = NN_TRY(hal::utils::DeathHandler::create(preparedModel));
-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
-                                                 std::move(deathHandler));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, executeSynchronously,
+                                                 std::move(preparedModel), std::move(deathHandler));
 }
 
-PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, sp<V1_2::IPreparedModel> preparedModel,
+PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/, bool executeSynchronously,
+                             sp<V1_2::IPreparedModel> preparedModel,
                              hal::utils::DeathHandler deathHandler)
-    : kPreparedModel(std::move(preparedModel)), kDeathHandler(std::move(deathHandler)) {}
+    : kExecuteSynchronously(executeSynchronously),
+      kPreparedModel(std::move(preparedModel)),
+      kDeathHandler(std::move(deathHandler)) {}
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeSynchronously(const V1_0::Request& request, MeasureTiming measure) const {
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const auto cb = [&result](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
-                              const Timing& timing) {
-        if (status != V1_0::ErrorStatus::NONE) {
-            const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-            result = NN_ERROR(canonical) << "executeSynchronously failed with " << toString(status);
-        } else {
-            result = convertExecutionResults(outputShapes, timing);
-        }
-    };
+    auto cb = hal::utils::CallbackValue(executionCallback);
 
     const auto ret = kPreparedModel->executeSynchronously(request, measure, cb);
     HANDLE_TRANSPORT_FAILURE(ret);
 
-    return result;
+    return cb.take();
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
@@ -98,9 +77,8 @@
 
     const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    if (status != V1_0::ErrorStatus::NONE) {
-        const auto canonical = nn::convert(status).value_or(nn::ErrorStatus::GENERAL_FAILURE);
-        return NN_ERROR(canonical) << "execute failed with " << toString(status);
+    if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
@@ -115,31 +93,17 @@
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
             hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
 
-    const auto hidlRequest =
-            NN_TRY(hal::utils::makeExecutionFailure(V1_0::utils::convert(requestInShared)));
+    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
 
-    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> result =
-            NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "uninitialized";
-    const bool preferSynchronous = true;
+    auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
+                                        : executeAsynchronously(hidlRequest, hidlMeasure);
+    auto [outputShapes, timing] = NN_TRY(std::move(result));
 
-    // Execute synchronously if allowed.
-    if (preferSynchronous) {
-        result = executeSynchronously(hidlRequest, hidlMeasure);
-    }
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
 
-    // Run asymchronous execution if execution has not already completed.
-    if (!result.has_value()) {
-        result = executeAsynchronously(hidlRequest, hidlMeasure);
-    }
-
-    // Flush output buffers if suxcessful execution.
-    if (result.has_value()) {
-        NN_TRY(hal::utils::makeExecutionFailure(
-                hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-    }
-
-    return result;
+    return std::make_pair(std::move(outputShapes), timing);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -154,7 +118,7 @@
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
-    sp<V1_0::IPreparedModel> resource = kPreparedModel;
+    sp<V1_2::IPreparedModel> resource = kPreparedModel;
     return resource;
 }