Allow implicit conversions for NN errors -- hal

This change allows a GeneralError to be created implicitly from a
string and an ExecutionError to be created implicitly from a string or
a GeneralError. This makes error handling more concise and removes the
need for helper functions such as makeGeneralFailure and
makeExecutionFailure.
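
As a rough illustration of what the implicit conversions enable (a
minimal sketch with simplified stand-in definitions, not the actual
nnapi/Types.h declarations):

    #include <string>
    #include <utility>

    enum class ErrorStatus { NONE, GENERAL_FAILURE };

    struct GeneralError {
        // Non-explicit constructor: a std::string converts to a
        // GeneralError without a makeGeneralFailure() wrapper.
        /*implicit*/ GeneralError(std::string msg = {},
                                  ErrorStatus c = ErrorStatus::GENERAL_FAILURE)
            : message(std::move(msg)), code(c) {}
        std::string message;
        ErrorStatus code;
    };

    struct ExecutionError {
        /*implicit*/ ExecutionError(std::string msg = {},
                                    ErrorStatus c = ErrorStatus::GENERAL_FAILURE)
            : message(std::move(msg)), code(c) {}
        // Non-explicit constructor from GeneralError: an error coming
        // out of a GeneralResult can propagate into an ExecutionResult
        // without a makeExecutionFailure() wrapper.
        /*implicit*/ ExecutionError(GeneralError error)
            : message(std::move(error.message)), code(error.code) {}
        std::string message;
        ErrorStatus code;
    };

    // Hypothetical call sites showing the terser style:
    GeneralError fromMessage() { return std::string("something went wrong"); }
    ExecutionError propagate(GeneralError e) { return e; }  // no helper needed

With conversions like these in place, NN_TRY(nn::validate(...)) and
similar expressions can be used directly inside functions returning
either result type, which is what the hunks below rely on.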

Bug: N/A
Test: mma
Change-Id: I8c5e80a2eb4f399fad64aab763fe6fa08cf8d1db
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 09691b6..5c3b8a7 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -61,8 +61,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index 9f54bb1..01b5e12 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -75,8 +75,7 @@
                << "execution failed with " << toString(status);
     }
     HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
-    return hal::utils::makeExecutionFailure(
-            convertExecutionGeneralResultsHelper(outputShapes, timing));
+    return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 29945b7..6a80b42 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -120,9 +120,8 @@
             NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
 
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index b4b6f68..60dfe52 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -317,8 +317,7 @@
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     }
@@ -326,17 +325,15 @@
     // ensure that request is ready for IPC
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
             .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
-    auto hidlRequest = NN_TRY(
-            hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     std::vector<int32_t> slots;
     std::vector<OptionalCacheHold> holds;
@@ -364,8 +361,7 @@
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
     }
@@ -427,8 +423,7 @@
     }
 
     // get result packet
-    const auto [status, outputShapes, timing] =
-            NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+    const auto [status, outputShapes, timing] = NN_TRY(mResultChannelReceiver->getBlocking());
 
     if (relocation.output) {
         relocation.output->flush();
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
index c67159e..65ec7f5 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
@@ -45,8 +45,6 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-using neuralnetworks::utils::makeExecutionFailure;
-
 constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()};
 
@@ -241,28 +239,25 @@
                  "ExecutionBurstServer getting memory, executing, and returning results");
 
     // ensure executor with cache has required memory
-    const auto cacheEntries =
-            NN_TRY(makeExecutionFailure(mMemoryCache.getCacheEntries(slotsOfPools)));
+    const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));
 
     // convert request, populating its pools
     // This code performs an unvalidated convert because the request object without its pools is
     // invalid because it is incomplete. Instead, the validation is performed after the memory pools
     // have been added to the request.
-    auto canonicalRequest =
-            NN_TRY(makeExecutionFailure(nn::unvalidatedConvert(requestWithoutPools)));
+    auto canonicalRequest = NN_TRY(nn::unvalidatedConvert(requestWithoutPools));
     CHECK(canonicalRequest.pools.empty());
     std::transform(cacheEntries.begin(), cacheEntries.end(),
                    std::back_inserter(canonicalRequest.pools),
                    [](const auto& cacheEntry) { return cacheEntry.first; });
-    NN_TRY(makeExecutionFailure(validate(canonicalRequest)));
+    NN_TRY(validate(canonicalRequest));
 
-    nn::MeasureTiming canonicalMeasure = NN_TRY(makeExecutionFailure(nn::convert(measure)));
+    nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
 
     const auto [outputShapes, timing] =
             NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
-    return std::make_pair(NN_TRY(makeExecutionFailure(convert(outputShapes))),
-                          NN_TRY(makeExecutionFailure(convert(timing))));
+    return std::make_pair(NN_TRY(convert(outputShapes)), NN_TRY(convert(timing)));
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index d0ef36e..c261184 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -95,13 +95,12 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     return executeInternal(hidlRequest, hidlMeasure, relocation);
 }