Implement NNAPI canonical interfaces

This CL implements the canonical IDevice, IPreparedModel, and IBuffer
interfaces on top of the 1.0, 1.1, 1.2, and 1.3 NN HIDL HAL interfaces.
It also introduces "Resilient" adapter classes that automatically
re-acquire a handle to the underlying interface object after the
service has died and restarted.
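
Conceptually, each "Resilient" adapter owns a factory callback and
re-creates the wrapped object when it observes that the service has
died. A minimal sketch of the idea (the names, members, and death-check
mechanism below are illustrative, not the actual classes in this CL):

    #include <functional>
    #include <memory>
    #include <utility>

    template <typename Interface>
    class ResilientSketch {
      public:
        using Factory = std::function<std::shared_ptr<Interface>()>;
        using DeathCheck = std::function<bool(const Interface&)>;

        ResilientSketch(Factory makeInterface, DeathCheck hasDied)
            : kMakeInterface(std::move(makeInterface)),
              kHasDied(std::move(hasDied)),
              mObject(kMakeInterface()) {}

        std::shared_ptr<Interface> get() {
            if (mObject == nullptr || kHasDied(*mObject)) {
                // Re-acquire a handle to the recovered service.
                mObject = kMakeInterface();
            }
            return mObject;
        }

      private:
        const Factory kMakeInterface;
        const DeathCheck kHasDied;
        std::shared_ptr<Interface> mObject;
    };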

This CL also updates the conversion code to return nn::GeneralResult
instead of nn::Result; nn::GeneralResult carries an ErrorStatus code in
the error case.
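
After the change, a failing conversion reports both a message and an
ErrorStatus. Illustration only, with a hypothetical validity check and
abbreviated namespace qualifiers; the real converters are in the
Conversions.cpp diff below:

    // isValidPriority() is a placeholder for whatever check fails.
    nn::GeneralResult<nn::Priority> convertPriority(const V1_3::Priority& priority) {
        if (!isValidPriority(priority)) {
            // NN_ERROR now takes the ErrorStatus to attach to the failure.
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Invalid Priority";
        }
        return static_cast<nn::Priority>(priority);
    }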

Finally, this CL introduces a new static library,
neuralnetworks_utils_hal_service, consisting of a single function,
::android::nn::hal::getDevices, which the NNAPI runtime can use to
retrieve the HIDL services without knowing the underlying HIDL types.
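
A caller in the runtime might use the new entry point roughly as
follows (the header path and exact return type are assumptions about
this CL, not verified against it):

    #include <vector>

    #include <android-base/logging.h>
    #include <nnapi/hal/Service.h>  // assumed header for getDevices()

    namespace nn = ::android::nn;

    void logAvailableDrivers() {
        // getDevices() hides the per-version HIDL types behind nn::SharedDevice.
        const std::vector<nn::SharedDevice> devices = nn::hal::getDevices();
        for (const nn::SharedDevice& device : devices) {
            LOG(INFO) << "Found NN HAL service: " << device->getName();
        }
    }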

Bug: 160668438
Test: mma
Test: NeuralNetworksTest_static
Change-Id: Iec6ae739df196b4034ffb35ea76781fd541ffec3
Merged-In: Iec6ae739df196b4034ffb35ea76781fd541ffec3
(cherry picked from commit 3670c385c4b12aef975ab67e5d2b0f5fe79134c2)
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 4c54e3b..0dc0785 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -27,6 +27,7 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <chrono>
@@ -79,7 +80,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convertVec(const hidl_vec<Type>& arguments) {
     std::vector<ConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
@@ -89,25 +90,25 @@
 }
 
 template <typename Type>
-Result<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
+GeneralResult<std::vector<ConvertOutput<Type>>> convert(const hidl_vec<Type>& arguments) {
     return convertVec(arguments);
 }
 
 }  // anonymous namespace
 
-Result<OperandType> convert(const hal::V1_3::OperandType& operandType) {
+GeneralResult<OperandType> convert(const hal::V1_3::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-Result<OperationType> convert(const hal::V1_3::OperationType& operationType) {
+GeneralResult<OperationType> convert(const hal::V1_3::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-Result<Priority> convert(const hal::V1_3::Priority& priority) {
+GeneralResult<Priority> convert(const hal::V1_3::Priority& priority) {
     return static_cast<Priority>(priority);
 }
 
-Result<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
+GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
@@ -115,13 +116,14 @@
                 return !maybeType.has_value() ? false : validOperandType(maybeType.value());
             });
     if (!validOperandTypes) {
-        return NN_ERROR()
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                << "Invalid OperandType when converting OperandPerformance in Capabilities";
     }
 
     auto operandPerformance = NN_TRY(convert(capabilities.operandPerformance));
-    auto table =
-            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+    auto table = NN_TRY(hal::utils::makeGeneralFailure(
+            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
+            nn::ErrorStatus::GENERAL_FAILURE));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar =
@@ -134,7 +136,7 @@
     };
 }
 
-Result<Capabilities::OperandPerformance> convert(
+GeneralResult<Capabilities::OperandPerformance> convert(
         const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -142,7 +144,7 @@
     };
 }
 
-Result<Operation> convert(const hal::V1_3::Operation& operation) {
+GeneralResult<Operation> convert(const hal::V1_3::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -150,11 +152,11 @@
     };
 }
 
-Result<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
+GeneralResult<Operand::LifeTime> convert(const hal::V1_3::OperandLifeTime& operandLifeTime) {
     return static_cast<Operand::LifeTime>(operandLifeTime);
 }
 
-Result<Operand> convert(const hal::V1_3::Operand& operand) {
+GeneralResult<Operand> convert(const hal::V1_3::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -166,7 +168,7 @@
     };
 }
 
-Result<Model> convert(const hal::V1_3::Model& model) {
+GeneralResult<Model> convert(const hal::V1_3::Model& model) {
     return Model{
             .main = NN_TRY(convert(model.main)),
             .referenced = NN_TRY(convert(model.referenced)),
@@ -177,7 +179,7 @@
     };
 }
 
-Result<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
+GeneralResult<Model::Subgraph> convert(const hal::V1_3::Subgraph& subgraph) {
     auto operations = NN_TRY(convert(subgraph.operations));
 
     // Verify number of consumers.
@@ -186,9 +188,10 @@
     CHECK(subgraph.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < subgraph.operands.size(); ++i) {
         if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
-            return NN_ERROR() << "Invalid numberOfConsumers for operand " << i << ", expected "
-                              << numberOfConsumers[i] << " but found "
-                              << subgraph.operands[i].numberOfConsumers;
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Invalid numberOfConsumers for operand " << i << ", expected "
+                   << numberOfConsumers[i] << " but found "
+                   << subgraph.operands[i].numberOfConsumers;
         }
     }
 
@@ -200,11 +203,11 @@
     };
 }
 
-Result<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
+GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {
     return BufferDesc{.dimensions = bufferDesc.dimensions};
 }
 
-Result<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
+GeneralResult<BufferRole> convert(const hal::V1_3::BufferRole& bufferRole) {
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
@@ -212,7 +215,7 @@
     };
 }
 
-Result<Request> convert(const hal::V1_3::Request& request) {
+GeneralResult<Request> convert(const hal::V1_3::Request& request) {
     return Request{
             .inputs = NN_TRY(convert(request.inputs)),
             .outputs = NN_TRY(convert(request.outputs)),
@@ -220,7 +223,7 @@
     };
 }
 
-Result<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
+GeneralResult<Request::MemoryPool> convert(const hal::V1_3::Request::MemoryPool& memoryPool) {
     using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
     switch (memoryPool.getDiscriminator()) {
         case Discriminator::hidlMemory:
@@ -228,15 +231,16 @@
         case Discriminator::token:
             return static_cast<Request::MemoryDomainToken>(memoryPool.token());
     }
-    return NN_ERROR() << "Invalid Request::MemoryPool discriminator "
-                      << underlyingType(memoryPool.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid Request::MemoryPool discriminator "
+           << underlyingType(memoryPool.getDiscriminator());
 }
 
-Result<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
+GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
     constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count();
-    const auto makeTimePoint = [](uint64_t count) -> Result<OptionalTimePoint> {
+    const auto makeTimePoint = [](uint64_t count) -> GeneralResult<OptionalTimePoint> {
         if (count > kTimePointMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimePoint because the count exceeds the max";
         }
         const auto nanoseconds = std::chrono::nanoseconds{count};
@@ -250,16 +254,17 @@
         case Discriminator::nanosecondsSinceEpoch:
             return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch());
     }
-    return NN_ERROR() << "Invalid OptionalTimePoint discriminator "
-                      << underlyingType(optionalTimePoint.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimePoint discriminator "
+           << underlyingType(optionalTimePoint.getDiscriminator());
 }
 
-Result<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalTimeoutDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
     constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count();
-    const auto makeTimeoutDuration = [](uint64_t count) -> Result<OptionalTimeoutDuration> {
+    const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult<OptionalTimeoutDuration> {
         if (count > kTimeoutDurationMaxCount) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimeoutDuration because the count exceeds the max";
         }
         return TimeoutDuration{count};
@@ -272,11 +277,12 @@
         case Discriminator::nanoseconds:
             return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds());
     }
-    return NN_ERROR() << "Invalid OptionalTimeoutDuration discriminator "
-                      << underlyingType(optionalTimeoutDuration.getDiscriminator());
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid OptionalTimeoutDuration discriminator "
+           << underlyingType(optionalTimeoutDuration.getDiscriminator());
 }
 
-Result<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
+GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& status) {
     switch (status) {
         case hal::V1_3::ErrorStatus::NONE:
         case hal::V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -289,10 +295,11 @@
         case hal::V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
             return static_cast<ErrorStatus>(status);
     }
-    return NN_ERROR() << "Invalid ErrorStatus " << underlyingType(status);
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+           << "Invalid ErrorStatus " << underlyingType(status);
 }
 
-Result<std::vector<BufferRole>> convert(
+GeneralResult<std::vector<BufferRole>> convert(
         const hardware::hidl_vec<hal::V1_3::BufferRole>& bufferRoles) {
     return convertVec(bufferRoles);
 }
@@ -304,32 +311,32 @@
 
 using utils::convert;
 
-nn::Result<V1_0::PerformanceInfo> convert(
+nn::GeneralResult<V1_0::PerformanceInfo> convert(
         const nn::Capabilities::PerformanceInfo& performanceInfo) {
     return V1_0::utils::convert(performanceInfo);
 }
 
-nn::Result<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
+nn::GeneralResult<V1_0::DataLocation> convert(const nn::DataLocation& dataLocation) {
     return V1_0::utils::convert(dataLocation);
 }
 
-nn::Result<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
+nn::GeneralResult<hidl_vec<uint8_t>> convert(const nn::Model::OperandValues& operandValues) {
     return V1_0::utils::convert(operandValues);
 }
 
-nn::Result<hidl_memory> convert(const nn::Memory& memory) {
+nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
     return V1_0::utils::convert(memory);
 }
 
-nn::Result<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
+nn::GeneralResult<V1_0::RequestArgument> convert(const nn::Request::Argument& argument) {
     return V1_0::utils::convert(argument);
 }
 
-nn::Result<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
+nn::GeneralResult<V1_2::Operand::ExtraParams> convert(const nn::Operand::ExtraParams& extraParams) {
     return V1_2::utils::convert(extraParams);
 }
 
-nn::Result<V1_2::Model::ExtensionNameAndPrefix> convert(
+nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> convert(
         const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return V1_2::utils::convert(extensionNameAndPrefix);
 }
@@ -338,7 +345,7 @@
 using ConvertOutput = std::decay_t<decltype(convert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convertVec(const std::vector<Type>& arguments) {
     hidl_vec<ConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(convert(arguments[i]));
@@ -347,42 +354,41 @@
 }
 
 template <typename Type>
-nn::Result<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
+nn::GeneralResult<hidl_vec<ConvertOutput<Type>>> convert(const std::vector<Type>& arguments) {
     return convertVec(arguments);
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Memory& memory) {
     Request::MemoryPool ret;
     ret.hidlMemory(NN_TRY(convert(memory)));
     return ret;
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::Request::MemoryDomainToken& token) {
     Request::MemoryPool ret;
     ret.token(underlyingType(token));
     return ret;
 }
 
-nn::Result<Request::MemoryPool> makeMemoryPool(
-        const std::shared_ptr<const nn::IBuffer>& /*buffer*/) {
-    return NN_ERROR() << "Unable to make memory pool from IBuffer";
+nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::SharedBuffer& /*buffer*/) {
+    return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Unable to make memory pool from IBuffer";
 }
 
 }  // anonymous namespace
 
-nn::Result<OperandType> convert(const nn::OperandType& operandType) {
+nn::GeneralResult<OperandType> convert(const nn::OperandType& operandType) {
     return static_cast<OperandType>(operandType);
 }
 
-nn::Result<OperationType> convert(const nn::OperationType& operationType) {
+nn::GeneralResult<OperationType> convert(const nn::OperationType& operationType) {
     return static_cast<OperationType>(operationType);
 }
 
-nn::Result<Priority> convert(const nn::Priority& priority) {
+nn::GeneralResult<Priority> convert(const nn::Priority& priority) {
     return static_cast<Priority>(priority);
 }
 
-nn::Result<Capabilities> convert(const nn::Capabilities& capabilities) {
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
     std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
     operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
     std::copy_if(capabilities.operandPerformance.asVector().begin(),
@@ -403,7 +409,7 @@
     };
 }
 
-nn::Result<Capabilities::OperandPerformance> convert(
+nn::GeneralResult<Capabilities::OperandPerformance> convert(
         const nn::Capabilities::OperandPerformance& operandPerformance) {
     return Capabilities::OperandPerformance{
             .type = NN_TRY(convert(operandPerformance.type)),
@@ -411,7 +417,7 @@
     };
 }
 
-nn::Result<Operation> convert(const nn::Operation& operation) {
+nn::GeneralResult<Operation> convert(const nn::Operation& operation) {
     return Operation{
             .type = NN_TRY(convert(operation.type)),
             .inputs = operation.inputs,
@@ -419,14 +425,15 @@
     };
 }
 
-nn::Result<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
+nn::GeneralResult<OperandLifeTime> convert(const nn::Operand::LifeTime& operandLifeTime) {
     if (operandLifeTime == nn::Operand::LifeTime::POINTER) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
     return static_cast<OperandLifeTime>(operandLifeTime);
 }
 
-nn::Result<Operand> convert(const nn::Operand& operand) {
+nn::GeneralResult<Operand> convert(const nn::Operand& operand) {
     return Operand{
             .type = NN_TRY(convert(operand.type)),
             .dimensions = operand.dimensions,
@@ -439,9 +446,10 @@
     };
 }
 
-nn::Result<Model> convert(const nn::Model& model) {
+nn::GeneralResult<Model> convert(const nn::Model& model) {
     if (!hal::utils::hasNoPointerData(model)) {
-        return NN_ERROR() << "Model cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Model cannot be converted because it contains pointer-based memory";
     }
 
     return Model{
@@ -454,7 +462,7 @@
     };
 }
 
-nn::Result<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
+nn::GeneralResult<Subgraph> convert(const nn::Model::Subgraph& subgraph) {
     auto operands = NN_TRY(convert(subgraph.operands));
 
     // Update number of consumers.
@@ -473,11 +481,11 @@
     };
 }
 
-nn::Result<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
+nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
     return BufferDesc{.dimensions = bufferDesc.dimensions};
 }
 
-nn::Result<BufferRole> convert(const nn::BufferRole& bufferRole) {
+nn::GeneralResult<BufferRole> convert(const nn::BufferRole& bufferRole) {
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
@@ -485,9 +493,10 @@
     };
 }
 
-nn::Result<Request> convert(const nn::Request& request) {
+nn::GeneralResult<Request> convert(const nn::Request& request) {
     if (!hal::utils::hasNoPointerData(request)) {
-        return NN_ERROR() << "Request cannot be converted because it contains pointer-based memory";
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "Request cannot be converted because it contains pointer-based memory";
     }
 
     return Request{
@@ -497,30 +506,31 @@
     };
 }
 
-nn::Result<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
+nn::GeneralResult<Request::MemoryPool> convert(const nn::Request::MemoryPool& memoryPool) {
     return std::visit([](const auto& o) { return makeMemoryPool(o); }, memoryPool);
 }
 
-nn::Result<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
+nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint) {
     OptionalTimePoint ret;
     if (optionalTimePoint.has_value()) {
         const auto count = optionalTimePoint.value().time_since_epoch().count();
         if (count < 0) {
-            return NN_ERROR() << "Unable to convert OptionalTimePoint because time since epoch "
-                                 "count is negative";
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+                   << "Unable to convert OptionalTimePoint because time since epoch count is "
+                      "negative";
         }
         ret.nanosecondsSinceEpoch(count);
     }
     return ret;
 }
 
-nn::Result<OptionalTimeoutDuration> convert(
+nn::GeneralResult<OptionalTimeoutDuration> convert(
         const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
     OptionalTimeoutDuration ret;
     if (optionalTimeoutDuration.has_value()) {
         const auto count = optionalTimeoutDuration.value().count();
         if (count < 0) {
-            return NN_ERROR()
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                    << "Unable to convert OptionalTimeoutDuration because count is negative";
         }
         ret.nanoseconds(count);
@@ -528,7 +538,7 @@
     return ret;
 }
 
-nn::Result<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
+nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus) {
     switch (errorStatus) {
         case nn::ErrorStatus::NONE:
         case nn::ErrorStatus::DEVICE_UNAVAILABLE:
@@ -545,7 +555,7 @@
     }
 }
 
-nn::Result<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
+nn::GeneralResult<hidl_vec<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
     return convertVec(bufferRoles);
 }