Replace nn::NativeHandle with nn::SharedHandle
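
Adds conversion utilities to CommonUtils (hidlHandleFromSharedHandle,
sharedHandleFromNativeHandle, convertSyncFences) and switches the
ResilientDevice prepareModel* cache parameters from
std::vector<nn::NativeHandle> to std::vector<nn::SharedHandle>.

For reference, the canonical handle type these helpers convert is
assumed to look roughly like the sketch below; the shape is inferred
from the conversion code in this change, not quoted from the nnapi
headers:

    // Sketch only: assumed layout of the canonical handle type.
    struct Handle {
        std::vector<base::unique_fd> fds;  // owned file descriptors
        std::vector<int> ints;             // opaque integer values
    };
    using SharedHandle = std::shared_ptr<const Handle>;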

Bug: 160669116
Test: mma
Change-Id: I73b2b93aab6cbf37d3c145e15ee9ae45228954f1
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 2565972..c04c8df 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -19,6 +19,7 @@
 #include "HandleError.h"
 
 #include <android-base/logging.h>
+#include <android-base/unique_fd.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
@@ -247,4 +248,72 @@
     return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
+    if (handle == nullptr) {
+        return {};
+    }
+
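+    // Duplicate every fd so the hidl_handle built below owns its own
+    // copies and the input canonical handle stays valid.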
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle->fds.size());
+    for (const auto& fd : handle->fds) {
+        int dupFd = dup(fd);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+    if (nativeHandle == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
+    }
+    for (size_t i = 0; i < fds.size(); ++i) {
+        nativeHandle->data[i] = fds[i].release();
+    }
+    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+    hidl_handle hidlHandle;
+    hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
+    return hidlHandle;
+}
+
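+// Converts a native_handle_t into a canonical nn::SharedHandle. The fds are
+// duplicated so the returned handle owns them independently of the caller's.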
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+    if (handle == nullptr) {
+        return nullptr;
+    }
+
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle->numFds);
+    for (int i = 0; i < handle->numFds; ++i) {
+        int dupFd = dup(handle->data[i]);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    std::vector<int> ints(&handle->data[handle->numFds],
+                          &handle->data[handle->numFds + handle->numInts]);
+
+    return std::make_shared<const nn::Handle>(nn::Handle{
+            .fds = std::move(fds),
+            .ints = std::move(ints),
+    });
+}
+
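+// Converts canonical sync fences to HIDL handles, duplicating each underlying fd.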
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& syncFences) {
+    hidl_vec<hidl_handle> handles(syncFences.size());
+    for (size_t i = 0; i < syncFences.size(); ++i) {
+        handles[i] =
+                NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+    }
+    return handles;
+}
+
 }  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 95662d9..26025a5 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -161,8 +161,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel =
             [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
@@ -174,8 +174,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel =
             [device = std::move(self), deadline, modelCache, dataCache,
@@ -202,8 +202,8 @@
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
         bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
         nn::Priority priority, nn::OptionalTimePoint deadline,
-        const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
                      token](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -214,8 +214,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
         bool blocking, nn::OptionalTimePoint deadline,
-        const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };