Replace nn::NativeHandle with nn::SharedHandle

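Switch the cache-file arguments of IDevice::prepareModel and
IDevice::prepareModelFromCache in the 1.0 HAL utility code from
std::vector<nn::NativeHandle> to std::vector<nn::SharedHandle>, and build
the hidl_memory in the nn::Memory conversion from a handle returned by
hal::utils::hidlHandleFromSharedHandle instead of copying the hidl_memory
to force the underlying native_handle_t to be cloned.

For reference, a minimal sketch of the kind of conversion this relies on,
assuming nn::SharedHandle is a std::shared_ptr<const nn::Handle> whose
Handle owns its fds; the real helper is
hal::utils::hidlHandleFromSharedHandle and may differ in detail:

    // Sketch only, not the actual implementation.
    // Assumed headers: <cutils/native_handle.h>, <hidl/HidlSupport.h>,
    //                  <unistd.h>, <algorithm>.
    nn::GeneralResult<hidl_handle> sketchHidlHandleFromSharedHandle(
            const nn::SharedHandle& handle) {
        if (handle == nullptr) {
            return {};  // a null shared handle maps to an empty hidl_handle
        }
        native_handle_t* nativeHandle = native_handle_create(
                static_cast<int>(handle->fds.size()),
                static_cast<int>(handle->ints.size()));
        if (nativeHandle == nullptr) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Failed to create native_handle";
        }
        for (size_t i = 0; i < handle->fds.size(); ++i) {
            // dup() so the returned hidl_handle owns its own copy of each fd
            nativeHandle->data[i] = dup(handle->fds[i].get());
        }
        std::copy(handle->ints.begin(), handle->ints.end(),
                  nativeHandle->data + handle->fds.size());
        hidl_handle hidlHandle;
        // shouldOwn=true: the hidl_handle frees the native_handle_t it wraps
        hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
        return hidlHandle;
    }
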
Bug: 160669116
Test: mma
Change-Id: I73b2b93aab6cbf37d3c145e15ee9ae45228954f1
Merged-In: I73b2b93aab6cbf37d3c145e15ee9ae45228954f1
(cherry picked from commit d4290b8bf8f9da1b7b34db3963762810258b132e)
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index 4403a57..ee103ba 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -59,13 +59,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index f301065..6cf9073 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -290,10 +290,8 @@
 }
 
 nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
-    const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
-    // Copy memory to force the native_handle_t to be copied.
-    auto copiedMemory = hidlMemory;
-    return copiedMemory;
+    return hidl_memory(memory.name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory.handle)),
+                       memory.size);
 }
 
 nn::GeneralResult<Model> convert(const nn::Model& model) {
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 8292f17..671416b 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -157,8 +157,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -181,8 +181,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
 }