Replace nn::NativeHandle with nn::SharedHandle

Device::prepareModel and Device::prepareModelFromCache in the 1.2
utils now take the model and data caches as
std::vector<nn::SharedHandle> rather than std::vector<nn::NativeHandle>.
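
For reference, a caller now passes the caches as vectors of shared
handles. The snippet below is an illustrative sketch only: the model,
device, empty caches, and chosen enum values are assumptions for the
example, not part of this change.

    // Assumes an nn::Model `model` and a device `device` (e.g. an
    // nn::SharedDevice) have been obtained elsewhere.
    const std::vector<nn::SharedHandle> modelCache;  // no cached artifacts
    const std::vector<nn::SharedHandle> dataCache;
    const nn::CacheToken token = {};                 // zero-initialized token
    const auto preparedModel = device->prepareModel(
            model, nn::ExecutionPreference::FAST_SINGLE_ANSWER,
            nn::Priority::MEDIUM, /*deadline=*/{}, modelCache, dataCache,
            token);
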
Bug: 160669116
Test: mma
Change-Id: I73b2b93aab6cbf37d3c145e15ee9ae45228954f1
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index ca236f1..517d61f 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -257,8 +257,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -286,8 +286,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
     const auto hidlToken = token;