Merge "Audio: Rearrange types in V7"
diff --git a/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp b/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
index 0f349a4..092038b 100644
--- a/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
+++ b/bluetooth/audio/2.1/default/BluetoothAudioProvider.cpp
@@ -55,12 +55,17 @@
     const V2_0::AudioConfiguration& audioConfig, startSession_cb _hidl_cb) {
   AudioConfiguration audioConfig_2_1;
 
-  audioConfig_2_1.codecConfig() = audioConfig.codecConfig();
-  audioConfig_2_1.pcmConfig() = {
-      .sampleRate = static_cast<SampleRate>(audioConfig.pcmConfig().sampleRate),
-      .channelMode = audioConfig.pcmConfig().channelMode,
-      .bitsPerSample = audioConfig.pcmConfig().bitsPerSample,
-      .dataIntervalUs = 0};
+  if (audioConfig.getDiscriminator() ==
+      V2_0::AudioConfiguration::hidl_discriminator::pcmConfig) {
+    audioConfig_2_1.pcmConfig() = {
+        .sampleRate =
+            static_cast<SampleRate>(audioConfig.pcmConfig().sampleRate),
+        .channelMode = audioConfig.pcmConfig().channelMode,
+        .bitsPerSample = audioConfig.pcmConfig().bitsPerSample,
+        .dataIntervalUs = 0};
+  } else {
+    audioConfig_2_1.codecConfig() = audioConfig.codecConfig();
+  }
 
   return startSession_2_1(hostIf, audioConfig_2_1, _hidl_cb);
 }
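
In a HIDL safe_union, calling a non-const member accessor switches the active member, so the removed code's unconditional writes left every forwarded configuration as PCM and read pcmConfig() even when the source 2.0 union held a codecConfig. The fix inspects the source discriminator and writes only the matching member. A minimal sketch of the mutator semantics, assuming the generated 2.1 headers are available (illustration only, not part of the patch):

    #include <android/hardware/bluetooth/audio/2.1/types.h>

    using ::android::hardware::bluetooth::audio::V2_1::AudioConfiguration;

    void illustrateMutatorSemantics() {
      AudioConfiguration cfg;
      cfg.codecConfig() = {};  // active member: codecConfig
      cfg.pcmConfig() = {};    // active member switches to pcmConfig
      // cfg.getDiscriminator() == AudioConfiguration::hidl_discriminator::pcmConfig
      // -- the last mutator wins, which is the bug the hunk above removes.
    }
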
diff --git a/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp b/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
index c0ec907..37d1281 100644
--- a/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
+++ b/bluetooth/audio/2.1/vts/functional/VtsHalBluetoothAudioV2_1TargetTest.cpp
@@ -1043,6 +1043,7 @@
     } else {
       EXPECT_EQ(status, BluetoothAudioStatus::UNSUPPORTED_CODEC_CONFIGURATION);
       EXPECT_FALSE(dataMQ.isHandleValid());
+      tempDataMQ.reset(nullptr);
     }
   };
   android::hardware::bluetooth::audio::V2_1::AudioConfiguration audio_config =
@@ -1064,6 +1065,8 @@
           ASSERT_TRUE(hidl_retval.isOk());
           if (is_codec_config_valid) {
             EXPECT_TRUE(tempDataMQ != nullptr && tempDataMQ->isValid());
+          } else {
+            EXPECT_TRUE(tempDataMQ == nullptr);
           }
           EXPECT_TRUE(audio_provider_2_1_->endSession().isOk());
         }  // uint32_t (data interval in microseconds)
@@ -1132,6 +1135,7 @@
     } else {
       EXPECT_EQ(status, BluetoothAudioStatus::UNSUPPORTED_CODEC_CONFIGURATION);
       EXPECT_FALSE(dataMQ.isHandleValid());
+      tempDataMQ.reset(nullptr);
     }
   };
   android::hardware::bluetooth::audio::V2_1::AudioConfiguration audio_config =
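
Both test hunks fix the same latent issue: tempDataMQ is an out-parameter reused across loop iterations, so on the unsupported-configuration path it silently kept the queue from a previous successful iteration, and the new `tempDataMQ == nullptr` expectation would be meaningless without clearing it. A standalone sketch of the stale-out-parameter pattern, with hypothetical names (illustration only):

    #include <cassert>
    #include <memory>

    std::unique_ptr<int> out;  // reused across iterations, like tempDataMQ

    void callback(bool supported) {
      if (supported) {
        out = std::make_unique<int>(42);
      } else {
        out.reset(nullptr);  // without this, 'out' keeps the previous value
      }
    }

    int main() {
      callback(/*supported=*/true);
      callback(/*supported=*/false);
      assert(out == nullptr);  // fails if the reset is omitted
    }
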
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index 4403a57..ee103ba 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -59,13 +59,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index f301065..6cf9073 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -290,10 +290,8 @@
 }
 
 nn::GeneralResult<hidl_memory> convert(const nn::Memory& memory) {
-    const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size);
-    // Copy memory to force the native_handle_t to be copied.
-    auto copiedMemory = hidlMemory;
-    return copiedMemory;
+    return hidl_memory(memory.name, NN_TRY(hal::utils::hidlHandleFromSharedHandle(memory.handle)),
+                       memory.size);
 }
 
 nn::GeneralResult<Model> convert(const nn::Model& model) {
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 8292f17..671416b 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -157,8 +157,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -181,8 +181,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IDevice::prepareModelFromCache not supported on 1.0 HAL service";
 }
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index f55ac6c..c1e95fe1a 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -59,13 +59,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 03b0d6e..a0378c9 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -159,8 +159,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -184,8 +184,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& /*modelCache*/,
-        const std::vector<nn::NativeHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IDevice::prepareModelFromCache not supported on 1.1 HAL service";
 }
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index e6de011..24911fe 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -44,11 +44,11 @@
 GeneralResult<Extension> convert(const hal::V1_2::Extension& extension);
 GeneralResult<Extension::OperandTypeInformation> convert(
         const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation);
-GeneralResult<NativeHandle> convert(const hardware::hidl_handle& handle);
+GeneralResult<SharedHandle> convert(const hardware::hidl_handle& handle);
 
 GeneralResult<std::vector<Extension>> convert(
         const hardware::hidl_vec<hal::V1_2::Extension>& extensions);
-GeneralResult<std::vector<NativeHandle>> convert(
+GeneralResult<std::vector<SharedHandle>> convert(
         const hardware::hidl_vec<hardware::hidl_handle>& handles);
 GeneralResult<std::vector<OutputShape>> convert(
         const hardware::hidl_vec<hal::V1_2::OutputShape>& outputShapes);
@@ -77,10 +77,10 @@
 nn::GeneralResult<Extension> convert(const nn::Extension& extension);
 nn::GeneralResult<Extension::OperandTypeInformation> convert(
         const nn::Extension::OperandTypeInformation& operandTypeInformation);
-nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle);
+nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle);
 
 nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions);
-nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles);
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles);
 nn::GeneralResult<hidl_vec<OutputShape>> convert(const std::vector<nn::OutputShape>& outputShapes);
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index eb317b1..bbd5343 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -68,13 +68,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 378719a..08c94de 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -257,16 +257,15 @@
     };
 }
 
-GeneralResult<NativeHandle> convert(const hidl_handle& handle) {
-    auto* cloned = native_handle_clone(handle.getNativeHandle());
-    return ::android::NativeHandle::create(cloned, /*ownsHandle=*/true);
+GeneralResult<SharedHandle> convert(const hidl_handle& hidlHandle) {
+    return hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
 }
 
 GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {
     return convertVec(extensions);
 }
 
-GeneralResult<std::vector<NativeHandle>> convert(const hidl_vec<hidl_handle>& handles) {
+GeneralResult<std::vector<SharedHandle>> convert(const hidl_vec<hidl_handle>& handles) {
     return convertVec(handles);
 }
 
@@ -487,18 +486,15 @@
     };
 }
 
-nn::GeneralResult<hidl_handle> convert(const nn::NativeHandle& handle) {
-    const auto hidlHandle = hidl_handle(handle->handle());
-    // Copy memory to force the native_handle_t to be copied.
-    auto copiedHandle = hidlHandle;
-    return copiedHandle;
+nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle) {
+    return hal::utils::hidlHandleFromSharedHandle(handle);
 }
 
 nn::GeneralResult<hidl_vec<Extension>> convert(const std::vector<nn::Extension>& extensions) {
     return convertVec(extensions);
 }
 
-nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::NativeHandle>& handles) {
+nn::GeneralResult<hidl_vec<hidl_handle>> convert(const std::vector<nn::SharedHandle>& handles) {
     return convertVec(handles);
 }
 
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index ca236f1..517d61f 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -257,8 +257,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -286,8 +286,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlModelCache = NN_TRY(convert(modelCache));
     const auto hidlDataCache = NN_TRY(convert(dataCache));
     const auto hidlToken = token;
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index 2f6c46a..0f5234b 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -61,13 +61,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index c215f39..5e3d5c2 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -179,8 +179,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -211,8 +211,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto hidlDeadline = NN_TRY(convert(deadline));
     const auto hidlModelCache = NN_TRY(V1_2::utils::convert(modelCache));
     const auto hidlDataCache = NN_TRY(V1_2::utils::convert(dataCache));
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index df9b280..2781053 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -53,15 +53,6 @@
     return hal::utils::makeExecutionFailure(convertExecutionResultsHelper(outputShapes, timing));
 }
 
-nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
-        const std::vector<nn::SyncFence>& syncFences) {
-    hidl_vec<hidl_handle> handles(syncFences.size());
-    for (size_t i = 0; i < syncFences.size(); ++i) {
-        handles[i] = NN_TRY(V1_2::utils::convert(syncFences[i].getHandle()));
-    }
-    return handles;
-}
-
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
         const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
     return std::make_pair(NN_TRY(validatedConvertToCanonical(timingLaunched)),
@@ -221,7 +212,7 @@
             NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
 
     const auto hidlRequest = NN_TRY(convert(requestInShared));
-    const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
+    const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
     const auto hidlMeasure = NN_TRY(V1_2::utils::convert(measure));
     const auto hidlDeadline = NN_TRY(convert(deadline));
     const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 254a3d4..43bb0c6 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
 
+#include <cutils/native_handle.h>
+#include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <functional>
@@ -57,6 +59,13 @@
 std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
                                              const std::vector<nn::Operation>& operations);
 
+nn::GeneralResult<nn::Memory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle);
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& fences);
+
 }  // namespace android::hardware::neuralnetworks::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4f1afb9..4a84e4d 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -63,13 +63,13 @@
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
-            nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
             const nn::CacheToken& token) const override;
 
     nn::GeneralResult<nn::SharedBuffer> allocate(
@@ -81,12 +81,12 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
             bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
             nn::Priority priority, nn::OptionalTimePoint deadline,
-            const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+            const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
             bool blocking, nn::OptionalTimePoint deadline,
-            const std::vector<nn::NativeHandle>& modelCache,
-            const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+            const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
     nn::GeneralResult<nn::SharedBuffer> allocateInternal(
             bool blocking, const nn::BufferDesc& desc,
             const std::vector<nn::SharedPreparedModel>& preparedModels,
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 2565972..c04c8df 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -19,6 +19,7 @@
 #include "HandleError.h"
 
 #include <android-base/logging.h>
+#include <android-base/unique_fd.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
@@ -247,4 +248,67 @@
     return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
+    if (handle == nullptr) {
+        return {};
+    }
+
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle->fds.size());
+    for (const auto& fd : handle->fds) {
+        int dupFd = dup(fd);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+    if (nativeHandle == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
+    }
+    for (size_t i = 0; i < fds.size(); ++i) {
+        nativeHandle->data[i] = fds[i].release();
+    }
+    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+    hidl_handle hidlHandle;
+    hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
+    return hidlHandle;
+}
+
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+    if (handle == nullptr) {
+        return nullptr;
+    }
+
+    std::vector<base::unique_fd> fds;
+    fds.reserve(handle->numFds);
+    for (int i = 0; i < handle->numFds; ++i) {
+        int dupFd = dup(handle->data[i]);
+        if (dupFd == -1) {
+            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+        }
+        fds.emplace_back(dupFd);
+    }
+
+    std::vector<int> ints(&handle->data[handle->numFds],
+                          &handle->data[handle->numFds + handle->numInts]);
+
+    return std::make_shared<const nn::Handle>(nn::Handle{
+            .fds = std::move(fds),
+            .ints = std::move(ints),
+    });
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& syncFences) {
+    hidl_vec<hidl_handle> handles(syncFences.size());
+    for (size_t i = 0; i < syncFences.size(); ++i) {
+        handles[i] =
+                NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+    }
+    return handles;
+}
+
 }  // namespace android::hardware::neuralnetworks::utils
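
Both helpers deep-copy the handle by dup()ing every file descriptor, so the canonical nn::Handle and the HIDL native_handle_t never share fd ownership across the boundary; hidlHandleFromSharedHandle also hands its native_handle_t to the hidl_handle with shouldOwn=true, so it is closed and deleted automatically. A hypothetical round trip through the two helpers, using a pipe fd purely for illustration:

    #include <unistd.h>

    #include <cutils/native_handle.h>
    #include <nnapi/hal/CommonUtils.h>

    namespace nnutils = android::hardware::neuralnetworks::utils;

    void roundTrip() {
        int pipeFds[2];
        if (pipe(pipeFds) != 0) return;

        // Wrap the read end in a native_handle_t with one fd and no ints.
        native_handle_t* raw = native_handle_create(/*numFds=*/1, /*numInts=*/0);
        if (raw == nullptr) {
            close(pipeFds[0]);
            close(pipeFds[1]);
            return;
        }
        raw->data[0] = pipeFds[0];

        // The canonical handle dups the fd; 'raw' still owns its own copy.
        auto shared = nnutils::sharedHandleFromNativeHandle(raw);

        // Converting back dups again; the hidl_handle owns what it creates.
        if (shared.has_value()) {
            auto hidl = nnutils::hidlHandleFromSharedHandle(shared.value());
        }

        native_handle_close(raw);  // closes pipeFds[0]
        native_handle_delete(raw);
        close(pipeFds[1]);
    }
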
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 95662d9..26025a5 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -161,8 +161,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel =
             [device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
@@ -174,8 +174,8 @@
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
-        nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel =
             [device = std::move(self), deadline, modelCache, dataCache,
@@ -202,8 +202,8 @@
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
         bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
         nn::Priority priority, nn::OptionalTimePoint deadline,
-        const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
                      token](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -214,8 +214,8 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
         bool blocking, nn::OptionalTimePoint deadline,
-        const std::vector<nn::NativeHandle>& modelCache,
-        const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& modelCache,
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
     const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };
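
The signature change reaches into the factory lambdas, which capture modelCache and dataCache by value: ResilientPreparedModel can then replay the exact prepareModel call against a freshly recovered device after a transport failure. A generic sketch of this capture-and-replay pattern, with hypothetical types (not the actual ResilientDevice code):

    #include <functional>
    #include <vector>

    struct PreparedModel {};
    struct Device {};

    // The factory captures its arguments by value so the call can be replayed
    // verbatim against a recovered device instance.
    using Factory = std::function<PreparedModel(const Device&)>;

    Factory makePreparedModelFactory(std::vector<int> modelCache,
                                     std::vector<int> dataCache) {
        return [modelCache = std::move(modelCache),
                dataCache = std::move(dataCache)](const Device& /*device*/) {
            // ... re-issue prepareModel(modelCache, dataCache, ...) here ...
            return PreparedModel{};
        };
    }
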
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index 87d27c7..402598c 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -26,6 +26,7 @@
         "neuralnetworks_utils_hal_1_1",
         "neuralnetworks_utils_hal_1_2",
         "neuralnetworks_utils_hal_1_3",
+        "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
         "android.hardware.neuralnetworks@1.0",
diff --git a/radio/1.6/types.hal b/radio/1.6/types.hal
index 32da295..da03190 100644
--- a/radio/1.6/types.hal
+++ b/radio/1.6/types.hal
@@ -18,7 +18,10 @@
 
 import @1.0::RadioError;
 import @1.0::RadioResponseType;
-import @1.5::SetupDataCallResult;
+import @1.4::DataCallFailCause;
+import @1.4::DataConnActiveStatus;
+import @1.4::PdpProtocolType;
+import @1.5::LinkAddress;
 
 import android.hidl.safe_union@1.0::Monostate;
 
@@ -240,8 +243,72 @@
     NO_FALLBACK_RETRY_SETUP_NORMAL = 3
 };
 
+/**
+ * Overwritten from @1.5::SetupDataCallResult in order to change the suggestedRetryTime
+ * to a 64-bit value. In the future, this must be extended instead of overwritten.
+ * Also added defaultQos, qosSessions, and handoverFailureMode in this version.
+ */
 struct SetupDataCallResult {
-    @1.5::SetupDataCallResult base;
+    /** Data call fail cause. DataCallFailCause.NONE if no error. */
+    DataCallFailCause cause;
+
+    /**
+     * If cause is not DataCallFailCause.NONE, this field indicates the network-suggested data
+     * retry back-off time in milliseconds. A negative value indicates the network does not give
+     * any suggestion. 0 indicates retry should be performed immediately. 0x7fffffffffffffff
+     * indicates the device should not retry data setup anymore.
+     */
+    int64_t suggestedRetryTime;
+
+    /** Context ID, uniquely identifies this data connection. */
+    int32_t cid;
+
+    /** Data connection active status. */
+    DataConnActiveStatus active;
+
+    /**
+     * PDP protocol type. If cause is DataCallFailCause.ONLY_SINGLE_BEARER_ALLOWED, this is the
+     * protocol type supported, such as "IP" or "IPV6".
+     */
+    PdpProtocolType type;
+
+    /** The network interface name. */
+    string ifname;
+
+    /**
+     * List of link address.
+     */
+    vec<LinkAddress> addresses;
+
+    /**
+     * List of DNS server addresses, e.g., "192.0.1.3" or "192.0.1.11 2001:db8::1". Empty if no
+     * DNS server addresses were returned.
+     */
+    vec<string> dnses;
+
+    /**
+     * List of default gateway addresses, e.g., "192.0.1.3" or "192.0.1.11 2001:db8::1".
+     * When empty, the addresses represent point to point connections.
+     */
+    vec<string> gateways;
+
+    /**
+     * List of P-CSCF (Proxy Call Session Control Function) addresses via PCO (Protocol
+     * Configuration Option), e.g., "2001:db8::1 2001:db8::2 2001:db8::3". Empty if not IMS client.
+     */
+    vec<string> pcscf;
+
+    /**
+     * MTU received from the network for IPv4.
+     * A value <= 0 means the network has either not sent a value or sent an invalid value.
+     */
+    int32_t mtuV4;
+
+    /**
+     * MTU received from network for IPv6.
+     * Value <= 0 means network has either not sent a value or sent an invalid value.
+     */
+    int32_t mtuV6;
 
     /** Default bearer QoS. Applicable to LTE and NR */
     Qos defaultQos;