Replace nn::NativeHandle with nn::SharedHandle

Switch the cache-handle type used by the common utility code and
ResilientDevice from nn::NativeHandle to nn::SharedHandle, and add common
helpers for converting between nn::SharedHandle, native_handle_t*, and
hidl_handle (plus a helper that converts a vector of nn::SyncFence objects
into hidl_handles). Also declare createSharedMemoryFromHidlMemory and make
the service utility library depend on neuralnetworks_utils_hal_common.
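For reviewers, a minimal sketch (not part of this change) of how the new
conversion helpers are meant to be used; roundTrip below is hypothetical and
only illustrates the fd-duplicating round trip between the canonical and HIDL
handle types:

    // Hypothetical example; only the utils:: helpers are added by this change.
    #include <hidl/HidlSupport.h>
    #include <nnapi/Result.h>
    #include <nnapi/Types.h>
    #include <nnapi/hal/CommonUtils.h>

    namespace nn = ::android::nn;
    namespace utils = ::android::hardware::neuralnetworks::utils;
    using ::android::hardware::hidl_handle;

    // Converts a canonical handle to a hidl_handle and back. Each direction
    // dup()s the underlying fds, so the input handle keeps ownership of its fds.
    nn::GeneralResult<nn::SharedHandle> roundTrip(const nn::SharedHandle& handle) {
        const hidl_handle hidlHandle = NN_TRY(utils::hidlHandleFromSharedHandle(handle));
        return utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle());
    }

convertSyncFences applies the same hidl_handle conversion to the handle of
each nn::SyncFence in a vector.
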
Bug: 160669116
Test: mma
Change-Id: I73b2b93aab6cbf37d3c145e15ee9ae45228954f1
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 254a3d4..43bb0c6 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H

+#include <cutils/native_handle.h>
+#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <functional>
@@ -57,6 +59,13 @@
std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
const std::vector<nn::Operation>& operations);

+nn::GeneralResult<nn::Memory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle);
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle);
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+ const std::vector<nn::SyncFence>& fences);
+
} // namespace android::hardware::neuralnetworks::utils

#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4f1afb9..4a84e4d 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -63,13 +63,13 @@

nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;

nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache,
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache,
const nn::CacheToken& token) const override;

nn::GeneralResult<nn::SharedBuffer> allocate(
@@ -81,12 +81,12 @@
nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
bool blocking, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const;
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
nn::GeneralResult<nn::SharedBuffer> allocateInternal(
bool blocking, const nn::BufferDesc& desc,
const std::vector<nn::SharedPreparedModel>& preparedModels,
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 2565972..c04c8df 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -19,6 +19,7 @@
#include "HandleError.h"
#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
@@ -247,4 +248,67 @@
return nn::countNumberOfConsumers(numberOfOperands, operations);
}

+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
+ if (handle == nullptr) {
+ return {};
+ }
+
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->fds.size());
+ for (const auto& fd : handle->fds) {
+ int dupFd = dup(fd);
+ if (dupFd == -1) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
+
+ native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+ if (nativeHandle == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
+ }
+ for (size_t i = 0; i < fds.size(); ++i) {
+ nativeHandle->data[i] = fds[i].release();
+ }
+ std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+
+ hidl_handle hidlHandle;
+ hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
+ return hidlHandle;
+}
+
+nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+ if (handle == nullptr) {
+ return nullptr;
+ }
+
+ std::vector<base::unique_fd> fds;
+ fds.reserve(handle->numFds);
+ for (int i = 0; i < handle->numFds; ++i) {
+ int dupFd = dup(handle->data[i]);
+ if (dupFd == -1) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
+ }
+ fds.emplace_back(dupFd);
+ }
+
+ std::vector<int> ints(&handle->data[handle->numFds],
+ &handle->data[handle->numFds + handle->numInts]);
+
+ return std::make_shared<const nn::Handle>(nn::Handle{
+ .fds = std::move(fds),
+ .ints = std::move(ints),
+ });
+}
+
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+ const std::vector<nn::SyncFence>& syncFences) {
+ hidl_vec<hidl_handle> handles(syncFences.size());
+ for (size_t i = 0; i < syncFences.size(); ++i) {
+ handles[i] =
+ NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+ }
+ return handles;
+}
+
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 95662d9..26025a5 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -161,8 +161,8 @@

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
auto self = shared_from_this();
ResilientPreparedModel::Factory makePreparedModel =
[device = std::move(self), model, preference, priority, deadline, modelCache, dataCache,
@@ -174,8 +174,8 @@
}

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCache(
- nn::OptionalTimePoint deadline, const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
auto self = shared_from_this();
ResilientPreparedModel::Factory makePreparedModel =
[device = std::move(self), deadline, modelCache, dataCache,
@@ -202,8 +202,8 @@
nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
nn::Priority priority, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
token](const nn::IDevice& device) {
return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -214,8 +214,8 @@

nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelFromCacheInternal(
bool blocking, nn::OptionalTimePoint deadline,
- const std::vector<nn::NativeHandle>& modelCache,
- const std::vector<nn::NativeHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& modelCache,
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
};
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index 87d27c7..402598c 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -26,6 +26,7 @@
"neuralnetworks_utils_hal_1_1",
"neuralnetworks_utils_hal_1_2",
"neuralnetworks_utils_hal_1_3",
+ "neuralnetworks_utils_hal_common",
],
shared_libs: [
"android.hardware.neuralnetworks@1.0",