Create NN AIDL adapter
This change adds the following adapters:
* nn::IDevice -> BnDevice
* nn::IPreparedModel -> BnPreparedModel
* nn::IBurst -> BnBurst
* nn::IBuffer -> BnBuffer
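
A driver can serve the AIDL NN HAL by wrapping its canonical
implementation with these adapters. A minimal sketch of a service
entry point (the getDevice() factory and the "sample" instance name
below are hypothetical):

  #include <android/binder_manager.h>
  #include <android/binder_process.h>
  #include <nnapi/hal/aidl/Adapter.h>

  int main() {
      // Hypothetical factory returning the driver's canonical device.
      ::android::nn::SharedDevice device = getDevice();

      // Wrap the canonical device and register the AIDL service.
      auto aidlDevice = aidl::android::hardware::neuralnetworks::adapter::adapt(device);
      const char* name = "android.hardware.neuralnetworks.IDevice/sample";
      binder_status_t status = AServiceManager_addService(aidlDevice->asBinder().get(), name);
      if (status != STATUS_OK) return 1;

      ABinderProcess_joinThreadPool();
      return 1;  // joinThreadPool() should not return.
  }
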
Bug: N/A
Test: mma
Test: locally created a binderized service with this adapter code,
which passed VtsHalNeuralnetworksTargetTest
Change-Id: I966f65a1e4d75284c050b77f3f40c515e4970130
diff --git a/neuralnetworks/utils/adapter/aidl/Android.bp b/neuralnetworks/utils/adapter/aidl/Android.bp
new file mode 100644
index 0000000..8269a3d
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/Android.bp
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "hardware_interfaces_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_library_static {
+ name: "neuralnetworks_utils_hal_adapter_aidl",
+ defaults: [
+ "neuralnetworks_use_latest_utils_hal_aidl",
+ "neuralnetworks_utils_defaults",
+ ],
+ srcs: ["src/*"],
+ local_include_dirs: ["include/nnapi/hal/aidl/"],
+ export_include_dirs: ["include"],
+ static_libs: [
+ "neuralnetworks_types",
+ "neuralnetworks_utils_hal_common",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ ],
+}
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
new file mode 100644
index 0000000..4c0b328
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+/**
+ * A self-contained unit of work to be executed.
+ */
+using Task = std::function<void()>;
+
+/**
+ * A type-erased executor which executes a task asynchronously.
+ *
+ * This executor is also provided an optional deadline, which the caller expects to be an upper
+ * bound on the amount of time needed to complete the task. If needed, the Executor can retrieve
+ * the Application ID (Android User ID) by calling AIBinder_getCallingUid in android/binder_ibinder.h.
+ */
+using Executor = std::function<void(Task, ::android::nn::OptionalTimePoint)>;
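+
+// For example, the default executor used by the one-argument adapt() overload declared below
+// runs each task immediately on a detached thread, ignoring the deadline:
+//
+//     Executor executor = [](Task task, ::android::nn::OptionalTimePoint /*deadline*/) {
+//         std::thread(std::move(task)).detach();
+//     };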
+
+/**
+ * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
+ *
+ * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
+ * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
+ *
+ * @param device NNAPI canonical IDevice interface object to be adapted.
+ * @param executor Type-erased executor to handle executing tasks asynchronously.
+ * @return AIDL NN HAL IDevice interface object.
+ */
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor);
+
+/**
+ * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
+ *
+ * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
+ * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
+ *
+ * This function uses a default executor, which will execute tasks from a detached thread.
+ *
+ * @param device NNAPI canonical IDevice interface object to be adapted.
+ * @return AIDL NN HAL IDevice interface object.
+ */
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device);
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h
new file mode 100644
index 0000000..701e43e
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBuffer.h>
+
+#include <memory>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IBuffer to BnBuffer.
+class Buffer : public BnBuffer {
+ public:
+ explicit Buffer(::android::nn::SharedBuffer buffer);
+
+ ndk::ScopedAStatus copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) override;
+ ndk::ScopedAStatus copyTo(const Memory& dst) override;
+
+ private:
+ const ::android::nn::SharedBuffer kBuffer;
+};
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
new file mode 100644
index 0000000..f2687c4
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android-base/thread_annotations.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IBurst to BnBurst.
+class Burst : public BnBurst {
+ public:
+ // Precondition: burst != nullptr
+ explicit Burst(::android::nn::SharedBurst burst);
+
+ ndk::ScopedAStatus executeSynchronously(const Request& request,
+ const std::vector<int64_t>& memoryIdentifierTokens,
+ bool measureTiming, int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs,
+ ExecutionResult* executionResult) override;
+ ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override;
+
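+    // Maps a caller-assigned memory identifier token to the corresponding canonical memory
+    // object and its IBurst cache hold, so that repeated executions referencing the same token
+    // reuse the cached memory until releaseMemoryResource is called with that token. A token
+    // value of -1 indicates that no token was assigned to the memory pool.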
+ class ThreadSafeMemoryCache {
+ public:
+ using Value =
+ std::pair<::android::nn::SharedMemory, ::android::nn::IBurst::OptionalCacheHold>;
+
+ Value add(int64_t token, const ::android::nn::SharedMemory& memory,
+ const ::android::nn::IBurst& burst) const;
+ void remove(int64_t token) const;
+
+ private:
+ mutable std::mutex mMutex;
+ mutable std::unordered_map<int64_t, Value> mCache GUARDED_BY(mMutex);
+ };
+
+ private:
+ const ::android::nn::SharedBurst kBurst;
+ const ThreadSafeMemoryCache kMemoryCache;
+};
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
new file mode 100644
index 0000000..aa29d63
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
+
+#include "nnapi/hal/aidl/Adapter.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IDevice.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IDevice to BnDevice.
+class Device : public BnDevice {
+ public:
+ Device(::android::nn::SharedDevice device, Executor executor);
+
+ ndk::ScopedAStatus allocate(const BufferDesc& desc,
+ const std::vector<IPreparedModelParcel>& preparedModels,
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles,
+ DeviceBuffer* buffer) override;
+ ndk::ScopedAStatus getCapabilities(Capabilities* capabilities) override;
+ ndk::ScopedAStatus getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) override;
+ ndk::ScopedAStatus getSupportedExtensions(std::vector<Extension>* extensions) override;
+ ndk::ScopedAStatus getSupportedOperations(const Model& model,
+ std::vector<bool>* supported) override;
+ ndk::ScopedAStatus getType(DeviceType* deviceType) override;
+ ndk::ScopedAStatus getVersionString(std::string* version) override;
+ ndk::ScopedAStatus prepareModel(
+ const Model& model, ExecutionPreference preference, Priority priority,
+ int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+ const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) override;
+ ndk::ScopedAStatus prepareModelFromCache(
+ int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+ const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) override;
+
+ protected:
+ const ::android::nn::SharedDevice kDevice;
+ const Executor kExecutor;
+};
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
new file mode 100644
index 0000000..93e0427
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
+
+#include "nnapi/hal/aidl/Adapter.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IPreparedModel to BnPreparedModel.
+class PreparedModel : public BnPreparedModel {
+ public:
+ explicit PreparedModel(::android::nn::SharedPreparedModel preparedModel);
+
+ ndk::ScopedAStatus executeSynchronously(const Request& request, bool measureTiming,
+ int64_t deadlineNs, int64_t loopTimeoutDurationNs,
+ ExecutionResult* executionResult) override;
+ ndk::ScopedAStatus executeFenced(const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ bool measureTiming, int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs, int64_t durationNs,
+ FencedExecutionResult* executionResult) override;
+ ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;
+
+ ::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;
+
+ protected:
+ const ::android::nn::SharedPreparedModel kPreparedModel;
+};
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
diff --git a/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp b/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp
new file mode 100644
index 0000000..d0b56e8
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Adapter.h"
+
+#include "Device.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <android/binder_interface_utils.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <thread>
+#include <utility>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor) {
+ return ndk::SharedRefBase::make<Device>(std::move(device), std::move(executor));
+}
+
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device) {
+ Executor defaultExecutor = [](Task task, ::android::nn::OptionalTimePoint /*deadline*/) {
+ std::thread(std::move(task)).detach();
+ };
+ return adapt(std::move(device), std::move(defaultExecutor));
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp b/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp
new file mode 100644
index 0000000..c15ab65
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <utility>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::GeneralResult<std::vector<uint32_t>> inputToUnsigned(const std::vector<int32_t>& dims) {
+ auto result = nn::toUnsigned(dims);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::GeneralResult<void> copyTo(const nn::IBuffer& buffer, const Memory& dst) {
+ const auto nnDst = NN_TRY(convertInput(dst));
+ return buffer.copyTo(nnDst);
+}
+
+nn::GeneralResult<void> copyFrom(const nn::IBuffer& buffer, const Memory& src,
+ const std::vector<int32_t>& dimensions) {
+ const auto nnSrc = NN_TRY(convertInput(src));
+ const auto nnDims = NN_TRY(inputToUnsigned(dimensions));
+ return buffer.copyFrom(nnSrc, nnDims);
+}
+
+} // namespace
+
+Buffer::Buffer(nn::SharedBuffer buffer) : kBuffer(std::move(buffer)) {
+ CHECK(kBuffer != nullptr);
+}
+
+ndk::ScopedAStatus Buffer::copyTo(const Memory& dst) {
+ const auto result = adapter::copyTo(*kBuffer, dst);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Buffer::copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) {
+ const auto result = adapter::copyFrom(*kBuffer, src, dimensions);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
new file mode 100644
index 0000000..4fabb20
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Burst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <algorithm>
+#include <chrono>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <utility>
+#include <variant>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+using Value = Burst::ThreadSafeMemoryCache::Value;
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::Duration makeDuration(int64_t durationNs) {
+ return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
+ if (durationNs < -1) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
+ }
+ return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+ if (durationNs < -1) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+ }
+ return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+std::vector<nn::IBurst::OptionalCacheHold> ensureAllMemoriesAreCached(
+ nn::Request* request, const std::vector<int64_t>& memoryIdentifierTokens,
+ const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache) {
+ std::vector<nn::IBurst::OptionalCacheHold> holds;
+ holds.reserve(memoryIdentifierTokens.size());
+
+ for (size_t i = 0; i < memoryIdentifierTokens.size(); ++i) {
+ const auto& pool = request->pools[i];
+ const auto token = memoryIdentifierTokens[i];
+ constexpr int64_t kNoToken = -1;
+ if (token == kNoToken || !std::holds_alternative<nn::SharedMemory>(pool)) {
+ continue;
+ }
+
+ const auto& memory = std::get<nn::SharedMemory>(pool);
+ auto [storedMemory, hold] = cache.add(token, memory, burst);
+
+ request->pools[i] = std::move(storedMemory);
+ holds.push_back(std::move(hold));
+ }
+
+ return holds;
+}
+
+nn::ExecutionResult<ExecutionResult> executeSynchronously(
+ const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache, const Request& request,
+ const std::vector<int64_t>& memoryIdentifierTokens, bool measureTiming, int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs) {
+ if (request.pools.size() != memoryIdentifierTokens.size()) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+ << "request.pools.size() != memoryIdentifierTokens.size()";
+ }
+ if (!std::all_of(memoryIdentifierTokens.begin(), memoryIdentifierTokens.end(),
+ [](int64_t token) { return token >= -1; })) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid memoryIdentifierTokens";
+ }
+
+ auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+
+ const auto hold = ensureAllMemoriesAreCached(&nnRequest, memoryIdentifierTokens, burst, cache);
+
+ const auto result =
+ burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+
+ if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ const auto& [message, code, outputShapes] = result.error();
+ return ExecutionResult{.outputSufficientSize = false,
+ .outputShapes = utils::convert(outputShapes).value(),
+                               .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
+ }
+
+ const auto& [outputShapes, timing] = NN_TRY(result);
+ return ExecutionResult{.outputSufficientSize = true,
+ .outputShapes = utils::convert(outputShapes).value(),
+ .timing = utils::convert(timing).value()};
+}
+
+} // namespace
+
+Value Burst::ThreadSafeMemoryCache::add(int64_t token, const nn::SharedMemory& memory,
+ const nn::IBurst& burst) const {
+ std::lock_guard guard(mMutex);
+ if (const auto it = mCache.find(token); it != mCache.end()) {
+ return it->second;
+ }
+ auto hold = burst.cacheMemory(memory);
+ auto [it, _] = mCache.emplace(token, std::make_pair(memory, std::move(hold)));
+ return it->second;
+}
+
+void Burst::ThreadSafeMemoryCache::remove(int64_t token) const {
+ std::lock_guard guard(mMutex);
+ mCache.erase(token);
+}
+
+Burst::Burst(nn::SharedBurst burst) : kBurst(std::move(burst)) {
+ CHECK(kBurst != nullptr);
+}
+
+ndk::ScopedAStatus Burst::executeSynchronously(const Request& request,
+ const std::vector<int64_t>& memoryIdentifierTokens,
+ bool measureTiming, int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs,
+ ExecutionResult* executionResult) {
+ auto result =
+ adapter::executeSynchronously(*kBurst, kMemoryCache, request, memoryIdentifierTokens,
+ measureTiming, deadlineNs, loopTimeoutDurationNs);
+ if (!result.has_value()) {
+ auto [message, code, _] = std::move(result).error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Burst::releaseMemoryResource(int64_t memoryIdentifierToken) {
+ if (memoryIdentifierToken < -1) {
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(ErrorStatus::INVALID_ARGUMENT),
+ "Invalid memoryIdentifierToken");
+ }
+ kMemoryCache.remove(memoryIdentifierToken);
+ return ndk::ScopedAStatus::ok();
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/Device.cpp b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
new file mode 100644
index 0000000..763be7f
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Adapter.h"
+#include "Buffer.h"
+#include "PreparedModel.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_interface_utils.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <algorithm>
+#include <chrono>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::Duration makeDuration(int64_t durationNs) {
+ return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+ if (durationNs < -1) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+ }
+ return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+nn::GeneralResult<nn::CacheToken> convertCacheToken(const std::vector<uint8_t>& token) {
+ nn::CacheToken nnToken;
+ if (token.size() != nnToken.size()) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid token";
+ }
+ std::copy(token.begin(), token.end(), nnToken.begin());
+ return nnToken;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> downcast(const IPreparedModelParcel& preparedModel) {
+ if (preparedModel.preparedModel == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
+ }
+ if (preparedModel.preparedModel->isRemote()) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
+ }
+
+ // This static_cast is safe because adapter::PreparedModel is the only class that implements
+ // the IPreparedModel interface in the adapter service code.
+ const auto* casted = static_cast<const PreparedModel*>(preparedModel.preparedModel.get());
+ return casted->getUnderlyingPreparedModel();
+}
+
+nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
+ const std::vector<IPreparedModelParcel>& preparedModels) {
+ std::vector<nn::SharedPreparedModel> canonical;
+ canonical.reserve(preparedModels.size());
+ for (const auto& preparedModel : preparedModels) {
+ canonical.push_back(NN_TRY(downcast(preparedModel)));
+ }
+ return canonical;
+}
+
+nn::GeneralResult<DeviceBuffer> allocate(const nn::IDevice& device, const BufferDesc& desc,
+ const std::vector<IPreparedModelParcel>& preparedModels,
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles) {
+ auto nnDesc = NN_TRY(convertInput(desc));
+ auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
+ auto nnInputRoles = NN_TRY(convertInput(inputRoles));
+ auto nnOutputRoles = NN_TRY(convertInput(outputRoles));
+
+ auto buffer = NN_TRY(device.allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
+ CHECK(buffer != nullptr);
+
+ const nn::Request::MemoryDomainToken token = buffer->getToken();
+ auto aidlBuffer = ndk::SharedRefBase::make<Buffer>(std::move(buffer));
+ return DeviceBuffer{.buffer = std::move(aidlBuffer), .token = static_cast<int32_t>(token)};
+}
+
+nn::GeneralResult<std::vector<bool>> getSupportedOperations(const nn::IDevice& device,
+ const Model& model) {
+ const auto nnModel = NN_TRY(convertInput(model));
+ return device.getSupportedOperations(nnModel);
+}
+
+using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
+
+std::shared_ptr<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
+ if (preparedModel == nullptr) {
+ return nullptr;
+ }
+ return ndk::SharedRefBase::make<PreparedModel>(std::move(preparedModel));
+}
+
+void notify(IPreparedModelCallback* callback, PrepareModelResult result) {
+ if (!result.has_value()) {
+ const auto& [message, status] = result.error();
+ LOG(ERROR) << message;
+ const auto aidlCode = utils::convert(status).value_or(ErrorStatus::GENERAL_FAILURE);
+ callback->notify(aidlCode, nullptr);
+ } else {
+ auto preparedModel = std::move(result).value();
+ auto aidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
+ callback->notify(ErrorStatus::NONE, std::move(aidlPreparedModel));
+ }
+}
+
+nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
+ const Model& model, ExecutionPreference preference,
+ Priority priority, int64_t deadlineNs,
+ const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+ const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModel = NN_TRY(convertInput(model));
+ const auto nnPreference = NN_TRY(convertInput(preference));
+ const auto nnPriority = NN_TRY(convertInput(priority));
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = NN_TRY(convertCacheToken(token));
+
+ Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
+ nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+ nnToken, callback] {
+ auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
+ nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result));
+ };
+ executor(std::move(task), nnDeadline);
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache(
+ const nn::SharedDevice& device, const Executor& executor, int64_t deadlineNs,
+ const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = NN_TRY(convertCacheToken(token));
+
+ auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
+ nnDataCache = std::move(nnDataCache), nnToken, callback] {
+ auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result));
+ };
+ executor(std::move(task), nnDeadline);
+
+ return {};
+}
+
+} // namespace
+
+Device::Device(::android::nn::SharedDevice device, Executor executor)
+ : kDevice(std::move(device)), kExecutor(std::move(executor)) {
+ CHECK(kDevice != nullptr);
+ CHECK(kExecutor != nullptr);
+}
+
+ndk::ScopedAStatus Device::allocate(const BufferDesc& desc,
+ const std::vector<IPreparedModelParcel>& preparedModels,
+ const std::vector<BufferRole>& inputRoles,
+ const std::vector<BufferRole>& outputRoles,
+ DeviceBuffer* buffer) {
+ auto result = adapter::allocate(*kDevice, desc, preparedModels, inputRoles, outputRoles);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *buffer = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getCapabilities(Capabilities* capabilities) {
+ *capabilities = utils::convert(kDevice->getCapabilities()).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) {
+ const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
+ *numberOfCacheFiles = NumberOfCacheFiles{.numModelCache = static_cast<int32_t>(numModelCache),
+ .numDataCache = static_cast<int32_t>(numDataCache)};
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getSupportedExtensions(std::vector<Extension>* extensions) {
+ *extensions = utils::convert(kDevice->getSupportedExtensions()).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getSupportedOperations(const Model& model,
+ std::vector<bool>* supported) {
+ auto result = adapter::getSupportedOperations(*kDevice, model);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *supported = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getType(DeviceType* deviceType) {
+ *deviceType = utils::convert(kDevice->getType()).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getVersionString(std::string* version) {
+ *version = kDevice->getVersionString();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::prepareModel(const Model& model, ExecutionPreference preference,
+ Priority priority, int64_t deadlineNs,
+ const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+ const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ const auto result = adapter::prepareModel(kDevice, kExecutor, model, preference, priority,
+ deadlineNs, modelCache, dataCache, token, callback);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ callback->notify(aidlCode, nullptr);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::prepareModelFromCache(
+ int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+ const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+ const std::shared_ptr<IPreparedModelCallback>& callback) {
+ const auto result = adapter::prepareModelFromCache(kDevice, kExecutor, deadlineNs, modelCache,
+ dataCache, token, callback);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ callback->notify(aidlCode, nullptr);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
new file mode 100644
index 0000000..71ed1a8
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Burst.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+class FencedExecutionCallback : public BnFencedExecutionCallback {
+ public:
+ FencedExecutionCallback(nn::ExecuteFencedInfoCallback callback)
+ : kCallback(std::move(callback)) {}
+
+ ndk::ScopedAStatus getExecutionInfo(Timing* timingLaunched, Timing* timingFenced,
+ ErrorStatus* errorStatus) override {
+ const auto result = kCallback();
+ if (result.ok()) {
+ const auto& [nnTimingLaunched, nnTimingFenced] = result.value();
+ *timingLaunched = utils::convert(nnTimingLaunched).value();
+ *timingFenced = utils::convert(nnTimingFenced).value();
+ *errorStatus = ErrorStatus::NONE;
+ } else {
+ constexpr auto kNoTiming = Timing{.timeOnDeviceNs = -1, .timeInDriverNs = -1};
+ const auto& [message, code] = result.error();
+ LOG(ERROR) << "getExecutionInfo failed with " << code << ": " << message;
+ const auto aidlStatus = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ *timingLaunched = kNoTiming;
+ *timingFenced = kNoTiming;
+ *errorStatus = aidlStatus;
+ }
+ return ndk::ScopedAStatus::ok();
+ }
+
+ private:
+ const nn::ExecuteFencedInfoCallback kCallback;
+};
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor) {
+ auto handles = NN_TRY(convertInput(waitFor));
+
+ constexpr auto valid = [](const nn::SharedHandle& handle) {
+ return handle != nullptr && handle->ok();
+ };
+ if (!std::all_of(handles.begin(), handles.end(), valid)) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid sync fence";
+ }
+
+ std::vector<nn::SyncFence> syncFences;
+ syncFences.reserve(waitFor.size());
+ for (auto& handle : handles) {
+ syncFences.push_back(nn::SyncFence::create(std::move(handle)).value());
+ }
+ return syncFences;
+}
+
+nn::Duration makeDuration(int64_t durationNs) {
+ return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
+ if (durationNs < -1) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
+ }
+ return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+ if (durationNs < -1) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+ }
+ return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IPreparedModel& preparedModel,
+ const Request& request,
+ bool measureTiming, int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+
+ const auto result =
+ preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+
+ if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ const auto& [message, code, outputShapes] = result.error();
+ LOG(ERROR) << "executeSynchronously failed with " << code << ": " << message;
+ return ExecutionResult{.outputSufficientSize = false,
+ .outputShapes = utils::convert(outputShapes).value(),
+                               .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
+ }
+
+ const auto& [outputShapes, timing] = NN_TRY(result);
+ return ExecutionResult{.outputSufficientSize = true,
+ .outputShapes = utils::convert(outputShapes).value(),
+ .timing = utils::convert(timing).value()};
+}
+
+nn::GeneralResult<FencedExecutionResult> executeFenced(
+ const nn::IPreparedModel& preparedModel, const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measureTiming,
+ int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
+ const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+ const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));
+
+ auto [syncFence, executeFencedInfoCallback] = NN_TRY(preparedModel.executeFenced(
+ nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+
+ ndk::ScopedFileDescriptor fileDescriptor;
+ if (syncFence.hasFd()) {
+ auto uniqueFd = NN_TRY(nn::dupFd(syncFence.getFd()));
+ fileDescriptor = ndk::ScopedFileDescriptor(uniqueFd.release());
+ }
+
+ return FencedExecutionResult{.callback = ndk::SharedRefBase::make<FencedExecutionCallback>(
+ std::move(executeFencedInfoCallback)),
+ .syncFence = std::move(fileDescriptor)};
+}
+
+} // namespace
+
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
+ : kPreparedModel(std::move(preparedModel)) {
+ CHECK(kPreparedModel != nullptr);
+}
+
+ndk::ScopedAStatus PreparedModel::executeSynchronously(const Request& request, bool measureTiming,
+ int64_t deadlineNs,
+ int64_t loopTimeoutDurationNs,
+ ExecutionResult* executionResult) {
+ auto result = adapter::executeSynchronously(*kPreparedModel, request, measureTiming, deadlineNs,
+ loopTimeoutDurationNs);
+ if (!result.has_value()) {
+ const auto& [message, code, _] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeFenced(
+ const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
+ FencedExecutionResult* executionResult) {
+ auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, measureTiming,
+ deadlineNs, loopTimeoutDurationNs, durationNs);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::configureExecutionBurst(std::shared_ptr<IBurst>* burst) {
+ auto result = kPreparedModel->configureExecutionBurst();
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *burst = ndk::SharedRefBase::make<Burst>(std::move(result).value());
+ return ndk::ScopedAStatus::ok();
+}
+
+nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
+ return kPreparedModel;
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/Android.bp b/neuralnetworks/utils/adapter/hidl/Android.bp
similarity index 100%
rename from neuralnetworks/utils/adapter/Android.bp
rename to neuralnetworks/utils/adapter/hidl/Android.bp
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Buffer.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Buffer.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Burst.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Burst.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Device.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Device.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Device.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Device.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
diff --git a/neuralnetworks/utils/adapter/src/Adapter.cpp b/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Adapter.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
diff --git a/neuralnetworks/utils/adapter/src/Buffer.cpp b/neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Buffer.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
diff --git a/neuralnetworks/utils/adapter/src/Burst.cpp b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Burst.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Burst.cpp
diff --git a/neuralnetworks/utils/adapter/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Device.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Device.cpp
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/PreparedModel.cpp
rename to neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp