Create NN AIDL Burst adapter

Add a Burst class that adapts aidl_hal::IBurst to nn::IBurst, backed by
a thread-safe, self-cleaning MemoryCache that relates nn::Memory objects
to unique int64_t identifiers, and route
PreparedModel::configureExecutionBurst through the new AIDL interface
instead of the HIDL 1.0 burst adapter.
Bug: 180492058
Bug: 177267324
Test: mma
Test: NeuralNetworksTest_static
Change-Id: I2947faeb3820faa963e4df1eaf7aefec57b66c79
Merged-In: I2947faeb3820faa963e4df1eaf7aefec57b66c79
(cherry picked from commit 504f44926c0e0ce0a5b5a591dbb848fdb732d8b2)
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 476dac9..ad961cf 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -34,7 +34,6 @@
"libarect",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
- "neuralnetworks_utils_hal_1_0",
],
shared_libs: [
"android.hardware.neuralnetworks-V1-ndk_platform",
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
new file mode 100644
index 0000000..008e4e4
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <android-base/scopeguard.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <utility>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and on protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+// Class that adapts aidl_hal::IBurst to nn::IBurst.
+class Burst final : public nn::IBurst {
+ struct PrivateConstructorTag {};
+
+ public:
+ /**
+ * Thread-safe, self-cleaning cache that relates an nn::Memory object to a unique int64_t
+ * identifier.
+ */
+ class MemoryCache : public std::enable_shared_from_this<MemoryCache> {
+ public:
+ using Task = std::function<void()>;
+ using Cleanup = ::android::base::ScopeGuard<Task>;
+ using SharedCleanup = std::shared_ptr<const Cleanup>;
+ using WeakCleanup = std::weak_ptr<const Cleanup>;
+
+ explicit MemoryCache(std::shared_ptr<aidl_hal::IBurst> burst);
+
+ /**
+     * Get the cache entry for a memory object, caching the object first if it is not already
+     * present.
+     *
+     * @param memory Memory object to be cached while the returned `SharedCleanup` is alive.
+     * @return A pair of (1) a unique identifier for the cache entry and (2) a ref-counted
+     *     "hold" object which preserves the cache entry as long as the hold object is alive.
+ */
+ std::pair<int64_t, SharedCleanup> getOrCacheMemory(const nn::SharedMemory& memory);
+
+    /**
+     * Get the cached entry for a memory object if it exists, otherwise return std::nullopt.
+     *
+     * @param memory Memory object to look up in the cache.
+     * @return A pair of (1) a unique identifier for the cache entry and (2) a ref-counted
+     *     "hold" object which preserves the cache entry as long as the hold object is alive.
+     *     If the cache entry is not present, std::nullopt is returned instead.
+ */
+ std::optional<std::pair<int64_t, SharedCleanup>> getMemoryIfAvailable(
+ const nn::SharedMemory& memory);
+
+ private:
+ void tryFreeMemory(const nn::SharedMemory& memory, int64_t identifier);
+
+ const std::shared_ptr<aidl_hal::IBurst> kBurst;
+ std::mutex mMutex;
+ int64_t mUnusedIdentifier GUARDED_BY(mMutex) = 0;
+ std::unordered_map<nn::SharedMemory, std::pair<int64_t, WeakCleanup>> mCache
+ GUARDED_BY(mMutex);
+ };
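+
+    // Illustrative MemoryCache usage (a sketch only; `burst` and `memory` are hypothetical
+    // names, not part of this change):
+    //
+    //   auto cache = std::make_shared<MemoryCache>(burst);
+    //   auto [identifier, hold] = cache->getOrCacheMemory(memory);
+    //   // `identifier` may be passed to IBurst::executeSynchronously. The cache entry is
+    //   // freed once `hold` and every copy of it have been destroyed.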
+
+ static nn::GeneralResult<std::shared_ptr<const Burst>> create(
+ std::shared_ptr<aidl_hal::IBurst> burst);
+
+ Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst);
+
+ // See IBurst::cacheMemory for information.
+ OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
+
+ // See IBurst::execute for information.
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ private:
+ mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
+ const std::shared_ptr<aidl_hal::IBurst> kBurst;
+ const std::shared_ptr<MemoryCache> kMemoryCache;
+};
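+
+// Illustrative Burst usage (a sketch only; `aidlBurst`, `request`, and `memory` are
+// hypothetical names, not part of this change):
+//
+//   const auto burst = Burst::create(std::move(aidlBurst)).value();
+//   const auto hold = burst->cacheMemory(memory);  // optional: pre-cache a memory pool
+//   const auto [outputShapes, timing] =
+//           burst->execute(request, nn::MeasureTiming::NO, {}, {}).value();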
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 9b28588..abce6cc 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -22,7 +22,6 @@
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/aidl/ProtectCallback.h>
#include <memory>
#include <tuple>
@@ -35,8 +34,7 @@
namespace aidl::android::hardware::neuralnetworks::utils {
// Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel,
- public std::enable_shared_from_this<PreparedModel> {
+class PreparedModel final : public nn::IPreparedModel {
struct PrivateConstructorTag {};
public:
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
new file mode 100644
index 0000000..0b475bc
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Burst.h"
+
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <utility>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+namespace {
+
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
+ const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+ return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
+}
+
+} // namespace
+
+Burst::MemoryCache::MemoryCache(std::shared_ptr<aidl_hal::IBurst> burst)
+ : kBurst(std::move(burst)) {}
+
+std::pair<int64_t, Burst::MemoryCache::SharedCleanup> Burst::MemoryCache::getOrCacheMemory(
+ const nn::SharedMemory& memory) {
+ std::lock_guard lock(mMutex);
+
+ // Get the cache payload or create it (with default values) if it does not exist.
+ auto& cachedPayload = mCache[memory];
+ {
+ const auto& [identifier, maybeCleaner] = cachedPayload;
+ // If cache payload already exists, reuse it.
+ if (auto cleaner = maybeCleaner.lock()) {
+ return std::make_pair(identifier, std::move(cleaner));
+ }
+ }
+
+    // If the code reaches this point, the cached payload either did not exist or had expired
+    // prior to this call.
+
+ // Allocate a new identifier.
+ CHECK_LT(mUnusedIdentifier, std::numeric_limits<int64_t>::max());
+ const int64_t identifier = mUnusedIdentifier++;
+
+    // Create a reference-counted, self-cleaning cache entry. The cleanup task captures only a
+    // weak_ptr to this MemoryCache to avoid a reference cycle, and attempts to free the memory
+    // resource once the last SharedCleanup hold has been destroyed.
+ auto self = weak_from_this();
+ Task cleanup = [memory, identifier, maybeMemoryCache = std::move(self)] {
+ if (const auto memoryCache = maybeMemoryCache.lock()) {
+ memoryCache->tryFreeMemory(memory, identifier);
+ }
+ };
+ auto cleaner = std::make_shared<const Cleanup>(std::move(cleanup));
+
+ // Store the result in the cache and return it.
+ auto result = std::make_pair(identifier, std::move(cleaner));
+ cachedPayload = result;
+ return result;
+}
+
+std::optional<std::pair<int64_t, Burst::MemoryCache::SharedCleanup>>
+Burst::MemoryCache::getMemoryIfAvailable(const nn::SharedMemory& memory) {
+ std::lock_guard lock(mMutex);
+
+ // Get the existing cached entry if it exists.
+ const auto iter = mCache.find(memory);
+ if (iter != mCache.end()) {
+ const auto& [identifier, maybeCleaner] = iter->second;
+ if (auto cleaner = maybeCleaner.lock()) {
+ return std::make_pair(identifier, std::move(cleaner));
+ }
+ }
+
+ // If the code reaches this point, the cached payload did not exist or was actively being
+ // deleted.
+ return std::nullopt;
+}
+
+void Burst::MemoryCache::tryFreeMemory(const nn::SharedMemory& memory, int64_t identifier) {
+ {
+ std::lock_guard guard(mMutex);
+ // Remove the cached memory and payload if it is present but expired. Note that it may not
+ // be present or may not be expired because another thread may have removed or cached the
+ // same memory object before the current thread locked mMutex in tryFreeMemory.
+ const auto iter = mCache.find(memory);
+ if (iter != mCache.end()) {
+ if (std::get<WeakCleanup>(iter->second).expired()) {
+ mCache.erase(iter);
+ }
+ }
+ }
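+    // Release the driver-side resource outside the critical section so that mMutex is not held
+    // across a potentially slow binder call.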
+ kBurst->releaseMemoryResource(identifier);
+}
+
+nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
+ std::shared_ptr<aidl_hal::IBurst> burst) {
+ if (burst == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "aidl_hal::utils::Burst::create must have non-null burst";
+ }
+
+ return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst));
+}
+
+Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst)
+ : kBurst(std::move(burst)), kMemoryCache(std::make_shared<MemoryCache>(kBurst)) {
+ CHECK(kBurst != nullptr);
+}
+
+Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) const {
+ auto [identifier, hold] = kMemoryCache->getOrCacheMemory(memory);
+ return hold;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that at most one execution is in flight at any given time.
+ const bool alreadyInFlight = mExecutionInFlight.test_and_set();
+ if (alreadyInFlight) {
+ return NN_ERROR() << "IBurst already has an execution in flight";
+ }
+ const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
+
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
+ hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+
+ const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+ const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+ const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ const auto aidlLoopTimeoutDuration =
+ NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+
+ std::vector<int64_t> memoryIdentifierTokens;
+ std::vector<OptionalCacheHold> holds;
+ memoryIdentifierTokens.reserve(request.pools.size());
+ holds.reserve(request.pools.size());
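+    // For each request pool that is a SharedMemory object with a live cache entry, pass its
+    // identifier and retain the hold so the entry outlives this call; all other pools get the
+    // token -1, indicating no cached resource.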
+ for (const auto& memoryPool : request.pools) {
+ if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
+ if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
+ auto& [identifier, hold] = *cached;
+ memoryIdentifierTokens.push_back(identifier);
+ holds.push_back(std::move(hold));
+ continue;
+ }
+ }
+ memoryIdentifierTokens.push_back(-1);
+ }
+ CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
+
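+    // Issue the execution across the AIDL boundary and handle any transport error.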
+ ExecutionResult executionResult;
+ const auto ret =
+ kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
+ aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+ HANDLE_ASTATUS(ret) << "execute failed";
+ if (!executionResult.outputSufficientSize) {
+ auto canonicalOutputShapes =
+ nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
+ return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+ << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ }
+ auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
+ convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+
+ NN_TRY(hal::utils::makeExecutionFailure(
+ hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
+
+ return std::make_pair(std::move(outputShapes), timing);
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index aee4d90..003965b 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -16,9 +16,9 @@
#include "PreparedModel.h"
+#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
-#include "ProtectCallback.h"
#include "Utils.h"
#include <android/binder_auto_utils.h>
@@ -26,7 +26,6 @@
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
-#include <nnapi/hal/1.0/Burst.h>
#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
@@ -161,7 +160,10 @@
}
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
- return hal::V1_0::utils::Burst::create(shared_from_this());
+ std::shared_ptr<IBurst> burst;
+ const auto ret = kPreparedModel->configureExecutionBurst(&burst);
+ HANDLE_ASTATUS(ret) << "configureExecutionBurst failed";
+ return Burst::create(std::move(burst));
}
std::any PreparedModel::getUnderlyingResource() const {
diff --git a/neuralnetworks/aidl/utils/test/MockBurst.h b/neuralnetworks/aidl/utils/test/MockBurst.h
new file mode 100644
index 0000000..5083bbd
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/MockBurst.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
+#include <android/binder_interface_utils.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/Status.h>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+class MockBurst final : public BnBurst {
+ public:
+ MOCK_METHOD(ndk::ScopedAStatus, executeSynchronously,
+ (const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
+ ExecutionResult* executionResult),
+ (override));
+ MOCK_METHOD(ndk::ScopedAStatus, releaseMemoryResource, (int64_t memoryIdentifierToken),
+ (override));
+};
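+
+// Illustrative gmock setup (a sketch only; see PreparedModelTest.cpp for real usage):
+//
+//   const auto mockBurst = ndk::SharedRefBase::make<MockBurst>();
+//   EXPECT_CALL(*mockBurst, releaseMemoryResource(testing::_))
+//           .WillRepeatedly(testing::InvokeWithoutArgs([] { return ndk::ScopedAStatus::ok(); }));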
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index 83304bc..630a460 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "MockBurst.h"
#include "MockFencedExecutionCallback.h"
#include "MockPreparedModel.h"
@@ -255,6 +256,10 @@
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
+ const auto mockBurst = ndk::SharedRefBase::make<MockBurst>();
+ EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(mockBurst), Invoke(makeStatusOk)));
const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
// run test
@@ -266,6 +271,54 @@
EXPECT_NE(result.value(), nullptr);
}
+TEST(PreparedModelTest, configureExecutionBurstError) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+ // run test
+ const auto result = preparedModel->configureExecutionBurst();
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+ // run test
+ const auto result = preparedModel->configureExecutionBurst();
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstDeadObject) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+ // run test
+ const auto result = preparedModel->configureExecutionBurst();
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
TEST(PreparedModelTest, getUnderlyingResource) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();