Introduce reusable execution to the canonical interface -- HAL.
This CL modifies the canonical interface to support reusable executions:
- Add new interface: IExecution, with compute and computeFenced methods
- Add new method: IPreparedModel::createReusableExecution
In the NNAPI runtime, the new IExecution interface is used to
memoize request-specific execution resources (e.g. the converted HAL
request). The expected usage is that
IPreparedModel::createReusableExecution is invoked on the first
computation of a reusable NDK ANeuralNetworksExecution object, and
IExecution::compute* is then invoked repeatedly, as sketched below.
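
For illustration only; this caller code is hypothetical and not part of
this CL (error handling elided):

  void computeRepeatedly(const nn::SharedPreparedModel& preparedModel,
                         const nn::Request& request) {
      // First computation: create the execution, memoizing request-specific
      // resources such as the converted HAL request.
      const nn::SharedExecution execution =
              preparedModel
                      ->createReusableExecution(request, nn::MeasureTiming::NO,
                                                /*loopTimeoutDuration=*/{})
                      .value();
      // Later computations reuse the memoized resources.
      const auto result1 = execution->compute(/*deadline=*/{});
      const auto result2 = execution->compute(/*deadline=*/{});
  }
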
The IPreparedModel::execute* methods are preserved to avoid the
redundant object creation and memoization overhead for a one-shot
(non-reusable) execution.
For a vendor implementing the canonical interfaces, only the
IPreparedModel::execute* methods will be called, because there is
currently no reusable execution at the HAL interface. A DefaultExecution
implementation is provided to reduce the work needed on the vendor side.
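
In spirit, DefaultExecution is a thin adapter over the existing
IPreparedModel::execute* methods. The following is a simplified,
hypothetical sketch of that idea, not the actual header (computeFenced
is omitted; it forwards to executeFenced analogously):

  class DefaultExecutionSketch : public nn::IExecution {
    public:
      DefaultExecutionSketch(nn::SharedPreparedModel preparedModel, nn::Request request,
                             nn::MeasureTiming measure,
                             nn::OptionalDuration loopTimeoutDuration)
          : kPreparedModel(std::move(preparedModel)),
            kRequest(std::move(request)),
            kMeasure(measure),
            kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {}

      // Each call forwards to the one-shot IPreparedModel::execute; a vendor
      // gets correct behavior without implementing any memoization.
      nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
              const nn::OptionalTimePoint& deadline) const override {
          return kPreparedModel->execute(kRequest, kMeasure, deadline, kLoopTimeoutDuration);
      }

    private:
      const nn::SharedPreparedModel kPreparedModel;
      const nn::Request kRequest;
      const nn::MeasureTiming kMeasure;
      const nn::OptionalDuration kLoopTimeoutDuration;
  };
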
Bug: 184073769
Test: NNT_static
Test: neuralnetworks_utils_hal_1_0_test
Test: neuralnetworks_utils_hal_1_1_test
Test: neuralnetworks_utils_hal_1_2_test
Test: neuralnetworks_utils_hal_1_3_test
Test: neuralnetworks_utils_hal_common_test
Test: neuralnetworks_utils_hal_aidl_test
Change-Id: I91790bb5ccf5ae648687fe603f88ffda2c9fd2b2
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 4d26795..eaeb9ad 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -200,10 +200,31 @@
return **maybeModelInSharedOut;
}
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
- const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+template <>
+void InputRelocationTracker::flush() const {
+ // Copy from pointers to shared memory.
+ uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(memoryPtr + offset, data, length);
+ }
+}
+
+template <>
+void OutputRelocationTracker::flush() const {
+ // Copy from shared memory to pointers.
+ const uint8_t* memoryPtr = static_cast<const uint8_t*>(
+ std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(data, memoryPtr + offset, length);
+ }
+}
+
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+ const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
+ RequestRelocation* relocationOut) {
CHECK(request != nullptr);
CHECK(maybeRequestInSharedOut != nullptr);
+ CHECK(relocationOut != nullptr);
if (hasNoPointerData(*request)) {
return *request;
@@ -213,8 +234,11 @@
// to the caller through `maybeRequestInSharedOut` if the function succeeds.
nn::Request requestInShared = *request;
+ RequestRelocation relocation;
+
// Change input pointers to shared memory.
- nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
+ nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
+ std::vector<InputRelocationInfo> inputRelocationInfos;
for (auto& input : requestInShared.inputs) {
const auto& location = input.location;
if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -225,17 +249,21 @@
const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
location.pointer);
CHECK(data != nullptr);
- input.location = inputBuilder.append(data, location.length);
+ input.location = inputBuilder.append(location.length);
+ inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
}
// Allocate input memory.
if (!inputBuilder.empty()) {
auto memory = NN_TRY(inputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.input = NN_TRY(
+ InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
}
// Change output pointers to shared memory.
nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
+ std::vector<OutputRelocationInfo> outputRelocationInfos;
for (auto& output : requestInShared.outputs) {
const auto& location = output.location;
if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -243,62 +271,25 @@
}
output.lifetime = nn::Request::Argument::LifeTime::POOL;
+ void* data = std::get<void*>(location.pointer);
+ CHECK(data != nullptr);
output.location = outputBuilder.append(location.length);
+ outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
}
// Allocate output memory.
if (!outputBuilder.empty()) {
auto memory = NN_TRY(outputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
+ std::move(memory)));
}
*maybeRequestInSharedOut = requestInShared;
+ *relocationOut = std::move(relocation);
return **maybeRequestInSharedOut;
}
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
- const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
- if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
- !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
- return {};
- }
- const auto& requestInShared = *maybeRequestInShared;
-
- // Map the memory.
- const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
- const auto [pointer, size, context] = NN_TRY(map(outputMemory));
- const uint8_t* constantPointer =
- std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
-
- // Flush each output pointer.
- CHECK_EQ(request.outputs.size(), requestInShared.outputs.size());
- for (size_t i = 0; i < request.outputs.size(); ++i) {
- const auto& location = request.outputs[i].location;
- const auto& locationInShared = requestInShared.outputs[i].location;
- if (!std::holds_alternative<void*>(location.pointer)) {
- continue;
- }
-
- // Get output pointer and size.
- void* data = std::get<void*>(location.pointer);
- CHECK(data != nullptr);
- const size_t length = location.length;
-
- // Get output pool location.
- CHECK(requestInShared.outputs[i].lifetime == nn::Request::Argument::LifeTime::POOL);
- const size_t index = locationInShared.poolIndex;
- const size_t offset = locationInShared.offset;
- const size_t outputPoolIndex = requestInShared.pools.size() - 1;
- CHECK(locationInShared.length == length);
- CHECK(index == outputPoolIndex);
-
- // Flush memory.
- std::memcpy(data, constantPointer + offset, length);
- }
-
- return {};
-}
-
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
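
Editorial note on the hunks above, not part of the patch: the one-shot
flushDataFromPointerToShared/unflushDataFromSharedToPointer pair is
replaced by reusable relocation trackers, so a reusable execution can
convert the request once and then re-copy pointer-backed data on every
computation. A caller is expected to drive them roughly as follows
(hypothetical sketch; RequestRelocation member names as used above):

  nn::GeneralResult<void> computeOnce(const nn::Request& request) {
      std::optional<nn::Request> maybeRequestInShared;
      RequestRelocation relocation;
      const nn::Request& requestInShared = NN_TRY(convertRequestFromPointerToShared(
              &request, &maybeRequestInShared, &relocation));
      if (relocation.input) relocation.input->flush();    // pointer data -> shared memory
      // ... run the computation on requestInShared ...
      if (relocation.output) relocation.output->flush();  // shared memory -> pointer data
      return {};
  }
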
diff --git a/neuralnetworks/utils/common/src/InvalidExecution.cpp b/neuralnetworks/utils/common/src/InvalidExecution.cpp
new file mode 100644
index 0000000..c4edd25
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidExecution.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidExecution.h"
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidExecution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9081e1f..8195462 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -42,6 +42,12 @@
return NN_ERROR() << "InvalidPreparedModel";
}
+nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ return NN_ERROR() << "InvalidPreparedModel";
+}
+
nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
return NN_ERROR() << "InvalidPreparedModel";
}
diff --git a/neuralnetworks/utils/common/src/ResilientExecution.cpp b/neuralnetworks/utils/common/src/ResilientExecution.cpp
new file mode 100644
index 0000000..46b404a
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientExecution.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientExecution.h"
+
+#include "InvalidBurst.h"
+#include "ResilientBurst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientExecution& resilientExecution, const FnType& fn)
+ -> decltype(fn(*resilientExecution.getExecution())) {
+ auto execution = resilientExecution.getExecution();
+ auto result = fn(*execution);
+
+    // Immediately return if the execution is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+ return result;
+ }
+
+ // Attempt recovery and return if it fails.
+ auto maybeExecution = resilientExecution.recover(execution.get());
+ if (!maybeExecution.has_value()) {
+ const auto& [message, code] = maybeExecution.error();
+ std::ostringstream oss;
+        oss << ", and failed to recover the dead execution with error " << code << ": " << message;
+ result.error().message += oss.str();
+ return result;
+ }
+ execution = std::move(maybeExecution).value();
+
+ return fn(*execution);
+}
+
+} // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientExecution>> ResilientExecution::create(
+ Factory makeExecution) {
+ if (makeExecution == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+ << "utils::ResilientExecution::create must have non-empty makeExecution";
+ }
+ auto execution = NN_TRY(makeExecution());
+ CHECK(execution != nullptr);
+ return std::make_shared<ResilientExecution>(PrivateConstructorTag{}, std::move(makeExecution),
+ std::move(execution));
+}
+
+ResilientExecution::ResilientExecution(PrivateConstructorTag /*tag*/, Factory makeExecution,
+ nn::SharedExecution execution)
+ : kMakeExecution(std::move(makeExecution)), mExecution(std::move(execution)) {
+ CHECK(kMakeExecution != nullptr);
+ CHECK(mExecution != nullptr);
+}
+
+nn::SharedExecution ResilientExecution::getExecution() const {
+ std::lock_guard guard(mMutex);
+ return mExecution;
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientExecution::recover(
+ const nn::IExecution* failingExecution) const {
+ std::lock_guard guard(mMutex);
+
+    // Another caller already updated the failing execution.
+ if (mExecution.get() != failingExecution) {
+ return mExecution;
+ }
+
+ mExecution = NN_TRY(kMakeExecution());
+ return mExecution;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientExecution::compute(const nn::OptionalTimePoint& deadline) const {
+ const auto fn = [&deadline](const nn::IExecution& execution) {
+ return execution.compute(deadline);
+ };
+ return protect(*this, fn);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientExecution::computeFenced(const std::vector<nn::SyncFence>& waitFor,
+ const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto fn = [&waitFor, &deadline,
+ &timeoutDurationAfterFence](const nn::IExecution& execution) {
+ return execution.computeFenced(waitFor, deadline, timeoutDurationAfterFence);
+ };
+ return protect(*this, fn);
+}
+
+bool ResilientExecution::isValidInternal() const {
+ return true;
+}
+
+} // namespace android::hardware::neuralnetworks::utils
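
Editorial note, not part of the patch: compute and computeFenced both go
through protect(), which, on a DEAD_OBJECT failure, recreates the
underlying execution via the stored factory and retries the call exactly
once. A hypothetical caller wires the wrapper up like this:

  nn::GeneralResult<nn::SharedExecution> makeResilientExecution(
          nn::SharedPreparedModel preparedModel, nn::Request request) {
      ResilientExecution::Factory makeExecution = [preparedModel = std::move(preparedModel),
                                                   request = std::move(request)] {
          return preparedModel->createReusableExecution(request, nn::MeasureTiming::NO,
                                                        /*loopTimeoutDuration=*/{});
      };
      return ResilientExecution::create(std::move(makeExecution));
  }
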
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 5dd5f99..1ae19bc 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -17,7 +17,9 @@
#include "ResilientPreparedModel.h"
#include "InvalidBurst.h"
+#include "InvalidExecution.h"
#include "ResilientBurst.h"
+#include "ResilientExecution.h"
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
@@ -127,6 +129,21 @@
return protect(*this, fn);
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+ auto self = shared_from_this();
+ ResilientExecution::Factory makeExecution =
+ [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
+ return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ };
+ return ResilientExecution::create(std::move(makeExecution));
+#else
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
#if 0
auto self = shared_from_this();
@@ -140,6 +157,19 @@
#endif
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ if (!isValidInternal()) {
+ return std::make_shared<const InvalidExecution>();
+ }
+ const auto fn = [&request, measure,
+ &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
+ return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+ };
+ return protect(*this, fn);
+}
+
std::any ResilientPreparedModel::getUnderlyingResource() const {
return getPreparedModel()->getUnderlyingResource();
}