Introduce reusable execution to the canonical interface -- HAL.

This CL modifies the canonical interface for reusable executions:
- Add a new interface, IExecution, with compute and computeFenced methods
- Add a new method, IPreparedModel::createReusableExecution

In the NNAPI runtime, the new IExecution interface is used to
memoize request-specific execution resources (e.g., the converted HAL
request). The expected usage is that
IPreparedModel::createReusableExecution is invoked on the first
computation of a reusable NDK ANNExecution object, and
IExecution::compute* is invoked repeatedly thereafter.
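
A minimal usage sketch (the locals and error handling here are
illustrative only, not part of this CL):

    // First computation on a reusable execution: create the
    // IExecution object, memoizing the request-specific resources.
    const nn::SharedExecution execution =
            preparedModel->createReusableExecution(request, measure,
                                                   loopTimeoutDuration)
                    .value();

    // Every subsequent computation reuses the memoized resources.
    const auto [outputShapes, timing] = execution->compute(deadline).value();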

The IPreparedModel::execute* methods are preserved to avoid the
object-creation and memoization overhead for a one-shot
(non-reusable) execution.

For a vendor implementing the canonical interfaces, only the
IPreparedModel::execute* methods will be called, because there is
currently no reusable execution at the HAL interface. A
DefaultExecution implementation is provided to reduce the work needed
on the vendor side.
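
As a rough sketch (assuming DefaultExecution stores the prepared
model and the memoized request as members; the actual class shape may
differ), such a default simply forwards each computation to the
prepared model's one-shot path:

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
    DefaultExecution::compute(const nn::OptionalTimePoint& deadline) const {
        // Forward to the non-reusable execution path with the stored
        // request-specific arguments.
        return kPreparedModel->execute(kRequest, kMeasure, deadline,
                                       kLoopTimeoutDuration);
    }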

Bug: 184073769
Test: NNT_static
Test: neuralnetworks_utils_hal_1_0_test
Test: neuralnetworks_utils_hal_1_1_test
Test: neuralnetworks_utils_hal_1_2_test
Test: neuralnetworks_utils_hal_1_3_test
Test: neuralnetworks_utils_hal_common_test
Test: neuralnetworks_utils_hal_aidl_test
Change-Id: I91790bb5ccf5ae648687fe603f88ffda2c9fd2b2
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 8fe6b90..fdc90df 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -20,6 +20,7 @@
 #include <cutils/native_handle.h>
 #include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
 #include <nnapi/Types.h>
 #include <functional>
 #include <vector>
@@ -59,19 +60,70 @@
 nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
         const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
 
+// Record a relocation mapping between pointer-based data and shared memory.
+// Only two specializations of this template may exist:
+// - RelocationInfo<const void*> for request inputs
+// - RelocationInfo<void*> for request outputs
+template <typename PointerType>
+struct RelocationInfo {
+    PointerType data;
+    size_t length;
+    size_t offset;
+};
+using InputRelocationInfo = RelocationInfo<const void*>;
+using OutputRelocationInfo = RelocationInfo<void*>;
+
+// Tracks the relocation mapping between pointer-based data and a shared memory
+// pool, and provides a method to copy the data between the pointers and the
+// shared memory pool.
+// Only two specializations of this template may exist:
+// - RelocationTracker<InputRelocationInfo> for request inputs
+// - RelocationTracker<OutputRelocationInfo> for request outputs
+template <typename RelocationInfoType>
+class RelocationTracker {
+  public:
+    static nn::GeneralResult<std::unique_ptr<RelocationTracker>> create(
+            std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory) {
+        auto mapping = NN_TRY(map(memory));
+        return std::make_unique<RelocationTracker<RelocationInfoType>>(
+                std::move(relocationInfos), std::move(memory), std::move(mapping));
+    }
+
+    RelocationTracker(std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory,
+                      nn::Mapping mapping)
+        : kRelocationInfos(std::move(relocationInfos)),
+          kMemory(std::move(memory)),
+          kMapping(std::move(mapping)) {}
+
+    // Specializations defined in CommonUtils.cpp.
+    // For InputRelocationTracker, this method will copy pointer data to the shared memory pool.
+    // For OutputRelocationTracker, this method will copy shared memory data to the pointers.
+    void flush() const;
+
+  private:
+    const std::vector<RelocationInfoType> kRelocationInfos;
+    const nn::SharedMemory kMemory;
+    const nn::Mapping kMapping;
+};
+using InputRelocationTracker = RelocationTracker<InputRelocationInfo>;
+using OutputRelocationTracker = RelocationTracker<OutputRelocationInfo>;
+
+struct RequestRelocation {
+    std::unique_ptr<InputRelocationTracker> input;
+    std::unique_ptr<OutputRelocationTracker> output;
+};
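+
+// Usage sketch (`relocation` names a hypothetical local): after
+// `convertRequestFromPointerToShared` below fills in a RequestRelocation, flush
+// the input relocation before each computation and the output relocation after:
+//   if (relocation.input) { relocation.input->flush(); }    // pointers -> pool
+//   // ... run the computation on the converted request ...
+//   if (relocation.output) { relocation.output->flush(); }  // pool -> pointers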
+
 // Relocate pointer-based data to shared memory. If `request` has no
 // Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
 // `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
 // `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
-// returns with a reference to `*maybeRequestInSharedOut`.
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
-
-// Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
-// `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
-// Request object back to the output pointer-based memory in the original Request object.
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
-        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);
+// returns with a reference to `*maybeRequestInSharedOut`. `relocationOut` will be
+// set to track the input and output relocations.
+//
+// Unlike `flushDataFromPointerToShared`, this function does not itself copy the
+// input pointer data into the shared memory pool. Use `relocationOut` to flush
+// the input data before the computation and the output data after it.
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
+        RequestRelocation* relocationOut);
 
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
new file mode 100644
index 0000000..5b00221
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidExecution final : public nn::IExecution {
+  public:
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+            const nn::OptionalTimePoint& deadline) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index 3e1dca7..de30aae 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -40,6 +40,10 @@
             const nn::OptionalDuration& loopTimeoutDuration,
             const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
     std::any getUnderlyingResource() const override;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
new file mode 100644
index 0000000..d0084e8
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class ResilientExecution final : public nn::IExecution,
+                                 public std::enable_shared_from_this<ResilientExecution> {
+    struct PrivateConstructorTag {};
+
+  public:
+    using Factory = std::function<nn::GeneralResult<nn::SharedExecution>()>;
+
+    static nn::GeneralResult<std::shared_ptr<const ResilientExecution>> create(
+            Factory makeExecution);
+
+    ResilientExecution(PrivateConstructorTag tag, Factory makeExecution,
+                       nn::SharedExecution execution);
+
+    nn::SharedExecution getExecution() const;
+    nn::GeneralResult<nn::SharedExecution> recover(const nn::IExecution* failingExecution) const;
+
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+            const nn::OptionalTimePoint& deadline) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+            const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+  private:
+    bool isValidInternal() const EXCLUDES(mMutex);
+
+    const Factory kMakeExecution;
+    mutable std::mutex mMutex;
+    mutable nn::SharedExecution mExecution GUARDED_BY(mMutex);
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index a6c1b19..86533ed 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -58,12 +58,19 @@
             const nn::OptionalDuration& loopTimeoutDuration,
             const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
+    nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
     std::any getUnderlyingResource() const override;
 
   private:
     bool isValidInternal() const EXCLUDES(mMutex);
+    nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalDuration& loopTimeoutDuration) const;
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
 
     const Factory kMakePreparedModel;
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 4d26795..eaeb9ad 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -200,10 +200,31 @@
     return **maybeModelInSharedOut;
 }
 
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+template <>
+void InputRelocationTracker::flush() const {
+    // Copy from pointers to shared memory.
+    uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
+    for (const auto& [data, length, offset] : kRelocationInfos) {
+        std::memcpy(memoryPtr + offset, data, length);
+    }
+}
+
+template <>
+void OutputRelocationTracker::flush() const {
+    // Copy from shared memory to pointers.
+    const uint8_t* memoryPtr = static_cast<const uint8_t*>(
+            std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
+    for (const auto& [data, length, offset] : kRelocationInfos) {
+        std::memcpy(data, memoryPtr + offset, length);
+    }
+}
+
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
+        RequestRelocation* relocationOut) {
     CHECK(request != nullptr);
     CHECK(maybeRequestInSharedOut != nullptr);
+    CHECK(relocationOut != nullptr);
 
     if (hasNoPointerData(*request)) {
         return *request;
@@ -213,8 +234,11 @@
     // to the caller through `maybeRequestInSharedOut` if the function succeeds.
     nn::Request requestInShared = *request;
 
+    RequestRelocation relocation;
+
     // Change input pointers to shared memory.
-    nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
+    nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
+    std::vector<InputRelocationInfo> inputRelocationInfos;
     for (auto& input : requestInShared.inputs) {
         const auto& location = input.location;
         if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -225,17 +249,21 @@
         const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
                                       location.pointer);
         CHECK(data != nullptr);
-        input.location = inputBuilder.append(data, location.length);
+        input.location = inputBuilder.append(location.length);
+        inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
     }
 
     // Allocate input memory.
     if (!inputBuilder.empty()) {
         auto memory = NN_TRY(inputBuilder.finish());
-        requestInShared.pools.push_back(std::move(memory));
+        requestInShared.pools.push_back(memory);
+        relocation.input = NN_TRY(
+                InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
     }
 
     // Change output pointers to shared memory.
     nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
+    std::vector<OutputRelocationInfo> outputRelocationInfos;
     for (auto& output : requestInShared.outputs) {
         const auto& location = output.location;
         if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -243,62 +271,25 @@
         }
 
         output.lifetime = nn::Request::Argument::LifeTime::POOL;
+        void* data = std::get<void*>(location.pointer);
+        CHECK(data != nullptr);
         output.location = outputBuilder.append(location.length);
+        outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
     }
 
     // Allocate output memory.
     if (!outputBuilder.empty()) {
         auto memory = NN_TRY(outputBuilder.finish());
-        requestInShared.pools.push_back(std::move(memory));
+        requestInShared.pools.push_back(memory);
+        relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
+                                                                   std::move(memory)));
     }
 
     *maybeRequestInSharedOut = requestInShared;
+    *relocationOut = std::move(relocation);
     return **maybeRequestInSharedOut;
 }
 
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
-        const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
-    if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
-        !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
-        return {};
-    }
-    const auto& requestInShared = *maybeRequestInShared;
-
-    // Map the memory.
-    const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
-    const auto [pointer, size, context] = NN_TRY(map(outputMemory));
-    const uint8_t* constantPointer =
-            std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
-
-    // Flush each output pointer.
-    CHECK_EQ(request.outputs.size(), requestInShared.outputs.size());
-    for (size_t i = 0; i < request.outputs.size(); ++i) {
-        const auto& location = request.outputs[i].location;
-        const auto& locationInShared = requestInShared.outputs[i].location;
-        if (!std::holds_alternative<void*>(location.pointer)) {
-            continue;
-        }
-
-        // Get output pointer and size.
-        void* data = std::get<void*>(location.pointer);
-        CHECK(data != nullptr);
-        const size_t length = location.length;
-
-        // Get output pool location.
-        CHECK(requestInShared.outputs[i].lifetime == nn::Request::Argument::LifeTime::POOL);
-        const size_t index = locationInShared.poolIndex;
-        const size_t offset = locationInShared.offset;
-        const size_t outputPoolIndex = requestInShared.pools.size() - 1;
-        CHECK(locationInShared.length == length);
-        CHECK(index == outputPoolIndex);
-
-        // Flush memory.
-        std::memcpy(data, constantPointer + offset, length);
-    }
-
-    return {};
-}
-
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
     return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
diff --git a/neuralnetworks/utils/common/src/InvalidExecution.cpp b/neuralnetworks/utils/common/src/InvalidExecution.cpp
new file mode 100644
index 0000000..c4edd25
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidExecution.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidExecution.h"
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidExecution::compute(
+        const nn::OptionalTimePoint& /*deadline*/) const {
+    return NN_ERROR() << "InvalidExecution";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+                                const nn::OptionalTimePoint& /*deadline*/,
+                                const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR() << "InvalidExecution";
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9081e1f..8195462 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -42,6 +42,12 @@
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
+nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
 nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
diff --git a/neuralnetworks/utils/common/src/ResilientExecution.cpp b/neuralnetworks/utils/common/src/ResilientExecution.cpp
new file mode 100644
index 0000000..46b404a
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientExecution.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientExecution.h"
+
+#include "InvalidBurst.h"
+#include "ResilientBurst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientExecution& resilientExecution, const FnType& fn)
+        -> decltype(fn(*resilientExecution.getExecution())) {
+    auto execution = resilientExecution.getExecution();
+    auto result = fn(*execution);
+
+    // Immediately return if the execution is not dead.
+    if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+        return result;
+    }
+
+    // Attempt recovery and return if it fails.
+    auto maybeExecution = resilientExecution.recover(execution.get());
+    if (!maybeExecution.has_value()) {
+        const auto& [message, code] = maybeExecution.error();
+        std::ostringstream oss;
+        oss << ", and failed to recover dead prepared model with error " << code << ": " << message;
+        result.error().message += oss.str();
+        return result;
+    }
+    execution = std::move(maybeExecution).value();
+
+    return fn(*execution);
+}
+
+}  // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientExecution>> ResilientExecution::create(
+        Factory makeExecution) {
+    if (makeExecution == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "utils::ResilientExecution::create must have non-empty makeExecution";
+    }
+    auto execution = NN_TRY(makeExecution());
+    CHECK(execution != nullptr);
+    return std::make_shared<ResilientExecution>(PrivateConstructorTag{}, std::move(makeExecution),
+                                                std::move(execution));
+}
+
+ResilientExecution::ResilientExecution(PrivateConstructorTag /*tag*/, Factory makeExecution,
+                                       nn::SharedExecution execution)
+    : kMakeExecution(std::move(makeExecution)), mExecution(std::move(execution)) {
+    CHECK(kMakeExecution != nullptr);
+    CHECK(mExecution != nullptr);
+}
+
+nn::SharedExecution ResilientExecution::getExecution() const {
+    std::lock_guard guard(mMutex);
+    return mExecution;
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientExecution::recover(
+        const nn::IExecution* failingExecution) const {
+    std::lock_guard guard(mMutex);
+
+    // Another caller already updated the failing execution.
+    if (mExecution.get() != failingExecution) {
+        return mExecution;
+    }
+
+    mExecution = NN_TRY(kMakeExecution());
+    return mExecution;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientExecution::compute(const nn::OptionalTimePoint& deadline) const {
+    const auto fn = [&deadline](const nn::IExecution& execution) {
+        return execution.compute(deadline);
+    };
+    return protect(*this, fn);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientExecution::computeFenced(const std::vector<nn::SyncFence>& waitFor,
+                                  const nn::OptionalTimePoint& deadline,
+                                  const nn::OptionalDuration& timeoutDurationAfterFence) const {
+    const auto fn = [&waitFor, &deadline,
+                     &timeoutDurationAfterFence](const nn::IExecution& execution) {
+        return execution.computeFenced(waitFor, deadline, timeoutDurationAfterFence);
+    };
+    return protect(*this, fn);
+}
+
+bool ResilientExecution::isValidInternal() const {
+    return true;
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 5dd5f99..1ae19bc 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -17,7 +17,9 @@
 #include "ResilientPreparedModel.h"
 
 #include "InvalidBurst.h"
+#include "InvalidExecution.h"
 #include "ResilientBurst.h"
+#include "ResilientExecution.h"
 
 #include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
@@ -127,6 +129,21 @@
     return protect(*this, fn);
 }
 
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+    auto self = shared_from_this();
+    ResilientExecution::Factory makeExecution =
+            [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
+        return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    };
+    return ResilientExecution::create(std::move(makeExecution));
+#else
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
 nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
 #if 0
     auto self = shared_from_this();
@@ -140,6 +157,19 @@
 #endif
 }
 
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidExecution>();
+    }
+    const auto fn = [&request, measure,
+                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+    };
+    return protect(*this, fn);
+}
+
 std::any ResilientPreparedModel::getUnderlyingResource() const {
     return getPreparedModel()->getUnderlyingResource();
 }
diff --git a/neuralnetworks/utils/common/test/MockExecution.h b/neuralnetworks/utils/common/test/MockExecution.h
new file mode 100644
index 0000000..91e3428
--- /dev/null
+++ b/neuralnetworks/utils/common/test/MockExecution.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
+
+namespace android::nn {
+
+class MockExecution final : public IExecution {
+  public:
+    MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), compute,
+                (const OptionalTimePoint& deadline), (const, override));
+    MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), computeFenced,
+                (const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline,
+                 const OptionalDuration& timeoutDurationAfterFence),
+                (const, override));
+};
+
+}  // namespace android::nn
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c004861..c8ce006 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -35,6 +35,10 @@
                  const OptionalDuration& loopTimeoutDuration,
                  const OptionalDuration& timeoutDurationAfterFence),
                 (const, override));
+    MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
+                (const nn::Request& request, nn::MeasureTiming measure,
+                 const nn::OptionalDuration& loopTimeoutDuration),
+                (const, override));
     MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
     MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
 };
diff --git a/neuralnetworks/utils/common/test/ResilientExecution.cpp b/neuralnetworks/utils/common/test/ResilientExecution.cpp
new file mode 100644
index 0000000..c0737fb
--- /dev/null
+++ b/neuralnetworks/utils/common/test/ResilientExecution.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientExecution.h>
+#include <utility>
+#include "MockExecution.h"
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+using ::testing::_;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+using SharedMockExecution = std::shared_ptr<const nn::MockExecution>;
+using MockExecutionFactory = ::testing::MockFunction<nn::GeneralResult<nn::SharedExecution>()>;
+
+SharedMockExecution createMockExecution() {
+    return std::make_shared<const nn::MockExecution>();
+}
+
+std::tuple<SharedMockExecution, std::unique_ptr<MockExecutionFactory>,
+           std::shared_ptr<const ResilientExecution>>
+setup() {
+    auto mockExecution = createMockExecution();
+
+    auto mockExecutionFactory = std::make_unique<MockExecutionFactory>();
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(mockExecution));
+
+    auto execution = ResilientExecution::create(mockExecutionFactory->AsStdFunction()).value();
+    return std::make_tuple(std::move(mockExecution), std::move(mockExecutionFactory),
+                           std::move(execution));
+}
+
+constexpr auto makeError = [](nn::ErrorStatus status) {
+    return [status](const auto&... /*args*/) { return nn::error(status); };
+};
+const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
+const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
+
+const auto kNoExecutionError =
+        nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
+const auto kNoFencedExecutionError =
+        nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>(
+                std::make_pair(nn::SyncFence::createAsSignaled(), nullptr));
+
+}  // namespace
+
+TEST(ResilientExecutionTest, invalidExecutionFactory) {
+    // setup call
+    const auto invalidExecutionFactory = ResilientExecution::Factory{};
+
+    // run test
+    const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST(ResilientExecutionTest, executionFactoryFailure) {
+    // setup call
+    const auto invalidExecutionFactory = kReturnGeneralFailure;
+
+    // run test
+    const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, getExecution) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+
+    // run test
+    const auto result = execution->getExecution();
+
+    // verify result
+    EXPECT_TRUE(result == mockExecution);
+}
+
+TEST(ResilientExecutionTest, compute) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+
+    // run test
+    const auto result = execution->compute({});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeError) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = execution->compute({});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectFailedRecovery) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = execution->compute({});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectSuccessfulRecovery) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+    const auto recoveredMockExecution = createMockExecution();
+    EXPECT_CALL(*recoveredMockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+    // run test
+    const auto result = execution->compute({});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFenced) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, computeFenced(_, _, _))
+            .Times(1)
+            .WillOnce(Return(kNoFencedExecutionError));
+
+    // run test
+    const auto result = execution->computeFenced({}, {}, {});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFencedError) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = execution->computeFenced({}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectFailedRecovery) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = execution->computeFenced({}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectSuccessfulRecovery) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    const auto recoveredMockExecution = createMockExecution();
+    EXPECT_CALL(*recoveredMockExecution, computeFenced(_, _, _))
+            .Times(1)
+            .WillOnce(Return(kNoFencedExecutionError));
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+    // run test
+    const auto result = execution->computeFenced({}, {}, {});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, recover) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    const auto recoveredMockExecution = createMockExecution();
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+    // run test
+    const auto result = execution->recover(mockExecution.get());
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+TEST(ResilientExecutionTest, recoverFailure) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = execution->recover(mockExecution.get());
+
+    // verify result
+    EXPECT_FALSE(result.has_value());
+}
+
+TEST(ResilientExecutionTest, someoneElseRecovered) {
+    // setup call
+    const auto [mockExecution, mockExecutionFactory, execution] = setup();
+    const auto recoveredMockExecution = createMockExecution();
+    EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+    execution->recover(mockExecution.get());
+
+    // run test
+    const auto result = execution->recover(mockExecution.get());
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index 6d86e10..d396ca8 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -55,6 +55,7 @@
 const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
 const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
 
+const auto kNoCreateReusableExecutionError = nn::GeneralResult<nn::SharedExecution>{};
 const auto kNoExecutionError =
         nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
 const auto kNoFencedExecutionError =
@@ -231,6 +232,36 @@
             << "Failed with " << result.error().code << ": " << result.error().message;
 }
 
+TEST(ResilientPreparedModelTest, createReusableExecution) {
+    // setup call
+    const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+            .Times(1)
+            .WillOnce(Return(kNoCreateReusableExecutionError));
+
+    // run test
+    const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientPreparedModelTest, createReusableExecutionError) {
+    // setup call
+    const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+            .Times(1)
+            .WillOnce(kReturnGeneralFailure);
+
+    // run test
+    const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
 TEST(ResilientPreparedModelTest, getUnderlyingResource) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();