Reusable execution at HAL level -- HAL.
This CL modifies the canonical/AIDL adapter to use the IExecution object
when it is available: for drivers reporting feature level 8 or above,
reusable executions are created through
IPreparedModel::createReusableExecution; for older drivers, the adapter
falls back to ExecutionWithCachedRequest, which caches the request and
resends it to the driver on every computation.
Bug: 202405342
Bug: 202431255
Test: NNT_static
Test: CtsNNAPITestCases
Test: VtsHalNeuralnetworksTargetTest
Change-Id: I6aac3c57f97ac87a5ba3f78cfd843fcc403decff
Merged-In: I6aac3c57f97ac87a5ba3f78cfd843fcc403decff
(cherry picked from commit 7f5c7d293c2dad462dc9c0f1f1a160fb2c2c9a9b)
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 37ad6d6..9437d5c 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -26,7 +26,14 @@
cc_defaults {
name: "neuralnetworks_utils_hal_aidl_defaults",
defaults: ["neuralnetworks_utils_defaults"],
- srcs: ["src/*"],
+ srcs: [
+ // AIDL utils that a driver may depend on.
+ "src/BufferTracker.cpp",
+ "src/Conversions.cpp",
+ "src/HalUtils.cpp",
+ "src/Utils.cpp",
+ "src/ValidateHal.cpp",
+ ],
local_include_dirs: ["include/nnapi/hal/aidl/"],
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
@@ -47,6 +54,7 @@
},
}
+// Deprecated. Remove this once all modules that depend on it have migrated away.
cc_library_static {
name: "neuralnetworks_utils_hal_aidl_v1",
defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
@@ -56,19 +64,25 @@
}
cc_library_static {
- name: "neuralnetworks_utils_hal_aidl_v2",
- defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
- shared_libs: [
- "android.hardware.neuralnetworks-V2-ndk",
- ],
-}
-
-cc_library_static {
name: "neuralnetworks_utils_hal_aidl",
defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
- shared_libs: [
- "android.hardware.neuralnetworks-V3-ndk",
+ srcs: [
+ // Additional AIDL utils for the runtime.
+ "src/Assertions.cpp",
+ "src/Buffer.cpp",
+ "src/Burst.cpp",
+ "src/Callbacks.cpp",
+ "src/Device.cpp",
+ "src/Execution.cpp",
+ "src/InvalidDevice.cpp",
+ "src/PreparedModel.cpp",
+ "src/ProtectCallback.cpp",
+ "src/Service.cpp",
],
+ shared_libs: [
+ "android.hardware.neuralnetworks-V4-ndk",
+ ],
+ cflags: ["-DNN_AIDL_V4_OR_ABOVE"],
}
// A cc_defaults that includes the latest non-experimental AIDL utilities and other AIDL libraries
@@ -79,9 +93,10 @@
static_libs: [
"android.hardware.common-V2-ndk",
"android.hardware.graphics.common-V2-ndk",
- "android.hardware.neuralnetworks-V3-ndk",
+ "android.hardware.neuralnetworks-V4-ndk",
"neuralnetworks_utils_hal_aidl",
],
+ cflags: ["-DNN_AIDL_V4_OR_ABOVE"],
}
cc_test {
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
index 168264b..960be2b 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
@@ -36,6 +36,8 @@
public:
using Data = nn::GeneralResult<nn::SharedPreparedModel>;
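+    // featureLevel is the feature level of the IDevice that creates the prepared model; it is
+    // forwarded to PreparedModel::create when the callback is notified.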
+    explicit PreparedModelCallback(nn::Version featureLevel) : kFeatureLevel(featureLevel) {}
+
ndk::ScopedAStatus notify(ErrorStatus status,
const std::shared_ptr<IPreparedModel>& preparedModel) override;
@@ -44,6 +46,7 @@
Data get();
private:
+ const nn::Version kFeatureLevel;
hal::utils::TransferValue<Data> mData;
};
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
index a77ea98..14802b9 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+
#include <nnapi/IExecution.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
@@ -33,17 +35,22 @@
namespace aidl::android::hardware::neuralnetworks::utils {
-class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+// A reusable execution implementation with a cached Request; internally, it still passes the
+// request to the driver on every computation.
+class ExecutionWithCachedRequest final
+ : public nn::IExecution,
+ public std::enable_shared_from_this<ExecutionWithCachedRequest> {
struct PrivateConstructorTag {};
public:
- static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ static nn::GeneralResult<std::shared_ptr<const ExecutionWithCachedRequest>> create(
std::shared_ptr<const PreparedModel> preparedModel, Request request,
hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration);
- Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
- Request request, hal::utils::RequestRelocation relocation, bool measure,
- int64_t loopTimeoutDuration);
+ ExecutionWithCachedRequest(PrivateConstructorTag tag,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
const nn::OptionalTimePoint& deadline) const override;
@@ -60,6 +67,30 @@
const int64_t kLoopTimeoutDuration;
};
+// A reusable execution implementation that is backed by an actual AIDL IExecution object.
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<aidl_hal::IExecution> kExecution;
+ const hal::utils::RequestRelocation kRelocation;
+};
+
} // namespace aidl::android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
index 3fb443c..205d428 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
@@ -61,6 +61,11 @@
#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
#include <aidl/android/hardware/neuralnetworks/Timing.h>
+#ifdef NN_AIDL_V4_OR_ABOVE
+#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+#endif // NN_AIDL_V4_OR_ABOVE
+
namespace android::nn {
namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 4035764..24cd681 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -41,10 +41,11 @@
public:
static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel);
PreparedModel(PrivateConstructorTag tag,
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel,
+ nn::Version featureLevel);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
@@ -78,6 +79,7 @@
private:
const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
+ const nn::Version kFeatureLevel;
};
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index a27487e..beca38b 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -38,6 +38,8 @@
return nn::kVersionFeatureLevel6;
case 3:
return nn::kVersionFeatureLevel7;
+ case 4:
+ return nn::kVersionFeatureLevel8;
default:
return std::nullopt;
}
diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp
index 8084970..554f3fa 100644
--- a/neuralnetworks/aidl/utils/src/Callbacks.cpp
+++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp
@@ -38,16 +38,17 @@
// nn::kVersionFeatureLevel5. On failure, this function returns with the appropriate
// nn::GeneralError.
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
- ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
+ ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel,
+ nn::Version featureLevel) {
HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status);
- return NN_TRY(PreparedModel::create(preparedModel));
+ return NN_TRY(PreparedModel::create(preparedModel, featureLevel));
}
} // namespace
ndk::ScopedAStatus PreparedModelCallback::notify(
ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
- mData.put(prepareModelCallback(status, preparedModel));
+ mData.put(prepareModelCallback(status, preparedModel, kFeatureLevel));
return ndk::ScopedAStatus::ok();
}
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index 5b7ec4e..bad10ed 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -229,7 +229,7 @@
const auto aidlDataCache = NN_TRY(convert(dataCache));
const auto aidlToken = NN_TRY(convert(token));
- const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
@@ -247,7 +247,7 @@
const auto aidlDataCache = NN_TRY(convert(dataCache));
const auto aidlToken = NN_TRY(convert(token));
- const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModelFromCache(aidlDeadline, aidlModelCache, aidlDataCache,
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index 94edd90..c4add63 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -35,36 +35,39 @@
namespace aidl::android::hardware::neuralnetworks::utils {
-nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
- std::shared_ptr<const PreparedModel> preparedModel, Request request,
- hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
+nn::GeneralResult<std::shared_ptr<const ExecutionWithCachedRequest>>
+ExecutionWithCachedRequest::create(std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation,
+ bool measure, int64_t loopTimeoutDuration) {
if (preparedModel == nullptr) {
- return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
+ return NN_ERROR() << "aidl::utils::ExecutionWithCachedRequest::create must have non-null "
+ "preparedModel";
}
- return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
- std::move(request), std::move(relocation), measure,
- loopTimeoutDuration);
+ return std::make_shared<const ExecutionWithCachedRequest>(
+ PrivateConstructorTag{}, std::move(preparedModel), std::move(request),
+ std::move(relocation), measure, loopTimeoutDuration);
}
-Execution::Execution(PrivateConstructorTag /*tag*/,
- std::shared_ptr<const PreparedModel> preparedModel, Request request,
- hal::utils::RequestRelocation relocation, bool measure,
- int64_t loopTimeoutDuration)
+ExecutionWithCachedRequest::ExecutionWithCachedRequest(
+ PrivateConstructorTag /*tag*/, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration)
: kPreparedModel(std::move(preparedModel)),
kRequest(std::move(request)),
kRelocation(std::move(relocation)),
kMeasure(measure),
kLoopTimeoutDuration(loopTimeoutDuration) {}
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
- const nn::OptionalTimePoint& deadline) const {
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionWithCachedRequest::compute(const nn::OptionalTimePoint& deadline) const {
const auto aidlDeadline = NN_TRY(convert(deadline));
return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
kRelocation);
}
-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ExecutionWithCachedRequest::computeFenced(
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& timeoutDurationAfterFence) const {
const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -75,4 +78,18 @@
aidlTimeoutDurationAfterFence, kRelocation);
}
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<aidl_hal::IExecution> execution, hal::utils::RequestRelocation relocation) {
+ if (execution == nullptr) {
+ return NN_ERROR() << "aidl::utils::Execution::create must have non-null execution";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(execution),
+ std::move(relocation));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation)
+ : kExecution(std::move(execution)), kRelocation(std::move(relocation)) {}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index f25c2c8..6d1de56 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -54,21 +54,77 @@
return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
}
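+// Converts the AIDL ExecutionResult to canonical form and, on success, flushes any relocated
+// output memory back to the caller. Shared by the PreparedModel and Execution execution paths.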
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> handleExecutionResult(
+ const ExecutionResult& result, const hal::utils::RequestRelocation& relocation) {
+ if (!result.outputSufficientSize) {
+ auto canonicalOutputShapes =
+ nn::convert(result.outputShapes).value_or(std::vector<nn::OutputShape>{});
+ return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+ << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ }
+ auto [outputShapes, timing] =
+ NN_TRY(convertExecutionResults(result.outputShapes, result.timing));
+
+ if (relocation.output) {
+ relocation.output->flush();
+ }
+ return std::make_pair(std::move(outputShapes), timing);
+}
+
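+// Converts the AIDL FencedExecutionResult into a canonical sync fence and info callback. If the
+// request memory was relocated, this waits for the fence and flushes the output memory back
+// before returning.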
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+handleFencedExecutionResult(const FencedExecutionResult& result,
+ const hal::utils::RequestRelocation& relocation) {
+ auto resultSyncFence = nn::SyncFence::createAsSignaled();
+ if (result.syncFence.get() != -1) {
+ resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
+ }
+
+ auto callback = result.callback;
+ if (callback == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
+ }
+
+    // If the fenced execution required the request memory to be moved into shared memory, block
+    // here until the execution has completed and flush the memory back.
+ if (relocation.output) {
+ const auto state = resultSyncFence.syncWait({});
+ if (state != nn::SyncFence::FenceState::SIGNALED) {
+ return NN_ERROR() << "syncWait failed with " << state;
+ }
+ relocation.output->flush();
+ }
+
+    // Create a callback that can be used to retrieve the execution error status and timings.
+ nn::ExecuteFencedInfoCallback resultCallback =
+ [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
+ ErrorStatus errorStatus;
+ Timing timingLaunched;
+ Timing timingFenced;
+ const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
+ HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
+ return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
+ };
+
+ return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+}
+
} // namespace
nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel) {
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel) {
if (preparedModel == nullptr) {
return NN_ERROR()
<< "aidl_hal::utils::PreparedModel::create must have non-null preparedModel";
}
- return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel));
+ return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+ featureLevel);
}
PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel)
- : kPreparedModel(std::move(preparedModel)) {}
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel,
+ nn::Version featureLevel)
+ : kPreparedModel(std::move(preparedModel)), kFeatureLevel(featureLevel) {}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
@@ -101,19 +157,7 @@
const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "executeSynchronously failed";
- if (!executionResult.outputSufficientSize) {
- auto canonicalOutputShapes =
- nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
- return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
- << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
- }
- auto [outputShapes, timing] =
- NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
-
- if (relocation.output) {
- relocation.output->flush();
- }
- return std::make_pair(std::move(outputShapes), timing);
+ return handleExecutionResult(executionResult, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -154,39 +198,7 @@
kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
timeoutDurationAfterFence, &result);
HANDLE_ASTATUS(ret) << "executeFenced failed";
-
- auto resultSyncFence = nn::SyncFence::createAsSignaled();
- if (result.syncFence.get() != -1) {
- resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
- }
-
- auto callback = result.callback;
- if (callback == nullptr) {
- return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
- }
-
- // If executeFenced required the request memory to be moved into shared memory, block here until
- // the fenced execution has completed and flush the memory back.
- if (relocation.output) {
- const auto state = resultSyncFence.syncWait({});
- if (state != nn::SyncFence::FenceState::SIGNALED) {
- return NN_ERROR() << "syncWait failed with " << state;
- }
- relocation.output->flush();
- }
-
- // Create callback which can be used to retrieve the execution error status and timings.
- nn::ExecuteFencedInfoCallback resultCallback =
- [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
- ErrorStatus errorStatus;
- Timing timingLaunched;
- Timing timingFenced;
- const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
- HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
- return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
- };
-
- return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+ return handleFencedExecutionResult(result, relocation);
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
@@ -202,8 +214,18 @@
auto aidlRequest = NN_TRY(convert(requestInShared));
auto aidlMeasure = NN_TRY(convert(measure));
auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
- return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
- aidlMeasure, aidlLoopTimeoutDuration);
+
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ std::shared_ptr<IExecution> execution;
+ const auto ret = kPreparedModel->createReusableExecution(
+ aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+ HANDLE_ASTATUS(ret) << "createReusableExecution failed";
+ return Execution::create(std::move(execution), std::move(relocation));
+ }
+
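+    // The driver does not support IExecution; emulate a reusable execution by caching the
+    // request and issuing a regular synchronous or fenced execution on every computation.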
+ return ExecutionWithCachedRequest::create(shared_from_this(), std::move(aidlRequest),
+ std::move(relocation), aidlMeasure,
+ aidlLoopTimeoutDuration);
}
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
@@ -218,4 +240,36 @@
return resource;
}
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+
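+    // If the inputs were moved into shared memory, flush the caller's input data into that
+    // memory before executing.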
+ if (kRelocation.input) {
+ kRelocation.input->flush();
+ }
+
+ ExecutionResult executionResult;
+ auto ret = kExecution->executeSynchronously(aidlDeadline, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ return handleExecutionResult(executionResult, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto aidlWaitFor = NN_TRY(convert(waitFor));
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+ const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+
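+    // If the inputs were moved into shared memory, flush the caller's input data into that
+    // memory before executing.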
+ if (kRelocation.input) {
+ kRelocation.input->flush();
+ }
+
+ FencedExecutionResult result;
+ const auto ret = kExecution->executeFenced(aidlWaitFor, aidlDeadline,
+ aidlTimeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFenced failed";
+ return handleFencedExecutionResult(result, kRelocation);
+}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index 0366e7d..fb13af8 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -17,6 +17,7 @@
#include "MockBuffer.h"
#include "MockDevice.h"
#include "MockPreparedModel.h"
+#include "TestUtils.h"
#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
#include <android/binder_auto_utils.h>
@@ -146,26 +147,7 @@
return ndk::ScopedAStatus::fromStatus(STATUS_DEAD_OBJECT);
};
-class DeviceTest : public ::testing::TestWithParam<nn::Version> {
- protected:
- const nn::Version kVersion = GetParam();
-};
-
-std::string printDeviceTest(const testing::TestParamInfo<nn::Version>& info) {
- const nn::Version version = info.param;
- CHECK(!version.runtimeOnlyFeatures);
- switch (version.level) {
- case nn::Version::Level::FEATURE_LEVEL_5:
- return "v1";
- case nn::Version::Level::FEATURE_LEVEL_6:
- return "v2";
- case nn::Version::Level::FEATURE_LEVEL_7:
- return "v3";
- default:
- LOG(FATAL) << "Invalid AIDL version: " << version;
- return "invalid";
- }
-}
+class DeviceTest : public VersionedAidlUtilsTestBase {};
} // namespace
@@ -894,9 +876,6 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-INSTANTIATE_TEST_SUITE_P(TestDevice, DeviceTest,
- ::testing::Values(nn::kVersionFeatureLevel5, nn::kVersionFeatureLevel6,
- nn::kVersionFeatureLevel7),
- printDeviceTest);
+INSTANTIATE_VERSIONED_AIDL_UTILS_TEST(DeviceTest, kAllAidlVersions);
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/test/ExecutionTest.cpp b/neuralnetworks/aidl/utils/test/ExecutionTest.cpp
new file mode 100644
index 0000000..8519290
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/ExecutionTest.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MockExecution.h"
+#include "MockFencedExecutionCallback.h"
+
+#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/Execution.h>
+
+#include <functional>
+#include <memory>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+namespace {
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
+using ::testing::SetArgPointee;
+
+const std::shared_ptr<IExecution> kInvalidExecution;
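+// A timing value of -1 indicates that the duration is not available.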
+constexpr auto kNoTiming = Timing{.timeOnDeviceNs = -1, .timeInDriverNs = -1};
+
+constexpr auto makeStatusOk = [] { return ndk::ScopedAStatus::ok(); };
+
+constexpr auto makeGeneralFailure = [] {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+};
+constexpr auto makeGeneralTransportFailure = [] {
+ return ndk::ScopedAStatus::fromStatus(STATUS_NO_MEMORY);
+};
+constexpr auto makeDeadObjectFailure = [] {
+ return ndk::ScopedAStatus::fromStatus(STATUS_DEAD_OBJECT);
+};
+
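+// Returns a gmock action that produces a FencedExecutionResult holding the given callback and an
+// empty (-1) sync fence, which the utils code treats as already signaled.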
+auto makeFencedExecutionResult(const std::shared_ptr<MockFencedExecutionCallback>& callback) {
+ return [callback](const std::vector<ndk::ScopedFileDescriptor>& /*waitFor*/,
+ int64_t /*deadline*/, int64_t /*duration*/,
+ FencedExecutionResult* fencedExecutionResult) {
+ *fencedExecutionResult = FencedExecutionResult{.callback = callback,
+ .syncFence = ndk::ScopedFileDescriptor(-1)};
+ return ndk::ScopedAStatus::ok();
+ };
+}
+
+} // namespace
+
+TEST(ExecutionTest, invalidExecution) {
+ // run test
+ const auto result = Execution::create(kInvalidExecution, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeSync) {
+ // setup call
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ const auto mockExecutionResult = ExecutionResult{
+ .outputSufficientSize = true,
+ .outputShapes = {},
+ .timing = kNoTiming,
+ };
+ EXPECT_CALL(*mockExecution, executeSynchronously(_, _))
+ .Times(1)
+ .WillOnce(
+ DoAll(SetArgPointee<1>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ EXPECT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ExecutionTest, executeSyncError) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeSynchronously(_, _))
+ .Times(1)
+ .WillOnce(Invoke(makeGeneralFailure));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeSyncTransportFailure) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeSynchronously(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeSyncDeadObject) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeSynchronously(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ExecutionTest, executeFenced) {
+ // setup call
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+ EXPECT_CALL(*mockExecution, executeFenced(_, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ const auto& [syncFence, callback] = result.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code << ": "
+ << callbackResult.error().message;
+}
+
+TEST(ExecutionTest, executeFencedCallbackError) {
+ // setup call
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+ Invoke(makeStatusOk))));
+ EXPECT_CALL(*mockExecution, executeFenced(_, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ const auto& [syncFence, callback] = result.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeFencedError) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeFenced(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeFencedTransportFailure) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeFenced(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ExecutionTest, executeFencedDeadObject) {
+ // setup test
+ const auto mockExecution = MockExecution::create();
+ const auto execution = Execution::create(mockExecution, {}).value();
+ EXPECT_CALL(*mockExecution, executeFenced(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/test/MockExecution.h b/neuralnetworks/aidl/utils/test/MockExecution.h
new file mode 100644
index 0000000..216f569
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/MockExecution.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_EXECUTION_H
+
+#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <android/binder_interface_utils.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/Status.h>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
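+// A gmock implementation of the AIDL IExecution interface, allowing tests to stub the
+// executeSynchronously and executeFenced calls.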
+class MockExecution final : public BnExecution {
+ public:
+ static std::shared_ptr<MockExecution> create();
+
+ MOCK_METHOD(ndk::ScopedAStatus, executeSynchronously,
+ (int64_t deadline, ExecutionResult* executionResult), (override));
+ MOCK_METHOD(ndk::ScopedAStatus, executeFenced,
+ (const std::vector<ndk::ScopedFileDescriptor>& waitFor, int64_t deadline,
+ int64_t duration, FencedExecutionResult* fencedExecutionResult),
+ (override));
+};
+
+inline std::shared_ptr<MockExecution> MockExecution::create() {
+ return ndk::SharedRefBase::make<MockExecution>();
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_EXECUTION_H
diff --git a/neuralnetworks/aidl/utils/test/MockPreparedModel.h b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
index a4ae2b7..0ed9af9 100644
--- a/neuralnetworks/aidl/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
@@ -41,6 +42,10 @@
(override));
MOCK_METHOD(ndk::ScopedAStatus, configureExecutionBurst, (std::shared_ptr<IBurst> * burst),
(override));
+ MOCK_METHOD(ndk::ScopedAStatus, createReusableExecution,
+ (const Request& request, bool measureTiming, int64_t loopTimeoutDuration,
+ std::shared_ptr<IExecution>* execution),
+ (override));
};
inline std::shared_ptr<MockPreparedModel> MockPreparedModel::create() {
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index 8bb5c90..8cfb7c1 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -15,8 +15,10 @@
*/
#include "MockBurst.h"
+#include "MockExecution.h"
#include "MockFencedExecutionCallback.h"
#include "MockPreparedModel.h"
+#include "TestUtils.h"
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
#include <gmock/gmock.h>
@@ -66,21 +68,23 @@
};
}
+class PreparedModelTest : public VersionedAidlUtilsTestBase {};
+
} // namespace
-TEST(PreparedModelTest, invalidPreparedModel) {
+TEST_P(PreparedModelTest, invalidPreparedModel) {
// run test
- const auto result = PreparedModel::create(kInvalidPreparedModel);
+ const auto result = PreparedModel::create(kInvalidPreparedModel, kVersion);
// verify result
ASSERT_FALSE(result.has_value());
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeSync) {
+TEST_P(PreparedModelTest, executeSync) {
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockExecutionResult = ExecutionResult{
.outputSufficientSize = true,
.outputShapes = {},
@@ -99,10 +103,10 @@
<< "Failed with " << result.error().code << ": " << result.error().message;
}
-TEST(PreparedModelTest, executeSyncError) {
+TEST_P(PreparedModelTest, executeSyncError) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(makeGeneralFailure));
@@ -115,10 +119,10 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeSyncTransportFailure) {
+TEST_P(PreparedModelTest, executeSyncTransportFailure) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -131,10 +135,10 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeSyncDeadObject) {
+TEST_P(PreparedModelTest, executeSyncDeadObject) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -147,10 +151,10 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-TEST(PreparedModelTest, executeFenced) {
+TEST_P(PreparedModelTest, executeFenced) {
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockCallback = MockFencedExecutionCallback::create();
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
.Times(1)
@@ -176,10 +180,10 @@
<< callbackResult.error().message;
}
-TEST(PreparedModelTest, executeFencedCallbackError) {
+TEST_P(PreparedModelTest, executeFencedCallbackError) {
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockCallback = MockFencedExecutionCallback::create();
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
.Times(1)
@@ -206,10 +210,10 @@
EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeFencedError) {
+TEST_P(PreparedModelTest, executeFencedError) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
@@ -222,10 +226,10 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeFencedTransportFailure) {
+TEST_P(PreparedModelTest, executeFencedTransportFailure) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -238,10 +242,10 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, executeFencedDeadObject) {
+TEST_P(PreparedModelTest, executeFencedDeadObject) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -254,11 +258,13 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-TEST(PreparedModelTest, reusableExecuteSync) {
+TEST_P(PreparedModelTest, reusableExecuteSync) {
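+    // From feature level 8, the adapter uses IPreparedModel::createReusableExecution rather than
+    // emulating reusable executions with cached requests, so this test only applies to older
+    // versions.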
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const uint32_t kNumberOfComputations = 2;
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockExecutionResult = ExecutionResult{
.outputSufficientSize = true,
.outputShapes = {},
@@ -283,10 +289,12 @@
}
}
-TEST(PreparedModelTest, reusableExecuteSyncError) {
+TEST_P(PreparedModelTest, reusableExecuteSyncError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(Invoke(makeGeneralFailure));
@@ -303,10 +311,12 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+TEST_P(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -323,10 +333,12 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+TEST_P(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -343,11 +355,13 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-TEST(PreparedModelTest, reusableExecuteFenced) {
+TEST_P(PreparedModelTest, reusableExecuteFenced) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const uint32_t kNumberOfComputations = 2;
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockCallback = MockFencedExecutionCallback::create();
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
.Times(kNumberOfComputations)
@@ -379,10 +393,12 @@
}
}
-TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
+TEST_P(PreparedModelTest, reusableExecuteFencedCallbackError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup call
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
const auto mockCallback = MockFencedExecutionCallback::create();
EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
.Times(1)
@@ -413,10 +429,12 @@
EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, reusableExecuteFencedError) {
+TEST_P(PreparedModelTest, reusableExecuteFencedError) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
@@ -433,10 +451,12 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+TEST_P(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -453,10 +473,12 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
+TEST_P(PreparedModelTest, reusableExecuteFencedDeadObject) {
+ if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -473,14 +495,14 @@
EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-TEST(PreparedModelTest, configureExecutionBurst) {
+TEST_P(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
const auto mockBurst = ndk::SharedRefBase::make<MockBurst>();
EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
.Times(1)
.WillOnce(DoAll(SetArgPointee<0>(mockBurst), Invoke(makeStatusOk)));
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
const auto result = preparedModel->configureExecutionBurst();
@@ -491,13 +513,13 @@
EXPECT_NE(result.value(), nullptr);
}
-TEST(PreparedModelTest, configureExecutionBurstError) {
+TEST_P(PreparedModelTest, configureExecutionBurstError) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralFailure));
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
const auto result = preparedModel->configureExecutionBurst();
@@ -507,13 +529,13 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, configureExecutionBurstTransportFailure) {
+TEST_P(PreparedModelTest, configureExecutionBurstTransportFailure) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
const auto result = preparedModel->configureExecutionBurst();
@@ -523,13 +545,13 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
-TEST(PreparedModelTest, configureExecutionBurstDeadObject) {
+TEST_P(PreparedModelTest, configureExecutionBurstDeadObject) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
.Times(1)
.WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
const auto result = preparedModel->configureExecutionBurst();
@@ -539,10 +561,84 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
-TEST(PreparedModelTest, getUnderlyingResource) {
+TEST_P(PreparedModelTest, createReusableExecution) {
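+    // IPreparedModel::createReusableExecution is only used from feature level 8.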
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
- const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockExecution = ndk::SharedRefBase::make<MockExecution>();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<3>(mockExecution), Invoke(makeStatusOk)));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_NE(result.value(), nullptr);
+}
+
+TEST_P(PreparedModelTest, createReusableExecutionError) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, createReusableExecutionTransportFailure) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, createReusableExecutionDeadObject) {
+ if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(PreparedModelTest, getUnderlyingResource) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
// run test
const auto resource = preparedModel->getUnderlyingResource();
@@ -554,4 +650,6 @@
EXPECT_EQ(maybeMock->get(), mockPreparedModel.get());
}
+INSTANTIATE_VERSIONED_AIDL_UTILS_TEST(PreparedModelTest, kAllAidlVersions);
+
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/test/TestUtils.cpp b/neuralnetworks/aidl/utils/test/TestUtils.cpp
new file mode 100644
index 0000000..9abec88
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/TestUtils.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TestUtils.h"
+
+#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <string>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+std::string printTestVersion(const testing::TestParamInfo<nn::Version>& info) {
+ switch (info.param.level) {
+ case nn::Version::Level::FEATURE_LEVEL_5:
+ return "v1";
+ case nn::Version::Level::FEATURE_LEVEL_6:
+ return "v2";
+ case nn::Version::Level::FEATURE_LEVEL_7:
+ return "v3";
+ case nn::Version::Level::FEATURE_LEVEL_8:
+ return "v4";
+ default:
+ LOG(FATAL) << "Invalid AIDL version: " << info.param;
+ return "invalid";
+ }
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/test/TestUtils.h b/neuralnetworks/aidl/utils/test/TestUtils.h
new file mode 100644
index 0000000..23f734a
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/TestUtils.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_TEST_UTILS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_TEST_UTILS_H
+
+#include <gtest/gtest.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <string>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
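+// Base fixture for test suites parameterized over the canonical nn::Version; the version
+// under test is exposed to subclasses as kVersion.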
+class VersionedAidlUtilsTestBase : public ::testing::TestWithParam<nn::Version> {
+ protected:
+ const nn::Version kVersion = GetParam();
+};
+
+std::string printTestVersion(const testing::TestParamInfo<nn::Version>& info);
+
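+// All AIDL interface versions exercised by the versioned test suites.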
+inline const auto kAllAidlVersions =
+ ::testing::Values(nn::kVersionFeatureLevel5, nn::kVersionFeatureLevel6,
+ nn::kVersionFeatureLevel7, nn::kVersionFeatureLevel8);
+
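+// Example usage: INSTANTIATE_VERSIONED_AIDL_UTILS_TEST(PreparedModelTest, kAllAidlVersions);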
+#define INSTANTIATE_VERSIONED_AIDL_UTILS_TEST(TestSuite, versions) \
+ INSTANTIATE_TEST_SUITE_P(Versioned, TestSuite, versions, printTestVersion)
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_TEST_UTILS_H
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index cd5475c..b3e9c63 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -208,6 +208,11 @@
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
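+ // As with the methods above, an invalid prepared model rejects the new reusable-execution
+ // entry point with GENERAL_FAILURE.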
+ ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, bool, int64_t,
+ std::shared_ptr<aidl_hal::IExecution>*) override {
+ return ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+ }
};
template <typename... Args>
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Execution.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Execution.h
new file mode 100644
index 0000000..6a9ac57
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Execution.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_EXECUTION_H
+
+#include "nnapi/hal/aidl/Adapter.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IExecution to BnExecution.
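+// Calls are forwarded to the wrapped canonical execution, converting arguments and results
+// between the AIDL and canonical (nn::) representations.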
+class Execution : public BnExecution {
+ public:
+ explicit Execution(::android::nn::SharedExecution execution);
+
+ ndk::ScopedAStatus executeSynchronously(int64_t deadlineNs,
+ ExecutionResult* executionResult) override;
+ ndk::ScopedAStatus executeFenced(const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ int64_t deadlineNs, int64_t durationNs,
+ FencedExecutionResult* fencedExecutionResult) override;
+
+ protected:
+ const ::android::nn::SharedExecution kExecution;
+};
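+
+// Minimal usage sketch (illustrative only; assumes a bound IPreparedModel `preparedModel`
+// and a populated Request `request`, with -1 denoting an omitted deadline/duration):
+//   std::shared_ptr<IExecution> execution;
+//   if (preparedModel->createReusableExecution(request, /*measureTiming=*/false,
+//                                              /*loopTimeoutDurationNs=*/-1, &execution)
+//               .isOk()) {
+//       ExecutionResult result;
+//       execution->executeSynchronously(/*deadlineNs=*/-1, &result);
+//   }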
+
+} // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_EXECUTION_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
index 93e0427..f92b0bc 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
@@ -23,6 +23,7 @@
#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
@@ -50,6 +51,9 @@
int64_t loopTimeoutDurationNs, int64_t durationNs,
FencedExecutionResult* executionResult) override;
ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;
+ ndk::ScopedAStatus createReusableExecution(const Request& request, bool measureTiming,
+ int64_t loopTimeoutDurationNs,
+ std::shared_ptr<IExecution>* execution) override;
::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;
diff --git a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
index 71ed1a8..5cab62c 100644
--- a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
@@ -17,6 +17,7 @@
#include "PreparedModel.h"
#include "Burst.h"
+#include "Execution.h"
#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
@@ -26,6 +27,7 @@
#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
@@ -167,6 +169,56 @@
.syncFence = std::move(fileDescriptor)};
}
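+// Converts the AIDL request and timing arguments to canonical types, then requests a
+// reusable execution object from the canonical prepared model.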
+nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::IPreparedModel& preparedModel, const Request& request, bool measureTiming,
+ int64_t loopTimeoutDurationNs) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+ const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+ return preparedModel.createReusableExecution(nnRequest, nnMeasureTiming, nnLoopTimeoutDuration);
+}
+
+nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IExecution& execution,
+ int64_t deadlineNs) {
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+
+ const auto result = execution.compute(nnDeadline);
+
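+ // OUTPUT_INSUFFICIENT_SIZE is reported through ExecutionResult rather than as a
+ // service-specific error so the client still receives the actual output shapes.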
+ if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+ const auto& [message, code, outputShapes] = result.error();
+ LOG(ERROR) << "executeSynchronously failed with " << code << ": " << message;
+ return ExecutionResult{.outputSufficientSize = false,
+ .outputShapes = utils::convert(outputShapes).value(),
+ .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
+ }
+
+ const auto& [outputShapes, timing] = NN_TRY(result);
+ return ExecutionResult{.outputSufficientSize = true,
+ .outputShapes = utils::convert(outputShapes).value(),
+ .timing = utils::convert(timing).value()};
+}
+
+nn::GeneralResult<FencedExecutionResult> executeFenced(
+ const nn::IExecution& execution, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ int64_t deadlineNs, int64_t durationNs) {
+ const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
+ const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+ const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));
+
+ auto [syncFence, executeFencedInfoCallback] =
+ NN_TRY(execution.computeFenced(nnWaitFor, nnDeadline, nnDuration));
+
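+ // Duplicate the sync fence fd (when present) so an owned copy can be returned to the
+ // client.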
+ ndk::ScopedFileDescriptor fileDescriptor;
+ if (syncFence.hasFd()) {
+ auto uniqueFd = NN_TRY(nn::dupFd(syncFence.getFd()));
+ fileDescriptor = ndk::ScopedFileDescriptor(uniqueFd.release());
+ }
+
+ return FencedExecutionResult{.callback = ndk::SharedRefBase::make<FencedExecutionCallback>(
+ std::move(executeFencedInfoCallback)),
+ .syncFence = std::move(fileDescriptor)};
+}
+
} // namespace

PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
@@ -222,4 +274,51 @@
return kPreparedModel;
}
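+// Creates a canonical reusable execution and wraps it in the AIDL Execution adapter;
+// failures are converted to service-specific errors carrying the canonical error code.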
+ndk::ScopedAStatus PreparedModel::createReusableExecution(const Request& request,
+ bool measureTiming,
+ int64_t loopTimeoutDurationNs,
+ std::shared_ptr<IExecution>* execution) {
+ auto result = adapter::createReusableExecution(*kPreparedModel, request, measureTiming,
+ loopTimeoutDurationNs);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *execution = ndk::SharedRefBase::make<Execution>(std::move(result).value());
+ return ndk::ScopedAStatus::ok();
+}
+
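+// The adapter shares ownership of the canonical execution, which must be non-null.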
+Execution::Execution(nn::SharedExecution execution) : kExecution(std::move(execution)) {
+ CHECK(kExecution != nullptr);
+}
+
+ndk::ScopedAStatus Execution::executeSynchronously(int64_t deadlineNs,
+ ExecutionResult* executionResult) {
+ auto result = adapter::executeSynchronously(*kExecution, deadlineNs);
+ if (!result.has_value()) {
+ const auto& [message, code, _] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Execution::executeFenced(const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ int64_t deadlineNs, int64_t durationNs,
+ FencedExecutionResult* executionResult) {
+ auto result = adapter::executeFenced(*kExecution, waitFor, deadlineNs, durationNs);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ static_cast<int32_t>(aidlCode), message.c_str());
+ }
+ *executionResult = std::move(result).value();
+ return ndk::ScopedAStatus::ok();
+}
+
} // namespace aidl::android::hardware::neuralnetworks::adapter