Add NNAPI loop timeout API

Update the NN HAL 1.3 VTS tests for the new loop timeout argument: pass
an empty value ({}) in every call to execute_1_3,
executeSynchronously_1_3, and executeFenced, preserving the tests'
existing behaviour under the updated method signatures.
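For reference, a minimal sketch of the updated call pattern, mirroring the
ExecutePreparedModel helper in GeneratedTestHarness.cpp (the parameter name
loopTimeoutDuration and the type of the new fourth argument are assumptions
inferred from the added {} argument, not verified against the .hal files in
this change):

    // Sketch only: launch an asynchronous 1.3 execution with no explicit
    // deadline and no explicit loop timeout. An empty optional ({}) asks
    // the driver to use its default behaviour for both.
    static Return<ErrorStatus> launchExecution(const sp<IPreparedModel>& preparedModel,
                                               const Request& request,
                                               const sp<ExecutionCallback>& callback) {
        return preparedModel->execute_1_3(request, MeasureTiming::NO,
                                          /*deadline=*/{},
                                          /*loopTimeoutDuration=*/{}, callback);
    }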
Bug: 145906499
Bug: 136735929
Test: NNT_static
Change-Id: Ie40c248b174964676985403f9f1a5127b408a00a
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 404c2a1..89edfb7 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -496,7 +496,7 @@
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
sp<ExecutionCallback>& callback) {
- return preparedModel->execute_1_3(request, measure, {}, callback);
+ return preparedModel->execute_1_3(request, measure, {}, {}, callback);
}
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
@@ -504,7 +504,7 @@
Timing* timing) {
ErrorStatus result;
Return<void> ret = preparedModel->executeSynchronously_1_3(
- request, measure, {},
+ request, measure, {}, {},
[&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
const Timing& time) {
result = error;
@@ -612,7 +612,7 @@
hidl_handle syncFenceHandle;
sp<IFencedExecutionCallback> fencedCallback;
Return<void> ret = preparedModel->executeFenced(
- request, {}, testConfig.measureTiming, {}, {},
+ request, {}, testConfig.measureTiming, {}, {}, {},
[&result, &syncFenceHandle, &fencedCallback](
ErrorStatus error, const hidl_handle& handle,
const sp<IFencedExecutionCallback>& callback) {
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 8271135..fccc612 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -171,7 +171,7 @@
// launch execution
const sp<ExecutionCallback> callback = new ExecutionCallback();
- Return<ErrorStatus> ret = preparedModel->execute_1_3(request, measure, deadline, callback);
+ Return<ErrorStatus> ret = preparedModel->execute_1_3(request, measure, deadline, {}, callback);
EXPECT_TRUE(ret.isOk());
EXPECT_EQ(ErrorStatus::NONE, ret.withDefault(ErrorStatus::GENERAL_FAILURE));
if (!ret.isOk() || ret != ErrorStatus::NONE) return std::nullopt;
@@ -198,7 +198,7 @@
// run execution
const Return<void> ret =
- preparedModel->executeSynchronously_1_3(request, measure, deadline, cb);
+ preparedModel->executeSynchronously_1_3(request, measure, deadline, {}, cb);
EXPECT_TRUE(ret.isOk());
if (!ret.isOk()) return std::nullopt;
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 2a4269f..20f4fe2 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -70,7 +70,7 @@
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
Return<ErrorStatus> executeLaunchStatus =
- preparedModel->execute_1_3(request, measure, deadline, executionCallback);
+ preparedModel->execute_1_3(request, measure, deadline, {}, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -88,7 +88,7 @@
SCOPED_TRACE(message + " [executeSynchronously_1_3]");
Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
- request, measure, deadline,
+ request, measure, deadline, {},
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
const Timing& timing) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -143,7 +143,7 @@
{
SCOPED_TRACE(message + " [executeFenced]");
Return<void> ret =
- preparedModel->executeFenced(request, {}, MeasureTiming::NO, deadline, {},
+ preparedModel->executeFenced(request, {}, MeasureTiming::NO, deadline, {}, {},
[](ErrorStatus error, const hidl_handle& handle,
const sp<IFencedExecutionCallback>& callback) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -196,7 +196,7 @@
void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
SCOPED_TRACE("Expecting request to fail [executeSynchronously_1_3]");
Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
- request, MeasureTiming::NO, {},
+ request, MeasureTiming::NO, {}, {},
[](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
ASSERT_NE(ErrorStatus::NONE, error);
EXPECT_EQ(outputShapes.size(), 0);
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 9a87569..16341da 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -137,7 +137,7 @@
void validateExecuteFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {
SCOPED_TRACE("Expecting request to fail [executeFenced]");
Return<void> ret_null = preparedModel->executeFenced(
- request, {hidl_handle(nullptr)}, V1_2::MeasureTiming::NO, {}, {},
+ request, {hidl_handle(nullptr)}, V1_2::MeasureTiming::NO, {}, {}, {},
[](ErrorStatus error, const hidl_handle& handle,
const sp<IFencedExecutionCallback>& callback) {
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);