Add tests to make sure executeFenced rejects unspecified output shapes
Bug: 148979873
Test: mm
Test: VtsHalNeuralnetworksV1_3TargetTest
Change-Id: Ib4960c71ca46ca034777cc7b02d7d2885a98691d
Merged-In: Ib4960c71ca46ca034777cc7b02d7d2885a98691d
(cherry picked from commit fa161c7a0a2d1719680248ccc6d8978209a13d19)
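---

For reference, the driver-side behavior these tests pin down is that
executeFenced must reject a request whose output shapes are not fully
specified, returning INVALID_ARGUMENT with a null sync fence and a null
fenced-execution callback; the tests now also run the FENCED executor in
the DYNAMIC_SHAPE pass and propagate the driver-reported status instead
of coercing it to GENERAL_FAILURE. A minimal sketch of the expected check,
assuming a hypothetical helper isFullySpecified() and the sample driver's
SamplePreparedModel class (only the callback arguments on failure are
fixed by the 1.3 HAL):

    Return<void> SamplePreparedModel::executeFenced(const Request& request, /* ... */
                                                    executeFenced_cb cb) {
        for (const RequestArgument& output : request.outputs) {
            // isFullySpecified() is a hypothetical check: the output operand's
            // dimensions, combined with any override in the request argument,
            // must leave no dimension unknown (i.e. equal to 0).
            if (!isFullySpecified(output)) {
                cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr);
                return Void();
            }
        }
        // ... otherwise launch the execution and return a sync fence ...
        return Void();
    }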
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 3e2be8a..83a8d94 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -640,7 +640,7 @@
if (result != ErrorStatus::NONE) {
ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
ASSERT_EQ(fencedCallback, nullptr);
- executionStatus = ErrorStatus::GENERAL_FAILURE;
+ executionStatus = result;
} else if (syncFenceHandle.getNativeHandle()) {
// If a sync fence is returned, try to start another run waiting for the sync fence.
ret = preparedModel->executeFenced(request, {syncFenceHandle},
@@ -663,9 +663,7 @@
}
}
- // The driver is allowed to reject executeFenced, and if they do, we should skip.
- if ((testConfig.outputType != OutputType::FULLY_SPECIFIED ||
- testConfig.executor == Executor::FENCED) &&
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
if (skipped != nullptr) {
*skipped = true;
@@ -698,12 +696,22 @@
outputShapes.size() == testModel.main.outputIndexes.size());
break;
case OutputType::UNSPECIFIED:
+ if (testConfig.executor == Executor::FENCED) {
+ // For Executor::FENCED, the output shape must be fully specified.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
+ }
// If the model output operands are not fully specified, outputShapes must have
// the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
break;
case OutputType::INSUFFICIENT:
+ if (testConfig.executor == Executor::FENCED) {
+ // For Executor::FENCED, the output shape must be fully specified.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
+ }
ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
ASSERT_FALSE(outputShapes[0].isSufficient);
@@ -746,7 +754,7 @@
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
- executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
} break;
case TestKind::MEMORY_DOMAIN: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
@@ -928,8 +936,13 @@
INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
-INSTANTIATE_GENERATED_TEST(FencedComputeTest,
- [](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
+    // Fenced execution requires fully specified output shapes, so only run
+    // models whose outputs all carry non-empty reference data.
+    return !testModel.expectFailure &&
+           std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+                       [&testModel](uint32_t index) {
+                           return testModel.main.operands[index].data.size() > 0;
+                       });
+});
INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
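---

For illustration, the new FencedComputeTest filter can be read as a
standalone predicate. A minimal sketch with hypothetical stand-ins for
the test-harness types (the real definitions live in the shared
TestHarness library):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for the fields the filter touches.
    struct TestOperand { std::vector<uint8_t> data; };
    struct TestSubgraph {
        std::vector<TestOperand> operands;
        std::vector<uint32_t> outputIndexes;
    };
    struct TestModel { bool expectFailure; TestSubgraph main; };

    // Run fenced compute only for models that are expected to succeed and
    // whose outputs all carry non-empty reference data; an empty output
    // buffer implies a zero-sized (not fully specified) output shape, which
    // executeFenced must reject rather than compute.
    bool runFencedCompute(const TestModel& testModel) {
        return !testModel.expectFailure &&
               std::all_of(testModel.main.outputIndexes.begin(),
                           testModel.main.outputIndexes.end(),
                           [&testModel](uint32_t index) {
                               return testModel.main.operands[index].data.size() > 0;
                           });
    }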