Add VTS test for dynamic output shape.
Test dynamic output shape with generated models when:
- Dimensions of output operands are fully specified
- Dimensions of output operands are unspecified with sufficient buffer
- Dimensions of output operands are unspecified with insufficient buffer
Test: VTS on 1.2 sample driver
Change-Id: I4d26395ce443687ccbd47445b36e3356d70035cc
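
The three cases above map onto a new OutputType enum in the generated-test
harness. As a rough, self-contained sketch of the contract the updated checks
enforce (the ErrorStatus and OutputShape types below are simplified stand-ins
for the NN HAL types, not the real declarations):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class ErrorStatus { NONE, OUTPUT_INSUFFICIENT_SIZE, GENERAL_FAILURE };
    enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };

    struct OutputShape {
        std::vector<uint32_t> dimensions;
        bool isSufficient;
    };

    // Returns true if (status, outputShapes) satisfies the contract that the
    // new switch on OutputType asserts for each test variant.
    bool resultMatchesContract(OutputType outputType, ErrorStatus status,
                               const std::vector<OutputShape>& outputShapes,
                               size_t numOutputs) {
        switch (outputType) {
            case OutputType::FULLY_SPECIFIED:
                // Fully specified outputs: execution succeeds, and outputShapes
                // is either empty or has one entry per output.
                return status == ErrorStatus::NONE &&
                       (outputShapes.empty() || outputShapes.size() == numOutputs);
            case OutputType::UNSPECIFIED:
                // Dynamic outputs with a sufficient buffer: execution succeeds
                // and the driver must report every output shape.
                return status == ErrorStatus::NONE &&
                       outputShapes.size() == numOutputs;
            case OutputType::INSUFFICIENT:
                // Output 0 was given a buffer one byte too small: the driver
                // must fail with OUTPUT_INSUFFICIENT_SIZE and flag output 0.
                return status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
                       outputShapes.size() == numOutputs &&
                       !outputShapes[0].isSufficient;
        }
        return false;
    }
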
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 8d427b1..1ef9c94 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -120,13 +120,14 @@
return ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
}
enum class Executor { ASYNC, SYNC, BURST };
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
const float kDefaultAtol = 1e-5f;
const float kDefaultRtol = 1e-5f;
template <typename T_IPreparedModel>
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
- Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
+ Executor executor, MeasureTiming measure, OutputType outputType) {
const uint32_t INPUT = 0;
const uint32_t OUTPUT = 1;
@@ -174,8 +175,20 @@
// Go through all outputs, initialize RequestArgument descriptors
resize_accordingly(golden, test);
- for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
+ bool sizeLargerThanOne = true;
+ for_all(golden, [&outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
+ int index, auto, auto s) {
if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+ if (index == 0) {
+ // For OutputType::INSUFFICIENT, shrink the buffer of output operand 0
+ // to one byte less than needed.
+ if (outputType == OutputType::INSUFFICIENT) {
+ if (s > 1)
+ s -= 1;
+ else
+ sizeLargerThanOne = false;
+ }
+ }
RequestArgument arg = {
.location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
.dimensions = {},
@@ -183,6 +196,9 @@
outputs_info[index] = arg;
outputSize += s;
});
+ // If output 0 is not larger than one byte, an insufficient buffer
+ // cannot be provided, so skip this case.
+ if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
// Compute offset for outputs 1 and so on
{
size_t offset = 0;
@@ -277,15 +293,15 @@
}
}
- if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
+ if (outputType != OutputType::FULLY_SPECIFIED &&
+ executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
"execute model that it does not support."
<< std::endl;
- return;
+ GTEST_SKIP();
}
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
if (measure == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
@@ -295,9 +311,28 @@
}
}
+ switch (outputType) {
+ case OutputType::FULLY_SPECIFIED:
+ // If the model output operands are fully specified, outputShapes must be
+ // either empty, or have the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_TRUE(outputShapes.size() == 0 ||
+ outputShapes.size() == test.operandDimensions.size());
+ break;
+ case OutputType::UNSPECIFIED:
+ // If the model output operands are not fully specified, outputShapes must have
+ // the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+ break;
+ case OutputType::INSUFFICIENT:
+ ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+ ASSERT_FALSE(outputShapes[0].isSufficient);
+ return;
+ }
// Go through all outputs, overwrite output dimensions with returned output shapes
- if (testDynamicOutputShape) {
- ASSERT_NE(outputShapes.size(), 0);
+ if (outputShapes.size() > 0) {
for_each<uint32_t>(test.operandDimensions,
[&outputShapes](int idx, std::vector<uint32_t>& dim) {
dim = outputShapes[idx].dimensions;
@@ -324,9 +359,9 @@
void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
- bool testDynamicOutputShape) {
+ OutputType outputType) {
EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
- kDefaultRtol, executor, measure, testDynamicOutputShape);
+ kDefaultRtol, executor, measure, outputType);
}
static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -383,7 +418,7 @@
float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
EvaluatePreparedModel(preparedModel, is_ignored, examples,
/*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC,
- MeasureTiming::NO, /*testDynamicOutputShape=*/false);
+ MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
}
void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -430,7 +465,7 @@
EvaluatePreparedModel(preparedModel, is_ignored, examples,
model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
- MeasureTiming::NO, /*testDynamicOutputShape=*/false);
+ MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
}
// TODO: Reduce code duplication.
@@ -477,24 +512,63 @@
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::NO, testDynamicOutputShape);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::NO,
- testDynamicOutputShape);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::NO, testDynamicOutputShape);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::YES, testDynamicOutputShape);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::YES, testDynamicOutputShape);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::YES, testDynamicOutputShape);
+ if (testDynamicOutputShape) {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::YES, OutputType::INSUFFICIENT);
+ } else {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::ASYNC,
+ MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::SYNC,
+ MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, Executor::BURST,
+ MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ }
}
} // namespace generated_tests
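
For illustration, the INSUFFICIENT setup boils down to: shrink the buffer of
output operand 0 by one byte, and skip the case entirely when that output is
at most one byte, since no smaller valid buffer exists. A minimal sketch with
a hypothetical helper (insufficientLengthForOutput0 is not part of the
harness; the real logic lives in the lambda over the golden outputs above):

    #include <cstddef>
    #include <optional>

    // Given the required byte size of output 0, return the length to request
    // for the INSUFFICIENT case, or nullopt if the case cannot be exercised
    // (output 0 is one byte or smaller).
    std::optional<size_t> insufficientLengthForOutput0(size_t requiredBytes) {
        if (requiredBytes <= 1) {
            // A one-byte (or empty) output cannot be made insufficient; the
            // harness returns early in this situation.
            return std::nullopt;
        }
        return requiredBytes - 1;  // one byte less than needed
    }
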