More tests for graph validation.
- detect cycle (CycleTest; see the sketch after this list)
- detect bad execution order (mutateExecutionOrderTest)
- detect lifetime inconsistent with whether operand is written (mutateOperandLifeTimeTest)
- detect lifetime inconsistent with Model inputIndexes/outputIndexes (mutateOperandInputOutputTest)
- detect incorrect number of consumers (mutateOperandNumberOfConsumersTest)
- detect operand written multiple times (mutateOperandAddWriterTest)
- detect operand never written (mutateOperationRemoveWriteTest)
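
A cyclic graph has no valid execution order, so getSupportedOperations_1_1()
must report INVALID_ARGUMENT for it and prepareModel_1_1() must not produce a
prepared model. The sketch below shows one way such a cycle can be detected,
by attempting a topological sort of the operations (Kahn's algorithm). It is
illustrative only: SimpleOperation and hasValidExecutionOrder are made-up
names, not the runtime's actual validation code.

    // Illustrative sketch (not the runtime's validation code): detect a cycle
    // in the operand/operation dependency graph with a topological sort.
    #include <cstddef>
    #include <cstdint>
    #include <queue>
    #include <vector>

    struct SimpleOperation {              // stand-in for the HIDL Operation type
        std::vector<uint32_t> inputs;     // operand indexes the operation reads
        std::vector<uint32_t> outputs;    // operand indexes the operation writes
    };

    // Kahn's algorithm: returns false when the data dependencies between
    // operations contain a cycle, i.e. no valid execution order exists.
    bool hasValidExecutionOrder(size_t operandCount,
                                const std::vector<SimpleOperation>& ops) {
        std::vector<int> writer(operandCount, -1);   // which op writes each operand
        for (size_t i = 0; i < ops.size(); ++i) {
            for (uint32_t out : ops[i].outputs) writer[out] = static_cast<int>(i);
        }
        std::vector<size_t> pending(ops.size(), 0);  // inputs not yet written, per op
        std::vector<std::vector<size_t>> consumers(ops.size());
        for (size_t i = 0; i < ops.size(); ++i) {
            for (uint32_t in : ops[i].inputs) {
                if (writer[in] >= 0) {
                    ++pending[i];
                    consumers[static_cast<size_t>(writer[in])].push_back(i);
                }
            }
        }
        std::queue<size_t> ready;                    // ops whose inputs are all available
        for (size_t i = 0; i < ops.size(); ++i) {
            if (pending[i] == 0) ready.push(i);
        }
        size_t scheduled = 0;
        while (!ready.empty()) {
            size_t i = ready.front();
            ready.pop();
            ++scheduled;
            for (size_t next : consumers[i]) {
                if (--pending[next] == 0) ready.push(next);
            }
        }
        return scheduled == ops.size();              // leftover ops => cycle
    }

For the model in CycleTest, the operation writing operand 3 waits on operand 4
and the operation writing operand 4 waits on operand 3, so neither (nor the
final ADD) ever becomes ready and the sort leaves all three operations
unscheduled.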
Bug: 66478689
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
Merged-In: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
(cherry picked from commit af51663e9980265853750a51fa2f4bb1cd4e48c1)
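
The mutate*Test cases listed above all follow the same shape: start from a
known-valid model, apply one mutation (reorder an operation, flip an operand's
lifetime, add or remove a writer, and so on), and check that the driver
rejects the result. A rough sketch of that pattern follows; validate() and
mutateNumberOfConsumers() are hypothetical names, it assumes the same usings
as BasicTests.cpp in the hunk below, and the actual mutation tests are
elsewhere in this change, not shown here.

    // Hypothetical sketch of the mutation-test pattern; not the actual VTS harness.
    // Assumes the usings from BasicTests.cpp below (Model, ErrorStatus, etc.).
    static void validate(const sp<IDevice>& device, const std::string& message,
                         const Model& model) {
        // An invalid model must make getSupportedOperations_1_1() report
        // INVALID_ARGUMENT.
        Return<void> ret = device->getSupportedOperations_1_1(
                model, [&](ErrorStatus status, const hidl_vec<bool>&) {
                    EXPECT_EQ(status, ErrorStatus::INVALID_ARGUMENT) << message;
                });
        EXPECT_TRUE(ret.isOk());

        // ... and prepareModel_1_1() must not hand back a prepared model.
        sp<PreparedModelCallback> callback = new PreparedModelCallback;
        Return<ErrorStatus> launch = device->prepareModel_1_1(
                model, ExecutionPreference::FAST_SINGLE_ANSWER, callback);
        EXPECT_TRUE(launch.isOk());
        EXPECT_NE(static_cast<ErrorStatus>(launch), ErrorStatus::NONE) << message;
        EXPECT_NE(callback->getStatus(), ErrorStatus::NONE) << message;
        EXPECT_EQ(callback->getPreparedModel(), nullptr) << message;
    }

    // Example mutation in the spirit of mutateOperandNumberOfConsumersTest: the
    // declared numberOfConsumers must match the real consumer count, so bumping
    // it on any operand must make the model invalid.
    static void mutateNumberOfConsumers(const sp<IDevice>& device, const Model& validModel) {
        for (size_t i = 0; i < validModel.operands.size(); ++i) {
            Model mutated = validModel;
            mutated.operands[i].numberOfConsumers += 1;
            validate(device, "operand " + std::to_string(i), mutated);
        }
    }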
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
index 44836f0..baadd1b 100644
--- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -18,10 +18,16 @@
#include "VtsHalNeuralnetworks.h"
+#include "1.0/Callbacks.h"
+
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+using V1_0::implementation::PreparedModelCallback;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -48,4 +54,137 @@
EXPECT_TRUE(ret.isOk());
}
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ //            +---------+
+ //            |         |
+ //            v         |
+ // 3 = ADD(0, 4, 2)     |
+ // |                    |
+ // +----------+         |
+ //            |         |
+ //            v         |
+ // 4 = ADD(1, 3, 2)     |
+ // |                    |
+ // +--------------------+
+ // |
+ // |
+ // +----------+
+ //            |
+ //            v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ const Model model = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations_1_1() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_1(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel_1_1() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional