Refactor NNAPI VTS to remove unnecessary dependencies between versions
To make it easier to create the next version of NNAPI, this change
removes the following unnecessary dependencies:
- NNAPI 1.0 VTS depends on NNAPI 1.1 and 1.2
- NNAPI 1.1 VTS depends on NNAPI 1.2
In particular, I made the following changes:
- split GeneratedTestHarness.cpp into three separate implementations,
- created a restricted version of Callbacks.h for 1.0 and 1.1,
- removed the dependency on frameworks/ml/nn/HalInterfaces.h,
- refactored Android.bp files for more autonomy between 1.0, 1.1, and 1.2,
- consolidated some common code into Utils.h (see the sketch after
  this list),
- created a structure for sharing code across VTS versions (VtsHalNeuralNetworksV1_0_utils).
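
The hidl_vec helpers that each version's ValidateModel.cpp previously
duplicated now live in the shared utils. A sketch of the consolidated
helpers, taken from the code removed in the diff below; the exact
contents of 1.0/Utils.h are an assumption:

    // Requires <algorithm> and the hidl_vec type from libhidlbase.
    // Delete element from hidl_vec. hidl_vec doesn't support a "remove"
    // operation, so this is accomplished by rotating the element to the
    // end and resizing the hidl_vec to one less.
    template <typename Type>
    void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
        if (vec) {
            std::rotate(vec->begin() + index, vec->begin() + index + 1,
                        vec->end());
            vec->resize(vec->size() - 1);
        }
    }

    // Append a value to a hidl_vec and return the new element's index.
    // Assumes vec is non-null.
    template <typename Type>
    uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
        const uint32_t index = vec->size();
        vec->resize(index + 1);
        (*vec)[index] = value;
        return index;
    }
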
Bug: 74827824
Bug: 124462414
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
Change-Id: I4243d0b5e574255cef1070850f4d0a284f65f54e
Merged-In: I4243d0b5e574255cef1070850f4d0a284f65f54e
(cherry picked from commit 1d6b4659972010b9999dc77fbe65892b8b69d6da)
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index acf3cb6..ad992c5 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -16,10 +16,10 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
#include "VtsHalNeuralnetworks.h"
-#include "Callbacks.h"
-
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -41,10 +41,10 @@
const Model& model) {
SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
- Return<void> ret =
- device->getSupportedOperations_1_2(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
- });
+ Return<void> ret = device->getSupportedOperations_1_2(
+ model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ });
EXPECT_TRUE(ret.isOk());
}
@@ -87,36 +87,16 @@
validatePrepareModel(device, message, model, preference);
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
});
}
@@ -256,7 +236,7 @@
static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::vector<int32_t> invalidZeroPoints =
- getInvalidZeroPoints(model.operands[operand].type);
+ getInvalidZeroPoints(model.operands[operand].type);
for (int32_t invalidZeroPoint : invalidZeroPoints) {
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
@@ -292,13 +272,13 @@
case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_INT32:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
@@ -306,19 +286,20 @@
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
break;
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
newOperand.dimensions =
- operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
SymmPerChannelQuantParams channelQuant;
channelQuant.channelDim = 0;
channelQuant.scales = hidl_vec<float>(
- operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0]) : 0);
+ operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
+ : 0);
for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
channelQuant.scales[i] = 1.0f;
}
@@ -435,7 +416,7 @@
std::to_string(invalidOperationType);
validate(device, message, model, [operation, invalidOperationType](Model* model) {
model->operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
+ static_cast<OperationType>(invalidOperationType);
});
}
}
@@ -690,7 +671,7 @@
static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
- "addOperationOutputTest: operation " + std::to_string(operation);
+ "addOperationOutputTest: operation " + std::to_string(operation);
validate(device, message, model, [operation](Model* model) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -702,14 +683,14 @@
///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
static const int32_t invalidExecutionPreferences[] = {
- static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
- static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
+ static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
+ static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
for (int32_t preference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+ "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
validate(device, message, model, [](Model*) {},
static_cast<ExecutionPreference>(preference));
}