Merge changes from topic "nnapi_memory_domain_sample_driver"

* changes:
  Fix VTS of fenced execution with zero-sized output.
  NNAPI VTS: Add validation for Priority
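
Both changes share one mechanical refactoring: the validate() helpers stop taking their model/request/serialized-request argument by value and instead take a const reference, copy it locally, and apply a named mutation callback. A minimal sketch of that pattern, outside the patch itself, with a simplified stand-in for the generated HIDL Model type (the real validate() also drives getSupportedOperations and prepareModel and asserts INVALID_ARGUMENT):

    #include <functional>
    #include <string>
    #include <vector>

    // Simplified stand-in for the generated HIDL Model type (illustration only).
    struct Model {
        std::vector<int> operands;
    };

    using PrepareModelMutation = std::function<void(Model*)>;

    // The caller's model stays untouched: the mutation is applied to a local copy,
    // so one valid model can be reused across every invalidation test case.
    static void validate(const Model& originalModel, const std::string& message,
                         const PrepareModelMutation& mutate) {
        Model model = originalModel;
        mutate(&model);
        // ... feed "model" to the interface calls under test and expect
        // ErrorStatus::INVALID_ARGUMENT, using "message" for SCOPED_TRACE ...
    }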
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index cc15263..79d8594 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -24,6 +24,8 @@
using implementation::PreparedModelCallback;
+using PrepareModelMutation = std::function<void(Model*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
@@ -54,12 +56,13 @@
}
// Primary validation function. This function will take a valid model, apply a
-// mutation to it to invalidate the model, then pass it to interface calls that
-// use the model. Note that the model here is passed by value, and any mutation
-// to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, Model model,
- const std::function<void(Model*)>& mutation) {
- mutation(&model);
+// mutation to invalidate the model, then pass it to supportedOperations and
+// prepareModel.
+static void validate(const sp<IDevice>& device, const std::string& message,
+ const Model& originalModel, const PrepareModelMutation& mutate) {
+ Model model = originalModel;
+ mutate(&model);
+
validateGetSupportedOperations(device, message, model);
validatePrepareModel(device, message, model);
}
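
The 1.0 hunks touch no call sites because PrepareModelMutation keeps the old void(Model*) shape; the later files add out-parameters to the alias, which is what forces every lambda in those files to change. The three shapes side by side (the version suffixes are only for this sketch; each real file declares its own PrepareModelMutation in its own namespace):

    using PrepareModelMutation10 = std::function<void(Model*)>;
    using PrepareModelMutation11 = std::function<void(Model*, ExecutionPreference*)>;
    using PrepareModelMutation13 =
            std::function<void(Model*, ExecutionPreference*, Priority*)>;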
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 05eefd1..0baa85b 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -24,15 +24,17 @@
using implementation::ExecutionCallback;
+using ExecutionMutation = std::function<void(Request*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
-// that use the request. Note that the request here is passed by value, and any
-// mutation to the request does not leave this function.
+// that use the request.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
- Request request, const std::function<void(Request*)>& mutation) {
- mutation(&request);
+ const Request& originalRequest, const ExecutionMutation& mutate) {
+ Request request = originalRequest;
+ mutate(&request);
SCOPED_TRACE(message + " [execute]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
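
The request-side helper follows the same copy-then-mutate pattern. No call site appears in this hunk; the sketch below shows an illustrative one under the new signature (the lambda body is hypothetical, but hidl_vec_removeAt is the same VTS utility this patch already applies to model vectors, and Request::inputs comes from the 1.0 HAL):

    validate(preparedModel, "removeInput: input 0", request, [](Request* request) {
        hidl_vec_removeAt(&request->inputs, 0);  // invalidate the request by dropping an input
    });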
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index 0629a1e..3b6f0f8 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -30,6 +30,8 @@
using V1_0::OperandType;
using V1_0::implementation::PreparedModelCallback;
+using PrepareModelMutation = std::function<void(Model*, ExecutionPreference*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
@@ -67,16 +69,19 @@
}
// Primary validation function. This function will take a valid model, apply a
-// mutation to it to invalidate the model, then pass it to interface calls that
-// use the model. Note that the model here is passed by value, and any mutation
-// to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, Model model,
- const std::function<void(Model*)>& mutation,
- ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
- mutation(&model);
+// mutation to invalidate either the model or the execution preference, then
+// pass these to supportedOperations and prepareModel (supportedOperations is
+// skipped when the execution preference is invalid).
+static void validate(const sp<IDevice>& device, const std::string& message,
+ const Model& originalModel, const PrepareModelMutation& mutate) {
+ Model model = originalModel;
+ ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER;
+ mutate(&model, &preference);
+
if (validExecutionPreference(preference)) {
validateGetSupportedOperations(device, message, model);
}
+
validatePrepareModel(device, message, model, preference);
}
@@ -115,9 +120,11 @@
const std::string message = "mutateOperandTypeTest: operand " +
std::to_string(operand) + " set to value " +
std::to_string(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*) {
+ model->operands[operand].type =
+ static_cast<OperandType>(invalidOperandType);
+ });
}
}
}
@@ -144,9 +151,10 @@
const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
" has rank of " + std::to_string(invalidRank);
- validate(device, message, model, [operand, invalidRank](Model* model) {
- model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
- });
+ validate(device, message, model,
+ [operand, invalidRank](Model* model, ExecutionPreference*) {
+ model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+ });
}
}
@@ -173,9 +181,10 @@
const float invalidScale = getInvalidScale(model.operands[operand].type);
const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
" has scale of " + std::to_string(invalidScale);
- validate(device, message, model, [operand, invalidScale](Model* model) {
- model->operands[operand].scale = invalidScale;
- });
+ validate(device, message, model,
+ [operand, invalidScale](Model* model, ExecutionPreference*) {
+ model->operands[operand].scale = invalidScale;
+ });
}
}
@@ -204,9 +213,10 @@
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
std::to_string(invalidZeroPoint);
- validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
- model->operands[operand].zeroPoint = invalidZeroPoint;
- });
+ validate(device, message, model,
+ [operand, invalidZeroPoint](Model* model, ExecutionPreference*) {
+ model->operands[operand].zeroPoint = invalidZeroPoint;
+ });
}
}
}
@@ -282,9 +292,10 @@
const std::string message = "mutateOperationOperandTypeTest: operand " +
std::to_string(operand) + " set to type " +
toString(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- mutateOperand(&model->operands[operand], invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*) {
+ mutateOperand(&model->operands[operand], invalidOperandType);
+ });
}
}
}
@@ -304,10 +315,11 @@
const std::string message = "mutateOperationTypeTest: operation " +
std::to_string(operation) + " set to value " +
std::to_string(invalidOperationType);
- validate(device, message, model, [operation, invalidOperationType](Model* model) {
- model->operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
- });
+ validate(device, message, model,
+ [operation, invalidOperationType](Model* model, ExecutionPreference*) {
+ model->operations[operation].type =
+ static_cast<OperationType>(invalidOperationType);
+ });
}
}
}
@@ -321,9 +333,10 @@
const std::string message = "mutateOperationInputOperandIndexTest: operation " +
std::to_string(operation) + " input " +
std::to_string(input);
- validate(device, message, model, [operation, input, invalidOperand](Model* model) {
- model->operations[operation].inputs[input] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, input, invalidOperand](Model* model, ExecutionPreference*) {
+ model->operations[operation].inputs[input] = invalidOperand;
+ });
}
}
}
@@ -337,9 +350,10 @@
const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
std::to_string(operation) + " output " +
std::to_string(output);
- validate(device, message, model, [operation, output, invalidOperand](Model* model) {
- model->operations[operation].outputs[output] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, output, invalidOperand](Model* model, ExecutionPreference*) {
+ model->operations[operation].outputs[output] = invalidOperand;
+ });
}
}
}
@@ -372,7 +386,7 @@
for (size_t operand = 0; operand < model.operands.size(); ++operand) {
const std::string message = "removeOperandTest: operand " + std::to_string(operand);
validate(device, message, model,
- [operand](Model* model) { removeOperand(model, operand); });
+ [operand](Model* model, ExecutionPreference*) { removeOperand(model, operand); });
}
}
@@ -388,8 +402,9 @@
static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message = "removeOperationTest: operation " + std::to_string(operation);
- validate(device, message, model,
- [operation](Model* model) { removeOperation(model, operation); });
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ removeOperation(model, operation);
+ });
}
}
@@ -409,11 +424,12 @@
const std::string message = "removeOperationInputTest: operation " +
std::to_string(operation) + ", input " +
std::to_string(input);
- validate(device, message, model, [operation, input](Model* model) {
- uint32_t operand = model->operations[operation].inputs[input];
- model->operands[operand].numberOfConsumers--;
- hidl_vec_removeAt(&model->operations[operation].inputs, input);
- });
+ validate(device, message, model,
+ [operation, input](Model* model, ExecutionPreference*) {
+ uint32_t operand = model->operations[operation].inputs[input];
+ model->operands[operand].numberOfConsumers--;
+ hidl_vec_removeAt(&model->operations[operation].inputs, input);
+ });
}
}
}
@@ -426,9 +442,10 @@
const std::string message = "removeOperationOutputTest: operation " +
std::to_string(operation) + ", output " +
std::to_string(output);
- validate(device, message, model, [operation, output](Model* model) {
- hidl_vec_removeAt(&model->operations[operation].outputs, output);
- });
+ validate(device, message, model,
+ [operation, output](Model* model, ExecutionPreference*) {
+ hidl_vec_removeAt(&model->operations[operation].outputs, output);
+ });
}
}
}
@@ -444,7 +461,7 @@
static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
hidl_vec_push_back(&model->operations[operation].inputs, index);
hidl_vec_push_back(&model->inputIndexes, index);
@@ -458,7 +475,7 @@
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
hidl_vec_push_back(&model->outputIndexes, index);
@@ -474,12 +491,13 @@
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
- for (int32_t preference : invalidExecutionPreferences) {
+ for (int32_t invalidPreference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
- validate(
- device, message, model, [](Model*) {},
- static_cast<ExecutionPreference>(preference));
+ "mutateExecutionPreferenceTest: preference " + std::to_string(invalidPreference);
+ validate(device, message, model,
+ [invalidPreference](Model*, ExecutionPreference* preference) {
+ *preference = static_cast<ExecutionPreference>(invalidPreference);
+ });
}
}
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index 9684eb2..2914335 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -28,15 +28,17 @@
using V1_0::Request;
using V1_0::implementation::ExecutionCallback;
+using ExecutionMutation = std::function<void(Request*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
-// that use the request. Note that the request here is passed by value, and any
-// mutation to the request does not leave this function.
+// that use the request.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
- Request request, const std::function<void(Request*)>& mutation) {
- mutation(&request);
+ const Request& originalRequest, const ExecutionMutation& mutate) {
+ Request request = originalRequest;
+ mutate(&request);
SCOPED_TRACE(message + " [execute]");
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index cc9d804..4476266 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -37,6 +37,8 @@
using V1_0::Request;
using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
+using BurstExecutionMutation = std::function<void(std::vector<FmqRequestDatum>*)>;
+
// This constant value represents the length of an FMQ that is large enough to
// return a result from a burst execution for all of the generated test cases.
constexpr size_t kExecutionBurstChannelLength = 1024;
@@ -115,13 +117,13 @@
// Primary validation function. This function will take a valid serialized
// request, apply a mutation to it to invalidate the serialized request, then
-// pass it to interface calls that use the serialized request. Note that the
-// serialized request here is passed by value, and any mutation to the
-// serialized request does not leave this function.
+// pass it to interface calls that use the serialized request.
static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiver,
- const std::string& message, std::vector<FmqRequestDatum> serialized,
- const std::function<void(std::vector<FmqRequestDatum>*)>& mutation) {
- mutation(&serialized);
+ const std::string& message,
+ const std::vector<FmqRequestDatum>& originalSerialized,
+ const BurstExecutionMutation& mutate) {
+ std::vector<FmqRequestDatum> serialized = originalSerialized;
+ mutate(&serialized);
// skip if packet is too large to send
if (serialized.size() > kExecutionBurstChannelLength) {
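
The burst helper gets the same treatment, with the mutation applied to a copy of the FMQ-serialized request. An illustrative mutation shape is sketched below; sender and receiver stand for whatever RequestChannelSender*/ResultChannelReceiver* the enclosing test holds, and the concrete mutations in the file fall outside this hunk:

    validate(sender, receiver, "burst: truncate serialized request", serialized,
             [](std::vector<FmqRequestDatum>* datums) {
                 if (!datums->empty()) datums->pop_back();  // corrupt the packet
             });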
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index a14b86b..e9fc6e9 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -29,6 +29,8 @@
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using PrepareModelMutation = std::function<void(Model*, ExecutionPreference*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
@@ -67,16 +69,19 @@
}
// Primary validation function. This function will take a valid model, apply a
-// mutation to it to invalidate the model, then pass it to interface calls that
-// use the model. Note that the model here is passed by value, and any mutation
-// to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, Model model,
- const std::function<void(Model*)>& mutation,
- ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
- mutation(&model);
+// mutation to invalidate either the model or the execution preference, then
+// pass these to supportedOperations and prepareModel (supportedOperations is
+// skipped when the execution preference is invalid).
+static void validate(const sp<IDevice>& device, const std::string& message,
+ const Model& originalModel, const PrepareModelMutation& mutate) {
+ Model model = originalModel;
+ ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER;
+ mutate(&model, &preference);
+
if (validExecutionPreference(preference)) {
validateGetSupportedOperations(device, message, model);
}
+
validatePrepareModel(device, message, model, preference);
}
@@ -115,9 +120,11 @@
const std::string message = "mutateOperandTypeTest: operand " +
std::to_string(operand) + " set to value " +
std::to_string(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*) {
+ model->operands[operand].type =
+ static_cast<OperandType>(invalidOperandType);
+ });
}
}
}
@@ -155,9 +162,10 @@
}
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
" has rank of " + std::to_string(invalidRank);
- validate(device, message, model, [operand, invalidRank](Model* model) {
- model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
- });
+ validate(device, message, model,
+ [operand, invalidRank](Model* model, ExecutionPreference*) {
+ model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+ });
}
}
@@ -192,9 +200,10 @@
const float invalidScale = getInvalidScale(model.operands[operand].type);
const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
" has scale of " + std::to_string(invalidScale);
- validate(device, message, model, [operand, invalidScale](Model* model) {
- model->operands[operand].scale = invalidScale;
- });
+ validate(device, message, model,
+ [operand, invalidScale](Model* model, ExecutionPreference*) {
+ model->operands[operand].scale = invalidScale;
+ });
}
}
@@ -234,9 +243,10 @@
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
std::to_string(invalidZeroPoint);
- validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
- model->operands[operand].zeroPoint = invalidZeroPoint;
- });
+ validate(device, message, model,
+ [operand, invalidZeroPoint](Model* model, ExecutionPreference*) {
+ model->operands[operand].zeroPoint = invalidZeroPoint;
+ });
}
}
}
@@ -386,9 +396,10 @@
const std::string message = "mutateOperationOperandTypeTest: operand " +
std::to_string(operand) + " set to type " +
toString(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- mutateOperand(&model->operands[operand], invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*) {
+ mutateOperand(&model->operands[operand], invalidOperandType);
+ });
}
}
}
@@ -407,10 +418,11 @@
const std::string message = "mutateOperationTypeTest: operation " +
std::to_string(operation) + " set to value " +
std::to_string(invalidOperationType);
- validate(device, message, model, [operation, invalidOperationType](Model* model) {
- model->operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
- });
+ validate(device, message, model,
+ [operation, invalidOperationType](Model* model, ExecutionPreference*) {
+ model->operations[operation].type =
+ static_cast<OperationType>(invalidOperationType);
+ });
}
}
}
@@ -424,9 +436,10 @@
const std::string message = "mutateOperationInputOperandIndexTest: operation " +
std::to_string(operation) + " input " +
std::to_string(input);
- validate(device, message, model, [operation, input, invalidOperand](Model* model) {
- model->operations[operation].inputs[input] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, input, invalidOperand](Model* model, ExecutionPreference*) {
+ model->operations[operation].inputs[input] = invalidOperand;
+ });
}
}
}
@@ -440,9 +453,10 @@
const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
std::to_string(operation) + " output " +
std::to_string(output);
- validate(device, message, model, [operation, output, invalidOperand](Model* model) {
- model->operations[operation].outputs[output] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, output, invalidOperand](Model* model, ExecutionPreference*) {
+ model->operations[operation].outputs[output] = invalidOperand;
+ });
}
}
}
@@ -503,7 +517,7 @@
}
const std::string message = "removeOperandTest: operand " + std::to_string(operand);
validate(device, message, model,
- [operand](Model* model) { removeOperand(model, operand); });
+ [operand](Model* model, ExecutionPreference*) { removeOperand(model, operand); });
}
}
@@ -519,8 +533,9 @@
static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message = "removeOperationTest: operation " + std::to_string(operation);
- validate(device, message, model,
- [operation](Model* model) { removeOperation(model, operation); });
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ removeOperation(model, operation);
+ });
}
}
@@ -601,11 +616,12 @@
const std::string message = "removeOperationInputTest: operation " +
std::to_string(operation) + ", input " +
std::to_string(input);
- validate(device, message, model, [operation, input](Model* model) {
- uint32_t operand = model->operations[operation].inputs[input];
- model->operands[operand].numberOfConsumers--;
- hidl_vec_removeAt(&model->operations[operation].inputs, input);
- });
+ validate(device, message, model,
+ [operation, input](Model* model, ExecutionPreference*) {
+ uint32_t operand = model->operations[operation].inputs[input];
+ model->operands[operand].numberOfConsumers--;
+ hidl_vec_removeAt(&model->operations[operation].inputs, input);
+ });
}
}
}
@@ -618,9 +634,10 @@
const std::string message = "removeOperationOutputTest: operation " +
std::to_string(operation) + ", output " +
std::to_string(output);
- validate(device, message, model, [operation, output](Model* model) {
- hidl_vec_removeAt(&model->operations[operation].outputs, output);
- });
+ validate(device, message, model,
+ [operation, output](Model* model, ExecutionPreference*) {
+ hidl_vec_removeAt(&model->operations[operation].outputs, output);
+ });
}
}
}
@@ -651,7 +668,7 @@
continue;
}
const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
hidl_vec_push_back(&model->operations[operation].inputs, index);
hidl_vec_push_back(&model->inputIndexes, index);
@@ -665,7 +682,7 @@
for (size_t operation = 0; operation < model.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
hidl_vec_push_back(&model->operations[operation].outputs, index);
hidl_vec_push_back(&model->outputIndexes, index);
@@ -681,12 +698,13 @@
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
- for (int32_t preference : invalidExecutionPreferences) {
+ for (int32_t invalidPreference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
- validate(
- device, message, model, [](Model*) {},
- static_cast<ExecutionPreference>(preference));
+ "mutateExecutionPreferenceTest: preference " + std::to_string(invalidPreference);
+ validate(device, message, model,
+ [invalidPreference](Model*, ExecutionPreference* preference) {
+ *preference = static_cast<ExecutionPreference>(invalidPreference);
+ });
}
}
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 8498cb0..934d893 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -30,6 +30,8 @@
using V1_0::ErrorStatus;
using V1_0::Request;
+using ExecutionMutation = std::function<void(Request*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static bool badTiming(Timing timing) {
@@ -38,11 +40,11 @@
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
-// that use the request. Note that the request here is passed by value, and any
-// mutation to the request does not leave this function.
+// that use the request.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
- Request request, const std::function<void(Request*)>& mutation) {
- mutation(&request);
+ const Request& originalRequest, const ExecutionMutation& mutate) {
+ Request request = originalRequest;
+ mutate(&request);
// We'd like to test both with timing requested and without timing
// requested. Rather than running each test both ways, we'll decide whether
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 83a8d94..aae58bf 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -493,6 +493,13 @@
return outputBuffers;
}
+static bool hasZeroSizedOutput(const TestModel& testModel) {
+ return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
+ [&testModel](uint32_t index) {
+ return testModel.main.operands[index].data.size() == 0;
+ });
+}
+
static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
const Request& request, MeasureTiming measure,
const OptionalTimeoutDuration& loopTimeoutDuration,
@@ -689,6 +696,11 @@
switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
+ if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
+ // Executor::FENCED does not support zero-sized output.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
+ }
// If the model output operands are fully specified, outputShapes must be
// either empty, or have the same number of elements as the number of outputs.
ASSERT_EQ(ErrorStatus::NONE, executionStatus);
@@ -936,13 +948,8 @@
INSTANTIATE_GENERATED_TEST(MemoryDomainTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
-INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) {
- return !testModel.expectFailure &&
- std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
- [&testModel](uint32_t index) {
- return testModel.main.operands[index].data.size() > 0;
- });
-});
+INSTANTIATE_GENERATED_TEST(FencedComputeTest,
+ [](const TestModel& testModel) { return !testModel.expectFailure; });
INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) {
return testModel.hasQuant8CoupledOperands() && testModel.main.operations.size() == 1;
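
Net effect of the two GeneratedTestHarness hunks: test models with a zero-sized output are no longer filtered out of FencedComputeTest; the fenced path is instead expected to reject them. A hypothetical helper condensing that expectation (not part of the patch; TestConfig, Executor, and ErrorStatus come from the surrounding harness):

    static ErrorStatus expectedFullySpecifiedStatus(const TestConfig& testConfig,
                                                    const TestModel& testModel) {
        if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
            return ErrorStatus::INVALID_ARGUMENT;  // fenced execution rejects zero-sized outputs
        }
        return ErrorStatus::NONE;  // otherwise the execution is expected to succeed
    }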
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
index aecb7b7..c78439c 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -42,6 +42,8 @@
using V1_2::Timing;
using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
+using BurstExecutionMutation = std::function<void(std::vector<FmqRequestDatum>*)>;
+
// This constant value represents the length of an FMQ that is large enough to
// return a result from a burst execution for all of the generated test cases.
constexpr size_t kExecutionBurstChannelLength = 1024;
@@ -121,13 +123,13 @@
// Primary validation function. This function will take a valid serialized
// request, apply a mutation to it to invalidate the serialized request, then
-// pass it to interface calls that use the serialized request. Note that the
-// serialized request here is passed by value, and any mutation to the
-// serialized request does not leave this function.
+// pass it to interface calls that use the serialized request.
static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiver,
- const std::string& message, std::vector<FmqRequestDatum> serialized,
- const std::function<void(std::vector<FmqRequestDatum>*)>& mutation) {
- mutation(&serialized);
+ const std::string& message,
+ const std::vector<FmqRequestDatum>& originalSerialized,
+ const BurstExecutionMutation& mutate) {
+ std::vector<FmqRequestDatum> serialized = originalSerialized;
+ mutate(&serialized);
// skip if packet is too large to send
if (serialized.size() > kExecutionBurstChannelLength) {
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 7da2da9..4c0100e 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -30,6 +30,8 @@
using HidlToken =
hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using PrepareModelMutation = std::function<void(Model*, ExecutionPreference*, Priority*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
@@ -44,13 +46,14 @@
}
static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
- const Model& model, ExecutionPreference preference) {
+ const Model& model, ExecutionPreference preference,
+ Priority priority) {
SCOPED_TRACE(message + " [prepareModel_1_3]");
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
- model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
- hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModel_1_3(model, preference, priority, {}, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -67,18 +70,26 @@
preference == ExecutionPreference::SUSTAINED_SPEED;
}
+static bool validExecutionPriority(Priority priority) {
+ return priority == Priority::LOW || priority == Priority::MEDIUM || priority == Priority::HIGH;
+}
+
// Primary validation function. This function will take a valid model, apply a
-// mutation to it to invalidate the model, then pass it to interface calls that
-// use the model. Note that the model here is passed by value, and any mutation
-// to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, Model model,
- const std::function<void(Model*)>& mutation,
- ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
- mutation(&model);
- if (validExecutionPreference(preference)) {
+// mutation to invalidate the model, the execution preference, or the priority,
+// then pass these to supportedOperations and prepareModel (supportedOperations
+// is skipped when the preference or priority is invalid).
+static void validate(const sp<IDevice>& device, const std::string& message,
+ const Model& originalModel, const PrepareModelMutation& mutate) {
+ Model model = originalModel;
+ ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER;
+ Priority priority = kDefaultPriority;
+ mutate(&model, &preference, &priority);
+
+ if (validExecutionPreference(preference) && validExecutionPriority(priority)) {
validateGetSupportedOperations(device, message, model);
}
- validatePrepareModel(device, message, model, preference);
+
+ validatePrepareModel(device, message, model, preference, priority);
}
static uint32_t addOperand(Model* model) {
@@ -116,9 +127,11 @@
const std::string message = "mutateOperandTypeTest: operand " +
std::to_string(operand) + " set to value " +
std::to_string(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- model->main.operands[operand].type = static_cast<OperandType>(invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*, Priority*) {
+ model->main.operands[operand].type =
+ static_cast<OperandType>(invalidOperandType);
+ });
}
}
}
@@ -156,9 +169,11 @@
}
const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
" has rank of " + std::to_string(invalidRank);
- validate(device, message, model, [operand, invalidRank](Model* model) {
- model->main.operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
- });
+ validate(device, message, model,
+ [operand, invalidRank](Model* model, ExecutionPreference*, Priority*) {
+ model->main.operands[operand].dimensions =
+ std::vector<uint32_t>(invalidRank, 0);
+ });
}
}
@@ -194,9 +209,10 @@
const float invalidScale = getInvalidScale(model.main.operands[operand].type);
const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
" has scale of " + std::to_string(invalidScale);
- validate(device, message, model, [operand, invalidScale](Model* model) {
- model->main.operands[operand].scale = invalidScale;
- });
+ validate(device, message, model,
+ [operand, invalidScale](Model* model, ExecutionPreference*, Priority*) {
+ model->main.operands[operand].scale = invalidScale;
+ });
}
}
@@ -237,9 +253,10 @@
const std::string message = "mutateOperandZeroPointTest: operand " +
std::to_string(operand) + " has zero point of " +
std::to_string(invalidZeroPoint);
- validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
- model->main.operands[operand].zeroPoint = invalidZeroPoint;
- });
+ validate(device, message, model,
+ [operand, invalidZeroPoint](Model* model, ExecutionPreference*, Priority*) {
+ model->main.operands[operand].zeroPoint = invalidZeroPoint;
+ });
}
}
}
@@ -425,9 +442,10 @@
const std::string message = "mutateOperationOperandTypeTest: operand " +
std::to_string(operand) + " set to type " +
toString(invalidOperandType);
- validate(device, message, model, [operand, invalidOperandType](Model* model) {
- mutateOperand(&model->main.operands[operand], invalidOperandType);
- });
+ validate(device, message, model,
+ [operand, invalidOperandType](Model* model, ExecutionPreference*, Priority*) {
+ mutateOperand(&model->main.operands[operand], invalidOperandType);
+ });
}
}
}
@@ -446,10 +464,12 @@
const std::string message = "mutateOperationTypeTest: operation " +
std::to_string(operation) + " set to value " +
std::to_string(invalidOperationType);
- validate(device, message, model, [operation, invalidOperationType](Model* model) {
- model->main.operations[operation].type =
- static_cast<OperationType>(invalidOperationType);
- });
+ validate(device, message, model,
+ [operation, invalidOperationType](Model* model, ExecutionPreference*,
+ Priority*) {
+ model->main.operations[operation].type =
+ static_cast<OperationType>(invalidOperationType);
+ });
}
}
}
@@ -463,9 +483,11 @@
const std::string message = "mutateOperationInputOperandIndexTest: operation " +
std::to_string(operation) + " input " +
std::to_string(input);
- validate(device, message, model, [operation, input, invalidOperand](Model* model) {
- model->main.operations[operation].inputs[input] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, input, invalidOperand](Model* model, ExecutionPreference*,
+ Priority*) {
+ model->main.operations[operation].inputs[input] = invalidOperand;
+ });
}
}
}
@@ -480,9 +502,11 @@
const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
std::to_string(operation) + " output " +
std::to_string(output);
- validate(device, message, model, [operation, output, invalidOperand](Model* model) {
- model->main.operations[operation].outputs[output] = invalidOperand;
- });
+ validate(device, message, model,
+ [operation, output, invalidOperand](Model* model, ExecutionPreference*,
+ Priority*) {
+ model->main.operations[operation].outputs[output] = invalidOperand;
+ });
}
}
}
@@ -548,8 +572,9 @@
continue;
}
const std::string message = "removeOperandTest: operand " + std::to_string(operand);
- validate(device, message, model,
- [operand](Model* model) { removeOperand(model, operand); });
+ validate(device, message, model, [operand](Model* model, ExecutionPreference*, Priority*) {
+ removeOperand(model, operand);
+ });
}
}
@@ -566,7 +591,9 @@
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const std::string message = "removeOperationTest: operation " + std::to_string(operation);
validate(device, message, model,
- [operation](Model* model) { removeOperation(model, operation); });
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ removeOperation(model, operation);
+ });
}
}
@@ -654,11 +681,12 @@
const std::string message = "removeOperationInputTest: operation " +
std::to_string(operation) + ", input " +
std::to_string(input);
- validate(device, message, model, [operation, input](Model* model) {
- uint32_t operand = model->main.operations[operation].inputs[input];
- model->main.operands[operand].numberOfConsumers--;
- hidl_vec_removeAt(&model->main.operations[operation].inputs, input);
- });
+ validate(device, message, model,
+ [operation, input](Model* model, ExecutionPreference*, Priority*) {
+ uint32_t operand = model->main.operations[operation].inputs[input];
+ model->main.operands[operand].numberOfConsumers--;
+ hidl_vec_removeAt(&model->main.operations[operation].inputs, input);
+ });
}
}
}
@@ -672,9 +700,10 @@
const std::string message = "removeOperationOutputTest: operation " +
std::to_string(operation) + ", output " +
std::to_string(output);
- validate(device, message, model, [operation, output](Model* model) {
- hidl_vec_removeAt(&model->main.operations[operation].outputs, output);
- });
+ validate(device, message, model,
+ [operation, output](Model* model, ExecutionPreference*, Priority*) {
+ hidl_vec_removeAt(&model->main.operations[operation].outputs, output);
+ });
}
}
}
@@ -707,11 +736,12 @@
continue;
}
const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
- uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_INPUT);
- hidl_vec_push_back(&model->main.operations[operation].inputs, index);
- hidl_vec_push_back(&model->main.inputIndexes, index);
- });
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_INPUT);
+ hidl_vec_push_back(&model->main.operations[operation].inputs, index);
+ hidl_vec_push_back(&model->main.inputIndexes, index);
+ });
}
}
@@ -721,11 +751,12 @@
for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
const std::string message =
"addOperationOutputTest: operation " + std::to_string(operation);
- validate(device, message, model, [operation](Model* model) {
- uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_OUTPUT);
- hidl_vec_push_back(&model->main.operations[operation].outputs, index);
- hidl_vec_push_back(&model->main.outputIndexes, index);
- });
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ uint32_t index = addOperand(model, OperandLifeTime::SUBGRAPH_OUTPUT);
+ hidl_vec_push_back(&model->main.operations[operation].outputs, index);
+ hidl_vec_push_back(&model->main.outputIndexes, index);
+ });
}
}
@@ -737,12 +768,31 @@
};
static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
- for (int32_t preference : invalidExecutionPreferences) {
+ for (int32_t invalidPreference : invalidExecutionPreferences) {
const std::string message =
- "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
- validate(
- device, message, model, [](Model*) {},
- static_cast<ExecutionPreference>(preference));
+ "mutateExecutionPreferenceTest: preference " + std::to_string(invalidPreference);
+ validate(device, message, model,
+ [invalidPreference](Model*, ExecutionPreference* preference, Priority*) {
+ *preference = static_cast<ExecutionPreference>(invalidPreference);
+ });
+ }
+}
+
+///////////////////////// VALIDATE PRIORITY /////////////////////////
+
+static const int32_t invalidPriorities[] = {
+ static_cast<int32_t>(Priority::LOW) - 1, // lower bound
+ static_cast<int32_t>(Priority::HIGH) + 1, // upper bound
+};
+
+static void mutateExecutionPriorityTest(const sp<IDevice>& device, const Model& model) {
+ for (int32_t invalidPriority : invalidPriorities) {
+ const std::string message =
+ "mutatePriorityTest: priority " + std::to_string(invalidPriority);
+ validate(device, message, model,
+ [invalidPriority](Model*, ExecutionPreference*, Priority* priority) {
+ *priority = static_cast<Priority>(invalidPriority);
+ });
}
}
@@ -764,6 +814,7 @@
addOperationInputTest(device, model);
addOperationOutputTest(device, model);
mutateExecutionPreferenceTest(device, model);
+ mutateExecutionPriorityTest(device, model);
}
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
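
For drivers, the new priority cases boil down to the same check the test now performs with validExecutionPriority: prepareModel_1_3 must report INVALID_ARGUMENT for any priority value outside the enum. A sketch of that contract, not taken from any driver in this patch:

    // A 1.3 driver's priority acceptance check (mirrors validExecutionPriority above);
    // anything else must make prepareModel_1_3 fail with ErrorStatus::INVALID_ARGUMENT.
    static bool isValidPriority(Priority priority) {
        return priority == Priority::LOW || priority == Priority::MEDIUM ||
               priority == Priority::HIGH;
    }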
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 5e806e5..1ae8b3f 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -34,6 +34,8 @@
using V1_2::OutputShape;
using V1_2::Timing;
+using ExecutionMutation = std::function<void(Request*)>;
+
///////////////////////// UTILITY FUNCTIONS /////////////////////////
static bool badTiming(Timing timing) {
@@ -42,11 +44,11 @@
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
-// that use the request. Note that the request here is passed by value, and any
-// mutation to the request does not leave this function.
+// that use the request.
static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
- Request request, const std::function<void(Request*)>& mutation) {
- mutation(&request);
+ const Request& originalRequest, const ExecutionMutation& mutate) {
+ Request request = originalRequest;
+ mutate(&request);
// We'd like to test both with timing requested and without timing
// requested. Rather than running each test both ways, we'll decide whether