More tests for graph validation.
- detect cycle (CycleTest)
- detect bad execution order (mutateExecutionOrderTest)
- detect lifetime inconsistent with whether operand is written (mutateOperandLifeTimeTest)
- detect lifetime inconsistent with Model inputIndexes/outputIndexes (mutateOperandInputOutputTest)
- detect incorrect number of consumers (mutateOperandNumberOfConsumersTest)
- detect operand written multiple times (mutateOperandAddWriterTest)
- detect operand never written (mutateOperationRemoveWriteTest)
Bug: 66478689
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
Merged-In: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
(cherry picked from commit af51663e9980265853750a51fa2f4bb1cd4e48c1)
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
index cc44c9e..bda43b1 100644
--- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -18,8 +18,12 @@
#include "VtsHalNeuralnetworks.h"
+#include "1.0/Callbacks.h"
+
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
+using implementation::PreparedModelCallback;
+
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -43,4 +47,136 @@
EXPECT_TRUE(ret.isOk());
}
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ //            +-----+
+ //            |     |
+ //            v     |
+ // 3 = ADD(0, 4, 2) |
+ // |                |
+ // +----------+     |
+ //            |     |
+ //            v     |
+ // 4 = ADD(1, 3, 2) |
+ // |                |
+ // +----------------+
+ // |
+ // |
+ // +-------+
+ //         |
+ //         v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ const Model model = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel(model, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 3613e69..32850b0 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -29,7 +29,11 @@
#include <gtest/gtest.h>
#include <algorithm>
+#include <cstring>
+#include <functional>
#include <iostream>
+#include <map>
+#include <numeric>
#include <vector>
namespace android::hardware::neuralnetworks {
@@ -172,6 +176,45 @@
return outputBuffers;
}
+uint32_t sizeOfData(V1_0::OperandType type) {
+ switch (type) {
+ case V1_0::OperandType::FLOAT32:
+ case V1_0::OperandType::INT32:
+ case V1_0::OperandType::UINT32:
+ case V1_0::OperandType::TENSOR_FLOAT32:
+ case V1_0::OperandType::TENSOR_INT32:
+ return 4;
+ case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
+ return 1;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return 0;
+ }
+}
+
+static bool isTensor(V1_0::OperandType type) {
+ switch (type) {
+ case V1_0::OperandType::FLOAT32:
+ case V1_0::OperandType::INT32:
+ case V1_0::OperandType::UINT32:
+ return false;
+ case V1_0::OperandType::TENSOR_FLOAT32:
+ case V1_0::OperandType::TENSOR_INT32:
+ case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
+ return true;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return false;
+ }
+}
+
+uint32_t sizeOfData(const V1_0::Operand& operand) {
+ const uint32_t dataSize = sizeOfData(operand.type);
+ if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
+ return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
+ std::multiplies<>{});
+}
+
std::string gtestCompliantName(std::string name) {
// gtest test names must only contain alphanumeric characters
std::replace_if(
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index 79d8594..5ffbd43 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -17,9 +17,14 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
+#include <optional>
+#include <type_traits>
+#include <utility>
+
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
using implementation::PreparedModelCallback;
@@ -67,26 +72,6 @@
validatePrepareModel(device, message, model);
}
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
- if (vec) {
- std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
- vec->resize(vec->size() - 1);
- }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
- // assume vec is valid
- const uint32_t index = vec->size();
- vec->resize(index + 1);
- (*vec)[index] = value;
- return index;
-}
-
static uint32_t addOperand(Model* model) {
return hidl_vec_push_back(&model->operands,
{
@@ -107,6 +92,211 @@
return index;
}
+// If we introduce a CONSTANT_COPY for an operand of size operandSize,
+// how much will this increase the size of the model? This assumes
+// that we can (re)use all of model.operandValues for the operand
+// value.
+static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
+ const size_t operandValuesSize = model.operandValues.size();
+ return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
+}
+
+// Highly specialized utility routine for converting an operand to
+// CONSTANT_COPY lifetime.
+//
+// Expects that:
+// - operand has a known size
+// - operand->lifetime has already been set to CONSTANT_COPY
+// - operand->location has been zeroed out
+//
+// Does the following:
+// - initializes operand->location to point to the beginning of model->operandValues
+// - resizes model->operandValues (if necessary) to be large enough for the operand
+// value, padding it with zeroes on the end
+//
+// Potential problem:
+// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
+// operand with unspecified (but deterministic) data. This means that the model may be invalidated
+// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
+// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
+// value). For now, this should be fine because it just means we're not testing what we think we're
+// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
+// exercise the validation code over the span of the entire test set and operand space.
+//
+// Aborts if the specified operand type is an extension type or OEM type.
+static void becomeConstantCopy(Model* model, Operand* operand) {
+ // sizeOfData will abort if the specified type is an extension type or OEM type.
+ const size_t sizeOfOperand = sizeOfData(*operand);
+ EXPECT_NE(sizeOfOperand, size_t(0));
+ operand->location.poolIndex = 0;
+ operand->location.offset = 0;
+ operand->location.length = sizeOfOperand;
+ if (model->operandValues.size() < sizeOfOperand) {
+ model->operandValues.resize(sizeOfOperand);
+ }
+}
+
+// The sizeForBinder() functions estimate the size of the
+// representation of a value when sent to binder. It's probably a bit
+// of an under-estimate, because we don't know the size of the
+// metadata in the binder format (e.g., representation of the size of
+// a vector); but at least it adds up "big" things like vector
+// contents. However, it doesn't treat inter-field or end-of-struct
+// padding in a methodical way -- there's no attempt to be consistent
+// in whether or not padding in the native (C++) representation
+// contributes to the estimated size for the binder representation;
+// and there's no attempt to understand what padding (if any) is
+// needed in the binder representation.
+//
+// This assumes that non-metadata uses a fixed length encoding (e.g.,
+// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
+// using an encoding whose length is related to the magnitude of the
+// encoded value).
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+ static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+ "expected a trivially copyable type");
+ return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+ return std::accumulate(vec.begin(), vec.end(), 0,
+ [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+ size_t size = 0;
+
+ size += sizeForBinder(operand.type);
+ size += sizeForBinder(operand.dimensions);
+ size += sizeForBinder(operand.numberOfConsumers);
+ size += sizeForBinder(operand.scale);
+ size += sizeForBinder(operand.zeroPoint);
+ size += sizeForBinder(operand.lifetime);
+ size += sizeForBinder(operand.location);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+ size_t size = 0;
+
+ size += sizeForBinder(operation.type);
+ size += sizeForBinder(operation.inputs);
+ size += sizeForBinder(operation.outputs);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+ return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+ // This is just a guess.
+
+ size_t size = 0;
+
+ if (const native_handle_t* handle = memory.handle()) {
+ size += sizeof(*handle);
+ size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+ }
+ size += sizeForBinder(memory.name());
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+ size_t size = 0;
+
+ size += sizeForBinder(model.operands);
+ size += sizeForBinder(model.operations);
+ size += sizeForBinder(model.inputIndexes);
+ size += sizeForBinder(model.outputIndexes);
+ size += sizeForBinder(model.operandValues);
+ size += sizeForBinder(model.pools);
+
+ return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no
+// larger than half the transaction buffer size.
+//
+// If our representation grows large enough that it still fits within
+// the transaction buffer but combined with other transactions may
+// exceed the buffer size, then we may see intermittent HAL transport
+// errors.
+static bool exceedsBinderSizeLimit(size_t representationSize) {
+ // Instead of using this fixed buffer size, we might instead be able to use
+ // ProcessState::self()->getMmapSize(). However, this has a potential
+ // problem: The binder/mmap size of the current process does not necessarily
+ // indicate the binder/mmap size of the service (i.e., the other process).
+ // The only way it would be a good indication is if both the current process
+ // and the service use the default size.
+ static const size_t kHalfBufferSize = 1024 * 1024 / 2;
+
+ return representationSize > kHalfBufferSize;
+}
+
+///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
+
+static void mutateExecutionOrderTest(const sp<IDevice>& device, const V1_0::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const Operation& operationObj = model.operations[operation];
+ for (uint32_t input : operationObj.inputs) {
+ if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+ model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ // This operation reads an operand written by some
+ // other operation. Move this operation to the
+ // beginning of the sequence, ensuring that it reads
+ // the operand before that operand is written, thereby
+ // violating execution order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a reader";
+ validate(device, message, model, [operation](Model* model) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin(), operations.begin() + operation,
+ operations.begin() + operation + 1);
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ for (uint32_t output : operationObj.outputs) {
+ if (model.operands[output].numberOfConsumers > 0) {
+ // This operation writes an operand read by some other
+ // operation. Move this operation to the end of the
+ // sequence, ensuring that it writes the operand after
+ // that operand is read, thereby violating execution
+ // order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a writer";
+ validate(device, message, model, [operation](Model* model) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin() + operation, operations.begin() + operation + 1,
+ operations.end());
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ }
+}
+
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const int32_t invalidOperandTypes[] = {
@@ -218,9 +408,233 @@
}
}
+///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
+
+static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
+ // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime
+
+ // Ways to get an invalid lifetime:
+ // - change whether a lifetime means an operand should have a writer
+ std::vector<OperandLifeTime> ret;
+ switch (operand.lifetime) {
+ case OperandLifeTime::MODEL_OUTPUT:
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ ret = {
+ OperandLifeTime::MODEL_INPUT,
+ OperandLifeTime::CONSTANT_COPY,
+ };
+ break;
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ case OperandLifeTime::MODEL_INPUT:
+ ret = {
+ OperandLifeTime::TEMPORARY_VARIABLE,
+ OperandLifeTime::MODEL_OUTPUT,
+ };
+ break;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
+ // is this operand written (then CONSTANT_COPY would be
+ // invalid) or not (then TEMPORARY_VARIABLE would be
+ // invalid)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
+ }
+
+ return ret;
+}
+
+static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<OperandLifeTime> invalidLifeTimes =
+ getInvalidLifeTimes(model, modelSize, model.operands[operand]);
+ for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
+ const std::string message = "mutateOperandLifetimeTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(invalidLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model, [operand, invalidLifeTime](Model* model) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ switch (operandObj.lifetime) {
+ case OperandLifeTime::MODEL_INPUT: {
+ hidl_vec_remove(&model->inputIndexes, uint32_t(operand));
+ break;
+ }
+ case OperandLifeTime::MODEL_OUTPUT: {
+ hidl_vec_remove(&model->outputIndexes, uint32_t(operand));
+ break;
+ }
+ default:
+ break;
+ }
+ operandObj.lifetime = invalidLifeTime;
+ operandObj.location = kZeroDataLocation;
+ switch (invalidLifeTime) {
+ case OperandLifeTime::CONSTANT_COPY: {
+ becomeConstantCopy(model, &operandObj);
+ break;
+ }
+ case OperandLifeTime::MODEL_INPUT:
+ hidl_vec_push_back(&model->inputIndexes, uint32_t(operand));
+ break;
+ case OperandLifeTime::MODEL_OUTPUT:
+ hidl_vec_push_back(&model->outputIndexes, uint32_t(operand));
+ break;
+ default:
+ break;
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
+
+static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
+ // - change whether a lifetime means an operand is a model input, a model output, or neither
+ // - preserve whether or not a lifetime means an operand should have a writer
+ switch (operand.lifetime) {
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ return OperandLifeTime::MODEL_INPUT;
+ case OperandLifeTime::MODEL_INPUT: {
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ break;
+ }
+ return OperandLifeTime::CONSTANT_COPY;
+ }
+ case OperandLifeTime::MODEL_OUTPUT:
+ return OperandLifeTime::TEMPORARY_VARIABLE;
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ return OperandLifeTime::MODEL_OUTPUT;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
+ // appropriate choice -- is this operand written (then
+ // TEMPORARY_VARIABLE would be appropriate) or not (then
+ // CONSTANT_COPY would be appropriate)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ return std::nullopt;
+}
+
+static void mutateOperandInputOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::optional<OperandLifeTime> changedLifeTime =
+ getInputOutputLifeTime(model, modelSize, model.operands[operand]);
+ if (changedLifeTime) {
+ const std::string message = "mutateOperandInputOutputTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(*changedLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model, [operand, changedLifeTime](Model* model) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ operandObj.lifetime = *changedLifeTime;
+ operandObj.location = kZeroDataLocation;
+ if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
+ becomeConstantCopy(model, &operandObj);
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
+
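+// An operand's numberOfConsumers must equal the number of operation inputs
+// that reference it, so perturbing the count by one in either direction
+// should always produce an invalid model.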
+static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
+ if (numberOfConsumers == 0) {
+ return {1};
+ } else {
+ return {numberOfConsumers - 1, numberOfConsumers + 1};
+ }
+}
+
+static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device,
+ const V1_0::Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<uint32_t> invalidNumberOfConsumersVec =
+ getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers);
+ for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
+ const std::string message =
+ "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
+ " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
+ validate(device, message, model, [operand, invalidNumberOfConsumers](Model* model) {
+ model->operands[operand].numberOfConsumers = invalidNumberOfConsumers;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
+
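+// A TEMPORARY_VARIABLE or MODEL_OUTPUT operand must be written by exactly one
+// operation, so adding a second writer for an existing output operand should
+// produce an invalid model.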
+static void mutateOperandAddWriterTest(const sp<IDevice>& device, const V1_0::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size();
+ ++badOutputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum];
+ const std::string message = "mutateOperandAddWriterTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ // We'll insert a copy of the operation, all of whose
+ // OTHER output operands are newly-created -- i.e.,
+ // there'll only be a duplicate write of ONE of that
+ // operation's output operands.
+ validate(device, message, model, [operation, badOutputNum](Model* model) {
+ Operation newOperation = model->operations[operation];
+ for (uint32_t input : newOperation.inputs) {
+ ++model->operands[input].numberOfConsumers;
+ }
+ for (size_t outputNum = 0; outputNum < newOperation.outputs.size(); ++outputNum) {
+ if (outputNum == badOutputNum) continue;
+
+ Operand operandValue = model->operands[newOperation.outputs[outputNum]];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime, OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ newOperation.outputs[outputNum] =
+ hidl_vec_push_back(&model->operands, operandValue);
+ }
+ // Where do we insert the extra writer (a new
+ // operation)? It has to be later than all the
+ // writers of its inputs. The easiest thing to do
+ // is to insert it at the end of the operation
+ // sequence.
+ hidl_vec_push_back(&model->operations, newOperation);
+ });
+ }
+ }
+}
+
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
-// TODO: Operand::lifetime
// TODO: Operand::location
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
@@ -351,6 +765,33 @@
}
}
+///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
+
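+// A consumed operand must be written by some operation, so redirecting an
+// operation's only write of that operand to a newly created operand leaves
+// the original operand unwritten and should produce an invalid model.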
+static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const V1_0::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size();
+ ++outputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum];
+ if (model.operands[outputOperandIndex].numberOfConsumers > 0) {
+ const std::string message = "mutateOperationRemoveWriteTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ validate(device, message, model, [operation, outputNum](Model* model) {
+ uint32_t& outputOperandIndex = model->operations[operation].outputs[outputNum];
+ Operand operandValue = model->operands[outputOperandIndex];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime, OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ outputOperandIndex = hidl_vec_push_back(&model->operands, operandValue);
+ });
+ }
+ }
+ }
+}
+
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
@@ -476,14 +917,20 @@
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateExecutionOrderTest(device, model);
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
mutateOperandZeroPointTest(device, model);
+ mutateOperandLifeTimeTest(device, model);
+ mutateOperandInputOutputTest(device, model);
+ mutateOperandNumberOfConsumersTest(device, model);
+ mutateOperandAddWriterTest(device, model);
mutateOperationOperandTypeTest(device, model);
mutateOperationTypeTest(device, model);
mutateOperationInputOperandIndexTest(device, model);
mutateOperationOutputOperandIndexTest(device, model);
+ mutateOperationRemoveWriteTest(device, model);
removeOperandTest(device, model);
removeOperationTest(device, model);
removeOperationInputTest(device, model);
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 3292f79..7bd0460 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -21,6 +21,7 @@
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware_buffer.h>
#include <android/hidl/memory/1.0/IMemory.h>
+#include <gtest/gtest.h>
#include <algorithm>
#include <iosfwd>
#include <string>
@@ -108,6 +109,15 @@
vec->resize(vec->size() - 1);
}
+// Assumes there is exactly one instance of the value in the vector.
+template <typename Type>
+inline void hidl_vec_remove(hidl_vec<Type>* vec, const Type& val) {
+ CHECK(vec != nullptr);
+ auto where = std::find(vec->begin(), vec->end(), val);
+ ASSERT_NE(where, vec->end());
+ hidl_vec_removeAt(vec, where - vec->begin());
+}
+
template <typename Type>
inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
CHECK(vec != nullptr);
@@ -117,6 +127,18 @@
return index;
}
+// Returns the amount of space needed to store a value of the specified type.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(V1_0::OperandType type);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a non-extension, non-OEM tensor with unspecified
+// rank or at least one unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(const V1_0::Operand& operand);
+
template <typename Type>
using Named = std::pair<std::string, Type>;
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
index 44836f0..baadd1b 100644
--- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -18,10 +18,16 @@
#include "VtsHalNeuralnetworks.h"
+#include "1.0/Callbacks.h"
+
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+using V1_0::implementation::PreparedModelCallback;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -48,4 +54,137 @@
EXPECT_TRUE(ret.isOk());
}
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ //            +-----+
+ //            |     |
+ //            v     |
+ // 3 = ADD(0, 4, 2) |
+ // |                |
+ // +----------+     |
+ //            |     |
+ //            v     |
+ // 4 = ADD(1, 3, 2) |
+ // |                |
+ // +----------------+
+ // |
+ // |
+ // +-------+
+ //         |
+ //         v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ const Model model = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations_1_1() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_1(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel_1_1() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index 3b6f0f8..1f4e4ed 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -16,13 +16,19 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <android/hardware/neuralnetworks/1.1/types.h>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
+#include <optional>
+#include <type_traits>
+#include <utility>
+
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
+using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::IPreparedModel;
using V1_0::Operand;
@@ -105,6 +111,212 @@
return index;
}
+// If we introduce a CONSTANT_COPY for an operand of size operandSize,
+// how much will this increase the size of the model? This assumes
+// that we can (re)use all of model.operandValues for the operand
+// value.
+static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
+ const size_t operandValuesSize = model.operandValues.size();
+ return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
+}
+
+// Highly specialized utility routine for converting an operand to
+// CONSTANT_COPY lifetime.
+//
+// Expects that:
+// - operand has a known size
+// - operand->lifetime has already been set to CONSTANT_COPY
+// - operand->location has been zeroed out
+//
+// Does the following:
+// - initializes operand->location to point to the beginning of model->operandValues
+// - resizes model->operandValues (if necessary) to be large enough for the operand
+// value, padding it with zeroes on the end
+//
+// Potential problem:
+// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
+// operand with unspecified (but deterministic) data. This means that the model may be invalidated
+// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
+// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
+// value). For now, this should be fine because it just means we're not testing what we think we're
+// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
+// exercise the validation code over the span of the entire test set and operand space.
+//
+// Aborts if the specified operand type is an extension type or OEM type.
+static void becomeConstantCopy(Model* model, Operand* operand) {
+ // sizeOfData will abort if the specified type is an extension type or OEM type.
+ const size_t sizeOfOperand = sizeOfData(*operand);
+ EXPECT_NE(sizeOfOperand, size_t(0));
+ operand->location.poolIndex = 0;
+ operand->location.offset = 0;
+ operand->location.length = sizeOfOperand;
+ if (model->operandValues.size() < sizeOfOperand) {
+ model->operandValues.resize(sizeOfOperand);
+ }
+}
+
+// The sizeForBinder() functions estimate the size of the
+// representation of a value when sent to binder. It's probably a bit
+// of an under-estimate, because we don't know the size of the
+// metadata in the binder format (e.g., representation of the size of
+// a vector); but at least it adds up "big" things like vector
+// contents. However, it doesn't treat inter-field or end-of-struct
+// padding in a methodical way -- there's no attempt to be consistent
+// in whether or not padding in the native (C++) representation
+// contributes to the estimated size for the binder representation;
+// and there's no attempt to understand what padding (if any) is
+// needed in the binder representation.
+//
+// This assumes that non-metadata uses a fixed length encoding (e.g.,
+// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
+// using an encoding whose length is related to the magnitude of the
+// encoded value).
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+ static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+ "expected a trivially copyable type");
+ return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+ return std::accumulate(vec.begin(), vec.end(), 0,
+ [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+ size_t size = 0;
+
+ size += sizeForBinder(operand.type);
+ size += sizeForBinder(operand.dimensions);
+ size += sizeForBinder(operand.numberOfConsumers);
+ size += sizeForBinder(operand.scale);
+ size += sizeForBinder(operand.zeroPoint);
+ size += sizeForBinder(operand.lifetime);
+ size += sizeForBinder(operand.location);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+ size_t size = 0;
+
+ size += sizeForBinder(operation.type);
+ size += sizeForBinder(operation.inputs);
+ size += sizeForBinder(operation.outputs);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+ return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+ // This is just a guess.
+
+ size_t size = 0;
+
+ if (const native_handle_t* handle = memory.handle()) {
+ size += sizeof(*handle);
+ size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+ }
+ size += sizeForBinder(memory.name());
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+ size_t size = 0;
+
+ size += sizeForBinder(model.operands);
+ size += sizeForBinder(model.operations);
+ size += sizeForBinder(model.inputIndexes);
+ size += sizeForBinder(model.outputIndexes);
+ size += sizeForBinder(model.operandValues);
+ size += sizeForBinder(model.pools);
+ size += sizeForBinder(model.relaxComputationFloat32toFloat16);
+
+ return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no
+// larger than half the transaction buffer size.
+//
+// If our representation grows large enough that it still fits within
+// the transaction buffer but combined with other transactions may
+// exceed the buffer size, then we may see intermittent HAL transport
+// errors.
+static bool exceedsBinderSizeLimit(size_t representationSize) {
+ // Instead of using this fixed buffer size, we might instead be able to use
+ // ProcessState::self()->getMmapSize(). However, this has a potential
+ // problem: The binder/mmap size of the current process does not necessarily
+ // indicate the binder/mmap size of the service (i.e., the other process).
+ // The only way it would be a good indication is if both the current process
+ // and the service use the default size.
+ static const size_t kHalfBufferSize = 1024 * 1024 / 2;
+
+ return representationSize > kHalfBufferSize;
+}
+
+///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
+
+static void mutateExecutionOrderTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const Operation& operationObj = model.operations[operation];
+ for (uint32_t input : operationObj.inputs) {
+ if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+ model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ // This operation reads an operand written by some
+ // other operation. Move this operation to the
+ // beginning of the sequence, ensuring that it reads
+ // the operand before that operand is written, thereby
+ // violating execution order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a reader";
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin(), operations.begin() + operation,
+ operations.begin() + operation + 1);
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ for (uint32_t output : operationObj.outputs) {
+ if (model.operands[output].numberOfConsumers > 0) {
+ // This operation writes an operand read by some other
+ // operation. Move this operation to the end of the
+ // sequence, ensuring that it writes the operand after
+ // that operand is read, thereby violating execution
+ // order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a writer";
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin() + operation, operations.begin() + operation + 1,
+ operations.end());
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ }
+}
+
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const int32_t invalidOperandTypes[] = {
@@ -221,9 +433,240 @@
}
}
+///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
+
+static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
+ // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime
+
+ // Ways to get an invalid lifetime:
+ // - change whether a lifetime means an operand should have a writer
+ std::vector<OperandLifeTime> ret;
+ switch (operand.lifetime) {
+ case OperandLifeTime::MODEL_OUTPUT:
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ ret = {
+ OperandLifeTime::MODEL_INPUT,
+ OperandLifeTime::CONSTANT_COPY,
+ };
+ break;
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ case OperandLifeTime::MODEL_INPUT:
+ ret = {
+ OperandLifeTime::TEMPORARY_VARIABLE,
+ OperandLifeTime::MODEL_OUTPUT,
+ };
+ break;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
+ // is this operand written (then CONSTANT_COPY would be
+ // invalid) or not (then TEMPORARY_VARIABLE would be
+ // invalid)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
+ }
+
+ return ret;
+}
+
+static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<OperandLifeTime> invalidLifeTimes =
+ getInvalidLifeTimes(model, modelSize, model.operands[operand]);
+ for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
+ const std::string message = "mutateOperandLifetimeTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(invalidLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, invalidLifeTime](Model* model, ExecutionPreference*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ switch (operandObj.lifetime) {
+ case OperandLifeTime::MODEL_INPUT: {
+ hidl_vec_remove(&model->inputIndexes, uint32_t(operand));
+ break;
+ }
+ case OperandLifeTime::MODEL_OUTPUT: {
+ hidl_vec_remove(&model->outputIndexes, uint32_t(operand));
+ break;
+ }
+ default:
+ break;
+ }
+ operandObj.lifetime = invalidLifeTime;
+ operandObj.location = kZeroDataLocation;
+ switch (invalidLifeTime) {
+ case OperandLifeTime::CONSTANT_COPY: {
+ becomeConstantCopy(model, &operandObj);
+ break;
+ }
+ case OperandLifeTime::MODEL_INPUT:
+ hidl_vec_push_back(&model->inputIndexes, uint32_t(operand));
+ break;
+ case OperandLifeTime::MODEL_OUTPUT:
+ hidl_vec_push_back(&model->outputIndexes, uint32_t(operand));
+ break;
+ default:
+ break;
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
+
+static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
+ // - change whether a lifetime means an operand is a model input, a model output, or neither
+ // - preserve whether or not a lifetime means an operand should have a writer
+ switch (operand.lifetime) {
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ return OperandLifeTime::MODEL_INPUT;
+ case OperandLifeTime::MODEL_INPUT: {
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ break;
+ }
+ return OperandLifeTime::CONSTANT_COPY;
+ }
+ case OperandLifeTime::MODEL_OUTPUT:
+ return OperandLifeTime::TEMPORARY_VARIABLE;
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ return OperandLifeTime::MODEL_OUTPUT;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
+ // appropriate choice -- is this operand written (then
+ // TEMPORARY_VARIABLE would be appropriate) or not (then
+ // CONSTANT_COPY would be appropriate)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ return std::nullopt;
+}
+
+static void mutateOperandInputOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::optional<OperandLifeTime> changedLifeTime =
+ getInputOutputLifeTime(model, modelSize, model.operands[operand]);
+ if (changedLifeTime) {
+ const std::string message = "mutateOperandInputOutputTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(*changedLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, changedLifeTime](Model* model, ExecutionPreference*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ operandObj.lifetime = *changedLifeTime;
+ operandObj.location = kZeroDataLocation;
+ if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
+ becomeConstantCopy(model, &operandObj);
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
+
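+// An operand's numberOfConsumers must equal the number of operation inputs
+// that reference it, so perturbing the count by one in either direction
+// should always produce an invalid model.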
+static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
+ if (numberOfConsumers == 0) {
+ return {1};
+ } else {
+ return {numberOfConsumers - 1, numberOfConsumers + 1};
+ }
+}
+
+static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device,
+ const V1_1::Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<uint32_t> invalidNumberOfConsumersVec =
+ getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers);
+ for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
+ const std::string message =
+ "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
+ " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
+ validate(device, message, model,
+ [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*) {
+ model->operands[operand].numberOfConsumers = invalidNumberOfConsumers;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
+
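+// A TEMPORARY_VARIABLE or MODEL_OUTPUT operand must be written by exactly one
+// operation, so adding a second writer for an existing output operand should
+// produce an invalid model.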
+static void mutateOperandAddWriterTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size();
+ ++badOutputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum];
+ const std::string message = "mutateOperandAddWriterTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ // We'll insert a copy of the operation, all of whose
+ // OTHER output operands are newly-created -- i.e.,
+ // there'll only be a duplicate write of ONE of that
+ // operation's output operands.
+ validate(device, message, model,
+ [operation, badOutputNum](Model* model, ExecutionPreference*) {
+ Operation newOperation = model->operations[operation];
+ for (uint32_t input : newOperation.inputs) {
+ ++model->operands[input].numberOfConsumers;
+ }
+ for (size_t outputNum = 0; outputNum < newOperation.outputs.size();
+ ++outputNum) {
+ if (outputNum == badOutputNum) continue;
+
+ Operand operandValue =
+ model->operands[newOperation.outputs[outputNum]];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ newOperation.outputs[outputNum] =
+ hidl_vec_push_back(&model->operands, operandValue);
+ }
+ // Where do we insert the extra writer (a new
+ // operation)? It has to be later than all the
+ // writers of its inputs. The easiest thing to do
+ // is to insert it at the end of the operation
+ // sequence.
+ hidl_vec_push_back(&model->operations, newOperation);
+ });
+ }
+ }
+}
+
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
-// TODO: Operand::lifetime
// TODO: Operand::location
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
@@ -358,6 +801,37 @@
}
}
+///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
+
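+// A consumed operand must be written by some operation, so redirecting an
+// operation's only write of that operand to a newly created operand leaves
+// the original operand unwritten and should produce an invalid model.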
+static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const V1_1::Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size();
+ ++outputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum];
+ if (model.operands[outputOperandIndex].numberOfConsumers > 0) {
+ const std::string message = "mutateOperationRemoveWriteTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ validate(device, message, model,
+ [operation, outputNum](Model* model, ExecutionPreference*) {
+ uint32_t& outputOperandIndex =
+ model->operations[operation].outputs[outputNum];
+ Operand operandValue = model->operands[outputOperandIndex];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ outputOperandIndex =
+ hidl_vec_push_back(&model->operands, operandValue);
+ });
+ }
+ }
+ }
+}
+
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
@@ -504,14 +978,20 @@
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateExecutionOrderTest(device, model);
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
mutateOperandZeroPointTest(device, model);
+ mutateOperandLifeTimeTest(device, model);
+ mutateOperandInputOutputTest(device, model);
+ mutateOperandNumberOfConsumersTest(device, model);
+ mutateOperandAddWriterTest(device, model);
mutateOperationOperandTypeTest(device, model);
mutateOperationTypeTest(device, model);
mutateOperationInputOperandIndexTest(device, model);
mutateOperationOutputOperandIndexTest(device, model);
+ mutateOperationRemoveWriteTest(device, model);
removeOperandTest(device, model);
removeOperationTest(device, model);
removeOperationInputTest(device, model);
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 481eb80..182f716 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -15,11 +15,12 @@
//
cc_library_static {
- name: "VtsHalNeuralNetworksV1_2Callbacks",
+ name: "VtsHalNeuralNetworksV1_2_utils",
defaults: ["neuralnetworks_vts_functional_defaults"],
export_include_dirs: ["include"],
srcs: [
"Callbacks.cpp",
+ "Utils.cpp",
],
static_libs: [
"android.hardware.neuralnetworks@1.0",
@@ -51,7 +52,7 @@
],
static_libs: [
"VtsHalNeuralNetworksV1_0_utils",
- "VtsHalNeuralNetworksV1_2Callbacks",
+ "VtsHalNeuralNetworksV1_2_utils",
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 58d3c4a..77340e7 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -20,9 +20,13 @@
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
+using implementation::PreparedModelCallback;
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
using V1_0::PerformanceInfo;
+using V1_1::ExecutionPreference;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -123,4 +127,139 @@
});
EXPECT_TRUE(ret.isOk());
}
+
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ // +-----+
+ // | |
+ // v |
+ // 3 = ADD(0, 4, 2) |
+ // | |
+ // +----------+ |
+ // | |
+ // v |
+ // 4 = ADD(1, 3, 2) |
+ // | |
+ // +----------------+
+ // |
+ // |
+ // +-------+
+ // |
+ // v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ const Model model = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations_1_2() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_2(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel_1_2() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_2(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/Utils.cpp b/neuralnetworks/1.2/vts/functional/Utils.cpp
new file mode 100644
index 0000000..cc654f2
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/Utils.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+
+#include <functional>
+#include <numeric>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+uint32_t sizeOfData(V1_2::OperandType type) {
+ switch (type) {
+ case V1_2::OperandType::FLOAT32:
+ case V1_2::OperandType::INT32:
+ case V1_2::OperandType::UINT32:
+ case V1_2::OperandType::TENSOR_FLOAT32:
+ case V1_2::OperandType::TENSOR_INT32:
+ return 4;
+ case V1_2::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_2::OperandType::TENSOR_FLOAT16:
+ case V1_2::OperandType::FLOAT16:
+ case V1_2::OperandType::TENSOR_QUANT16_ASYMM:
+ return 2;
+ case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_2::OperandType::BOOL:
+ case V1_2::OperandType::TENSOR_BOOL8:
+ case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_2::OperandType::TENSOR_QUANT8_SYMM:
+ return 1;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return 0;
+ }
+}
+
+static bool isTensor(V1_2::OperandType type) {
+ switch (type) {
+ case V1_2::OperandType::FLOAT32:
+ case V1_2::OperandType::INT32:
+ case V1_2::OperandType::UINT32:
+ case V1_2::OperandType::FLOAT16:
+ case V1_2::OperandType::BOOL:
+ return false;
+ case V1_2::OperandType::TENSOR_FLOAT32:
+ case V1_2::OperandType::TENSOR_INT32:
+ case V1_2::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_2::OperandType::TENSOR_FLOAT16:
+ case V1_2::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_2::OperandType::TENSOR_BOOL8:
+ case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_2::OperandType::TENSOR_QUANT8_SYMM:
+ return true;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return false;
+ }
+}
+
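+// For example, a TENSOR_FLOAT32 operand with dimensions {2, 3} needs
+// 4 * 2 * 3 = 24 bytes, while a TENSOR_FLOAT32 operand with no dimensions
+// (unknown rank) reports 0.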
+uint32_t sizeOfData(const V1_2::Operand& operand) {
+ const uint32_t dataSize = sizeOfData(operand.type);
+ if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
+ return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
+ std::multiplies<>{});
+}
+
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index e9fc6e9..6583dfe 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -16,14 +16,21 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <android/hardware/neuralnetworks/1.1/types.h>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
+#include "1.2/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
+#include <optional>
+#include <type_traits>
+#include <utility>
+
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
using implementation::PreparedModelCallback;
+using V1_0::DataLocation;
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
@@ -105,6 +112,250 @@
return index;
}
+// If we introduce a CONSTANT_COPY for an operand of size operandSize,
+// how much will this increase the size of the model? This assumes
+// that we can (re)use all of model.operandValues for the operand
+// value.
+static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
+ const size_t operandValuesSize = model.operandValues.size();
+ return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
+}
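+
+// For example, if the operand needs 8 bytes and model.operandValues already
+// holds 3 bytes, the model grows by 5 bytes; if operandValues already holds 8
+// or more bytes, it does not grow at all.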
+
+// Highly specialized utility routine for converting an operand to
+// CONSTANT_COPY lifetime.
+//
+// Expects that:
+// - operand has a known size
+// - operand->lifetime has already been set to CONSTANT_COPY
+// - operand->location has been zeroed out
+//
+// Does the following:
+// - initializes operand->location to point to the beginning of model->operandValues
+// - resizes model->operandValues (if necessary) to be large enough for the operand
+// value, padding it with zeroes on the end
+//
+// Potential problem:
+// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
+// operand with unspecified (but deterministic) data. This means that the model may be invalidated
+// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
+// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
+// value). For now, this should be fine because it just means we're not testing what we think we're
+// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
+// exercise the validation code over the span of the entire test set and operand space.
+//
+// Aborts if the specified operand type is an extension type or OEM type.
+static void becomeConstantCopy(Model* model, Operand* operand) {
+ // sizeOfData will abort if the specified type is an extension type or OEM type.
+ const size_t sizeOfOperand = sizeOfData(*operand);
+ EXPECT_NE(sizeOfOperand, size_t(0));
+ operand->location.poolIndex = 0;
+ operand->location.offset = 0;
+ operand->location.length = sizeOfOperand;
+ if (model->operandValues.size() < sizeOfOperand) {
+ model->operandValues.resize(sizeOfOperand);
+ }
+}
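+
+// A minimal usage sketch (mirroring the callers below): the caller first sets
+// the lifetime and zeroes the location, then lets this routine size the value:
+//   operand->lifetime = OperandLifeTime::CONSTANT_COPY;
+//   operand->location = {};
+//   becomeConstantCopy(model, operand);
+//   // operand->location now spans the first sizeOfData(*operand) bytes of
+//   // model->operandValues.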
+
+// The sizeForBinder() functions estimate the size of the
+// representation of a value when sent to binder. It's probably a bit
+// of an under-estimate, because we don't know the size of the
+// metadata in the binder format (e.g., representation of the size of
+// a vector); but at least it adds up "big" things like vector
+// contents. However, it doesn't treat inter-field or end-of-struct
+// padding in a methodical way -- there's no attempt to be consistent
+// in whether or not padding in the native (C++) representation
+// contributes to the estimated size for the binder representation;
+// and there's no attempt to understand what padding (if any) is
+// needed in the binder representation.
+//
+// This assumes that non-metadata uses a fixed length encoding (e.g.,
+// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
+// using an encoding whose length is related to the magnitude of the
+// encoded value).
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+ static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+ "expected a trivially copyable type");
+ return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+ return std::accumulate(vec.begin(), vec.end(), 0,
+ [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
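+
+// For example, sizeForBinder(hidl_vec<uint32_t>{1, 2, 3}) adds up only the
+// three elements -- 3 * sizeof(uint32_t) = 12 bytes -- and ignores the
+// vector's own length metadata, consistent with the under-estimate described
+// above.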
+
+template <>
+size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) {
+ size_t size = 0;
+
+ size += sizeForBinder(symmPerChannelQuantParams.scales);
+ size += sizeForBinder(symmPerChannelQuantParams.channelDim);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operand::ExtraParams& extraParams) {
+ using Discriminator = Operand::ExtraParams::hidl_discriminator;
+ switch (extraParams.getDiscriminator()) {
+ case Discriminator::none:
+ return 0;
+ case Discriminator::channelQuant:
+ return sizeForBinder(extraParams.channelQuant());
+ case Discriminator::extension:
+ return sizeForBinder(extraParams.extension());
+ }
+ LOG(FATAL) << "Unrecognized extraParams enum: "
+ << static_cast<int>(extraParams.getDiscriminator());
+ return 0;
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+ size_t size = 0;
+
+ size += sizeForBinder(operand.type);
+ size += sizeForBinder(operand.dimensions);
+ size += sizeForBinder(operand.numberOfConsumers);
+ size += sizeForBinder(operand.scale);
+ size += sizeForBinder(operand.zeroPoint);
+ size += sizeForBinder(operand.lifetime);
+ size += sizeForBinder(operand.location);
+ size += sizeForBinder(operand.extraParams);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+ size_t size = 0;
+
+ size += sizeForBinder(operation.type);
+ size += sizeForBinder(operation.inputs);
+ size += sizeForBinder(operation.outputs);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+ return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+ // This is just a guess.
+
+ size_t size = 0;
+
+ if (const native_handle_t* handle = memory.handle()) {
+ size += sizeof(*handle);
+ size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+ }
+ size += sizeForBinder(memory.name());
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+ size_t size = 0;
+
+ size += sizeForBinder(extensionNameToPrefix.name);
+ size += sizeForBinder(extensionNameToPrefix.prefix);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+ size_t size = 0;
+
+ size += sizeForBinder(model.operands);
+ size += sizeForBinder(model.operations);
+ size += sizeForBinder(model.inputIndexes);
+ size += sizeForBinder(model.outputIndexes);
+ size += sizeForBinder(model.operandValues);
+ size += sizeForBinder(model.pools);
+ size += sizeForBinder(model.relaxComputationFloat32toFloat16);
+ size += sizeForBinder(model.extensionNameToPrefix);
+
+ return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no
+// larger than half the transaction buffer size.
+//
+// If our representation grows large enough that it still fits within
+// the transaction buffer but combined with other transactions may
+// exceed the buffer size, then we may see intermittent HAL transport
+// errors.
+static bool exceedsBinderSizeLimit(size_t representationSize) {
+ // Instead of using this fixed buffer size, we might instead be able to use
+ // ProcessState::self()->getMmapSize(). However, this has a potential
+ // problem: The binder/mmap size of the current process does not necessarily
+ // indicate the binder/mmap size of the service (i.e., the other process).
+ // The only way it would be a good indication is if both the current process
+ // and the service use the default size.
+ static const size_t kHalfBufferSize = 1024 * 1024 / 2;
+
+ return representationSize > kHalfBufferSize;
+}
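+
+// For example, with an estimated model size of 400 KiB, an extra CONSTANT_COPY
+// payload of 100 KiB (500 KiB total) still fits under the 512 KiB threshold,
+// while an extra 200 KiB (600 KiB total) exceeds it.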
+
+///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
+
+static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const Operation& operationObj = model.operations[operation];
+ for (uint32_t input : operationObj.inputs) {
+ if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+ model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ // This operation reads an operand written by some
+ // other operation. Move this operation to the
+ // beginning of the sequence, ensuring that it reads
+ // the operand before that operand is written, thereby
+ // violating execution order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a reader";
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin(), operations.begin() + operation,
+ operations.begin() + operation + 1);
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ for (uint32_t output : operationObj.outputs) {
+ if (model.operands[output].numberOfConsumers > 0) {
+ // This operation writes an operand read by some other
+ // operation. Move this operation to the end of the
+ // sequence, ensuring that it writes the operand after
+ // that operand is read, thereby violating execution
+ // order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a writer";
+ validate(device, message, model, [operation](Model* model, ExecutionPreference*) {
+ auto& operations = model->operations;
+ std::rotate(operations.begin() + operation, operations.begin() + operation + 1,
+ operations.end());
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ }
+}
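+
+// For example, with operations {op0, op1, op2} where op2 reads a temporary
+// written by op1, the "reader" mutation rotates op2 to the front, yielding
+// {op2, op0, op1} (op2 now reads before the write); the "writer" mutation on
+// op1 rotates it to the back, yielding {op0, op2, op1} (op1 now writes after
+// the read).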
+
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const uint32_t invalidOperandTypes[] = {
@@ -251,9 +502,239 @@
}
}
+///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
+
+static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
+ // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime
+
+ // Ways to get an invalid lifetime:
+ // - change whether a lifetime means an operand should have a writer
+ std::vector<OperandLifeTime> ret;
+ switch (operand.lifetime) {
+ case OperandLifeTime::MODEL_OUTPUT:
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ ret = {
+ OperandLifeTime::MODEL_INPUT,
+ OperandLifeTime::CONSTANT_COPY,
+ };
+ break;
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ case OperandLifeTime::MODEL_INPUT:
+ ret = {
+ OperandLifeTime::TEMPORARY_VARIABLE,
+ OperandLifeTime::MODEL_OUTPUT,
+ };
+ break;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
+ // is this operand written (then CONSTANT_COPY would be
+ // invalid) or not (then TEMPORARY_VARIABLE would be
+ // invalid)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
+ }
+
+ return ret;
+}
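+
+// For example, a TEMPORARY_VARIABLE operand of known (small) size yields
+// {MODEL_INPUT, CONSTANT_COPY}; the same operand with an unknown shape yields
+// only {MODEL_INPUT}, because a CONSTANT_COPY of unknown size cannot be
+// constructed.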
+
+static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<OperandLifeTime> invalidLifeTimes =
+ getInvalidLifeTimes(model, modelSize, model.operands[operand]);
+ for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
+ const std::string message = "mutateOperandLifetimeTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(invalidLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, invalidLifeTime](Model* model, ExecutionPreference*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ switch (operandObj.lifetime) {
+ case OperandLifeTime::MODEL_INPUT: {
+ hidl_vec_remove(&model->inputIndexes, uint32_t(operand));
+ break;
+ }
+ case OperandLifeTime::MODEL_OUTPUT: {
+ hidl_vec_remove(&model->outputIndexes, uint32_t(operand));
+ break;
+ }
+ default:
+ break;
+ }
+ operandObj.lifetime = invalidLifeTime;
+ operandObj.location = kZeroDataLocation;
+ switch (invalidLifeTime) {
+ case OperandLifeTime::CONSTANT_COPY: {
+ becomeConstantCopy(model, &operandObj);
+ break;
+ }
+ case OperandLifeTime::MODEL_INPUT:
+ hidl_vec_push_back(&model->inputIndexes, uint32_t(operand));
+ break;
+ case OperandLifeTime::MODEL_OUTPUT:
+ hidl_vec_push_back(&model->outputIndexes, uint32_t(operand));
+ break;
+ default:
+ break;
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
+
+static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
+ // - change whether a lifetime means an operand is a model input, a model output, or neither
+ // - preserve whether or not a lifetime means an operand should have a writer
+ switch (operand.lifetime) {
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ return OperandLifeTime::MODEL_INPUT;
+ case OperandLifeTime::MODEL_INPUT: {
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ break;
+ }
+ return OperandLifeTime::CONSTANT_COPY;
+ }
+ case OperandLifeTime::MODEL_OUTPUT:
+ return OperandLifeTime::TEMPORARY_VARIABLE;
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ return OperandLifeTime::MODEL_OUTPUT;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
+ // appropriate choice -- is this operand written (then
+ // TEMPORARY_VARIABLE would be appropriate) or not (then
+ // CONSTANT_COPY would be appropriate)?
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ return std::nullopt;
+}
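+
+// For example, a CONSTANT_COPY operand is relabeled MODEL_INPUT without being
+// added to inputIndexes, and a MODEL_OUTPUT operand is relabeled
+// TEMPORARY_VARIABLE while still listed in outputIndexes -- either way the
+// lifetime no longer agrees with the model's input/output index lists.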
+
+static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::optional<OperandLifeTime> changedLifeTime =
+ getInputOutputLifeTime(model, modelSize, model.operands[operand]);
+ if (changedLifeTime) {
+ const std::string message = "mutateOperandInputOutputTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(*changedLifeTime) + " instead of lifetime " +
+ toString(model.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, changedLifeTime](Model* model, ExecutionPreference*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->operands[operand];
+ operandObj.lifetime = *changedLifeTime;
+ operandObj.location = kZeroDataLocation;
+ if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
+ becomeConstantCopy(model, &operandObj);
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
+
+static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
+ if (numberOfConsumers == 0) {
+ return {1};
+ } else {
+ return {numberOfConsumers - 1, numberOfConsumers + 1};
+ }
+}
+
+static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<uint32_t> invalidNumberOfConsumersVec =
+ getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers);
+ for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
+ const std::string message =
+ "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
+ " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
+ validate(device, message, model,
+ [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*) {
+ model->operands[operand].numberOfConsumers = invalidNumberOfConsumers;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
+
+static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size();
+ ++badOutputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum];
+ const std::string message = "mutateOperandAddWriterTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ // We'll insert a copy of the operation, all of whose
+ // OTHER output operands are newly-created -- i.e.,
+ // there'll only be a duplicate write of ONE of that
+ // operation's output operands.
+ validate(device, message, model,
+ [operation, badOutputNum](Model* model, ExecutionPreference*) {
+ Operation newOperation = model->operations[operation];
+ for (uint32_t input : newOperation.inputs) {
+ ++model->operands[input].numberOfConsumers;
+ }
+ for (size_t outputNum = 0; outputNum < newOperation.outputs.size();
+ ++outputNum) {
+ if (outputNum == badOutputNum) continue;
+
+ Operand operandValue =
+ model->operands[newOperation.outputs[outputNum]];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ newOperation.outputs[outputNum] =
+ hidl_vec_push_back(&model->operands, operandValue);
+ }
+ // Where do we insert the extra writer (a new
+ // operation)? It has to be later than all the
+ // writers of its inputs. The easiest thing to do
+ // is to insert it at the end of the operation
+ // sequence.
+ hidl_vec_push_back(&model->operations, newOperation);
+ });
+ }
+ }
+}
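+
+// For example, for an operation whose only output is operand X, the appended
+// copy also writes X, so X gains a second writer; for an operation with
+// outputs {X, Y} and X selected as the bad output, the copy writes {X, Y'}
+// where Y' is a fresh TEMPORARY_VARIABLE, so only X is written twice.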
+
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
-// TODO: Operand::lifetime
// TODO: Operand::location
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
@@ -461,6 +942,37 @@
}
}
+///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
+
+static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size();
+ ++outputNum) {
+ const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum];
+ if (model.operands[outputOperandIndex].numberOfConsumers > 0) {
+ const std::string message = "mutateOperationRemoveWriteTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ validate(device, message, model,
+ [operation, outputNum](Model* model, ExecutionPreference*) {
+ uint32_t& outputOperandIndex =
+ model->operations[operation].outputs[outputNum];
+ Operand operandValue = model->operands[outputOperandIndex];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ outputOperandIndex =
+ hidl_vec_push_back(&model->operands, operandValue);
+ });
+ }
+ }
+ }
+}
+
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
@@ -711,14 +1223,20 @@
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateExecutionOrderTest(device, model);
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
mutateOperandZeroPointTest(device, model);
+ mutateOperandLifeTimeTest(device, model);
+ mutateOperandInputOutputTest(device, model);
+ mutateOperandNumberOfConsumersTest(device, model);
+ mutateOperandAddWriterTest(device, model);
mutateOperationOperandTypeTest(device, model);
mutateOperationTypeTest(device, model);
mutateOperationInputOperandIndexTest(device, model);
mutateOperationOutputOperandIndexTest(device, model);
+ mutateOperationRemoveWriteTest(device, model);
removeOperandTest(device, model);
removeOperationTest(device, model);
removeOperationInputTest(device, model);
diff --git a/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h b/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h
new file mode 100644
index 0000000..61a8d74
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H
+
+#include <android/hardware/neuralnetworks/1.2/types.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+// Returns the amount of space needed to store a value of the specified type.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(V1_2::OperandType type);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a non-extension, non-OEM tensor with unspecified
+// rank or at least one unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(const V1_2::Operand& operand);
+
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index 545a5be..457e36e 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -54,7 +54,7 @@
],
static_libs: [
"VtsHalNeuralNetworksV1_0_utils",
- "VtsHalNeuralNetworksV1_2Callbacks",
+ "VtsHalNeuralNetworksV1_2_utils",
"VtsHalNeuralNetworksV1_3_utils",
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
index 1c25369..6fcfc34 100644
--- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
@@ -20,11 +20,14 @@
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+using implementation::PreparedModelCallback;
using V1_0::DeviceStatus;
using V1_0::PerformanceInfo;
+using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::DeviceType;
using V1_2::Extension;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -65,4 +68,143 @@
});
EXPECT_TRUE(ret.isOk());
}
+
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ // +-----+
+ // | |
+ // v |
+ // 3 = ADD(0, 4, 2) |
+ // | |
+ // +----------+ |
+ // | |
+ // v |
+ // 4 = ADD(1, 3, 2) |
+ // | |
+ // +----------------+
+ // |
+ // |
+ // +-------+
+ // |
+ // v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ Subgraph subgraph = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ };
+ const Model model = {
+ .main = std::move(subgraph),
+ .referenced = {},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations_1_3() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_3(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.main.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel_1_3() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_3(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, Priority::MEDIUM, {},
+ hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/Utils.cpp b/neuralnetworks/1.3/vts/functional/Utils.cpp
index 23e2af8..c460e11 100644
--- a/neuralnetworks/1.3/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.3/vts/functional/Utils.cpp
@@ -17,11 +17,78 @@
#include "1.3/Utils.h"
#include <iostream>
+#include <numeric>
+#include "android-base/logging.h"
+#include "android/hardware/neuralnetworks/1.3/types.h"
-namespace android::hardware::neuralnetworks::V1_3 {
+namespace android::hardware::neuralnetworks {
+
+uint32_t sizeOfData(V1_3::OperandType type) {
+ switch (type) {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ return 4;
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ return 2;
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ return 1;
+ case V1_3::OperandType::SUBGRAPH:
+ return 0;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return 0;
+ }
+}
+
+static bool isTensor(V1_3::OperandType type) {
+ switch (type) {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ return false;
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ return true;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return false;
+ }
+}
+
+uint32_t sizeOfData(const V1_3::Operand& operand) {
+ const uint32_t dataSize = sizeOfData(operand.type);
+ if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
+ return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
+ std::multiplies<>{});
+}
+
+namespace V1_3 {
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
-} // namespace android::hardware::neuralnetworks::V1_3
+} // namespace V1_3
+} // namespace android::hardware::neuralnetworks
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index e590fda..849ef7b 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -16,15 +16,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
+#include <optional>
+#include <type_traits>
+#include <utility>
+
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using implementation::PreparedModelCallback;
+using V1_0::DataLocation;
using V1_1::ExecutionPreference;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@@ -112,6 +119,262 @@
return index;
}
+// If we introduce a CONSTANT_COPY for an operand of size operandSize,
+// how much will this increase the size of the model? This assumes
+// that we can (re)use all of model.operandValues for the operand
+// value.
+static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
+ const size_t operandValuesSize = model.operandValues.size();
+ return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
+}
+
+// Highly specialized utility routine for converting an operand to
+// CONSTANT_COPY lifetime.
+//
+// Expects that:
+// - operand has a known size
+// - operand->lifetime has already been set to CONSTANT_COPY
+// - operand->location has been zeroed out
+//
+// Does the following:
+// - initializes operand->location to point to the beginning of model->operandValues
+// - resizes model->operandValues (if necessary) to be large enough for the operand
+// value, padding it with zeroes on the end
+//
+// Potential problem:
+// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
+// operand with unspecified (but deterministic) data. This means that the model may be invalidated
+// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
+// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
+// value). For now, this should be fine because it just means we're not testing what we think we're
+// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
+// exercise the validation code over the span of the entire test set and operand space.
+//
+// Aborts if the specified operand type is an extension type or OEM type.
+static void becomeConstantCopy(Model* model, Operand* operand) {
+ // sizeOfData will abort if the specified type is an extension type or OEM type.
+ const size_t sizeOfOperand = sizeOfData(*operand);
+ EXPECT_NE(sizeOfOperand, size_t(0));
+ operand->location.poolIndex = 0;
+ operand->location.offset = 0;
+ operand->location.length = sizeOfOperand;
+ if (model->operandValues.size() < sizeOfOperand) {
+ model->operandValues.resize(sizeOfOperand);
+ }
+}
+
+// The sizeForBinder() functions estimate the size of the
+// representation of a value when sent to binder. It's probably a bit
+// of an under-estimate, because we don't know the size of the
+// metadata in the binder format (e.g., representation of the size of
+// a vector); but at least it adds up "big" things like vector
+// contents. However, it doesn't treat inter-field or end-of-struct
+// padding in a methodical way -- there's no attempt to be consistent
+// in whether or not padding in the native (C++) representation
+// contributes to the estimated size for the binder representation;
+// and there's no attempt to understand what padding (if any) is
+// needed in the binder representation.
+//
+// This assumes that non-metadata uses a fixed length encoding (e.g.,
+// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
+// using an encoding whose length is related to the magnitude of the
+// encoded value).
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+ static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+ "expected a trivially copyable type");
+ return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+ return std::accumulate(vec.begin(), vec.end(), 0,
+ [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
+
+template <>
+size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) {
+ size_t size = 0;
+
+ size += sizeForBinder(symmPerChannelQuantParams.scales);
+ size += sizeForBinder(symmPerChannelQuantParams.channelDim);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const V1_2::Operand::ExtraParams& extraParams) {
+ using Discriminator = V1_2::Operand::ExtraParams::hidl_discriminator;
+ switch (extraParams.getDiscriminator()) {
+ case Discriminator::none:
+ return 0;
+ case Discriminator::channelQuant:
+ return sizeForBinder(extraParams.channelQuant());
+ case Discriminator::extension:
+ return sizeForBinder(extraParams.extension());
+ }
+ LOG(FATAL) << "Unrecognized extraParams enum: "
+ << static_cast<int>(extraParams.getDiscriminator());
+ return 0;
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+ size_t size = 0;
+
+ size += sizeForBinder(operand.type);
+ size += sizeForBinder(operand.dimensions);
+ size += sizeForBinder(operand.numberOfConsumers);
+ size += sizeForBinder(operand.scale);
+ size += sizeForBinder(operand.zeroPoint);
+ size += sizeForBinder(operand.lifetime);
+ size += sizeForBinder(operand.location);
+ size += sizeForBinder(operand.extraParams);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+ size_t size = 0;
+
+ size += sizeForBinder(operation.type);
+ size += sizeForBinder(operation.inputs);
+ size += sizeForBinder(operation.outputs);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+ return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+ // This is just a guess.
+
+ size_t size = 0;
+
+ if (const native_handle_t* handle = memory.handle()) {
+ size += sizeof(*handle);
+ size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+ }
+ size += sizeForBinder(memory.name());
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Subgraph& subgraph) {
+ size_t size = 0;
+
+ size += sizeForBinder(subgraph.operands);
+ size += sizeForBinder(subgraph.operations);
+ size += sizeForBinder(subgraph.inputIndexes);
+ size += sizeForBinder(subgraph.outputIndexes);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const V1_2::Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+ size_t size = 0;
+
+ size += sizeForBinder(extensionNameToPrefix.name);
+ size += sizeForBinder(extensionNameToPrefix.prefix);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+ size_t size = 0;
+
+ size += sizeForBinder(model.main);
+ size += sizeForBinder(model.referenced);
+ size += sizeForBinder(model.operandValues);
+ size += sizeForBinder(model.pools);
+ size += sizeForBinder(model.relaxComputationFloat32toFloat16);
+ size += sizeForBinder(model.extensionNameToPrefix);
+
+ return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no
+// larger than half the transaction buffer size.
+//
+// If our representation grows large enough that it still fits within
+// the transaction buffer but combined with other transactions may
+// exceed the buffer size, then we may see intermittent HAL transport
+// errors.
+static bool exceedsBinderSizeLimit(size_t representationSize) {
+ // Instead of using this fixed buffer size, we might instead be able to use
+ // ProcessState::self()->getMmapSize(). However, this has a potential
+ // problem: The binder/mmap size of the current process does not necessarily
+ // indicate the binder/mmap size of the service (i.e., the other process).
+ // The only way it would be a good indication is if both the current process
+ // and the service use the default size.
+ static const size_t kHalfBufferSize = 1024 * 1024 / 2;
+
+ return representationSize > kHalfBufferSize;
+}
+
+///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
+
+static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ const Operation& operationObj = model.main.operations[operation];
+ for (uint32_t input : operationObj.inputs) {
+ if (model.main.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+ model.main.operands[input].lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ // This operation reads an operand written by some
+ // other operation. Move this operation to the
+ // beginning of the sequence, ensuring that it reads
+ // the operand before that operand is written, thereby
+ // violating execution order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a reader";
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ auto& operations = model->main.operations;
+ std::rotate(operations.begin(), operations.begin() + operation,
+ operations.begin() + operation + 1);
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ for (uint32_t output : operationObj.outputs) {
+ if (model.main.operands[output].numberOfConsumers > 0) {
+ // This operation writes an operand read by some other
+ // operation. Move this operation to the end of the
+ // sequence, ensuring that it writes the operand after
+ // that operand is read, thereby violating execution
+ // order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a writer";
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ auto& operations = model->main.operations;
+ std::rotate(operations.begin() + operation,
+ operations.begin() + operation + 1, operations.end());
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ }
+}
+
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const uint32_t invalidOperandTypes[] = {
@@ -261,9 +524,245 @@
}
}
+///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
+
+static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
+ // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime
+
+ // Ways to get an invalid lifetime:
+ // - change whether a lifetime means an operand should have a writer
+ std::vector<OperandLifeTime> ret;
+ switch (operand.lifetime) {
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ ret = {
+ OperandLifeTime::SUBGRAPH_INPUT,
+ OperandLifeTime::CONSTANT_COPY,
+ };
+ break;
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ case OperandLifeTime::SUBGRAPH_INPUT:
+ ret = {
+ OperandLifeTime::TEMPORARY_VARIABLE,
+ OperandLifeTime::SUBGRAPH_OUTPUT,
+ };
+ break;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
+ // is this operand written (then CONSTANT_COPY would be
+ // invalid) or not (then TEMPORARY_VARIABLE would be
+ // invalid)?
+ break;
+ case OperandLifeTime::SUBGRAPH:
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
+ }
+
+ return ret;
+}
+
+static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::vector<OperandLifeTime> invalidLifeTimes =
+ getInvalidLifeTimes(model, modelSize, model.main.operands[operand]);
+ for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
+ const std::string message = "mutateOperandLifetimeTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(invalidLifeTime) + " instead of lifetime " +
+ toString(model.main.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, invalidLifeTime](Model* model, ExecutionPreference*, Priority*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->main.operands[operand];
+ switch (operandObj.lifetime) {
+ case OperandLifeTime::SUBGRAPH_INPUT: {
+ hidl_vec_remove(&model->main.inputIndexes, uint32_t(operand));
+ break;
+ }
+ case OperandLifeTime::SUBGRAPH_OUTPUT: {
+ hidl_vec_remove(&model->main.outputIndexes, uint32_t(operand));
+ break;
+ }
+ default:
+ break;
+ }
+ operandObj.lifetime = invalidLifeTime;
+ operandObj.location = kZeroDataLocation;
+ switch (invalidLifeTime) {
+ case OperandLifeTime::CONSTANT_COPY: {
+ becomeConstantCopy(model, &operandObj);
+ break;
+ }
+ case OperandLifeTime::SUBGRAPH_INPUT:
+ hidl_vec_push_back(&model->main.inputIndexes, uint32_t(operand));
+ break;
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ hidl_vec_push_back(&model->main.outputIndexes, uint32_t(operand));
+ break;
+ default:
+ break;
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
+
+static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
+ // - change whether a lifetime means an operand is a model input, a model output, or neither
+ // - preserve whether or not a lifetime means an operand should have a writer
+ switch (operand.lifetime) {
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ return OperandLifeTime::SUBGRAPH_INPUT;
+ case OperandLifeTime::SUBGRAPH_INPUT: {
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ break;
+ }
+ return OperandLifeTime::CONSTANT_COPY;
+ }
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ return OperandLifeTime::TEMPORARY_VARIABLE;
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ return OperandLifeTime::SUBGRAPH_OUTPUT;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
+ // appropriate choice -- is this operand written (then
+ // TEMPORARY_VARIABLE would be appropriate) or not (then
+ // CONSTANT_COPY would be appropriate)?
+ break;
+ case OperandLifeTime::SUBGRAPH:
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ return std::nullopt;
+}
+
+static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::optional<OperandLifeTime> changedLifeTime =
+ getInputOutputLifeTime(model, modelSize, model.main.operands[operand]);
+ if (changedLifeTime) {
+ const std::string message = "mutateOperandInputOutputTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(*changedLifeTime) + " instead of lifetime " +
+ toString(model.main.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, changedLifeTime](Model* model, ExecutionPreference*, Priority*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->main.operands[operand];
+ operandObj.lifetime = *changedLifeTime;
+ operandObj.location = kZeroDataLocation;
+ if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
+ becomeConstantCopy(model, &operandObj);
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
+
+static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
+ if (numberOfConsumers == 0) {
+ return {1};
+ } else {
+ return {numberOfConsumers - 1, numberOfConsumers + 1};
+ }
+}
+
+static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::vector<uint32_t> invalidNumberOfConsumersVec =
+ getInvalidNumberOfConsumers(model.main.operands[operand].numberOfConsumers);
+ for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
+ const std::string message =
+ "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
+ " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
+ validate(device, message, model,
+ [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*,
+ Priority*) {
+ model->main.operands[operand].numberOfConsumers = invalidNumberOfConsumers;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
+
+static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ for (size_t badOutputNum = 0;
+ badOutputNum < model.main.operations[operation].outputs.size(); ++badOutputNum) {
+ const uint32_t outputOperandIndex =
+ model.main.operations[operation].outputs[badOutputNum];
+ const std::string message = "mutateOperandAddWriterTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ // We'll insert a copy of the operation, all of whose
+ // OTHER output operands are newly-created -- i.e.,
+ // there'll only be a duplicate write of ONE of that
+ // operation's output operands.
+ validate(device, message, model,
+ [operation, badOutputNum](Model* model, ExecutionPreference*, Priority*) {
+ Operation newOperation = model->main.operations[operation];
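+ // The duplicated operation consumes the same inputs, so each
+ // input's numberOfConsumers must be bumped to stay consistent.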
+ for (uint32_t input : newOperation.inputs) {
+ ++model->main.operands[input].numberOfConsumers;
+ }
+ for (size_t outputNum = 0; outputNum < newOperation.outputs.size();
+ ++outputNum) {
+ if (outputNum == badOutputNum) continue;
+
+ Operand operandValue =
+ model->main.operands[newOperation.outputs[outputNum]];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
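+ // hidl_vec_push_back() appends the operand and returns its index,
+ // so this output of the duplicated operation goes to a fresh operand.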
+ newOperation.outputs[outputNum] =
+ hidl_vec_push_back(&model->main.operands, operandValue);
+ }
+ // Where do we insert the extra writer (a new
+ // operation)? It has to be later than all the
+ // writers of its inputs. The easiest thing to do
+ // is to insert it at the end of the operation
+ // sequence.
+ hidl_vec_push_back(&model->main.operations, newOperation);
+ });
+ }
+ }
+}
+
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
-// TODO: Operand::lifetime
// TODO: Operand::location
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
@@ -511,6 +1010,37 @@
}
}
+///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
+
+static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ for (size_t outputNum = 0; outputNum < model.main.operations[operation].outputs.size();
+ ++outputNum) {
+ const uint32_t outputOperandIndex = model.main.operations[operation].outputs[outputNum];
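+ // Only outputs that some operation consumes are interesting here: once the
+ // write is redirected below, the original operand has readers but no writer.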
+ if (model.main.operands[outputOperandIndex].numberOfConsumers > 0) {
+ const std::string message = "mutateOperationRemoveWriteTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ validate(device, message, model,
+ [operation, outputNum](Model* model, ExecutionPreference*, Priority*) {
+ uint32_t& outputOperandIndex =
+ model->main.operations[operation].outputs[outputNum];
+ Operand operandValue = model->main.operands[outputOperandIndex];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
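+ // Redirect this operation's write to the fresh copy; the original
+ // operand keeps its consumers but no longer has a writer.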
+ outputOperandIndex =
+ hidl_vec_push_back(&model->main.operands, operandValue);
+ });
+ }
+ }
+ }
+}
+
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
@@ -804,14 +1334,20 @@
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateExecutionOrderTest(device, model);
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
mutateOperandZeroPointTest(device, model);
+ mutateOperandLifeTimeTest(device, model);
+ mutateOperandInputOutputTest(device, model);
+ mutateOperandNumberOfConsumersTest(device, model);
+ mutateOperandAddWriterTest(device, model);
mutateOperationOperandTypeTest(device, model);
mutateOperationTypeTest(device, model);
mutateOperationInputOperandIndexTest(device, model);
mutateOperationOutputOperandIndexTest(device, model);
+ mutateOperationRemoveWriteTest(device, model);
removeOperandTest(device, model);
removeOperationTest(device, model);
removeOperationInputTest(device, model);
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
index 3661b66..e07e73b 100644
--- a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
@@ -24,6 +24,18 @@
inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM;
+// Returns the amount of space needed to store a value of the specified type.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(V1_3::OperandType type);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a non-extension, non-OEM tensor with unspecified
+// rank or at least one unspecified dimension, returns zero.
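+// For example, a fully specified TENSOR_FLOAT32 operand with dimensions
+// {2, 3} needs 2 * 3 * 4 = 24 bytes.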
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(const V1_3::Operand& operand);
+
} // namespace android::hardware::neuralnetworks
namespace android::hardware::neuralnetworks::V1_3 {