More tests for graph validation.
- detect cycle (CycleTest)
- detect bad execution order (mutateExecutionOrderTest)
- detect lifetime inconsistent with whether operand is written (mutateOperandLifeTimeTest)
- detect lifetime inconsistent with Model inputIndexes/outputIndexes (mutateOperandInputOutputTest)
- detect incorrect number of consumers (mutateOperandNumberOfConsumersTest)
- detect operand written multiple times (mutateOperandAddWriterTest)
- detect operand never written (mutateOperationRemoveWriteTest)
Bug: 66478689
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
Merged-In: Id4ba19660bbd31a16f8a675f7b6437f4d779e8da
(cherry picked from commit af51663e9980265853750a51fa2f4bb1cd4e48c1)
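
The mutate*Test functions added to ValidateModel.cpp below all follow the same pattern: copy a known-good model, apply one targeted mutation, and expect the service to reject the result. A minimal standalone sketch of that pattern, using a hypothetical SimpleModel/isValid() stand-in rather than the real HIDL types:

// Sketch of the mutate-and-validate pattern used by the new tests.
// SimpleModel and isValid() are hypothetical stand-ins, not the real API.
#include <cassert>
#include <cstdint>
#include <functional>
#include <vector>

struct SimpleModel {
    std::vector<uint32_t> inputIndexes;
    std::vector<uint32_t> outputIndexes;
};

static bool isValid(const SimpleModel& model) {
    // A real driver checks far more; here, a model must have at least one output.
    return !model.outputIndexes.empty();
}

// Apply one mutation to a copy of a known-good model and expect rejection.
static void validateMutation(const SimpleModel& goodModel,
                             const std::function<void(SimpleModel*)>& mutate) {
    SimpleModel mutated = goodModel;  // never modify the original
    mutate(&mutated);
    assert(isValid(goodModel));   // the baseline stays valid
    assert(!isValid(mutated));    // the mutated model must be rejected
}

int main() {
    const SimpleModel model = {.inputIndexes = {0}, .outputIndexes = {1}};
    validateMutation(model, [](SimpleModel* m) { m->outputIndexes.clear(); });
    return 0;
}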
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index 545a5be..457e36e 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -54,7 +54,7 @@
],
static_libs: [
"VtsHalNeuralNetworksV1_0_utils",
- "VtsHalNeuralNetworksV1_2Callbacks",
+ "VtsHalNeuralNetworksV1_2_utils",
"VtsHalNeuralNetworksV1_3_utils",
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
index 1c25369..6fcfc34 100644
--- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
@@ -20,11 +20,14 @@
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+using implementation::PreparedModelCallback;
using V1_0::DeviceStatus;
using V1_0::PerformanceInfo;
+using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::DeviceType;
using V1_2::Extension;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
@@ -65,4 +68,143 @@
});
EXPECT_TRUE(ret.isOk());
}
+
+// detect cycle
+TEST_P(NeuralnetworksHidlTest, CycleTest) {
+ // opnd0 = TENSOR_FLOAT32 // model input
+ // opnd1 = TENSOR_FLOAT32 // model input
+ // opnd2 = INT32 // model input
+ // opnd3 = ADD(opnd0, opnd4, opnd2)
+ // opnd4 = ADD(opnd1, opnd3, opnd2)
+ // opnd5 = ADD(opnd4, opnd0, opnd2) // model output
+ //
+ // +-----+
+ // | |
+ // v |
+ // 3 = ADD(0, 4, 2) |
+ // | |
+ // +----------+ |
+ // | |
+ // v |
+ // 4 = ADD(1, 3, 2) |
+ // | |
+ // +----------------+
+ // |
+ // |
+ // +-------+
+ // |
+ // v
+ // 5 = ADD(4, 0, 2)
+
+ const std::vector<Operand> operands = {
+ {
+ // operands[0]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[1]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[2]
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 3,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[3]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[4]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 2,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ // operands[5]
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::SUBGRAPH_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ };
+
+ const std::vector<Operation> operations = {
+ {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+ {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+ {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+ };
+
+ Subgraph subgraph = {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = {0, 1, 2},
+ .outputIndexes = {5},
+ };
+ const Model model = {
+ .main = std::move(subgraph),
+ .referenced = {},
+ .operandValues = {},
+ .pools = {},
+ };
+
+ // ensure that getSupportedOperations_1_3() checks model validity
+ ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+ Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_3(
+ model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+ const hidl_vec<bool>& supported) {
+ supportedOpsErrorStatus = status;
+ if (status == ErrorStatus::NONE) {
+ ASSERT_EQ(supported.size(), model.main.operations.size());
+ }
+ });
+ ASSERT_TRUE(supportedOpsReturn.isOk());
+ ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+ // ensure that prepareModel_1_3() checks model validity
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_3(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, Priority::MEDIUM, {},
+ hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchReturn.isOk());
+ // Note that preparation can fail for reasons other than an
+ // invalid model (invalid model should result in
+ // INVALID_ARGUMENT) -- for example, perhaps not all
+ // operations are supported, or perhaps the device hit some
+ // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
+ EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
+}
+
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
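
CycleTest above expects the service to reject the model because operations 0 and 1 each consume the other's output. One way a validator could detect this (a sketch only, not the actual driver code) is Kahn's topological sort over producer-to-consumer edges; a cycle exists exactly when not every operation can be scheduled:

// Sketch: detecting the CycleTest cycle with Kahn's algorithm.
// Edges run from the operation that writes an operand to every operation
// that reads it; this is illustrative only, not the driver's validation code.
#include <cstdio>
#include <queue>
#include <vector>

int main() {
    // producers[i] lists the operations whose outputs operation i reads:
    //   op0 = ADD(opnd0, opnd4, opnd2) reads opnd4, written by op1
    //   op1 = ADD(opnd1, opnd3, opnd2) reads opnd3, written by op0
    //   op2 = ADD(opnd4, opnd0, opnd2) reads opnd4, written by op1
    const std::vector<std::vector<int>> producers = {{1}, {0}, {1}};
    const int n = static_cast<int>(producers.size());

    std::vector<std::vector<int>> consumers(n);
    std::vector<int> indegree(n, 0);
    for (int op = 0; op < n; ++op) {
        for (int producer : producers[op]) {
            consumers[producer].push_back(op);
            ++indegree[op];
        }
    }

    // Repeatedly schedule operations whose producers have all run.
    std::queue<int> ready;
    for (int op = 0; op < n; ++op) {
        if (indegree[op] == 0) ready.push(op);
    }
    int scheduled = 0;
    while (!ready.empty()) {
        const int op = ready.front();
        ready.pop();
        ++scheduled;
        for (int next : consumers[op]) {
            if (--indegree[next] == 0) ready.push(next);
        }
    }
    // Here no operation starts with indegree 0, so nothing can be scheduled.
    std::printf("%s\n", scheduled == n ? "acyclic" : "cycle detected");
    return 0;
}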
diff --git a/neuralnetworks/1.3/vts/functional/Utils.cpp b/neuralnetworks/1.3/vts/functional/Utils.cpp
index 23e2af8..c460e11 100644
--- a/neuralnetworks/1.3/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.3/vts/functional/Utils.cpp
@@ -17,11 +17,78 @@
#include "1.3/Utils.h"
#include <iostream>
+#include <functional>
+#include <numeric>
+#include "android-base/logging.h"
+#include "android/hardware/neuralnetworks/1.3/types.h"
-namespace android::hardware::neuralnetworks::V1_3 {
+namespace android::hardware::neuralnetworks {
+
+uint32_t sizeOfData(V1_3::OperandType type) {
+ switch (type) {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ return 4;
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ return 2;
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ return 1;
+ case V1_3::OperandType::SUBGRAPH:
+ return 0;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return 0;
+ }
+}
+
+static bool isTensor(V1_3::OperandType type) {
+ switch (type) {
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::SUBGRAPH:
+ return false;
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ return true;
+ default:
+ CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
+ return false;
+ }
+}
+
+uint32_t sizeOfData(const V1_3::Operand& operand) {
+ const uint32_t dataSize = sizeOfData(operand.type);
+ if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
+ return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
+ std::multiplies<>{});
+}
+
+namespace V1_3 {
::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
return os << toString(errorStatus);
}
-} // namespace android::hardware::neuralnetworks::V1_3
+} // namespace V1_3
+} // namespace android::hardware::neuralnetworks
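
sizeOfData(const V1_3::Operand&) above folds the element size into the product of the dimensions with std::accumulate, returning 0 for a tensor of unknown shape. A standalone illustration of the same computation on plain types:

// Sketch of the sizeOfData(Operand) computation: element size times the
// product of all dimensions, with 0 reported for a tensor of unknown shape.
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

static uint32_t tensorBytes(uint32_t elementSize, const std::vector<uint32_t>& dimensions) {
    if (dimensions.empty()) return 0;  // unknown rank -> size unknown
    return std::accumulate(dimensions.begin(), dimensions.end(), elementSize,
                           std::multiplies<>{});
}

int main() {
    assert(tensorBytes(4, {2, 3, 4}) == 96);  // e.g. TENSOR_FLOAT32 of shape 2x3x4
    assert(tensorBytes(4, {}) == 0);          // unspecified rank
    assert(tensorBytes(4, {2, 0, 4}) == 0);   // one unspecified dimension
    return 0;
}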
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index e590fda..849ef7b 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -16,15 +16,22 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include "1.0/Utils.h"
#include "1.3/Callbacks.h"
#include "1.3/Utils.h"
#include "GeneratedTestHarness.h"
#include "VtsHalNeuralnetworks.h"
+#include <optional>
+#include <type_traits>
+#include <utility>
+
namespace android::hardware::neuralnetworks::V1_3::vts::functional {
using implementation::PreparedModelCallback;
+using V1_0::DataLocation;
using V1_1::ExecutionPreference;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@@ -112,6 +119,262 @@
return index;
}
+// If we introduce a CONSTANT_COPY for an operand of size operandSize,
+// how much will this increase the size of the model? This assumes
+// that we can (re)use all of model.operandValues for the operand
+// value.
+static size_t constantCopyExtraSize(const Model& model, size_t operandSize) {
+ const size_t operandValuesSize = model.operandValues.size();
+ return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
+}
+
+// Highly specialized utility routine for converting an operand to
+// CONSTANT_COPY lifetime.
+//
+// Expects that:
+// - operand has a known size
+// - operand->lifetime has already been set to CONSTANT_COPY
+// - operand->location has been zeroed out
+//
+// Does the following:
+// - initializes operand->location to point to the beginning of model->operandValues
+// - resizes model->operandValues (if necessary) to be large enough for the operand
+// value, padding it with zeroes on the end
+//
+// Potential problem:
+// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the
+// operand with unspecified (but deterministic) data. This means that the model may be invalidated
+// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the
+// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid
+// value). For now, this should be fine because it just means we're not testing what we think we're
+// testing in certain cases; but we can handwave this and assume we're probabilistically likely to
+// exercise the validation code over the span of the entire test set and operand space.
+//
+// Aborts if the specified operand type is an extension type or OEM type.
+static void becomeConstantCopy(Model* model, Operand* operand) {
+ // sizeOfData will abort if the specified type is an extension type or OEM type.
+ const size_t sizeOfOperand = sizeOfData(*operand);
+ EXPECT_NE(sizeOfOperand, size_t(0));
+ operand->location.poolIndex = 0;
+ operand->location.offset = 0;
+ operand->location.length = sizeOfOperand;
+ if (model->operandValues.size() < sizeOfOperand) {
+ model->operandValues.resize(sizeOfOperand);
+ }
+}
+
+// The sizeForBinder() functions estimate the size of the
+// representation of a value when sent to binder. It's probably a bit
+// of an under-estimate, because we don't know the size of the
+// metadata in the binder format (e.g., representation of the size of
+// a vector); but at least it adds up "big" things like vector
+// contents. However, it doesn't treat inter-field or end-of-struct
+// padding in a methodical way -- there's no attempt to be consistent
+// in whether or not padding in the native (C++) representation
+// contributes to the estimated size for the binder representation;
+// and there's no attempt to understand what padding (if any) is
+// needed in the binder representation.
+//
+// This assumes that non-metadata uses a fixed length encoding (e.g.,
+// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than
+// using an encoding whose length is related to the magnitude of the
+// encoded value).
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+ static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+ "expected a trivially copyable type");
+ return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+ return std::accumulate(vec.begin(), vec.end(), 0,
+ [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
+
+template <>
+size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) {
+ size_t size = 0;
+
+ size += sizeForBinder(symmPerChannelQuantParams.scales);
+ size += sizeForBinder(symmPerChannelQuantParams.channelDim);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const V1_2::Operand::ExtraParams& extraParams) {
+ using Discriminator = V1_2::Operand::ExtraParams::hidl_discriminator;
+ switch (extraParams.getDiscriminator()) {
+ case Discriminator::none:
+ return 0;
+ case Discriminator::channelQuant:
+ return sizeForBinder(extraParams.channelQuant());
+ case Discriminator::extension:
+ return sizeForBinder(extraParams.extension());
+ }
+ LOG(FATAL) << "Unrecognized extraParams enum: "
+ << static_cast<int>(extraParams.getDiscriminator());
+ return 0;
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+ size_t size = 0;
+
+ size += sizeForBinder(operand.type);
+ size += sizeForBinder(operand.dimensions);
+ size += sizeForBinder(operand.numberOfConsumers);
+ size += sizeForBinder(operand.scale);
+ size += sizeForBinder(operand.zeroPoint);
+ size += sizeForBinder(operand.lifetime);
+ size += sizeForBinder(operand.location);
+ size += sizeForBinder(operand.extraParams);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+ size_t size = 0;
+
+ size += sizeForBinder(operation.type);
+ size += sizeForBinder(operation.inputs);
+ size += sizeForBinder(operation.outputs);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+ return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+ // This is just a guess.
+
+ size_t size = 0;
+
+ if (const native_handle_t* handle = memory.handle()) {
+ size += sizeof(*handle);
+ size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+ }
+ size += sizeForBinder(memory.name());
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Subgraph& subgraph) {
+ size_t size = 0;
+
+ size += sizeForBinder(subgraph.operands);
+ size += sizeForBinder(subgraph.operations);
+ size += sizeForBinder(subgraph.inputIndexes);
+ size += sizeForBinder(subgraph.outputIndexes);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const V1_2::Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+ size_t size = 0;
+
+ size += sizeForBinder(extensionNameToPrefix.name);
+ size += sizeForBinder(extensionNameToPrefix.prefix);
+
+ return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+ size_t size = 0;
+
+ size += sizeForBinder(model.main);
+ size += sizeForBinder(model.referenced);
+ size += sizeForBinder(model.operandValues);
+ size += sizeForBinder(model.pools);
+ size += sizeForBinder(model.relaxComputationFloat32toFloat16);
+ size += sizeForBinder(model.extensionNameToPrefix);
+
+ return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no
+// larger than half the transaction buffer size.
+//
+// If our representation grows large enough that it still fits within
+// the transaction buffer but combined with other transactions may
+// exceed the buffer size, then we may see intermittent HAL transport
+// errors.
+static bool exceedsBinderSizeLimit(size_t representationSize) {
+ // Instead of using this fixed buffer size, we might instead be able to use
+ // ProcessState::self()->getMmapSize(). However, this has a potential
+ // problem: The binder/mmap size of the current process does not necessarily
+ // indicate the binder/mmap size of the service (i.e., the other process).
+ // The only way it would be a good indication is if both the current process
+ // and the service use the default size.
+ static const size_t kHalfBufferSize = 1024 * 1024 / 2;
+
+ return representationSize > kHalfBufferSize;
+}
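
Taken together, constantCopyExtraSize() and exceedsBinderSizeLimit() form a simple arithmetic guard: an operand is only converted to CONSTANT_COPY when the enlarged model is still estimated to fit in half of the 1 MB transaction buffer. A standalone sketch of that arithmetic with made-up sizes (the real functions operate on the HIDL Model):

// Worked example of the CONSTANT_COPY size guard, using made-up numbers.
#include <cassert>
#include <cstddef>

static size_t constantCopyExtraSize(size_t operandValuesSize, size_t operandSize) {
    // Growth of model.operandValues if it must hold operandSize bytes.
    return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0;
}

static bool exceedsBinderSizeLimit(size_t representationSize) {
    static const size_t kHalfBufferSize = 1024 * 1024 / 2;  // half of the 1 MB buffer
    return representationSize > kHalfBufferSize;
}

int main() {
    const size_t modelSize = 400 * 1024;  // hypothetical serialized model size
    const size_t operandValuesSize = 16;  // existing model.operandValues
    // A 1 KiB constant keeps the estimate under the 512 KiB threshold...
    assert(!exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(operandValuesSize, 1024)));
    // ...but a 200 KiB constant would push it past the threshold.
    assert(exceedsBinderSizeLimit(
            modelSize + constantCopyExtraSize(operandValuesSize, 200 * 1024)));
    return 0;
}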
+
+///////////////////////// VALIDATE EXECUTION ORDER ////////////////////////////
+
+static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ const Operation& operationObj = model.main.operations[operation];
+ for (uint32_t input : operationObj.inputs) {
+ if (model.main.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
+ model.main.operands[input].lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ // This operation reads an operand written by some
+ // other operation. Move this operation to the
+ // beginning of the sequence, ensuring that it reads
+ // the operand before that operand is written, thereby
+ // violating execution order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a reader";
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ auto& operations = model->main.operations;
+ std::rotate(operations.begin(), operations.begin() + operation,
+ operations.begin() + operation + 1);
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ for (uint32_t output : operationObj.outputs) {
+ if (model.main.operands[output].numberOfConsumers > 0) {
+ // This operation writes an operand read by some other
+ // operation. Move this operation to the end of the
+ // sequence, ensuring that it writes the operand after
+ // that operand is read, thereby violating execution
+ // order rules.
+ const std::string message = "mutateExecutionOrderTest: operation " +
+ std::to_string(operation) + " is a writer";
+ validate(device, message, model,
+ [operation](Model* model, ExecutionPreference*, Priority*) {
+ auto& operations = model->main.operations;
+ std::rotate(operations.begin() + operation,
+ operations.begin() + operation + 1, operations.end());
+ });
+ break; // only need to do this once per operation
+ }
+ }
+ }
+}
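
mutateExecutionOrderTest depends on the std::rotate calls above moving exactly one operation to the front or the back while preserving the relative order of the rest. A small demonstration of those two calls on a plain vector of operation indices (illustrative only):

// The two std::rotate calls from mutateExecutionOrderTest, shown on a plain
// vector of operation indices instead of hidl_vec<Operation>.
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    const std::vector<int> ops = {0, 1, 2, 3};
    const size_t operation = 2;

    // Move operation 2 to the beginning (make the reader run first).
    std::vector<int> toFront = ops;
    std::rotate(toFront.begin(), toFront.begin() + operation, toFront.begin() + operation + 1);
    assert((toFront == std::vector<int>{2, 0, 1, 3}));

    // Move operation 2 to the end (make the writer run last).
    std::vector<int> toBack = ops;
    std::rotate(toBack.begin() + operation, toBack.begin() + operation + 1, toBack.end());
    assert((toBack == std::vector<int>{0, 1, 3, 2}));
    return 0;
}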
+
///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
static const uint32_t invalidOperandTypes[] = {
@@ -261,9 +524,245 @@
}
}
+///////////////////////// VALIDATE OPERAND LIFETIME /////////////////////////////////////////////
+
+static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime
+ // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime
+
+ // Ways to get an invalid lifetime:
+ // - change whether a lifetime means an operand should have a writer
+ std::vector<OperandLifeTime> ret;
+ switch (operand.lifetime) {
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ ret = {
+ OperandLifeTime::SUBGRAPH_INPUT,
+ OperandLifeTime::CONSTANT_COPY,
+ };
+ break;
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ case OperandLifeTime::SUBGRAPH_INPUT:
+ ret = {
+ OperandLifeTime::TEMPORARY_VARIABLE,
+ OperandLifeTime::SUBGRAPH_OUTPUT,
+ };
+ break;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid --
+ // is this operand written (then CONSTANT_COPY would be
+ // invalid) or not (then TEMPORARY_VARIABLE would be
+ // invalid)?
+ break;
+ case OperandLifeTime::SUBGRAPH:
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end());
+ }
+
+ return ret;
+}
+
+static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::vector<OperandLifeTime> invalidLifeTimes =
+ getInvalidLifeTimes(model, modelSize, model.main.operands[operand]);
+ for (OperandLifeTime invalidLifeTime : invalidLifeTimes) {
+ const std::string message = "mutateOperandLifetimeTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(invalidLifeTime) + " instead of lifetime " +
+ toString(model.main.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, invalidLifeTime](Model* model, ExecutionPreference*, Priority*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->main.operands[operand];
+ switch (operandObj.lifetime) {
+ case OperandLifeTime::SUBGRAPH_INPUT: {
+ hidl_vec_remove(&model->main.inputIndexes, uint32_t(operand));
+ break;
+ }
+ case OperandLifeTime::SUBGRAPH_OUTPUT: {
+ hidl_vec_remove(&model->main.outputIndexes, uint32_t(operand));
+ break;
+ }
+ default:
+ break;
+ }
+ operandObj.lifetime = invalidLifeTime;
+ operandObj.location = kZeroDataLocation;
+ switch (invalidLifeTime) {
+ case OperandLifeTime::CONSTANT_COPY: {
+ becomeConstantCopy(model, &operandObj);
+ break;
+ }
+ case OperandLifeTime::SUBGRAPH_INPUT:
+ hidl_vec_push_back(&model->main.inputIndexes, uint32_t(operand));
+ break;
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ hidl_vec_push_back(&model->main.outputIndexes, uint32_t(operand));
+ break;
+ default:
+ break;
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT //////////////////////////////////////
+
+static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize,
+ const Operand& operand) {
+ // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes):
+ // - change whether a lifetime means an operand is a model input, a model output, or neither
+ // - preserve whether or not a lifetime means an operand should have a writer
+ switch (operand.lifetime) {
+ case OperandLifeTime::CONSTANT_COPY:
+ case OperandLifeTime::CONSTANT_REFERENCE:
+ return OperandLifeTime::SUBGRAPH_INPUT;
+ case OperandLifeTime::SUBGRAPH_INPUT: {
+ const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown
+ if (!operandSize ||
+ exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) {
+ // Unknown size or too-large size
+ break;
+ }
+ return OperandLifeTime::CONSTANT_COPY;
+ }
+ case OperandLifeTime::SUBGRAPH_OUTPUT:
+ return OperandLifeTime::TEMPORARY_VARIABLE;
+ case OperandLifeTime::TEMPORARY_VARIABLE:
+ return OperandLifeTime::SUBGRAPH_OUTPUT;
+ case OperandLifeTime::NO_VALUE:
+ // Not enough information to know whether
+ // TEMPORARY_VARIABLE or CONSTANT_COPY would be an
+ // appropriate choice -- is this operand written (then
+ // TEMPORARY_VARIABLE would be appropriate) or not (then
+ // CONSTANT_COPY would be appropriate)?
+ break;
+ case OperandLifeTime::SUBGRAPH:
+ break;
+ default:
+ ADD_FAILURE();
+ break;
+ }
+
+ return std::nullopt;
+}
+
+static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) {
+ const size_t modelSize = sizeForBinder(model);
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::optional<OperandLifeTime> changedLifeTime =
+ getInputOutputLifeTime(model, modelSize, model.main.operands[operand]);
+ if (changedLifeTime) {
+ const std::string message = "mutateOperandInputOutputTest: operand " +
+ std::to_string(operand) + " has lifetime " +
+ toString(*changedLifeTime) + " instead of lifetime " +
+ toString(model.main.operands[operand].lifetime);
+ validate(device, message, model,
+ [operand, changedLifeTime](Model* model, ExecutionPreference*, Priority*) {
+ static const DataLocation kZeroDataLocation = {};
+ Operand& operandObj = model->main.operands[operand];
+ operandObj.lifetime = *changedLifeTime;
+ operandObj.location = kZeroDataLocation;
+ if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) {
+ becomeConstantCopy(model, &operandObj);
+ }
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS //////////////////////////////////
+
+static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) {
+ if (numberOfConsumers == 0) {
+ return {1};
+ } else {
+ return {numberOfConsumers - 1, numberOfConsumers + 1};
+ }
+}
+
+static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.main.operands.size(); ++operand) {
+ const std::vector<uint32_t> invalidNumberOfConsumersVec =
+ getInvalidNumberOfConsumers(model.main.operands[operand].numberOfConsumers);
+ for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) {
+ const std::string message =
+ "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) +
+ " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers);
+ validate(device, message, model,
+ [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*,
+ Priority*) {
+ model->main.operands[operand].numberOfConsumers = invalidNumberOfConsumers;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS ////////////////////////////////////
+
+static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ for (size_t badOutputNum = 0;
+ badOutputNum < model.main.operations[operation].outputs.size(); ++badOutputNum) {
+ const uint32_t outputOperandIndex =
+ model.main.operations[operation].outputs[badOutputNum];
+ const std::string message = "mutateOperandAddWriterTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ // We'll insert a copy of the operation, all of whose
+ // OTHER output operands are newly-created -- i.e.,
+ // there'll only be a duplicate write of ONE of that
+ // operation's output operands.
+ validate(device, message, model,
+ [operation, badOutputNum](Model* model, ExecutionPreference*, Priority*) {
+ Operation newOperation = model->main.operations[operation];
+ for (uint32_t input : newOperation.inputs) {
+ ++model->main.operands[input].numberOfConsumers;
+ }
+ for (size_t outputNum = 0; outputNum < newOperation.outputs.size();
+ ++outputNum) {
+ if (outputNum == badOutputNum) continue;
+
+ Operand operandValue =
+ model->main.operands[newOperation.outputs[outputNum]];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ newOperation.outputs[outputNum] =
+ hidl_vec_push_back(&model->main.operands, operandValue);
+ }
+ // Where do we insert the extra writer (a new
+ // operation)? It has to be later than all the
+ // writers of its inputs. The easiest thing to do
+ // is to insert it at the end of the operation
+ // sequence.
+ hidl_vec_push_back(&model->main.operations, newOperation);
+ });
+ }
+ }
+}
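
Both this test and mutateOperationRemoveWriteTest below rely on hidl_vec_push_back (from 1.0/Utils.h) appending an element and returning the index at which it was stored, so the freshly created operand can be wired into the copied operation in one expression. A sketch of that append-and-return-index idiom on std::vector (the helper's real signature lives in 1.0/Utils.h; this shows only the pattern):

// Sketch of the append-and-return-index idiom behind hidl_vec_push_back,
// written for std::vector; the real helper works on hidl_vec instead.
#include <cassert>
#include <cstdint>
#include <vector>

template <typename T>
static uint32_t push_back_returning_index(std::vector<T>* vec, const T& value) {
    vec->push_back(value);
    return static_cast<uint32_t>(vec->size() - 1);  // index of the new element
}

int main() {
    std::vector<int> operands = {10, 20, 30};
    // Append a fresh operand and immediately wire its index into an output list.
    std::vector<uint32_t> outputs;
    outputs.push_back(push_back_returning_index(&operands, 40));
    assert(outputs.back() == 3);
    assert(operands[outputs.back()] == 40);
    return 0;
}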
+
///////////////////////// VALIDATE EXTRA ??? /////////////////////////
-// TODO: Operand::lifetime
// TODO: Operand::location
///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
@@ -511,6 +1010,37 @@
}
}
+///////////////////////// VALIDATE MODEL OPERANDS WRITTEN ///////////////////////////////////////
+
+static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.main.operations.size(); ++operation) {
+ for (size_t outputNum = 0; outputNum < model.main.operations[operation].outputs.size();
+ ++outputNum) {
+ const uint32_t outputOperandIndex = model.main.operations[operation].outputs[outputNum];
+ if (model.main.operands[outputOperandIndex].numberOfConsumers > 0) {
+ const std::string message = "mutateOperationRemoveWriteTest: operation " +
+ std::to_string(operation) + " writes to " +
+ std::to_string(outputOperandIndex);
+ validate(device, message, model,
+ [operation, outputNum](Model* model, ExecutionPreference*, Priority*) {
+ uint32_t& outputOperandIndex =
+ model->main.operations[operation].outputs[outputNum];
+ Operand operandValue = model->main.operands[outputOperandIndex];
+ operandValue.numberOfConsumers = 0;
+ if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) {
+ operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+ } else {
+ ASSERT_EQ(operandValue.lifetime,
+ OperandLifeTime::TEMPORARY_VARIABLE);
+ }
+ outputOperandIndex =
+ hidl_vec_push_back(&model->main.operands, operandValue);
+ });
+ }
+ }
+ }
+}
+
///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
@@ -804,14 +1334,20 @@
////////////////////////// ENTRY POINT //////////////////////////////
void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateExecutionOrderTest(device, model);
mutateOperandTypeTest(device, model);
mutateOperandRankTest(device, model);
mutateOperandScaleTest(device, model);
mutateOperandZeroPointTest(device, model);
+ mutateOperandLifeTimeTest(device, model);
+ mutateOperandInputOutputTest(device, model);
+ mutateOperandNumberOfConsumersTest(device, model);
+ mutateOperandAddWriterTest(device, model);
mutateOperationOperandTypeTest(device, model);
mutateOperationTypeTest(device, model);
mutateOperationInputOperandIndexTest(device, model);
mutateOperationOutputOperandIndexTest(device, model);
+ mutateOperationRemoveWriteTest(device, model);
removeOperandTest(device, model);
removeOperationTest(device, model);
removeOperationInputTest(device, model);
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
index 3661b66..e07e73b 100644
--- a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
@@ -24,6 +24,18 @@
inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM;
+// Returns the amount of space needed to store a value of the specified type.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(V1_3::OperandType type);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a non-extension, non-OEM tensor with unspecified
+// rank or at least one unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type or OEM type.
+uint32_t sizeOfData(const V1_3::Operand& operand);
+
} // namespace android::hardware::neuralnetworks
namespace android::hardware::neuralnetworks::V1_3 {