Add new OperandType TENSOR_QUANT16_ASYMM.

TENSOR_QUANT16_ASYMM describes tensors of 16-bit unsigned quantized values,
where the real value is (integerValue - zeroPoint) * scale, scale must be
positive, and zeroPoint must lie in [0, 65535]. Extend the ValidateModel VTS
mutations so the new type is covered alongside the existing quantized types,
treating 0.0f as an invalid scale and -1 and 65536 as invalid zero points.

Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I8fcd6b30c32f8fbc181d2b43f9ac0b94fdc57e2f
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 1f9c99d..5e661fb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,6 +161,7 @@
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return 0;
@@ -199,6 +200,7 @@
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             return 0.0f;
         default:
@@ -233,6 +235,8 @@
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-1, 65536};
         case OperandType::TENSOR_QUANT16_SYMM:
             return {-32769, -1, 1, 32768};
         default:
@@ -288,6 +292,7 @@
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =
                     operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
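
For context, a minimal sketch (not part of this change) of a well-formed
TENSOR_QUANT16_ASYMM operand built from the generated 1.2 HIDL types, assuming
the documented constraints that scale is positive and zeroPoint lies in
[0, 65535]; the makeQuant16AsymmOperand helper below is hypothetical and only
mirrors the values these mutations treat as valid or invalid:

    // Sketch only: a V1_2 Operand of type TENSOR_QUANT16_ASYMM whose scale and
    // zeroPoint satisfy the constraints that the mutations above deliberately violate.
    #include <android/hardware/neuralnetworks/1.2/types.h>

    using ::android::hardware::hidl_vec;
    using ::android::hardware::neuralnetworks::V1_2::Operand;
    using ::android::hardware::neuralnetworks::V1_2::OperandType;

    // Hypothetical helper, not part of ValidateModel.cpp.
    Operand makeQuant16AsymmOperand() {
        Operand operand = {};  // lifetime, location, etc. left value-initialized for brevity
        operand.type = OperandType::TENSOR_QUANT16_ASYMM;
        operand.dimensions = hidl_vec<uint32_t>{1, 32};  // example fully specified shape
        operand.scale = 0.25f;      // must be > 0; the tests use 0.0f as the invalid scale
        operand.zeroPoint = 32768;  // must be in [0, 65535]; the tests use -1 and 65536
        return operand;
    }

Dequantization follows the usual asymmetric rule, realValue = (integerValue -
zeroPoint) * scale, which is why a zero scale or an out-of-range zero point has
to be rejected by the HAL validation exercised above.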