Add new OperandType TENSOR_QUANT16_ASYMM.
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I8fcd6b30c32f8fbc181d2b43f9ac0b94fdc57e2f
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index de0e494..d45922e 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,13 +45,10 @@
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
using ::test_helper::filter;
-using ::test_helper::Float32Operands;
using ::test_helper::for_all;
using ::test_helper::for_each;
-using ::test_helper::Int32Operands;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
-using ::test_helper::Quant8Operands;
using ::test_helper::resize_accordingly;

template <typename T>
@@ -67,12 +64,13 @@
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
copy_back_(&dst->float32Operands, ra, src);
copy_back_(&dst->int32Operands, ra, src);
- copy_back_(&dst->quant8Operands, ra, src);
- copy_back_(&dst->quant16Operands, ra, src);
+ copy_back_(&dst->quant8AsymmOperands, ra, src);
+ copy_back_(&dst->quant16SymmOperands, ra, src);
copy_back_(&dst->float16Operands, ra, src);
copy_back_(&dst->bool8Operands, ra, src);
copy_back_(&dst->quant8ChannelOperands, ra, src);
- static_assert(7 == MixedTyped::kNumTypes,
+ copy_back_(&dst->quant16AsymmOperands, ra, src);
+ static_assert(8 == MixedTyped::kNumTypes,
"Number of types in MixedTyped changed, but copy_back function wasn't updated");
}
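The diff elides the copy_back_ template that sits between these two hunks; its "template <typename T>" opener is the last context line of the first hunk. For orientation, here is a minimal sketch of such a per-type copy, assuming each MixedTyped field is a std::map<int, std::vector<T>> keyed by output index. The container shape, loop, and stand-in types are assumptions for illustration, not the file's verbatim code.

#include <cstdint>
#include <cstring>
#include <map>
#include <vector>

// Hypothetical stand-ins for the HIDL types referenced by copy_back; the
// real definitions come from the generated 1.0 HAL headers.
struct DataLocation { uint32_t poolIndex; uint32_t offset; uint32_t length; };
struct RequestArgument { DataLocation location; };

// Copies each output operand of element type T out of the shared result
// buffer (src) into its staging vector, using the offset/length recorded
// in the matching RequestArgument.
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
                char* src) {
    for (auto& [index, buffer] : *dst) {
        const char* begin = src + ra[index].location.offset;
        std::memcpy(buffer.data(), begin, ra[index].location.length);
    }
}

Under that shape, the added quant16AsymmOperands map simply instantiates copy_back_<uint16_t>, which is why the call list and MixedTyped::kNumTypes must change together, as the static_assert enforces.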
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index b072793..c164d72 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -76,6 +76,18 @@
* where C is an index in the Channel dimension.
*/
TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
+ /**
+ * A tensor of 16 bit unsigned integers that represent real numbers.
+ *
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 16 bit integer to the real value and vice versa. These two numbers are:
+ * - scale: a 32 bit floating point value greater than zero.
+ * - zeroPoint: a 32 bit integer, in range [0, 65535].
+ *
+ * The formula is:
+ * real_value = (integer_value - zeroPoint) * scale.
+ */
+ TENSOR_QUANT16_ASYMM = 12,
/* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
* OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
*/
@@ -89,7 +101,7 @@
*/
enum OperandTypeRange : uint32_t {
OPERAND_FUNDAMENTAL_MIN = 0,
- OPERAND_FUNDAMENTAL_MAX = 11,
+ OPERAND_FUNDAMENTAL_MAX = 12,
OPERAND_OEM_MIN = 10000,
OPERAND_OEM_MAX = 10001,
};
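The doc comment added above fixes the conversion rule for the new type: real_value = (integer_value - zeroPoint) * scale, with scale > 0 and zeroPoint in [0, 65535]. A minimal sketch of both directions follows; the function names and the saturating-clamp policy are assumptions for illustration, not part of the HAL.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize a TENSOR_QUANT16_ASYMM element:
// real_value = (integer_value - zeroPoint) * scale.
float dequant16Asymm(uint16_t value, float scale, int32_t zeroPoint) {
    return static_cast<float>(static_cast<int32_t>(value) - zeroPoint) * scale;
}

// Quantize back: round to the nearest step and clamp into the representable
// range [0, 65535]; out-of-range reals saturate at the endpoints.
uint16_t quant16Asymm(float real, float scale, int32_t zeroPoint) {
    const int32_t q = static_cast<int32_t>(std::lround(real / scale)) + zeroPoint;
    return static_cast<uint16_t>(std::clamp<int32_t>(q, 0, 65535));
}

For example, scale = 0.125 and zeroPoint = 32768 cover roughly [-4096.0, 4095.875] in steps of 0.125, and dequant16Asymm(40000, 0.125f, 32768) yields (40000 - 32768) * 0.125 = 904.0.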
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 1f9c99d..5e661fb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,6 +161,7 @@
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_INT32:
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
return 0;
@@ -199,6 +200,7 @@
case OperandType::TENSOR_INT32:
return -1.0f;
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
return 0.0f;
default:
@@ -233,6 +235,8 @@
return {1};
case OperandType::TENSOR_QUANT8_ASYMM:
return {-1, 256};
+ case OperandType::TENSOR_QUANT16_ASYMM:
+ return {-1, 65536};
case OperandType::TENSOR_QUANT16_SYMM:
return {-32769, -1, 1, 32768};
default:
@@ -288,6 +292,7 @@
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
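The new mutation values follow directly from the range documented in types.hal: a valid TENSOR_QUANT16_ASYMM zeroPoint lies in [0, 65535], so -1 and 65536 are the tightest out-of-range probes on either side, mirroring the -1/256 pair used for the 8-bit asymmetric type. A sketch of the predicate these tests expect the service to enforce is below; the function name is hypothetical, and the real check lives in the runtime's operand validation.

#include <cstdint>

// A TENSOR_QUANT16_ASYMM operand stores uint16 data, so validation must
// reject any zeroPoint that no stored value could ever equal.
bool isValidQuant16AsymmZeroPoint(int32_t zeroPoint) {
    return zeroPoint >= 0 && zeroPoint <= 65535;
}

The surrounding VTS harness (not shown in this diff) builds one mutated model per invalid value and passes only if the driver rejects every mutant during model validation.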