Replace TENSOR_QUANT16_ASYMM with TENSOR_QUANT16_SYMM
* Update doc string
* Update zero point mutation to check for symmetric quantization
Fix: 118671831
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: Id1999c793c839b892cfe45cbb245611b12db2a72
Merged-In: Id1999c793c839b892cfe45cbb245611b12db2a72
(cherry picked from commit 48c8820bac243e7eaff680056f2145f1b3b1ab2a)
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 366e626..fe9b312 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -33,15 +33,13 @@
/**
* A tensor of 16 bit signed integers that represent real numbers.
*
- * Attached to this tensor are two numbers that are used to convert the 16
- * bit integer to the real value and vice versa. These two numbers are:
- * - scale: a 32 bit floating point value greater than zero.
- * - zeroPoint: a 32 bit integer, in range [-32768, 32767].
+ * Attached to this tensor is a number representing real value scale that is
+ * used to convert the 16 bit number to a real value in the following way:
+ * realValue = integerValue * scale.
*
- * The formula is:
- * realValue = (integerValue - zeroPoint) * scale.
+ * scale is a 32 bit floating point value greater than zero.
*/
- TENSOR_QUANT16_ASYMM = 7,
+ TENSOR_QUANT16_SYMM = 7,
/** A tensor of 16 bit floating point values. */
TENSOR_FLOAT16 = 8,
};
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index e309642..c4f1b5e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,7 +161,7 @@
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_INT32:
case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
return 0;
default:
return 0;
@@ -193,7 +193,7 @@
case OperandType::TENSOR_INT32:
return -1.0f;
case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
return 0.0f;
default:
return 0.0f;
@@ -225,8 +225,8 @@
return {1};
case OperandType::TENSOR_QUANT8_ASYMM:
return {-1, 256};
- case OperandType::TENSOR_QUANT16_ASYMM:
- return {-32769, 32768};
+ case OperandType::TENSOR_QUANT16_SYMM:
+ return {-32769, -1, 1, 32768};
default:
return {};
}
@@ -279,7 +279,7 @@
newOperand.zeroPoint = 0;
break;
case OperandType::TENSOR_QUANT8_ASYMM:
- case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;