Replace TENSOR_QUANT16_ASYMM with TENSOR_QUANT16_SYMM

* Update doc string
* Update zero point mutation to check that symmetric quantization only permits a zero point of 0 (sketched below)

Fix: 118671831
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: Id1999c793c839b892cfe45cbb245611b12db2a72
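
For reviewers, a minimal standalone sketch of the rule this change targets. The helper name isValidQuant16SymmParams is hypothetical (not part of ValidateModel.cpp); it only restates the assumption that a TENSOR_QUANT16_SYMM operand takes scale > 0 and zeroPoint == 0, which is what makes the zero point values added below invalid.

    // Sketch only, not part of this patch: TENSOR_QUANT16_SYMM is assumed to
    // require a positive scale and a zero point of exactly 0.
    #include <cstdint>
    #include <cstdio>

    bool isValidQuant16SymmParams(float scale, int32_t zeroPoint) {
        return scale > 0.0f && zeroPoint == 0;
    }

    int main() {
        // The values injected by getInvalidZeroPoints() in the diff below must
        // all be rejected for a TENSOR_QUANT16_SYMM operand.
        for (int32_t zp : {-32769, -1, 1, 32768}) {
            std::printf("zeroPoint %d valid: %d\n", zp,
                        isValidQuant16SymmParams(1.0f, zp));
        }
        return 0;
    }
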
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index e309642..c4f1b5e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,7 +161,7 @@
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
-        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
             return 0;
         default:
             return 0;
@@ -193,7 +193,7 @@
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
-        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
             return 0.0f;
         default:
             return 0.0f;
@@ -225,8 +225,8 @@
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
-        case OperandType::TENSOR_QUANT16_ASYMM:
-            return {-32769, 32768};
+        case OperandType::TENSOR_QUANT16_SYMM:
+            return {-32769, -1, 1, 32768};
         default:
             return {};
     }
@@ -279,7 +279,7 @@
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
-        case OperandType::TENSOR_QUANT16_ASYMM:
+        case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
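
Follow-up note: a standalone sketch, assuming the usual symmetric mapping realValue = quantizedValue * scale for TENSOR_QUANT16_SYMM, illustrating why the type carries no offset and why any nonzero zeroPoint is treated as invalid above. quantizeSymm16 and dequantizeSymm16 are illustrative names, not part of the VTS harness.

    // Standalone illustration, not part of ValidateModel.cpp: symmetric 16-bit
    // quantization uses a scale only, so the zero point is implicitly 0.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int16_t quantizeSymm16(float real, float scale) {
        // realValue = quantizedValue * scale, clamped to the int16 range.
        float q = std::round(real / scale);
        q = std::min(std::max(q, -32768.0f), 32767.0f);
        return static_cast<int16_t>(q);
    }

    float dequantizeSymm16(int16_t quantized, float scale) {
        return quantized * scale;
    }

    int main() {
        const float scale = 0.005f;
        for (float v : {-1.0f, 0.0f, 0.4f}) {
            const int16_t q = quantizeSymm16(v, scale);
            std::printf("%f -> %d -> %f\n", v, q, dequantizeSymm16(q, scale));
        }
        return 0;
    }
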