Add TENSOR_QUANT8_SYMM operand type.

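TENSOR_QUANT8_SYMM stores 8 bit signed integers in the range [-128, 127]
together with a single positive 32 bit float scale; the zeroPoint is
implicitly 0, so converting a stored value back to a real number is one
multiply. A minimal sketch of the conversion in both directions (the
standalone helper names are illustrative, not part of the NNAPI surface):

    #include <cstdint>

    // realValue = integerValue * scale; symmetric types carry no offset.
    float dequantizeQuant8Symm(int8_t integerValue, float scale) {
        return static_cast<float>(integerValue) * scale;
    }

    // Round to nearest (half away from zero) and clamp to [-128, 127].
    int8_t quantizeQuant8Symm(float realValue, float scale) {
        int rounded = static_cast<int>(
                realValue / scale + (realValue >= 0.0f ? 0.5f : -0.5f));
        if (rounded < -128) rounded = -128;
        if (rounded > 127) rounded = 127;
        return static_cast<int8_t>(rounded);
    }

With scale = 0.5f, a stored value of 6 maps back to 3.0f. Because the
type is symmetric, a nonzero zeroPoint can never be well formed, which
is what the VTS changes below start checking.
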
Test: none
Change-Id: I02fc8698b3f80e1ae2a318e5cde593c6d7222bac
Merged-In: I02fc8698b3f80e1ae2a318e5cde593c6d7222bac
(cherry picked from commit bae91697b5397693a10f225de3d39102152e4f18)
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 9e7d8f0..2e48ba0 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -47,7 +47,7 @@
      * used to convert the 16 bit number to a real value in the following way:
      * realValue = integerValue * scale.
      *
-     * scale is a 32 bit floating point with value greater then zero.
+     * scale is a 32 bit floating point with value greater than zero.
      */
     TENSOR_QUANT16_SYMM = 7,
     /** A tensor of IEEE 754 16 bit floating point values. */
@@ -97,6 +97,16 @@
      * real_value = (integer_value - zeroPoint) * scale.
      */
     TENSOR_QUANT16_ASYMM = 12,
+    /**
+     * A tensor of 8 bit signed integers that represent real numbers.
+     *
+     * Attached to this tensor is a number representing real value scale that is
+     * used to convert the 8 bit number to a real value in the following way:
+     * realValue = integerValue * scale.
+     *
+     * scale is a 32 bit floating point with value greater than zero.
+     */
+    TENSOR_QUANT8_SYMM = 13,
     /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
      * OperandTypeRange::FUNDAMENTAL_MAX.
      */
@@ -111,7 +121,7 @@
 enum OperandTypeRange : uint32_t {
     BASE_MIN        = 0,
     FUNDAMENTAL_MIN = 0,
-    FUNDAMENTAL_MAX = 12,
+    FUNDAMENTAL_MAX = 13,
     OEM_MIN         = 10000,
     OEM_MAX         = 10001,
     BASE_MAX        = 0xFFFF,
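The ValidateModel.cpp hunks that follow wire the new type into the
negative tests: a non-positive scale stays invalid for quantized types,
and the zeroPoint mutation uses {-129, -1, 1, 128}, which includes the
in-range values -1 and 1 because symmetric quantization forbids any
offset at all. A rough sketch of the constraint those tests exercise
(the helper is illustrative, not the actual validator):

    #include <cstdint>

    // A TENSOR_QUANT8_SYMM operand is well formed only with a positive
    // scale and a zeroPoint of exactly 0.
    bool isValidQuant8SymmParams(float scale, int32_t zeroPoint) {
        return scale > 0.0f && zeroPoint == 0;
    }
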
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 1bbb203..590116e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,6 +161,7 @@
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT8_SYMM:
         case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
@@ -199,6 +200,7 @@
             return 1.0f;
         case OperandType::TENSOR_INT32:
             return -1.0f;
+        case OperandType::TENSOR_QUANT8_SYMM:
         case OperandType::TENSOR_QUANT8_ASYMM:
         case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
@@ -235,6 +237,8 @@
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT8_SYMM:
+            return {-129, -1, 1, 128};
         case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 65536};
         case OperandType::TENSOR_QUANT16_SYMM:
@@ -292,6 +296,7 @@
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT8_SYMM:
         case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =