Add new OperandType TENSOR_QUANT16_ASYMM.

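The new type uses the same asymmetric quantization scheme as TENSOR_QUANT8_ASYMM,
widened to 16 bits: real_value = (integer_value - zeroPoint) * scale, with
scale > 0 and zeroPoint in [0, 65535]. A minimal sketch of that mapping
(illustrative only; these helper names are not part of the NNAPI):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // real_value = (integer_value - zeroPoint) * scale,
    // with scale > 0 and zeroPoint in [0, 65535].
    float dequantize16(uint16_t q, float scale, int32_t zeroPoint) {
        return static_cast<float>(static_cast<int32_t>(q) - zeroPoint) * scale;
    }

    uint16_t quantize16(float real, float scale, int32_t zeroPoint) {
        int32_t q = static_cast<int32_t>(std::lround(real / scale)) + zeroPoint;
        return static_cast<uint16_t>(std::min(std::max(q, 0), 65535));
    }
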
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I8fcd6b30c32f8fbc181d2b43f9ac0b94fdc57e2f
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index b072793..c164d72 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -76,6 +76,18 @@
      * where C is an index in the Channel dimension.
      */
     TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
+    /**
+     * A tensor of 16 bit unsigned integers that represent real numbers.
+     *
+     * Attached to this tensor are two numbers that can be used to convert the
+     * 16 bit integer to the real value and vice versa. These two numbers are:
+     * - scale: a 32 bit floating point value greater than zero.
+     * - zeroPoint: a 32 bit integer, in range [0, 65535].
+     *
+     * The formula is:
+     * real_value = (integer_value - zeroPoint) * scale.
+     */
+    TENSOR_QUANT16_ASYMM = 12,
     /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
      * OperandTypeRange::OPERAND_FUNDAMENTAL_MAX.
      */
@@ -89,7 +101,7 @@
  */
 enum OperandTypeRange : uint32_t {
     OPERAND_FUNDAMENTAL_MIN = 0,
-    OPERAND_FUNDAMENTAL_MAX = 11,
+    OPERAND_FUNDAMENTAL_MAX = 12,
     OPERAND_OEM_MIN     = 10000,
     OPERAND_OEM_MAX     = 10001,
 };
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 1f9c99d..5e661fb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -161,6 +161,7 @@
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return 0;
@@ -199,6 +200,7 @@
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             return 0.0f;
         default:
@@ -233,6 +235,8 @@
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-1, 65536};
         case OperandType::TENSOR_QUANT16_SYMM:
             return {-32769, -1, 1, 32768};
         default:
@@ -288,6 +292,7 @@
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});