Add TENSOR_QUANT16_ASYMM to operand types

Add new OperandType::TENSOR_QUANT16_ASYMM.
Add VTS validation for the new type.
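
TENSOR_QUANT16_ASYMM is a tensor of 16 bit unsigned integers that
represent real numbers through an affine mapping: each element is
interpreted as realValue = (integerValue - zeroPoint) * scale, with
scale > 0 and zeroPoint in the range [0, 65535]. A minimal sketch of
that mapping, with helper names that are illustrative only and not
part of this change:

    #include <cmath>
    #include <cstdint>

    // Convert a stored 16 bit value to its real value using the
    // operand's scale and zeroPoint (asymmetric/affine quantization).
    inline float dequantizeQuant16Asymm(uint16_t value, float scale,
                                        int32_t zeroPoint) {
        return (static_cast<int32_t>(value) - zeroPoint) * scale;
    }

    // Convert a real value back to the stored representation, rounding
    // to nearest and clamping to the representable range [0, 65535].
    inline uint16_t quantizeQuant16Asymm(float value, float scale,
                                         int32_t zeroPoint) {
        int32_t q = static_cast<int32_t>(std::lround(value / scale)) + zeroPoint;
        if (q < 0) q = 0;
        if (q > 65535) q = 65535;
        return static_cast<uint16_t>(q);
    }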

Bug: 113561892
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I4f9ed6a33d5d3ec227e9f335df71954c73edf344
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 5a8b8c5..9af6258 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -129,10 +129,10 @@
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 
 static const int32_t invalidOperandTypes[] = {
-    static_cast<int32_t>(OperandType::FLOAT32) - 1,          // lower bound fundamental
-    static_cast<int32_t>(OperandType::BOOL) + 1,             // upper bound fundamental
-    static_cast<int32_t>(OperandType::OEM) - 1,              // lower bound OEM
-    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,  // upper bound OEM
+    static_cast<int32_t>(OperandType::FLOAT32) - 1,               // lower bound fundamental
+    static_cast<int32_t>(OperandType::TENSOR_QUANT16_ASYMM) + 1,  // upper bound fundamental
+    static_cast<int32_t>(OperandType::OEM) - 1,                   // lower bound OEM
+    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,       // upper bound OEM
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
@@ -160,6 +160,7 @@
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0;
         default:
             return 0;
@@ -190,6 +191,7 @@
         case OperandType::TENSOR_INT32:
             return -1.0f;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             return 0.0f;
         default:
             return 0.0f;
@@ -219,6 +221,8 @@
         case OperandType::TENSOR_INT32:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
+        case OperandType::TENSOR_QUANT16_ASYMM:
+            return {-1, 65536};
         default:
             return {};
@@ -271,6 +275,7 @@
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
+        case OperandType::TENSOR_QUANT16_ASYMM:
             newOperand.dimensions =
                 operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;