Fix VTS tests.

* Adds a specification of invalid scale and zero point for TENSOR_BOOL8.
This fixes VTS failures for comparison ops (see the first sketch below).
* Removes (FUNDAMENTAL_MIN - 1) from invalid OperationTypes.
FUNDAMENTAL_MIN is equal to 0, so static_cast<uint32_t>(FUNDAMENTAL_MIN) - 1
wrapped around to 4294967295, which was then passed as an invalid
OperationType. However, our validateOperation function interpreted this
value as an extension operation type and didn't fail (see the second
sketch below).
* Adds mutateOperationOperandTypeSkip for QUANTIZE and DEQUANTIZE.
* Adds removeOperandSkip for BIDIRECTIONAL_SEQUENCE_RNN.
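
The first sketch is a hypothetical mirror of the scale/zero point rule the
new TENSOR_BOOL8 cases exercise, not the driver's actual code: a boolean
tensor, like the float tensor types, must carry scale == 0.0f and
zeroPoint == 0, so mutating either field must make validateModel fail.

    #include <cstdint>

    // Hypothetical mirror of the rule under test; the enum and struct are
    // trimmed stand-ins for the V1_2 HAL types, not the real definitions.
    enum class OperandType { TENSOR_BOOL8, TENSOR_FLOAT16, TENSOR_FLOAT32 };

    struct Operand {
        OperandType type;
        float scale;
        int32_t zeroPoint;
    };

    // TENSOR_BOOL8 is not a quantized type, so any nonzero scale or
    // zeroPoint makes the operand invalid.
    bool validScaleAndZeroPoint(const Operand& operand) {
        switch (operand.type) {
            case OperandType::TENSOR_BOOL8:
            case OperandType::TENSOR_FLOAT16:
            case OperandType::TENSOR_FLOAT32:
                return operand.scale == 0.0f && operand.zeroPoint == 0;
        }
        return true;
    }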

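The second sketch is a minimal, self-contained illustration of the
wraparound described above; the 16-bit extension-prefix split is an
assumption about how validateOperation classifies type values, not a
quote of its code.

    #include <cstdint>
    #include <iostream>

    int main() {
        // FUNDAMENTAL_MIN == 0; unsigned arithmetic wraps instead of
        // producing -1.
        const uint32_t fundamentalMin = 0;
        const uint32_t invalid = fundamentalMin - 1;
        std::cout << invalid << "\n";  // 4294967295 (0xFFFFFFFF)

        // Assumed classification: a value whose high 16 bits are nonzero
        // parses as "(extension prefix << 16) | type within extension",
        // so 0xFFFFFFFF was treated as an extension operation type and
        // validateOperation never failed.
        const bool looksLikeExtension = (invalid >> 16) != 0;
        std::cout << std::boolalpha << looksLikeExtension << "\n";  // true
        return 0;
    }
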
Fix: 121130841
Fix: 123247345
Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Change-Id: Iefb502c6b9301d5470eb4cdaa46d398f1a0e512a
Merged-In: Iefb502c6b9301d5470eb4cdaa46d398f1a0e512a
(cherry picked from commit 923b8c5842ee07caae4ec3d78ef1421a8e0c3a29)
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 590116e..7f4d385 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -157,6 +157,7 @@
         case OperandType::UINT32:
         case OperandType::BOOL:
             return 1;
+        case OperandType::TENSOR_BOOL8:
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
@@ -194,6 +195,7 @@
         case OperandType::INT32:
         case OperandType::UINT32:
         case OperandType::BOOL:
+        case OperandType::TENSOR_BOOL8:
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
@@ -230,6 +232,7 @@
         case OperandType::INT32:
         case OperandType::UINT32:
         case OperandType::BOOL:
+        case OperandType::TENSOR_BOOL8:
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
@@ -283,6 +286,7 @@
             newOperand.scale = 0.0f;
             newOperand.zeroPoint = 0;
             break;
+        case OperandType::TENSOR_BOOL8:
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
             newOperand.dimensions =
@@ -339,6 +343,10 @@
         // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
         // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
         // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - DEQUANTIZE input can be any of
+        // TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL); output
+        // can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+        // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
         // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
         // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
         // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
@@ -357,8 +365,22 @@
                     return true;
                 }
             } break;
+            case OperationType::QUANTIZE:
             case OperationType::RANDOM_MULTINOMIAL: {
-                if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32) {
+                if (operand == operation.inputs[0] &&
+                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+                    return true;
+                }
+            } break;
+            case OperationType::DEQUANTIZE: {
+                if (operand == operation.inputs[0] &&
+                    (type == OperandType::TENSOR_QUANT8_ASYMM ||
+                     type == OperandType::TENSOR_QUANT8_SYMM ||
+                     type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+                    return true;
+                }
+                if (operand == operation.outputs[0] &&
+                    (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
                     return true;
                 }
             } break;
@@ -397,7 +419,6 @@
 ///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
 
 static const uint32_t invalidOperationTypes[] = {
-        static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MIN) - 1,
         static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1,
         static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1,
         static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1,
@@ -484,6 +505,15 @@
                 }
             }
         }
+        // BIDIRECTIONAL_SEQUENCE_RNN can have either one or two outputs
+        // depending on the mergeOutputs parameter.
+        if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+                    return true;
+                }
+            }
+        }
     }
     return false;
 }