Add TENSOR_QUANT8_SYMM_PER_CHANNEL to operand types
Add an ExtraParams union to carry additional Operand parameters such as
the per-channel quantization scales. This is a cleaner approach than
adding more fields directly to the Operand struct; see the usage sketch
below.
Bug: 119249581
Test: NeuralNetworksTest_static
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Change-Id: I59731134cf0ea34cf9e10342686d331da9e9c3b3
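
For context, a hedged sketch of how the new ExtraParams union is meant to be
populated for a per-channel quantized operand. It relies only on the V1_2
names that appear in the diff below (Operand, OperandType, SymmPerChannelQuantParams,
and the generated extraParams.channelQuant() setter); the helper name
makePerChannelOperand and the omission of the remaining Operand fields
(lifetime, location, and so on) are illustrative assumptions, not part of
this change.

// Illustrative sketch only: build a TENSOR_QUANT8_SYMM_PER_CHANNEL operand
// whose per-axis scales travel in Operand::ExtraParams instead of in new
// top-level Operand fields.
using ::android::hardware::hidl_vec;
using ::android::hardware::neuralnetworks::V1_2::Operand;
using ::android::hardware::neuralnetworks::V1_2::OperandType;
using ::android::hardware::neuralnetworks::V1_2::SymmPerChannelQuantParams;

Operand makePerChannelOperand(const hidl_vec<uint32_t>& dims,
                              const hidl_vec<float>& perChannelScales,
                              uint32_t channelDim) {
    Operand operand;
    operand.type = OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL;
    operand.dimensions = dims;
    // Per-channel operands keep the legacy quantization fields neutral; the
    // real parameters are carried by the union below.
    operand.scale = 0.0f;
    operand.zeroPoint = 0;

    SymmPerChannelQuantParams channelQuant;
    channelQuant.channelDim = channelDim;    // quantized axis
    channelQuant.scales = perChannelScales;  // one scale per slice along channelDim
    operand.extraParams.channelQuant(std::move(channelQuant));
    return operand;
}
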
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index ac23f41..aa23fec 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -163,6 +163,7 @@
         case OperandType::TENSOR_INT32:
         case OperandType::TENSOR_QUANT8_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return 0;
         default:
             return 0;
@@ -191,6 +192,7 @@
         case OperandType::BOOL:
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return 1.0f;
         case OperandType::TENSOR_INT32:
             return -1.0f;
@@ -225,6 +227,7 @@
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
         case OperandType::TENSOR_INT32:
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
             return {1};
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
@@ -288,6 +291,21 @@
                     operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
             break;
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
+            newOperand.dimensions =
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+            newOperand.scale = 0.0f;
+            newOperand.zeroPoint = 0;
+
+            SymmPerChannelQuantParams channelQuant;
+            channelQuant.channelDim = 0;
+            channelQuant.scales = hidl_vec<float>(
+                    operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0]) : 0);
+            for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
+                channelQuant.scales[i] = 1.0f;
+            }
+            newOperand.extraParams.channelQuant(std::move(channelQuant));
+        } break;
         case OperandType::OEM:
         case OperandType::TENSOR_OEM_BYTE:
         default:
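
Appended note (not part of the patch): the mutation above builds a
self-consistent per-channel operand so that the validation test fails only
because of the type change it is exercising, not because of malformed
quantization parameters. As a rough companion, below is a sketch of the
consistency conditions a consumer might check on the channelQuant payload,
using the same V1_2 type aliases as the sketch above the diff. The specific
constraints (scale == 0, zeroPoint == 0, one positive scale per entry along
channelDim) are assumptions based on the documented semantics of
TENSOR_QUANT8_SYMM_PER_CHANNEL rather than anything shown in this diff.

// Illustrative only: a consistency predicate for the channelQuant payload.
// A real implementation would also check the union discriminator before
// reading channelQuant().
bool isConsistentPerChannelOperand(const Operand& operand) {
    if (operand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) return false;
    // The legacy quantization fields must stay neutral for this type.
    if (operand.scale != 0.0f || operand.zeroPoint != 0) return false;
    const SymmPerChannelQuantParams& channelQuant = operand.extraParams.channelQuant();
    // channelDim must name an existing axis, with one positive scale per slice.
    if (channelQuant.channelDim >= operand.dimensions.size()) return false;
    if (channelQuant.scales.size() != operand.dimensions[channelQuant.channelDim]) return false;
    for (float scale : channelQuant.scales) {
        if (scale <= 0.0f) return false;
    }
    return true;
}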