Add TENSOR_FLOAT16 operand type.
Bug: 113563458
Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Change-Id: If12ceff428e1b1a90ef99b7353f0df60d4ef8010
Merged-In: If12ceff428e1b1a90ef99b7353f0df60d4ef8010
(cherry picked from commit 19d63453d4c2723c3fad4ce7f852f548d761278e)
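
For context, the new operand type makes half-precision tensors available alongside TENSOR_FLOAT32: each element is two bytes wide instead of four. A minimal standalone sketch of how a driver-side helper might map operand types to per-element sizes; the enum below mirrors the numbering shown in the types.hal hunk in this change and is illustrative only, not the generated HIDL headers.

    // Illustrative sketch; OperandType mirrors the types.hal numbering used
    // in this change (TENSOR_FLOAT16 = 8), not the generated HIDL headers.
    #include <cstddef>
    #include <cstdint>

    enum class OperandType : int32_t {
        FLOAT32 = 0,
        INT32 = 1,
        UINT32 = 2,
        TENSOR_FLOAT32 = 3,
        TENSOR_INT32 = 4,
        TENSOR_QUANT8_ASYMM = 5,
        BOOL = 6,
        TENSOR_QUANT16_ASYMM = 7,
        TENSOR_FLOAT16 = 8,  // added by this change
    };

    // Size in bytes of one element of the given operand type.
    constexpr size_t sizeOfElement(OperandType type) {
        switch (type) {
            case OperandType::BOOL:
            case OperandType::TENSOR_QUANT8_ASYMM:
                return 1;
            case OperandType::TENSOR_FLOAT16:       // half precision: 2 bytes per value
            case OperandType::TENSOR_QUANT16_ASYMM:
                return 2;
            case OperandType::FLOAT32:
            case OperandType::INT32:
            case OperandType::UINT32:
            case OperandType::TENSOR_FLOAT32:
            case OperandType::TENSOR_INT32:
                return 4;
        }
        return 0;  // not reached for the types listed above
    }

    static_assert(sizeOfElement(OperandType::TENSOR_FLOAT16) == 2,
                  "FP16 tensor elements are two bytes wide");
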
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 0aa7cc2..4a1e7a8 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -42,6 +42,8 @@
* realValue = (integerValue - zeroPoint) * scale.
*/
TENSOR_QUANT16_ASYMM = 7,
+ /** A tensor of 16-bit floating point values. */
+ TENSOR_FLOAT16 = 8,
};
/**
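
The doc comment above only names the element width; the 16-bit float format NNAPI uses is IEEE 754 binary16 (half precision): 1 sign bit, 5 exponent bits with bias 15, and 10 fraction bits. As a hypothetical standalone helper (halfBitsToFloat is not part of the HAL or of this change), decoding one stored element to float looks roughly like this:

    #include <cmath>
    #include <cstdint>

    // Decode one binary16 element from a TENSOR_FLOAT16 buffer into a float.
    float halfBitsToFloat(uint16_t h) {
        const uint32_t sign = (h >> 15) & 0x1;   // 1 sign bit
        const uint32_t exp  = (h >> 10) & 0x1F;  // 5 exponent bits, bias 15
        const uint32_t frac = h & 0x3FF;         // 10 fraction bits
        float value;
        if (exp == 0) {
            // Zero or subnormal: frac * 2^-24.
            value = std::ldexp(static_cast<float>(frac), -24);
        } else if (exp == 0x1F) {
            // Infinity or NaN.
            value = (frac == 0) ? INFINITY : NAN;
        } else {
            // Normal: (1 + frac/1024) * 2^(exp - 15) == (1024 + frac) * 2^(exp - 25).
            value = std::ldexp(static_cast<float>(1024 + frac), static_cast<int>(exp) - 25);
        }
        return sign ? -value : value;
    }
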
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 9af6258..b840199 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -157,6 +157,7 @@
case OperandType::UINT32:
case OperandType::BOOL:
return 1;
+ case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_INT32:
case OperandType::TENSOR_QUANT8_ASYMM:
@@ -186,6 +187,7 @@
case OperandType::INT32:
case OperandType::UINT32:
case OperandType::BOOL:
+ case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
return 1.0f;
case OperandType::TENSOR_INT32:
@@ -217,6 +219,7 @@
case OperandType::INT32:
case OperandType::UINT32:
case OperandType::BOOL:
+ case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_INT32:
return {1};
@@ -262,6 +265,7 @@
newOperand.scale = 0.0f;
newOperand.zeroPoint = 0;
break;
+ case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_FLOAT32:
newOperand.dimensions =
operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
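
The ValidateModel.cpp switches above pick per-type values (rank, scale, zeroPoint, mutated dimensions) that the VTS negative tests feed to the driver, which must reject the resulting malformed models. Adding TENSOR_FLOAT16 to the same cases as TENSOR_FLOAT32 means the new type follows the same well-formedness rules. A minimal standalone sketch of those rules, using an illustrative Operand struct whose field names mirror the hunk above rather than the real HIDL type:

    #include <cstdint>
    #include <vector>

    enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32 /* other types omitted */ };

    struct Operand {
        OperandType type;
        std::vector<uint32_t> dimensions;
        float scale;
        int32_t zeroPoint;
    };

    // A float tensor (FP16 or FP32) carries no quantization parameters, and
    // these tests assume it has at least one dimension; anything else should
    // be rejected by the driver when the model is prepared.
    bool isWellFormedFloatTensor(const Operand& operand) {
        return (operand.type == OperandType::TENSOR_FLOAT16 ||
                operand.type == OperandType::TENSOR_FLOAT32) &&
               !operand.dimensions.empty() &&  // rank >= 1, as the mutation above assumes
               operand.scale == 0.0f &&        // no quantization scale
               operand.zeroPoint == 0;         // no quantization zero point
    }
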