Add float16 support to generated tests.

Uses the _Float16 type (ISO/IEC TS 18661-3:2015) for storage.
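
A minimal sketch of the storage shape this implies (the real definitions
live in the NNAPI test harness headers; the container choices and the
index value here are assumptions, and _Float16 needs a compiler that
implements TS 18661-3, e.g. clang targeting ARM):

    #include <cstdint>
    #include <map>
    #include <tuple>
    #include <vector>

    // Operand index -> flattened data for one element type.
    template <typename T>
    using OperandMap = std::map<int, std::vector<T>>;

    // After this change the tuple carries a fifth element type for FP16,
    // which is why copy_back()'s static_assert is bumped from 4 to 5.
    using MixedTyped = std::tuple<OperandMap<float>, OperandMap<int32_t>,
                                  OperandMap<uint8_t>, OperandMap<int16_t>,
                                  OperandMap<_Float16>>;

    // Maps an element type to its tuple slot, as used by the
    // hasFloat16Inputs check in the diff below.
    template <typename T> struct MixedTypedIndex;
    template <> struct MixedTypedIndex<_Float16> {
        static constexpr size_t index = 4;
    };

    static_assert(std::tuple_size<MixedTyped>::value == 5,
                  "five storage types after this change");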

Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Bug: 113563458
Change-Id: I1779b828d397b5354dc854c68d21c159cd5b582c
Merged-In: I1779b828d397b5354dc854c68d21c159cd5b582c
(cherry picked from commit efa4c814fb463a07d561393e4ba78a9a47e266e4)
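
For context, the tolerance constants in the diff are 5 ULP (units in the
last place) at 1.0 for each format: binary16 has 10 fraction bits
(ulp = 2^-10 = 0.0009765625) and binary32 has 23 (ulp = 2^-23 =
1.1920928955078125e-7). A self-contained check of that arithmetic, not
part of the patch:

    // Illustrative only: verifies the magic numbers used for tolerances.
    #include <cassert>
    #include <cmath>

    int main() {
        const float fp16Ulp = std::ldexp(1.0f, -10);  // ULP of 1.0 in binary16
        const float fp32Ulp = std::ldexp(1.0f, -23);  // ULP of 1.0 in binary32
        assert(5.0f * fp16Ulp == 5.0f * 0.0009765625f);
        assert(5.0f * fp32Ulp == 5.0f * 1.1920928955078125e-7f);
        return 0;
    }
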
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 1f66c43..802d018 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -45,6 +45,7 @@
 using ::test_helper::Int32Operands;
 using ::test_helper::MixedTyped;
 using ::test_helper::MixedTypedExample;
+using ::test_helper::MixedTypedIndex;
 using ::test_helper::Quant8Operands;
 using ::test_helper::resize_accordingly;
 
@@ -63,14 +64,16 @@
     copy_back_<int32_t>(dst, ra, src);
     copy_back_<uint8_t>(dst, ra, src);
     copy_back_<int16_t>(dst, ra, src);
-    static_assert(4 == std::tuple_size<MixedTyped>::value,
+    copy_back_<_Float16>(dst, ra, src);
+    static_assert(5 == std::tuple_size<MixedTyped>::value,
                   "Number of types in MixedTyped changed, but copy_back function wasn't updated");
 }
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
 void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples, float fpAtol = 1e-5f,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
                            float fpRtol = 1e-5f) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
@@ -78,13 +81,20 @@
     int example_no = 1;
     for (auto& example : examples) {
         SCOPED_TRACE(example_no++);
-
         const MixedTyped& inputs = example.operands.first;
         const MixedTyped& golden = example.operands.second;
 
+        const bool hasFloat16Inputs = !std::get<MixedTypedIndex<_Float16>::index>(inputs).empty();
+        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+            // TODO: Adjust the error limit based on testing.
+            // Relaxed models and FP16 inputs both get an absolute tolerance of 5 ULP of FP16.
+            fpAtol = 5.0f * 0.0009765625f;
+            // Use the same 5 ULP of FP16 as the relative tolerance.
+            fpRtol = 5.0f * 0.0009765625f;
+        }
+
         std::vector<RequestArgument> inputs_info, outputs_info;
         uint32_t inputSize = 0, outputSize = 0;
-
         // This function only partially specifies the metadata (vector of RequestArguments).
         // The contents are copied over below.
         for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
@@ -228,7 +238,8 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol);
 }
 
 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -272,13 +283,8 @@
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 // TODO: Reduce code duplication.
@@ -323,13 +329,8 @@
     EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel.get());
 
-    // TODO: Adjust the error limit based on testing.
-    // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
-    float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
-    // Set the relative tolerance to be 5ULP of the corresponding FP precision.
-    float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f
-                                                           : 5.0f * 0.0009765625f;
-    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16);
 }
 
 }  // namespace generated_tests
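
For reference, a sketch of a call site after this change. runExample is a
hypothetical wrapper (the real callers are the Execute() overloads above)
and it assumes this file's includes and using-declarations, but the
EvaluatePreparedModel signature matches the diff:

    // Tolerance selection now lives inside EvaluatePreparedModel: the
    // caller-supplied (or default) FP32 tolerances are overridden with
    // 5 ULP of FP16 whenever the model is relaxed or an example carries
    // _Float16 inputs.
    void runExample(sp<IPreparedModel>& preparedModel,
                    std::function<bool(int)> is_ignored,
                    const std::vector<MixedTypedExample>& examples,
                    const V1_1::Model& model) {
        EvaluatePreparedModel(preparedModel, is_ignored, examples,
                              model.relaxComputationFloat32toFloat16);
    }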