Test harness for generated tests

Created an initial test harness in VtsHalNeuralnetworksV1_0TargetTest
for the test models and examples produced by the NNAPI test generator.
As an example, also added a test generated from the test spec at
frameworks/ml/nn/tools/test_generator/tests/P_vts_full/.

Generated model setup code and examples come from
frameworks/ml/nn/runtime/test/generated/vts_models and
frameworks/ml/nn/runtime/test/generated/examples, respectively.
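
For reference, a generated test is expected to plug into the harness
roughly as sketched below. This is an illustrative sketch only: the
spec name ("add"), the fixture name NeuralnetworksHidlTest, and its
"device" member are assumptions rather than part of this change; the
real symbols come from the generated vts_models and examples files.

    namespace add {
    // Generated by test_generator.py into vts_models/ and examples/.
    Model createTestModel();
    extern std::vector<MixedTypedExampleType> examples;
    }  // namespace add

    TEST_F(NeuralnetworksHidlTest, add) {
        generated_tests::Execute(device, add::createTestModel, add::examples);
    }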

Bug: 63905942
Bug: 63525563
Test: VtsHalNeuralnetworksV1_0TargetTest with sample driver enabled
      by cherry-pick

Change-Id: Ief029eed9718c8724ef0b64fc6a7f6b9a7bc7b7b
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..2f557f8
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Event.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworksV1_0TargetTest.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace vts {
+namespace functional {
+// allocator helper
+hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem");
+
+namespace generated_tests {
+using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+using ::generated_tests::for_all;
+using ::generated_tests::for_each;
+using ::generated_tests::resize_accordingly;
+using ::generated_tests::MixedTyped;
+using ::generated_tests::MixedTypedExampleType;
+using ::generated_tests::Float32Operands;
+using ::generated_tests::Int32Operands;
+using ::generated_tests::Quant8Operands;
+// Top-level test driver for the models and examples generated by
+// test_generator.py from the specs under ml/nn/runtime/test/spec.
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+             const std::vector<MixedTypedExampleType>& examples) {
+    Model model = create_model();
+    sp<IPreparedModel> preparedModel;
+    sp<Event> preparationEvent = new Event();
+    ASSERT_NE(nullptr, preparationEvent.get());
+    Return<void> prepareRet = device->prepareModel(
+        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            preparedModel = prepared;
+        });
+    ASSERT_TRUE(prepareRet.isOk());
+    ASSERT_NE(nullptr, preparedModel.get());
+    Event::Status preparationStatus = preparationEvent->wait();
+    EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
+
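+    // Request pool indices: all inputs are packed into pool 0, all outputs into pool 1.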
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    int example_no = 1;
+    for (auto& example : examples) {
+        SCOPED_TRACE(example_no++);
+
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& golden = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // First pass over the inputs: record each operand's length in a RequestArgument;
+        // offsets into the input pool are assigned below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            inputs_info[index] = arg;
+            inputSize += s;
+        });
+        // Assign each input its byte offset within the input pool
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        MixedTyped test;  // holding test results
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        resize_accordingly<float>(golden, test);
+        resize_accordingly<int32_t>(golden, test);
+        resize_accordingly<uint8_t>(golden, test);
+        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Assign each output its byte offset within the output pool
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
+                                          allocateSharedMemory(outputSize)};
+        ASSERT_NE(0ull, pools[INPUT].size());
+        ASSERT_NE(0ull, pools[OUTPUT].size());
+
+        // load data
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+        ASSERT_NE(nullptr, inputMemory.get());
+        ASSERT_NE(nullptr, outputMemory.get());
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+        ASSERT_NE(nullptr, inputPtr);
+        ASSERT_NE(nullptr, outputPtr);
+        inputMemory->update();
+        outputMemory->update();
+
+        // Go through all inputs, copy the values
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            const char* begin = reinterpret_cast<const char*>(p);
+            const char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+
+        inputMemory->commit();
+        outputMemory->commit();
+        // execute request
+        sp<Event> executionEvent = new Event();
+        ASSERT_NE(nullptr, executionEvent.get());
+        Return<ErrorStatus> executeStatus = preparedModel->execute(
+            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionEvent);
+        ASSERT_TRUE(executeStatus.isOk());
+        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
+        Event::Status eventStatus = executionEvent->wait();
+        EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
+
+        // validate results
+        outputMemory->read();
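+        // Copy each typed output operand from the output pool back into the 'test' results.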
+#define COPY_BACK(ty)                                                              \
+    for_each<ty>(test, [&outputs_info, outputPtr](int index, std::vector<ty>& m) { \
+        RequestArgument& i = outputs_info[index];                                  \
+        ASSERT_EQ(m.size(), i.location.length / sizeof(ty));                       \
+        char* begin = outputPtr + i.location.offset;                               \
+        memcpy(m.data(), begin, i.location.length);                                \
+    });
+        COPY_BACK(float);
+        COPY_BACK(int32_t);
+        COPY_BACK(uint8_t);
+#undef COPY_BACK
+        outputMemory->commit();
+        // We want "close-enough" results for float
+        for_each<float>(golden, [&test](int index, auto& golden_float) {
+            auto& test_float_operands = std::get<Float32Operands>(test);
+            auto& test_float = test_float_operands[index];
+            for (unsigned int i = 0; i < golden_float.size(); i++) {
+                SCOPED_TRACE(i);
+                EXPECT_FLOAT_EQ(golden_float[i], test_float[i]);
+            }
+        });
+
+        EXPECT_EQ(std::get<Int32Operands>(golden), std::get<Int32Operands>(test));
+        EXPECT_EQ(std::get<Quant8Operands>(golden), std::get<Quant8Operands>(test));
+    }
+}
+
+}  // namespace generated_tests
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android