Modify 1.0 VTS tests to consume the test struct directly.

Implement converter utilities that construct the HIDL model and request
from TestModel.
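
For illustration only, a generated test body built on top of the new entry
point might look roughly like the sketch below. The wrapper name
RunGeneratedTest, the abbreviated namespaces, and the way the device and
TestModel are obtained are assumptions for the sketch, not part of this
change.

    // Hypothetical sketch: drive the new converter + execution path from a test body.
    void RunGeneratedTest(const sp<V1_0::IDevice>& device,
                          const test_helper::TestModel& testModel) {
        // Execute() converts testModel into a HIDL Model and Request, runs it on
        // the driver, and checks the outputs against the golden data in testModel.
        V1_0::generated_tests::Execute(device, testModel);
    }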
Bug: 123092187
Bug: 138718240
Test: All VTS
Change-Id: I0b26b7f41d31d5e63ed083ab5f6f269a3620f034
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 40d2f4c..0fd9947 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -15,6 +15,7 @@
*/
#include "GeneratedTestHarness.h"
+
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "MemoryUtils.h"
@@ -28,6 +29,7 @@
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include <gtest/gtest.h>
#include <iostream>
namespace android {
@@ -36,6 +38,7 @@
namespace V1_0 {
namespace generated_tests {
+using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IDevice;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
@@ -45,137 +48,111 @@
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
-using ::test_helper::compare;
-using ::test_helper::filter;
-using ::test_helper::for_all;
-using ::test_helper::MixedTyped;
-using ::test_helper::MixedTypedExample;
-using ::test_helper::resize_accordingly;
+
+Model createModel(const TestModel& testModel) {
+ // Model operands.
+ hidl_vec<Operand> operands(testModel.operands.size());
+ size_t constCopySize = 0, constRefSize = 0;
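+    // CONSTANT_COPY operand data will be packed into Model::operandValues and
+    // CONSTANT_REFERENCE data into a single shared memory pool; constCopySize
+    // and constRefSize track the running (aligned) offset within each.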
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+
+ DataLocation loc = {};
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constCopySize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constCopySize += op.data.alignedSize();
+ } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constRefSize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constRefSize += op.data.alignedSize();
+ }
+
+ operands[i] = {.type = static_cast<OperandType>(op.type),
+ .dimensions = op.dimensions,
+ .numberOfConsumers = op.numberOfConsumers,
+ .scale = op.scale,
+ .zeroPoint = op.zeroPoint,
+ .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+ .location = loc};
+ }
+
+ // Model operations.
+ hidl_vec<Operation> operations(testModel.operations.size());
+ std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+ [](const TestOperation& op) -> Operation {
+ return {.type = static_cast<OperationType>(op.type),
+ .inputs = op.inputs,
+ .outputs = op.outputs};
+ });
+
+ // Constant copies.
+ hidl_vec<uint8_t> operandValues(constCopySize);
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+ }
+ }
+
+ // Shared memory.
+ hidl_vec<hidl_memory> pools;
+ if (constRefSize > 0) {
+ hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+ CHECK_NE(pools[0].size(), 0u);
+
+        // Map the pool and copy the CONSTANT_REFERENCE operand data into it.
+ sp<IMemory> mappedMemory = mapMemory(pools[0]);
+ CHECK(mappedMemory.get() != nullptr);
+ uint8_t* mappedPtr =
+ reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+ CHECK(mappedPtr != nullptr);
+
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, mappedPtr + operands[i].location.offset);
+ }
+ }
+ }
+
+ return {.operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = testModel.inputIndexes,
+ .outputIndexes = testModel.outputIndexes,
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools)};
+}
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
- const std::vector<MixedTypedExample>& examples, float fpAtol,
- float fpRtol) {
- const uint32_t INPUT = 0;
- const uint32_t OUTPUT = 1;
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
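+    // createRequest() (shared test utility) packs the test inputs into a Request
+    // backed by newly allocated input and output memory pools.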
+ const Request request = createRequest(testModel);
- int example_no = 1;
- for (auto& example : examples) {
- SCOPED_TRACE(example_no++);
- const MixedTyped& inputs = example.operands.first;
- const MixedTyped& golden = example.operands.second;
+ // Launch execution.
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
- CHECK(inputs.float16Operands.empty()) << "float16 is not supported in 1.0";
+ // Retrieve execution status.
+ executionCallback->wait();
+ ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
- std::vector<RequestArgument> inputs_info, outputs_info;
- uint32_t inputSize = 0, outputSize = 0;
- // This function only partially specifies the metadata (vector of RequestArguments).
- // The contents are copied over below.
- for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
- if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
- RequestArgument arg = {
- .location = {.poolIndex = INPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- RequestArgument arg_empty = {
- .hasNoValue = true,
- };
- inputs_info[index] = s ? arg : arg_empty;
- inputSize += s;
- });
- // Compute offset for inputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : inputs_info) {
- if (!i.hasNoValue) i.location.offset = offset;
- offset += i.location.length;
- }
- }
+ // Retrieve execution results.
+ const std::vector<TestBuffer> outputs = getOutputBuffers(request);
- MixedTyped test; // holding test results
-
- // Go through all outputs, initialize RequestArgument descriptors
- resize_accordingly(golden, test);
- for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
- if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
- RequestArgument arg = {
- .location = {.poolIndex = OUTPUT,
- .offset = 0,
- .length = static_cast<uint32_t>(s)},
- .dimensions = {},
- };
- outputs_info[index] = arg;
- outputSize += s;
- });
- // Compute offset for outputs 1 and so on
- {
- size_t offset = 0;
- for (auto& i : outputs_info) {
- i.location.offset = offset;
- offset += i.location.length;
- }
- }
- std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
- nn::allocateSharedMemory(outputSize)};
- ASSERT_NE(0ull, pools[INPUT].size());
- ASSERT_NE(0ull, pools[OUTPUT].size());
-
- // load data
- sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
- sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
- ASSERT_NE(nullptr, inputMemory.get());
- ASSERT_NE(nullptr, outputMemory.get());
- char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
- char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
- ASSERT_NE(nullptr, inputPtr);
- ASSERT_NE(nullptr, outputPtr);
- inputMemory->update();
- outputMemory->update();
-
- // Go through all inputs, copy the values
- for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
- char* begin = (char*)p;
- char* end = begin + s;
- // TODO: handle more than one input
- std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
- });
-
- inputMemory->commit();
- outputMemory->commit();
-
- const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
-
- // launch execution
- sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- ASSERT_NE(nullptr, executionCallback.get());
- Return<ErrorStatus> executionLaunchStatus =
- preparedModel->execute(request, executionCallback);
- ASSERT_TRUE(executionLaunchStatus.isOk());
- EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
-
- // retrieve execution status
- executionCallback->wait();
- ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
-
- // validate results
- outputMemory->read();
- copy_back(&test, outputs_info, outputPtr);
- outputMemory->commit();
- // Filter out don't cares
- MixedTyped filtered_golden = filter(golden, is_ignored);
- MixedTyped filtered_test = filter(test, is_ignored);
-
- // We want "close-enough" results for float
- compare(filtered_golden, filtered_test, fpAtol, fpRtol);
- }
+    // Check the outputs against the golden results, allowing "close-enough"
+    // matches for floating-point values.
+ checkResults(testModel, outputs);
}
-void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
- Model model = create_model();
+void Execute(const sp<IDevice>& device, const TestModel& testModel) {
+ Model model = createModel(testModel);
// see if service can handle model
bool fullySupportsModel = false;
@@ -190,7 +167,6 @@
// launch prepare model
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -213,8 +189,7 @@
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
ASSERT_NE(nullptr, preparedModel.get());
- float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
- EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol);
+ EvaluatePreparedModel(preparedModel, testModel);
}
} // namespace generated_tests