Modify 1.2 VTS tests to consume test struct directly.
Compared with v1.1, the converter for the 1.2 HIDL model additionally
supports extraParam, dynamic output shapes, and zero-sized outputs.
Modify CompilationCachingTests to use the new test struct.
Bug: 123092187
Bug: 138718240
Test: All VTS
Change-Id: I54ac97f62898e47a338b51cc6d895a0309ab001f
Merged-In: I54ac97f62898e47a338b51cc6d895a0309ab001f
(cherry picked from commit 491b0a89133b8519a68b5999cf3b227c750f6deb)
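
For context, a minimal before/after sketch of the call-site change this
refactor implies; the TestModel type and the createRequest() helper named
below are assumptions about the shared test-struct utilities (TestHarness.h,
1.0/Utils.h), not something taken from this diff:

    // Before: the removed converter turned MixedTypedExample data into a
    // vector of requests, and each validation helper looped over it.
    //   std::vector<Request> requests = createRequests(examples);
    //   validateRequests(preparedModel, requests);
    //
    // After: the test struct is consumed directly, one Request at a time
    // (createRequest() and TestModel are assumed names, for illustration only).
    //   const test_helper::TestModel& testModel = GetParam();  // hypothetical accessor
    //   const Request request = createRequest(testModel);
    //   validateRequest(preparedModel, request);
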
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index cf5905f..13d45e4 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -16,14 +16,9 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
-#include "MemoryUtils.h"
#include "TestHarness.h"
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
@@ -35,12 +30,7 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hidl::memory::V1_0::IMemory;
-using test_helper::for_all;
-using test_helper::MixedTyped;
-using test_helper::MixedTypedExample;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -161,119 +151,23 @@
///////////////////////////// ENTRY POINT //////////////////////////////////
-std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples) {
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    std::vector<Request> requests;
-
-    for (auto& example : examples) {
-        const MixedTyped& inputs = example.operands.first;
-        const MixedTyped& outputs = example.operands.second;
-
-        std::vector<RequestArgument> inputs_info, outputs_info;
-        uint32_t inputSize = 0, outputSize = 0;
-
-        // This function only partially specifies the metadata (vector of RequestArguments).
-        // The contents are copied over below.
-        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
-            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = INPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            RequestArgument arg_empty = {
-                    .hasNoValue = true,
-            };
-            inputs_info[index] = s ? arg : arg_empty;
-            inputSize += s;
-        });
-        // Compute offset for inputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : inputs_info) {
-                if (!i.hasNoValue) i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-
-        // Go through all outputs, initialize RequestArgument descriptors
-        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
-            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
-            RequestArgument arg = {
-                    .location = {.poolIndex = OUTPUT,
-                                 .offset = 0,
-                                 .length = static_cast<uint32_t>(s)},
-                    .dimensions = {},
-            };
-            outputs_info[index] = arg;
-            outputSize += s;
-        });
-        // Compute offset for outputs 1 and so on
-        {
-            size_t offset = 0;
-            for (auto& i : outputs_info) {
-                i.location.offset = offset;
-                offset += i.location.length;
-            }
-        }
-        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                          nn::allocateSharedMemory(outputSize)};
-        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
-            return {};
-        }
-
-        // map pool
-        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-        if (inputMemory == nullptr) {
-            return {};
-        }
-        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
-        if (inputPtr == nullptr) {
-            return {};
-        }
-
-        // initialize pool
-        inputMemory->update();
-        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
-            char* begin = (char*)p;
-            char* end = begin + s;
-            // TODO: handle more than one input
-            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
-        });
-        inputMemory->commit();
-
-        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
-    }
-
-    return requests;
-}
-
-void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
-                                      const std::vector<Request>& requests) {
-    // validate each request
-    for (const Request& request : requests) {
-        removeInputTest(preparedModel, request);
-        removeOutputTest(preparedModel, request);
-    }
+void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
+ const Request& request) {
+ removeInputTest(preparedModel, request);
+ removeOutputTest(preparedModel, request);
}
void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
-                                            const std::vector<Request>& requests) {
-    for (const Request& request : requests) {
-        SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
-        Return<void> executeStatus = preparedModel->executeSynchronously(
-                request, MeasureTiming::NO,
-                [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
-                   const Timing& timing) {
-                    ASSERT_NE(ErrorStatus::NONE, error);
-                    EXPECT_EQ(outputShapes.size(), 0);
-                    EXPECT_TRUE(badTiming(timing));
-                });
-        ASSERT_TRUE(executeStatus.isOk());
-    }
+ const Request& request) {
+ SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+ Return<void> executeStatus = preparedModel->executeSynchronously(
+ request, MeasureTiming::NO,
+ [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+ ASSERT_NE(ErrorStatus::NONE, error);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ });
+ ASSERT_TRUE(executeStatus.isOk());
}
} // namespace functional
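
For reference, a condensed, self-contained sketch of the single-example
Request construction that the removed createRequests() converter performed.
It uses only types and helpers visible in the removed code above
(RequestArgument, nn::allocateSharedMemory, mapMemory, IMemory) plus the
file's remaining VTS headers; the createSingleRequest() name is made up for
illustration, and most of the removed code's error handling is omitted for
brevity:

    #include <android/hidl/memory/1.0/IMemory.h>
    #include <hidlmemory/mapping.h>

    #include <cstring>
    #include <vector>

    #include "MemoryUtils.h"

    using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
    using ::android::hidl::memory::V1_0::IMemory;

    // Builds a Request whose single input and single output each live in
    // their own shared-memory pool, mirroring the INPUT/OUTPUT pool layout
    // used by the removed converter.
    Request createSingleRequest(const void* inputData, uint32_t inputSize, uint32_t outputSize) {
        constexpr uint32_t INPUT = 0;
        constexpr uint32_t OUTPUT = 1;

        RequestArgument input = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize},
                .dimensions = {},
        };
        RequestArgument output = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize},
                .dimensions = {},
        };

        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) return {};

        // Map the input pool and copy the input data into it, as the removed
        // converter did with for_all() over the MixedTyped inputs.
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        char* inputPtr = static_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        inputMemory->update();
        std::memcpy(inputPtr, inputData, inputSize);
        inputMemory->commit();

        return {.inputs = {input}, .outputs = {output}, .pools = pools};
    }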