Update VTS tests with the new test harness.
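
This change moves the per-model entry point Execute(device, testModel) into a
GeneratedTest fixture built on the shared GeneratedTestBase, so each TestModel
becomes one value-parameterized GTest case and INSTANTIATE_GENERATED_TEST
applies a filter (here, dropping models with expectFailure set). The snippet
below is a minimal standalone sketch of that parameterization pattern in plain
GoogleTest, for reviewers unfamiliar with the harness; FakeModel, kAllModels,
and GeneratedLikeTest are invented for the illustration and are not part of
this change.

  #include <gtest/gtest.h>

  #include <string>
  #include <vector>

  // Placeholder stand-in for test_helper::TestModel; invented for this sketch.
  struct FakeModel {
      std::string name;
      bool expectFailure;
  };

  static const std::vector<FakeModel> kAllModels = {
          {"add_quant8", false},
          {"expected_failure_model", true},
  };

  // Mirrors GeneratedTestBase: one fixture, instantiated once per model.
  class GeneratedLikeTest : public testing::TestWithParam<FakeModel> {};

  TEST_P(GeneratedLikeTest, Test) {
      // The real harness builds the HIDL model and runs it on the device;
      // here we only check that filtered-out models never reach the body.
      EXPECT_FALSE(GetParam().expectFailure);
  }

  // Rough equivalent of the !testModel.expectFailure predicate passed to
  // INSTANTIATE_GENERATED_TEST: drop models that are expected to fail.
  static std::vector<FakeModel> filteredModels() {
      std::vector<FakeModel> out;
      for (const FakeModel& m : kAllModels) {
          if (!m.expectFailure) out.push_back(m);
      }
      return out;
  }

  INSTANTIATE_TEST_SUITE_P(PerModel, GeneratedLikeTest,
                           testing::ValuesIn(filteredModels()),
                           [](const testing::TestParamInfo<FakeModel>& info) {
                               return info.param.name;
                           });
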
Bug: 120601396
Test: All VTS
Change-Id: I539e75585b2cc01d153565814491361adfa048be
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 0fd9947..5f96539 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -20,6 +20,7 @@
#include "1.0/Utils.h"
#include "MemoryUtils.h"
#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
@@ -36,7 +37,8 @@
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
-namespace generated_tests {
+namespace vts {
+namespace functional {
using namespace test_helper;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
@@ -151,48 +153,61 @@
checkResults(testModel, outputs);
}
-void Execute(const sp<IDevice>& device, const TestModel& testModel) {
- Model model = createModel(testModel);
+// Tag for the generated tests
+class GeneratedTest : public GeneratedTestBase {
+ protected:
+ void Execute(const TestModel& testModel) {
+ Model model = createModel(testModel);
- // see if service can handle model
- bool fullySupportsModel = false;
- Return<void> supportedCall = device->getSupportedOperations(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel = std::all_of(supported.begin(), supported.end(),
- [](bool valid) { return valid; });
- });
- ASSERT_TRUE(supportedCall.isOk());
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = device->getSupportedOperations(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
- // launch prepare model
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModel(model, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
- // retrieve prepared model
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
- // early termination if vendor service cannot fully prepare model
- if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel.get());
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "prepare model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "prepare model that it does not support."
- << std::endl;
- GTEST_SKIP();
+ // early termination if vendor service cannot fully prepare model
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel.get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel.get());
+
+ EvaluatePreparedModel(preparedModel, testModel);
}
- EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel.get());
+};
- EvaluatePreparedModel(preparedModel, testModel);
+TEST_P(GeneratedTest, Test) {
+ Execute(*mTestModel);
}
-} // namespace generated_tests
+INSTANTIATE_GENERATED_TEST(GeneratedTest,
+ [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+} // namespace functional
+} // namespace vts
} // namespace V1_0
} // namespace neuralnetworks
} // namespace hardware