Change NNAPI VTS to use TEST_P to iterate across all service instances
This CL removes a dependency on the VTS test runner by dynamically
discovering all NN HAL service instances in the gtest binary itself
and running the tests against every discovered instance via
parameterized tests.
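
A minimal sketch of that per-instance pattern, assuming a plain string
test parameter and libhidl's getAllHalInstanceNames() for discovery
(illustrative only, not the exact fixture code in this CL):

    #include <android/hardware/neuralnetworks/1.2/IDevice.h>
    #include <gtest/gtest.h>
    #include <hidl/ServiceManagement.h>

    using android::sp;
    using android::hardware::neuralnetworks::V1_2::IDevice;

    // Fixture parameterized directly on the service instance name.
    class NeuralnetworksHidlTest : public testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            kDevice = IDevice::getService(GetParam());
            ASSERT_NE(kDevice.get(), nullptr);
        }
        sp<IDevice> kDevice;
    };

    TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { /* ... */ }

    // Instances are discovered by the test binary itself rather than
    // being injected by the VTS test runner.
    INSTANTIATE_TEST_CASE_P(
            PerInstance, NeuralnetworksHidlTest,
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(
                    IDevice::descriptor)));
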
This CL converts TEST_F cases to TEST_P cases, where the test
parameter is the name of the service instance. For existing TEST_P
cases (such as the generated test cases), the service instance name
becomes the first test parameter.
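
The per-device parameterization in the diff below relies on a few small
helpers that this CL adds to the shared VTS utilities (NamedDevice,
getNamedDevices(), getName(), getData(), gtestCompliantName()); a rough
sketch of their assumed shapes, reusing the includes and usings from the
sketch above:

    #include <algorithm>
    #include <cctype>
    #include <string>
    #include <utility>
    #include <vector>

    // Assumed shapes only; the real definitions live in the shared VTS utils.
    using NamedDevice = std::pair<std::string, sp<IDevice>>;

    inline const std::string& getName(const NamedDevice& device) { return device.first; }
    inline const sp<IDevice>& getData(const NamedDevice& device) { return device.second; }

    // Pair every registered IDevice instance name with a live service handle.
    inline std::vector<NamedDevice> getNamedDevices() {
        std::vector<NamedDevice> devices;
        for (const auto& name :
             android::hardware::getAllHalInstanceNames(IDevice::descriptor)) {
            devices.emplace_back(name, IDevice::getService(name));
        }
        return devices;
    }

    // gtest only allows alphanumeric characters and '_' in test names, so
    // instance names such as "sample-all" have to be sanitized.
    inline std::string gtestCompliantName(std::string name) {
        std::replace_if(
                name.begin(), name.end(),
                [](char c) { return !std::isalnum(static_cast<unsigned char>(c)); }, '_');
        return name;
    }

With these helpers, the test parameter becomes a tuple whose first
element is the NamedDevice, as in the CompilationCachingTest changes
below.
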
This CL makes the NN VTS tests more portable, e.g., they can be run
directly as presubmit tests.
Fixes: 124540002
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest (with sample-all)
Test: cd $ANDROID_BUILD_TOP/hardware/interfaces/neuralnetworks && atest
Change-Id: I1e301d7c9f9342bb8f35a267bef180f510944b19
Merged-In: I1e301d7c9f9342bb8f35a267bef180f510944b19
(cherry picked from commit 7076f629b75aa24e9759d1efea874662744dd4bf)
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index bb46e06..2130a76 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
+#include <fcntl.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <hidlmemory/mapping.h>
@@ -37,11 +38,11 @@
// Forward declaration of the mobilenet generated test models in
// frameworks/ml/nn/runtime/test/generated/.
namespace generated_tests::mobilenet_224_gender_basic_fixed {
-const ::test_helper::TestModel& get_test_model();
+const test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_224_gender_basic_fixed
namespace generated_tests::mobilenet_quantized {
-const ::test_helper::TestModel& get_test_model();
+const test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_quantized
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -53,13 +54,13 @@
namespace float32_model {
-constexpr auto get_test_model = ::generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
+constexpr auto get_test_model = generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
} // namespace float32_model
namespace quant8_model {
-constexpr auto get_test_model = ::generated_tests::mobilenet_quantized::get_test_model;
+constexpr auto get_test_model = generated_tests::mobilenet_quantized::get_test_model;
} // namespace quant8_model
@@ -217,12 +218,13 @@
} // namespace
// Tag for the compilation caching tests.
-class CompilationCachingTestBase : public NeuralnetworksHidlTest {
+class CompilationCachingTestBase : public testing::Test {
protected:
- CompilationCachingTestBase(OperandType type) : kOperandType(type) {}
+ CompilationCachingTestBase(sp<IDevice> device, OperandType type)
+ : kDevice(std::move(device)), kOperandType(type) {}
void SetUp() override {
- NeuralnetworksHidlTest::SetUp();
+ testing::Test::SetUp();
ASSERT_NE(kDevice.get(), nullptr);
// Create cache directory. The cache directory and a temporary cache file is always created
@@ -274,7 +276,7 @@
};
nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
}
- NeuralnetworksHidlTest::TearDown();
+ testing::Test::TearDown();
}
// Model and examples creators. According to kOperandType, the following methods will return
@@ -398,16 +400,21 @@
uint32_t mNumDataCache;
uint32_t mIsCachingSupported;
+ const sp<IDevice> kDevice;
// The primary data type of the testModel.
const OperandType kOperandType;
};
+using CompilationCachingTestParam = std::tuple<NamedDevice, OperandType>;
+
// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
// pass running with float32 models and the second pass running with quant8 models.
class CompilationCachingTest : public CompilationCachingTestBase,
- public testing::WithParamInterface<OperandType> {
+ public testing::WithParamInterface<CompilationCachingTestParam> {
protected:
- CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
+ CompilationCachingTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
};
TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
@@ -1192,16 +1199,30 @@
}
}
+static const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());
static const auto kOperandTypeChoices =
testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
-INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
+std::string printCompilationCachingTest(
+ const testing::TestParamInfo<CompilationCachingTestParam>& info) {
+ const auto& [namedDevice, operandType] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest,
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices),
+ printCompilationCachingTest);
+
+using CompilationCachingSecurityTestParam = std::tuple<NamedDevice, OperandType, uint32_t>;
class CompilationCachingSecurityTest
: public CompilationCachingTestBase,
- public testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
+ public testing::WithParamInterface<CompilationCachingSecurityTestParam> {
protected:
- CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
+ CompilationCachingSecurityTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
void SetUp() {
CompilationCachingTestBase::SetUp();
@@ -1291,7 +1312,7 @@
}
}
- const uint32_t kSeed = std::get<1>(GetParam());
+ const uint32_t kSeed = std::get<uint32_t>(GetParam());
std::mt19937 generator;
};
@@ -1338,7 +1359,16 @@
});
}
+std::string printCompilationCachingSecurityTest(
+ const testing::TestParamInfo<CompilationCachingSecurityTestParam>& info) {
+ const auto& [namedDevice, operandType, seed] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + std::to_string(seed));
+}
+
INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
- testing::Combine(kOperandTypeChoices, testing::Range(0U, 10U)));
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices,
+ testing::Range(0U, 10U)),
+ printCompilationCachingSecurityTest);
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional