Add VTS tests for NNAPI compilation caching.
Add the following tests for compilation caching (a condensed sketch of
the save/restore flow they exercise follows the list):
- validation tests
  - Test isCachingSupported
  - Test prepareModelFromCache with invalid numFd and invalid access mode
  - Test saveToCache with invalid numFd, invalid access mode,
    invalid file size, and invalid fd offset
- execution tests
  - Save a mobilenet model to cache, then retrieve it and run accuracy
    evaluation.
  - The same test, but with the file offsets for prepareModelFromCache
    not at zero.
- security tests
  - CompilationCachingSecurityTest.CorruptedSecuritySensitiveCache
    Randomly flip one bit of a security-sensitive cache entry.
  - CompilationCachingSecurityTest.WrongLengthSecuritySensitiveCache
    Randomly append bytes to a security-sensitive cache entry.
  - CompilationCachingSecurityTest.WrongToken
    Randomly flip one bit of the cache token.
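
For reviewers, a minimal sketch of the cache round trip these tests
drive, condensed from the saveModelToCache()/prepareModelFromCache()
helpers added in CompilationCachingTests.cpp. It assumes the same V1_2
types and PreparedModelCallback used in that file; the
roundTripThroughCache() wrapper itself is illustrative only and is not
part of this change:

    // Sketch only; error handling reduced to early nullptr returns.
    sp<IPreparedModel> roundTripThroughCache(
            const sp<IDevice>& device, const sp<IPreparedModel>& model,
            const hidl_handle& cacheW1, const hidl_handle& cacheW2,
            const hidl_handle& cacheR1, const hidl_handle& cacheR2) {
        // The same token must be passed to saveToCache and
        // prepareModelFromCache; the WrongToken test flips one bit of it
        // between the two calls and expects failure.
        uint8_t tokenData[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
        hidl_array<uint8_t, sizeof(tokenData)> token(tokenData);

        // 1. Persist the compiled model into two write-only cache files.
        Return<ErrorStatus> saveStatus = model->saveToCache(cacheW1, cacheW2, token);
        if (!saveStatus.isOk() ||
            static_cast<ErrorStatus>(saveStatus) != ErrorStatus::NONE) {
            return nullptr;
        }

        // 2. Recreate the prepared model from read-only handles to the same
        //    files; the security tests corrupt the cache contents first and
        //    expect GENERAL_FAILURE instead of a prepared model.
        sp<PreparedModelCallback> callback = new PreparedModelCallback();
        Return<ErrorStatus> launchStatus =
                device->prepareModelFromCache(cacheR1, cacheR2, token, callback);
        if (!launchStatus.isOk() ||
            static_cast<ErrorStatus>(launchStatus) != ErrorStatus::NONE) {
            return nullptr;
        }
        callback->wait();
        if (callback->getStatus() != ErrorStatus::NONE) return nullptr;
        return IPreparedModel::castFrom(callback->getPreparedModel())
                .withDefault(nullptr);
    }
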
Bug: 119616526
Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver
Test: VtsHalNeuralnetworksV1_xTargetTest with a test driver that can
read and write cache entries
Change-Id: Iae9211cb28ce972b29572dfedd45d1ade4dfdaf5
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index c2ecd9a..2e13854 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "GeneratedTestHarness.h"
#include "Callbacks.h"
#include "ExecutionBurstController.h"
#include "TestHarness.h"
@@ -364,6 +365,51 @@
kDefaultRtol, executor, measure, outputType);
}
+void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+ std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
+ if (testDynamicOutputShape) {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
+ } else {
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+ Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+ }
+}
+
static void getPreparedModel(sp<PreparedModelCallback> callback,
sp<V1_0::IPreparedModel>* preparedModel) {
*preparedModel = callback->getPreparedModel();
@@ -468,12 +514,8 @@
MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
}
-// TODO: Reduce code duplication.
-void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
- std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
- bool testDynamicOutputShape) {
- V1_2::Model model = create_model();
-
+void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
+ sp<V1_2::IPreparedModel>* preparedModel) {
// see if service can handle model
bool fullySupportsModel = false;
Return<void> supportedCall = device->getSupportedOperations_1_2(
@@ -496,12 +538,11 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<V1_2::IPreparedModel> preparedModel;
- getPreparedModel(preparedModelCallback, &preparedModel);
+ getPreparedModel(preparedModelCallback, preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel.get());
+ ASSERT_EQ(nullptr, preparedModel->get());
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"prepare model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
@@ -510,65 +551,18 @@
GTEST_SKIP();
}
EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel.get());
+ ASSERT_NE(nullptr, preparedModel->get());
+}
- if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::NO, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::YES, OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::NO, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::YES, OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::YES, OutputType::INSUFFICIENT);
- } else {
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::ASYNC,
- MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::SYNC,
- MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, is_ignored, examples,
- model.relaxComputationFloat32toFloat16, Executor::BURST,
- MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
- }
+// TODO: Reduce code duplication.
+void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+ bool testDynamicOutputShape) {
+ V1_2::Model model = create_model();
+ sp<V1_2::IPreparedModel> preparedModel = nullptr;
+ PrepareModel(device, model, &preparedModel);
+ EvaluatePreparedModel(preparedModel, is_ignored, examples,
+ model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
}
} // namespace generated_tests
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000..c7d2399
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
+#define VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
+
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+
+void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
+ sp<V1_2::IPreparedModel>* preparedModel);
+
+void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+ std::function<bool(int)> is_ignored,
+ const std::vector<MixedTypedExample>& examples,
+ bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
+
+void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
+
+void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples);
+
+void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
+ std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+ bool testDynamicOutputShape = false);
+
+} // namespace generated_tests
+
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index 55e5861..d1c7de3 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,13 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_0::IDevice>&, std::function<V1_0::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
-} // namespace generated_tests
-
namespace V1_0 {
namespace vts {
namespace functional {
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index d98ea04..4db1276 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,13 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
-} // namespace generated_tests
-
namespace V1_1 {
namespace vts {
namespace functional {
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
index 1df3218..e67ef8e 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,13 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&);
-} // namespace generated_tests
-
namespace V1_1 {
namespace vts {
namespace functional {
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 0cb9e16..510a0d5 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -46,6 +46,7 @@
defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
srcs: [
"BasicTests.cpp",
+ "CompilationCachingTests.cpp",
"GeneratedTests.cpp",
],
cflags: [
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 0eec365..2b88edd 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -72,6 +72,12 @@
EXPECT_TRUE(ret.isOk());
}
+// isCachingSupported test
+TEST_F(NeuralnetworksHidlTest, IsCachingSupported) {
+ Return<void> ret = device->isCachingSupported(
+ [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); });
+ EXPECT_TRUE(ret.isOk());
+}
} // namespace functional
} // namespace vts
} // namespace V1_2
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
new file mode 100644
index 0000000..454aa1f
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -0,0 +1,652 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+#include <cstdio>
+#include <cstdlib>
+#include <random>
+
+#include <gtest/gtest.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+namespace {
+
+// Generated in frameworks/ml/nn/runtime/tests/generated/: creates a HIDL model of mobilenet and its example data.
+#include "examples/mobilenet_224_gender_basic_fixed.example.cpp"
+#include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp"
+
+// Prevent the compiler from complaining about otherwise unused functions.
+[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
+[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
+
+enum class AccessMode { READ_ONLY, WRITE_ONLY };
+
+void createCacheHandle(const std::vector<std::string>& files, AccessMode mode,
+ hidl_handle* handle) {
+ std::vector<int> fds;
+ for (const auto& file : files) {
+ int fd;
+ if (mode == AccessMode::READ_ONLY) {
+ fd = open(file.c_str(), O_RDONLY);
+ } else if (mode == AccessMode::WRITE_ONLY) {
+ fd = open(file.c_str(), O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR);
+ } else {
+ FAIL();
+ }
+ ASSERT_GE(fd, 0);
+ fds.push_back(fd);
+ }
+ native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
+ ASSERT_NE(cacheNativeHandle, nullptr);
+ for (uint32_t i = 0; i < fds.size(); i++) {
+ cacheNativeHandle->data[i] = fds[i];
+ }
+ handle->setTo(cacheNativeHandle, /*shouldOwn=*/true);
+}
+
+} // namespace
+
+// Tag for the compilation caching tests.
+class CompilationCachingTest : public NeuralnetworksHidlTest {
+ protected:
+ void SetUp() override {
+ NeuralnetworksHidlTest::SetUp();
+
+ // Create cache directory.
+ char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
+ char* cacheDir = mkdtemp(cacheDirTemp);
+ ASSERT_NE(cacheDir, nullptr);
+ mCache1 = cacheDir + mCache1;
+ mCache2 = cacheDir + mCache2;
+ mCache3 = cacheDir + mCache3;
+
+ // Check if caching is supported.
+        bool isCachingSupported = false;
+ Return<void> ret = device->isCachingSupported(
+ [&isCachingSupported](ErrorStatus status, bool supported) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ isCachingSupported = supported;
+ });
+ EXPECT_TRUE(ret.isOk());
+ if (isCachingSupported) {
+ mIsCachingSupported = true;
+ } else {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
+ "support compilation caching.";
+ std::cout << "[ ] Early termination of test because vendor service does not "
+ "support compilation caching."
+ << std::endl;
+ mIsCachingSupported = false;
+ }
+
+ // Create empty cache files.
+ hidl_handle handle;
+ createCacheHandle({mCache1, mCache2, mCache3}, AccessMode::WRITE_ONLY, &handle);
+ }
+
+ void saveModelToCache(sp<IPreparedModel> preparedModel, const hidl_handle& cache1,
+ const hidl_handle& cache2, ErrorStatus* status) {
+ // Save IPreparedModel to cache.
+ hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
+ Return<ErrorStatus> saveToCacheStatus =
+ preparedModel->saveToCache(cache1, cache2, cacheToken);
+ ASSERT_TRUE(saveToCacheStatus.isOk());
+ *status = static_cast<ErrorStatus>(saveToCacheStatus);
+ }
+
+ bool checkEarlyTermination(ErrorStatus status) {
+ if (status == ErrorStatus::GENERAL_FAILURE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "save the prepared model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "save the prepared model that it does not support."
+ << std::endl;
+ return true;
+ }
+ return false;
+ }
+
+ void prepareModelFromCache(const hidl_handle& cache1, const hidl_handle& cache2,
+ sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
+ // Launch prepare model from cache.
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModelFromCache(cache1, cache2, cacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
+ *preparedModel = nullptr;
+ *status = static_cast<ErrorStatus>(prepareLaunchStatus);
+ return;
+ }
+
+ // Retrieve prepared model.
+ preparedModelCallback->wait();
+ *status = preparedModelCallback->getStatus();
+ *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+ .withDefault(nullptr);
+ }
+
+ std::string mCache1 = "/cache1";
+ std::string mCache2 = "/cache2";
+ std::string mCache3 = "/cache3";
+ uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
+ bool mIsCachingSupported;
+};
+
+TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (!mIsCachingSupported) {
+ EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ } else {
+ if (checkEarlyTermination(status)) return;
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ if (!mIsCachingSupported) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ ASSERT_NE(preparedModel, nullptr);
+ }
+ }
+
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+}
+
+TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (!mIsCachingSupported) {
+ EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ } else {
+ if (checkEarlyTermination(status)) return;
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ uint8_t dummyByte = 0;
+ // Advance offset by one byte.
+ ASSERT_GE(read(cache1.getNativeHandle()->data[0], &dummyByte, 1), 0);
+ ASSERT_GE(read(cache2.getNativeHandle()->data[0], &dummyByte, 1), 0);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ if (!mIsCachingSupported) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ ASSERT_NE(preparedModel, nullptr);
+ }
+ }
+
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; }, get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+}
+
+TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // cache1 with invalid NumFd.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1, mCache3}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ }
+
+ // cache2 with invalid NumFd.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2, mCache3}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ }
+}
+
+TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+ }
+
+ // cache1 with invalid NumFd.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1, mCache3}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+ }
+
+ // cache2 with invalid NumFd.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2, mCache3}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+ }
+}
+
+TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // cache1 with invalid access mode.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+
+ // cache2 with invalid access mode.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+}
+
+TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+ }
+
+ // cache1 with invalid access mode.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // cache2 with invalid access mode.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_F(CompilationCachingTest, SaveToCacheInvalidOffset) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // cache1 with invalid file descriptor offset.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ uint8_t dummyByte = 0;
+ // Advance offset by one byte.
+ ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+
+ // cache2 with invalid file descriptor offset.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ uint8_t dummyByte = 0;
+ // Advance offset by one byte.
+ ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+}
+
+TEST_F(CompilationCachingTest, SaveToCacheInvalidFileSize) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // cache1 with invalid file size.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ uint8_t dummyByte = 0;
+ // Write one byte and seek back to the beginning.
+ ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
+ ASSERT_EQ(lseek(cache1.getNativeHandle()->data[0], 0, SEEK_SET), 0);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+
+ // cache2 with invalid file size.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ uint8_t dummyByte = 0;
+ // Write one byte and seek back to the beginning.
+ ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
+ ASSERT_EQ(lseek(cache2.getNativeHandle()->data[0], 0, SEEK_SET), 0);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+}
+
+class CompilationCachingSecurityTest : public CompilationCachingTest,
+ public ::testing::WithParamInterface<uint32_t> {
+ protected:
+    void SetUp() override {
+ CompilationCachingTest::SetUp();
+ generator.seed(kSeed);
+ }
+
+ // Get a random integer within a closed range [lower, upper].
+ template <typename T>
+ T getRandomInt(T lower, T upper) {
+ std::uniform_int_distribution<T> dis(lower, upper);
+ return dis(generator);
+ }
+
+ const uint32_t kSeed = GetParam();
+ std::mt19937 generator;
+};
+
+TEST_P(CompilationCachingSecurityTest, CorruptedSecuritySensitiveCache) {
+ if (!mIsCachingSupported) return;
+
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (checkEarlyTermination(status)) return;
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+
+ // Randomly flip one single bit of the cache entry.
+ FILE* pFile = fopen(mCache1.c_str(), "r+");
+ ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+ long int fileSize = ftell(pFile);
+ ASSERT_GT(fileSize, 0);
+ ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+ int readByte = fgetc(pFile);
+ ASSERT_NE(readByte, EOF);
+ ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+ ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+ fclose(pFile);
+
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthSecuritySensitiveCache) {
+ if (!mIsCachingSupported) return;
+
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (checkEarlyTermination(status)) return;
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+
+ // Randomly append bytes to the cache entry.
+ FILE* pFile = fopen(mCache1.c_str(), "a");
+ uint32_t appendLength = getRandomInt(1, 256);
+ for (uint32_t i = 0; i < appendLength; i++) {
+ ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
+ }
+ fclose(pFile);
+
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongToken) {
+ if (!mIsCachingSupported) return;
+
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+ sp<IPreparedModel> preparedModel = nullptr;
+ generated_tests::PrepareModel(device, testModel, &preparedModel);
+ // Terminate early if the driver cannot prepare the model.
+ if (preparedModel == nullptr) return;
+
+ // Save the compilation to cache.
+ {
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
+ saveModelToCache(preparedModel, cache1, cache2, &status);
+ if (checkEarlyTermination(status)) return;
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ }
+
+ // Randomly flip one single bit in mToken.
+ uint32_t ind = getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
+ mToken[ind] ^= (1U << getRandomInt(0, 7));
+
+ // Retrieve the preparedModel from cache, expect failure.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_handle cache1, cache2;
+ createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
+ createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
+ ::testing::Range(0U, 10U));
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 4bc891f..2c3287a 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,14 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&,
- bool testDynamicOutputShape = false);
-} // namespace generated_tests
-
namespace V1_2 {
namespace vts {
namespace functional {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 956926a..990cab9 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,14 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&,
- bool testDynamicOutputShape = false);
-} // namespace generated_tests
-
namespace V1_2 {
namespace vts {
namespace functional {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index 425690f..fa6d54d 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -19,6 +19,7 @@
#include "VtsHalNeuralnetworks.h"
#include "Callbacks.h"
+#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include "Utils.h"
@@ -29,14 +30,6 @@
namespace android {
namespace hardware {
namespace neuralnetworks {
-
-namespace generated_tests {
-using ::test_helper::MixedTypedExample;
-extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
- std::function<bool(int)>, const std::vector<MixedTypedExample>&,
- bool testDynamicOutputShape = false);
-} // namespace generated_tests
-
namespace V1_2 {
namespace vts {
namespace functional {