Separates VTS tests by HAL version.
This prevents a fatal relocation error when linking VtsHalNeuralnetworksV1_2TargetTest
if the resulting test binary exceeds 1GB.
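The shared sources, static libraries, headers, and the arm32 ASAN workaround now live in
a single cc_defaults module, "VtsHalNeuralNetworksTargetTestDefaults", so each per-version
binary only lists the sources for the models it covers. As a rough sketch of the pattern
only (the module and file names below are hypothetical and are not added by this change),
any further compat target would follow the same shape:

  cc_test {
      // Hypothetical module name; follows the naming used by the targets in this change.
      name: "VtsHalNeuralnetworksV1_xCompatV1_yTargetTest",
      defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
      srcs: [
          "GeneratedTestsV1_y.cpp",   // would include the V1_y generated tests
          "ValidationTestsV1_y.cpp",  // would instantiate validation tests for the V1_y models
      ],
  }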
Test: VtsHalNeuralnetworksV1_2TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_1TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-all
Test: VtsHalNeuralnetworksV1_0TargetTest --hal_service_instance=android.hardware.neuralnetworks@1.0::IDevice/sample-all
Bug: 119135172
Change-Id: Id5cfdeddc2f0d810b4eb47084640ae1dbf297ea1
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index ffba45c..1b5e338 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -39,17 +39,14 @@
],
}
-cc_test {
- name: "VtsHalNeuralnetworksV1_0TargetTest",
+cc_defaults {
+ name: "VtsHalNeuralNetworksTargetTestDefaults",
+ defaults: ["VtsHalTargetTestDefaults"],
srcs: [
- "BasicTests.cpp",
- "GeneratedTests.cpp",
"ValidateModel.cpp",
"ValidateRequest.cpp",
- "ValidationTests.cpp",
"VtsHalNeuralnetworks.cpp",
],
- defaults: ["VtsHalTargetTestDefaults"],
static_libs: [
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
@@ -66,4 +63,23 @@
"libneuralnetworks_generated_test_harness_headers",
"libneuralnetworks_generated_tests",
],
+ // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
+ // error in ld.gold.
+ arch: {
+ arm: {
+ sanitize: {
+ never: true,
+ },
+ },
+ },
+}
+
+cc_test {
+ name: "VtsHalNeuralnetworksV1_0TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ srcs: [
+ "BasicTests.cpp",
+ "GeneratedTests.cpp",
+ "ValidationTests.cpp",
+ ],
}
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index a1c0f1f..df1ac67 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -14,40 +14,23 @@
// limitations under the License.
//
+// Tests for V1_0 models using the V1_1 HAL.
+cc_test {
+ name: "VtsHalNeuralnetworksV1_1CompatV1_0TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ srcs: [
+ "GeneratedTestsV1_0.cpp",
+ "ValidationTestsV1_0.cpp",
+ ],
+}
+
+// Tests for V1_1 models.
cc_test {
name: "VtsHalNeuralnetworksV1_1TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
- "ValidateModel.cpp",
- "ValidateRequest.cpp",
"ValidationTests.cpp",
- "VtsHalNeuralnetworks.cpp",
],
- defaults: ["VtsHalTargetTestDefaults"],
- static_libs: [
- "android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- "android.hardware.neuralnetworks@1.2",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
- "libgmock",
- "libhidlmemory",
- "libneuralnetworks_utils",
- "VtsHalNeuralnetworksTest_utils",
- ],
- header_libs: [
- "libneuralnetworks_headers",
- "libneuralnetworks_generated_test_harness_headers",
- "libneuralnetworks_generated_tests",
- ],
- // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
- // error in ld.gold.
- arch: {
- arm: {
- sanitize: {
- never: true,
- },
- },
- },
}
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 1f49904..d16f181 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -46,7 +46,6 @@
using ::test_helper::MixedTypedExample;
// in frameworks/ml/nn/runtime/tests/generated/
-#include "all_generated_V1_0_vts_tests.cpp"
#include "all_generated_V1_1_vts_tests.cpp"
} // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
new file mode 100644
index 0000000..e2acd7d
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_1::IDevice>&, std::function<V1_1::Model(void)>,
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+} // namespace generated_tests
+
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_0_vts_tests.cpp"
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.1/vts/functional/Models.h b/neuralnetworks/1.1/vts/functional/Models.h
index 62bc95e..57da010 100644
--- a/neuralnetworks/1.1/vts/functional/Models.h
+++ b/neuralnetworks/1.1/vts/functional/Models.h
@@ -34,193 +34,86 @@
using MixedTypedExample = test_helper::MixedTypedExample;
#define FOR_EACH_TEST_MODEL(FN) \
- FN(add) \
- FN(add_broadcast_quant8) \
- FN(add_quant8) \
FN(add_relaxed) \
- FN(avg_pool_float_1) \
FN(avg_pool_float_1_relaxed) \
- FN(avg_pool_float_2) \
FN(avg_pool_float_2_relaxed) \
- FN(avg_pool_float_3) \
FN(avg_pool_float_3_relaxed) \
- FN(avg_pool_float_4) \
FN(avg_pool_float_4_relaxed) \
- FN(avg_pool_float_5) \
FN(avg_pool_float_5_relaxed) \
- FN(avg_pool_quant8_1) \
- FN(avg_pool_quant8_2) \
- FN(avg_pool_quant8_3) \
- FN(avg_pool_quant8_4) \
- FN(avg_pool_quant8_5) \
FN(batch_to_space) \
FN(batch_to_space_float_1) \
FN(batch_to_space_float_1_relaxed) \
FN(batch_to_space_quant8_1) \
FN(batch_to_space_relaxed) \
- FN(concat_float_1) \
FN(concat_float_1_relaxed) \
- FN(concat_float_2) \
FN(concat_float_2_relaxed) \
- FN(concat_float_3) \
FN(concat_float_3_relaxed) \
- FN(concat_quant8_1) \
- FN(concat_quant8_2) \
- FN(concat_quant8_3) \
- FN(conv_1_h3_w2_SAME) \
FN(conv_1_h3_w2_SAME_relaxed) \
- FN(conv_1_h3_w2_VALID) \
FN(conv_1_h3_w2_VALID_relaxed) \
- FN(conv_3_h3_w2_SAME) \
FN(conv_3_h3_w2_SAME_relaxed) \
- FN(conv_3_h3_w2_VALID) \
FN(conv_3_h3_w2_VALID_relaxed) \
- FN(conv_float) \
- FN(conv_float_2) \
FN(conv_float_2_relaxed) \
- FN(conv_float_channels) \
FN(conv_float_channels_relaxed) \
- FN(conv_float_channels_weights_as_inputs) \
FN(conv_float_channels_weights_as_inputs_relaxed) \
- FN(conv_float_large) \
FN(conv_float_large_relaxed) \
- FN(conv_float_large_weights_as_inputs) \
FN(conv_float_large_weights_as_inputs_relaxed) \
FN(conv_float_relaxed) \
- FN(conv_float_weights_as_inputs) \
FN(conv_float_weights_as_inputs_relaxed) \
- FN(conv_quant8) \
- FN(conv_quant8_2) \
- FN(conv_quant8_channels) \
- FN(conv_quant8_channels_weights_as_inputs) \
- FN(conv_quant8_large) \
- FN(conv_quant8_large_weights_as_inputs) \
- FN(conv_quant8_overflow) \
- FN(conv_quant8_overflow_weights_as_inputs) \
- FN(conv_quant8_weights_as_inputs) \
- FN(depth_to_space_float_1) \
FN(depth_to_space_float_1_relaxed) \
- FN(depth_to_space_float_2) \
FN(depth_to_space_float_2_relaxed) \
- FN(depth_to_space_float_3) \
FN(depth_to_space_float_3_relaxed) \
- FN(depth_to_space_quant8_1) \
- FN(depth_to_space_quant8_2) \
- FN(depthwise_conv) \
- FN(depthwise_conv2d_float) \
- FN(depthwise_conv2d_float_2) \
FN(depthwise_conv2d_float_2_relaxed) \
- FN(depthwise_conv2d_float_large) \
- FN(depthwise_conv2d_float_large_2) \
FN(depthwise_conv2d_float_large_2_relaxed) \
- FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
FN(depthwise_conv2d_float_large_relaxed) \
- FN(depthwise_conv2d_float_large_weights_as_inputs) \
FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
FN(depthwise_conv2d_float_relaxed) \
- FN(depthwise_conv2d_float_weights_as_inputs) \
FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \
- FN(depthwise_conv2d_quant8) \
- FN(depthwise_conv2d_quant8_2) \
- FN(depthwise_conv2d_quant8_large) \
- FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
- FN(depthwise_conv2d_quant8_weights_as_inputs) \
FN(depthwise_conv_relaxed) \
- FN(dequantize) \
FN(dequantize_relaxed) \
FN(div) \
FN(div_broadcast_float) \
FN(div_broadcast_float_relaxed) \
FN(div_relaxed) \
- FN(embedding_lookup) \
FN(embedding_lookup_relaxed) \
- FN(floor) \
FN(floor_relaxed) \
- FN(fully_connected_float) \
- FN(fully_connected_float_2) \
FN(fully_connected_float_2_relaxed) \
FN(fully_connected_float_4d_simple) \
FN(fully_connected_float_4d_simple_relaxed) \
- FN(fully_connected_float_large) \
FN(fully_connected_float_large_relaxed) \
- FN(fully_connected_float_large_weights_as_inputs) \
FN(fully_connected_float_large_weights_as_inputs_relaxed) \
FN(fully_connected_float_relaxed) \
- FN(fully_connected_float_weights_as_inputs) \
FN(fully_connected_float_weights_as_inputs_relaxed) \
- FN(fully_connected_quant8) \
- FN(fully_connected_quant8_2) \
- FN(fully_connected_quant8_large) \
- FN(fully_connected_quant8_large_weights_as_inputs) \
- FN(fully_connected_quant8_weights_as_inputs) \
- FN(hashtable_lookup_float) \
FN(hashtable_lookup_float_relaxed) \
- FN(hashtable_lookup_quant8) \
- FN(l2_normalization) \
- FN(l2_normalization_2) \
FN(l2_normalization_2_relaxed) \
- FN(l2_normalization_large) \
FN(l2_normalization_large_relaxed) \
FN(l2_normalization_relaxed) \
- FN(l2_pool_float) \
- FN(l2_pool_float_2) \
FN(l2_pool_float_2_relaxed) \
- FN(l2_pool_float_large) \
FN(l2_pool_float_large_relaxed) \
FN(l2_pool_float_relaxed) \
- FN(local_response_norm_float_1) \
FN(local_response_norm_float_1_relaxed) \
- FN(local_response_norm_float_2) \
FN(local_response_norm_float_2_relaxed) \
- FN(local_response_norm_float_3) \
FN(local_response_norm_float_3_relaxed) \
- FN(local_response_norm_float_4) \
FN(local_response_norm_float_4_relaxed) \
- FN(logistic_float_1) \
FN(logistic_float_1_relaxed) \
- FN(logistic_float_2) \
FN(logistic_float_2_relaxed) \
- FN(logistic_quant8_1) \
- FN(logistic_quant8_2) \
- FN(lsh_projection) \
- FN(lsh_projection_2) \
FN(lsh_projection_2_relaxed) \
FN(lsh_projection_relaxed) \
- FN(lsh_projection_weights_as_inputs) \
FN(lsh_projection_weights_as_inputs_relaxed) \
- FN(lstm) \
- FN(lstm2) \
FN(lstm2_relaxed) \
- FN(lstm2_state) \
- FN(lstm2_state2) \
FN(lstm2_state2_relaxed) \
FN(lstm2_state_relaxed) \
- FN(lstm3) \
FN(lstm3_relaxed) \
- FN(lstm3_state) \
- FN(lstm3_state2) \
FN(lstm3_state2_relaxed) \
- FN(lstm3_state3) \
FN(lstm3_state3_relaxed) \
FN(lstm3_state_relaxed) \
FN(lstm_relaxed) \
- FN(lstm_state) \
- FN(lstm_state2) \
FN(lstm_state2_relaxed) \
FN(lstm_state_relaxed) \
- FN(max_pool_float_1) \
FN(max_pool_float_1_relaxed) \
- FN(max_pool_float_2) \
FN(max_pool_float_2_relaxed) \
- FN(max_pool_float_3) \
FN(max_pool_float_3_relaxed) \
- FN(max_pool_float_4) \
FN(max_pool_float_4_relaxed) \
- FN(max_pool_quant8_1) \
- FN(max_pool_quant8_2) \
- FN(max_pool_quant8_3) \
- FN(max_pool_quant8_4) \
FN(mean) \
FN(mean_float_1) \
FN(mean_float_1_relaxed) \
@@ -229,57 +122,27 @@
FN(mean_quant8_1) \
FN(mean_quant8_2) \
FN(mean_relaxed) \
- FN(mobilenet_224_gender_basic_fixed) \
FN(mobilenet_224_gender_basic_fixed_relaxed) \
- FN(mobilenet_quantized) \
- FN(mul) \
- FN(mul_broadcast_quant8) \
- FN(mul_quant8) \
FN(mul_relaxed) \
- FN(mul_relu) \
FN(mul_relu_relaxed) \
FN(pad) \
FN(pad_float_1) \
FN(pad_float_1_relaxed) \
FN(pad_relaxed) \
- FN(relu1_float_1) \
FN(relu1_float_1_relaxed) \
- FN(relu1_float_2) \
FN(relu1_float_2_relaxed) \
- FN(relu1_quant8_1) \
- FN(relu1_quant8_2) \
- FN(relu6_float_1) \
FN(relu6_float_1_relaxed) \
- FN(relu6_float_2) \
FN(relu6_float_2_relaxed) \
- FN(relu6_quant8_1) \
- FN(relu6_quant8_2) \
- FN(relu_float_1) \
FN(relu_float_1_relaxed) \
- FN(relu_float_2) \
FN(relu_float_2_relaxed) \
- FN(relu_quant8_1) \
- FN(relu_quant8_2) \
- FN(reshape) \
- FN(reshape_quant8) \
- FN(reshape_quant8_weights_as_inputs) \
FN(reshape_relaxed) \
- FN(reshape_weights_as_inputs) \
FN(reshape_weights_as_inputs_relaxed) \
- FN(resize_bilinear) \
- FN(resize_bilinear_2) \
FN(resize_bilinear_2_relaxed) \
FN(resize_bilinear_relaxed) \
- FN(rnn) \
FN(rnn_relaxed) \
- FN(rnn_state) \
FN(rnn_state_relaxed) \
- FN(softmax_float_1) \
FN(softmax_float_1_relaxed) \
- FN(softmax_float_2) \
FN(softmax_float_2_relaxed) \
- FN(softmax_quant8_1) \
- FN(softmax_quant8_2) \
FN(space_to_batch) \
FN(space_to_batch_float_1) \
FN(space_to_batch_float_1_relaxed) \
@@ -291,14 +154,9 @@
FN(space_to_batch_quant8_2) \
FN(space_to_batch_quant8_3) \
FN(space_to_batch_relaxed) \
- FN(space_to_depth_float_1) \
FN(space_to_depth_float_1_relaxed) \
- FN(space_to_depth_float_2) \
FN(space_to_depth_float_2_relaxed) \
- FN(space_to_depth_float_3) \
FN(space_to_depth_float_3_relaxed) \
- FN(space_to_depth_quant8_1) \
- FN(space_to_depth_quant8_2) \
FN(squeeze) \
FN(squeeze_float_1) \
FN(squeeze_float_1_relaxed) \
@@ -343,13 +201,9 @@
FN(sub_broadcast_float) \
FN(sub_broadcast_float_relaxed) \
FN(sub_relaxed) \
- FN(svdf) \
- FN(svdf2) \
FN(svdf2_relaxed) \
FN(svdf_relaxed) \
- FN(svdf_state) \
FN(svdf_state_relaxed) \
- FN(tanh) \
FN(tanh_relaxed) \
FN(transpose) \
FN(transpose_float_1) \
diff --git a/neuralnetworks/1.1/vts/functional/ModelsV1_0.h b/neuralnetworks/1.1/vts/functional/ModelsV1_0.h
new file mode 100644
index 0000000..52c0346
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ModelsV1_0.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_V1_0_H
+#define VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_V1_0_H
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+using MixedTypedExample = test_helper::MixedTypedExample;
+
+#define FOR_EACH_TEST_MODEL(FN) \
+ FN(add_broadcast_quant8) \
+ FN(add) \
+ FN(add_quant8) \
+ FN(avg_pool_float_1) \
+ FN(avg_pool_float_2) \
+ FN(avg_pool_float_3) \
+ FN(avg_pool_float_4) \
+ FN(avg_pool_float_5) \
+ FN(avg_pool_quant8_1) \
+ FN(avg_pool_quant8_2) \
+ FN(avg_pool_quant8_3) \
+ FN(avg_pool_quant8_4) \
+ FN(avg_pool_quant8_5) \
+ FN(concat_float_1) \
+ FN(concat_float_2) \
+ FN(concat_float_3) \
+ FN(concat_quant8_1) \
+ FN(concat_quant8_2) \
+ FN(concat_quant8_3) \
+ FN(conv_1_h3_w2_SAME) \
+ FN(conv_1_h3_w2_VALID) \
+ FN(conv_3_h3_w2_SAME) \
+ FN(conv_3_h3_w2_VALID) \
+ FN(conv_float_2) \
+ FN(conv_float_channels) \
+ FN(conv_float_channels_weights_as_inputs) \
+ FN(conv_float_large) \
+ FN(conv_float_large_weights_as_inputs) \
+ FN(conv_float) \
+ FN(conv_float_weights_as_inputs) \
+ FN(conv_quant8_2) \
+ FN(conv_quant8_channels) \
+ FN(conv_quant8_channels_weights_as_inputs) \
+ FN(conv_quant8_large) \
+ FN(conv_quant8_large_weights_as_inputs) \
+ FN(conv_quant8) \
+ FN(conv_quant8_overflow) \
+ FN(conv_quant8_overflow_weights_as_inputs) \
+ FN(conv_quant8_weights_as_inputs) \
+ FN(depth_to_space_float_1) \
+ FN(depth_to_space_float_2) \
+ FN(depth_to_space_float_3) \
+ FN(depth_to_space_quant8_1) \
+ FN(depth_to_space_quant8_2) \
+ FN(depthwise_conv2d_float_2) \
+ FN(depthwise_conv2d_float_large_2) \
+ FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
+ FN(depthwise_conv2d_float_large) \
+ FN(depthwise_conv2d_float_large_weights_as_inputs) \
+ FN(depthwise_conv2d_float) \
+ FN(depthwise_conv2d_float_weights_as_inputs) \
+ FN(depthwise_conv2d_quant8_2) \
+ FN(depthwise_conv2d_quant8_large) \
+ FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
+ FN(depthwise_conv2d_quant8) \
+ FN(depthwise_conv2d_quant8_weights_as_inputs) \
+ FN(depthwise_conv) \
+ FN(dequantize) \
+ FN(embedding_lookup) \
+ FN(floor) \
+ FN(fully_connected_float_2) \
+ FN(fully_connected_float_large) \
+ FN(fully_connected_float_large_weights_as_inputs) \
+ FN(fully_connected_float) \
+ FN(fully_connected_float_weights_as_inputs) \
+ FN(fully_connected_quant8_2) \
+ FN(fully_connected_quant8_large) \
+ FN(fully_connected_quant8_large_weights_as_inputs) \
+ FN(fully_connected_quant8) \
+ FN(fully_connected_quant8_weights_as_inputs) \
+ FN(hashtable_lookup_float) \
+ FN(hashtable_lookup_quant8) \
+ FN(l2_normalization_2) \
+ FN(l2_normalization_large) \
+ FN(l2_normalization) \
+ FN(l2_pool_float_2) \
+ FN(l2_pool_float_large) \
+ FN(l2_pool_float) \
+ FN(local_response_norm_float_1) \
+ FN(local_response_norm_float_2) \
+ FN(local_response_norm_float_3) \
+ FN(local_response_norm_float_4) \
+ FN(logistic_float_1) \
+ FN(logistic_float_2) \
+ FN(logistic_quant8_1) \
+ FN(logistic_quant8_2) \
+ FN(lsh_projection_2) \
+ FN(lsh_projection) \
+ FN(lsh_projection_weights_as_inputs) \
+ FN(lstm2) \
+ FN(lstm2_state2) \
+ FN(lstm2_state) \
+ FN(lstm3) \
+ FN(lstm3_state2) \
+ FN(lstm3_state3) \
+ FN(lstm3_state) \
+ FN(lstm) \
+ FN(lstm_state2) \
+ FN(lstm_state) \
+ FN(max_pool_float_1) \
+ FN(max_pool_float_2) \
+ FN(max_pool_float_3) \
+ FN(max_pool_float_4) \
+ FN(max_pool_quant8_1) \
+ FN(max_pool_quant8_2) \
+ FN(max_pool_quant8_3) \
+ FN(max_pool_quant8_4) \
+ FN(mobilenet_224_gender_basic_fixed) \
+ FN(mobilenet_quantized) \
+ FN(mul_broadcast_quant8) \
+ FN(mul) \
+ FN(mul_quant8) \
+ FN(mul_relu) \
+ FN(relu1_float_1) \
+ FN(relu1_float_2) \
+ FN(relu1_quant8_1) \
+ FN(relu1_quant8_2) \
+ FN(relu6_float_1) \
+ FN(relu6_float_2) \
+ FN(relu6_quant8_1) \
+ FN(relu6_quant8_2) \
+ FN(relu_float_1) \
+ FN(relu_float_2) \
+ FN(relu_quant8_1) \
+ FN(relu_quant8_2) \
+ FN(reshape) \
+ FN(reshape_quant8) \
+ FN(reshape_quant8_weights_as_inputs) \
+ FN(reshape_weights_as_inputs) \
+ FN(resize_bilinear_2) \
+ FN(resize_bilinear) \
+ FN(rnn) \
+ FN(rnn_state) \
+ FN(softmax_float_1) \
+ FN(softmax_float_2) \
+ FN(softmax_quant8_1) \
+ FN(softmax_quant8_2) \
+ FN(space_to_depth_float_1) \
+ FN(space_to_depth_float_2) \
+ FN(space_to_depth_float_3) \
+ FN(space_to_depth_quant8_1) \
+ FN(space_to_depth_quant8_2) \
+ FN(svdf2) \
+ FN(svdf) \
+ FN(svdf_state) \
+ FN(tanh)
+
+#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
+ namespace function { \
+ extern std::vector<MixedTypedExample> examples; \
+ Model createTestModel(); \
+ }
+
+FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
+
+#undef FORWARD_DECLARE_GENERATED_OBJECTS
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_V1_1_VTS_FUNCTIONAL_MODELS_V1_0_H
diff --git a/neuralnetworks/1.1/vts/functional/ValidationTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/ValidationTestsV1_0.cpp
new file mode 100644
index 0000000..7e2af05
--- /dev/null
+++ b/neuralnetworks/1.1/vts/functional/ValidationTestsV1_0.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "ModelsV1_0.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_1 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName) \
+ TEST_F(ValidationTest, TestName) { \
+ const Model model = TestName::createTestModel(); \
+ const std::vector<Request> requests = createRequests(TestName::examples); \
+ validateModel(model); \
+ validateRequests(model, requests); \
+ }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_1
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 09d0dc3..087c12f 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -14,40 +14,33 @@
// limitations under the License.
//
+// Tests for V1_0 models using the V1_2 HAL.
+cc_test {
+ name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ srcs: [
+ "GeneratedTestsV1_0.cpp",
+ "ValidationTestsV1_0.cpp",
+    ],
+}
+
+// Tests for V1_1 models using the V1_2 HAL.
+cc_test {
+ name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+ srcs: [
+ "GeneratedTestsV1_1.cpp",
+ "ValidationTestsV1_1.cpp",
+ ],
+}
+
+// Tests for V1_2 models.
cc_test {
name: "VtsHalNeuralnetworksV1_2TargetTest",
+ defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
srcs: [
"BasicTests.cpp",
"GeneratedTests.cpp",
- "ValidateModel.cpp",
- "ValidateRequest.cpp",
"ValidationTests.cpp",
- "VtsHalNeuralnetworks.cpp",
],
- defaults: ["VtsHalTargetTestDefaults"],
- static_libs: [
- "android.hardware.neuralnetworks@1.0",
- "android.hardware.neuralnetworks@1.1",
- "android.hardware.neuralnetworks@1.2",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
- "libgmock",
- "libhidlmemory",
- "libneuralnetworks_utils",
- "VtsHalNeuralnetworksTest_utils",
- ],
- header_libs: [
- "libneuralnetworks_headers",
- "libneuralnetworks_generated_test_harness_headers",
- "libneuralnetworks_generated_tests",
- ],
- // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal
- // error in ld.gold.
- arch: {
- arm: {
- sanitize: {
- never: true,
- },
- },
- },
}
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index e87fa6b..0608139 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -46,8 +46,6 @@
using ::test_helper::MixedTypedExample;
// in frameworks/ml/nn/runtime/tests/generated/
-#include "all_generated_V1_0_vts_tests.cpp"
-#include "all_generated_V1_1_vts_tests.cpp"
#include "all_generated_V1_2_vts_tests.cpp"
} // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
new file mode 100644
index 0000000..8d685d1
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+} // namespace generated_tests
+
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_0_vts_tests.cpp"
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
new file mode 100644
index 0000000..8dbb586
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+
+namespace generated_tests {
+using ::test_helper::MixedTypedExample;
+extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>,
+ std::function<bool(int)>, const std::vector<MixedTypedExample>&);
+} // namespace generated_tests
+
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::nn::allocateSharedMemory;
+using ::test_helper::MixedTypedExample;
+
+// in frameworks/ml/nn/runtime/tests/generated/
+#include "all_generated_V1_1_vts_tests.cpp"
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/Models.h b/neuralnetworks/1.2/vts/functional/Models.h
index 2d512fe..2426ad0 100644
--- a/neuralnetworks/1.2/vts/functional/Models.h
+++ b/neuralnetworks/1.2/vts/functional/Models.h
@@ -34,330 +34,7 @@
using MixedTypedExample = test_helper::MixedTypedExample;
-#define FOR_EACH_TEST_MODEL(FN) \
- FN(add) \
- FN(add_broadcast_quant8) \
- FN(add_quant8) \
- FN(add_relaxed) \
- FN(avg_pool_float_1) \
- FN(avg_pool_float_1_relaxed) \
- FN(avg_pool_float_2) \
- FN(avg_pool_float_2_relaxed) \
- FN(avg_pool_float_3) \
- FN(avg_pool_float_3_relaxed) \
- FN(avg_pool_float_4) \
- FN(avg_pool_float_4_relaxed) \
- FN(avg_pool_float_5) \
- FN(avg_pool_float_5_relaxed) \
- FN(avg_pool_quant8_1) \
- FN(avg_pool_quant8_2) \
- FN(avg_pool_quant8_3) \
- FN(avg_pool_quant8_4) \
- FN(avg_pool_quant8_5) \
- FN(batch_to_space) \
- FN(batch_to_space_float_1) \
- FN(batch_to_space_float_1_relaxed) \
- FN(batch_to_space_quant8_1) \
- FN(batch_to_space_relaxed) \
- FN(concat_float_1) \
- FN(concat_float_1_relaxed) \
- FN(concat_float_2) \
- FN(concat_float_2_relaxed) \
- FN(concat_float_3) \
- FN(concat_float_3_relaxed) \
- FN(concat_quant8_1) \
- FN(concat_quant8_2) \
- FN(concat_quant8_3) \
- FN(conv_1_h3_w2_SAME) \
- FN(conv_1_h3_w2_SAME_relaxed) \
- FN(conv_1_h3_w2_VALID) \
- FN(conv_1_h3_w2_VALID_relaxed) \
- FN(conv_3_h3_w2_SAME) \
- FN(conv_3_h3_w2_SAME_relaxed) \
- FN(conv_3_h3_w2_VALID) \
- FN(conv_3_h3_w2_VALID_relaxed) \
- FN(conv_float) \
- FN(conv_float_2) \
- FN(conv_float_2_relaxed) \
- FN(conv_float_channels) \
- FN(conv_float_channels_relaxed) \
- FN(conv_float_channels_weights_as_inputs) \
- FN(conv_float_channels_weights_as_inputs_relaxed) \
- FN(conv_float_large) \
- FN(conv_float_large_relaxed) \
- FN(conv_float_large_weights_as_inputs) \
- FN(conv_float_large_weights_as_inputs_relaxed) \
- FN(conv_float_relaxed) \
- FN(conv_float_weights_as_inputs) \
- FN(conv_float_weights_as_inputs_relaxed) \
- FN(conv_quant8) \
- FN(conv_quant8_2) \
- FN(conv_quant8_channels) \
- FN(conv_quant8_channels_weights_as_inputs) \
- FN(conv_quant8_large) \
- FN(conv_quant8_large_weights_as_inputs) \
- FN(conv_quant8_overflow) \
- FN(conv_quant8_overflow_weights_as_inputs) \
- FN(conv_quant8_weights_as_inputs) \
- FN(depth_to_space_float_1) \
- FN(depth_to_space_float_1_relaxed) \
- FN(depth_to_space_float_2) \
- FN(depth_to_space_float_2_relaxed) \
- FN(depth_to_space_float_3) \
- FN(depth_to_space_float_3_relaxed) \
- FN(depth_to_space_quant8_1) \
- FN(depth_to_space_quant8_2) \
- FN(depthwise_conv) \
- FN(depthwise_conv2d_float) \
- FN(depthwise_conv2d_float_2) \
- FN(depthwise_conv2d_float_2_relaxed) \
- FN(depthwise_conv2d_float_large) \
- FN(depthwise_conv2d_float_large_2) \
- FN(depthwise_conv2d_float_large_2_relaxed) \
- FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
- FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
- FN(depthwise_conv2d_float_large_relaxed) \
- FN(depthwise_conv2d_float_large_weights_as_inputs) \
- FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
- FN(depthwise_conv2d_float_relaxed) \
- FN(depthwise_conv2d_float_weights_as_inputs) \
- FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \
- FN(depthwise_conv2d_quant8) \
- FN(depthwise_conv2d_quant8_2) \
- FN(depthwise_conv2d_quant8_large) \
- FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
- FN(depthwise_conv2d_quant8_weights_as_inputs) \
- FN(depthwise_conv_relaxed) \
- FN(dequantize) \
- FN(dequantize_relaxed) \
- FN(div) \
- FN(div_broadcast_float) \
- FN(div_broadcast_float_relaxed) \
- FN(div_relaxed) \
- FN(embedding_lookup) \
- FN(embedding_lookup_relaxed) \
- FN(floor) \
- FN(floor_relaxed) \
- FN(fully_connected_float) \
- FN(fully_connected_float_2) \
- FN(fully_connected_float_2_relaxed) \
- FN(fully_connected_float_4d_simple) \
- FN(fully_connected_float_4d_simple_relaxed) \
- FN(fully_connected_float_large) \
- FN(fully_connected_float_large_relaxed) \
- FN(fully_connected_float_large_weights_as_inputs) \
- FN(fully_connected_float_large_weights_as_inputs_relaxed) \
- FN(fully_connected_float_relaxed) \
- FN(fully_connected_float_weights_as_inputs) \
- FN(fully_connected_float_weights_as_inputs_relaxed) \
- FN(fully_connected_quant8) \
- FN(fully_connected_quant8_2) \
- FN(fully_connected_quant8_large) \
- FN(fully_connected_quant8_large_weights_as_inputs) \
- FN(fully_connected_quant8_weights_as_inputs) \
- FN(hashtable_lookup_float) \
- FN(hashtable_lookup_float_relaxed) \
- FN(hashtable_lookup_quant8) \
- FN(l2_normalization) \
- FN(l2_normalization_2) \
- FN(l2_normalization_2_relaxed) \
- FN(l2_normalization_large) \
- FN(l2_normalization_large_relaxed) \
- FN(l2_normalization_relaxed) \
- FN(l2_pool_float) \
- FN(l2_pool_float_2) \
- FN(l2_pool_float_2_relaxed) \
- FN(l2_pool_float_large) \
- FN(l2_pool_float_large_relaxed) \
- FN(l2_pool_float_relaxed) \
- FN(local_response_norm_float_1) \
- FN(local_response_norm_float_1_relaxed) \
- FN(local_response_norm_float_2) \
- FN(local_response_norm_float_2_relaxed) \
- FN(local_response_norm_float_3) \
- FN(local_response_norm_float_3_relaxed) \
- FN(local_response_norm_float_4) \
- FN(local_response_norm_float_4_relaxed) \
- FN(logistic_float_1) \
- FN(logistic_float_1_relaxed) \
- FN(logistic_float_2) \
- FN(logistic_float_2_relaxed) \
- FN(logistic_quant8_1) \
- FN(logistic_quant8_2) \
- FN(lsh_projection) \
- FN(lsh_projection_2) \
- FN(lsh_projection_2_relaxed) \
- FN(lsh_projection_relaxed) \
- FN(lsh_projection_weights_as_inputs) \
- FN(lsh_projection_weights_as_inputs_relaxed) \
- FN(lstm) \
- FN(lstm2) \
- FN(lstm2_relaxed) \
- FN(lstm2_state) \
- FN(lstm2_state2) \
- FN(lstm2_state2_relaxed) \
- FN(lstm2_state_relaxed) \
- FN(lstm3) \
- FN(lstm3_relaxed) \
- FN(lstm3_state) \
- FN(lstm3_state2) \
- FN(lstm3_state2_relaxed) \
- FN(lstm3_state3) \
- FN(lstm3_state3_relaxed) \
- FN(lstm3_state_relaxed) \
- FN(lstm_relaxed) \
- FN(lstm_state) \
- FN(lstm_state2) \
- FN(lstm_state2_relaxed) \
- FN(lstm_state_relaxed) \
- FN(max_pool_float_1) \
- FN(max_pool_float_1_relaxed) \
- FN(max_pool_float_2) \
- FN(max_pool_float_2_relaxed) \
- FN(max_pool_float_3) \
- FN(max_pool_float_3_relaxed) \
- FN(max_pool_float_4) \
- FN(max_pool_float_4_relaxed) \
- FN(max_pool_quant8_1) \
- FN(max_pool_quant8_2) \
- FN(max_pool_quant8_3) \
- FN(max_pool_quant8_4) \
- FN(mean) \
- FN(mean_float_1) \
- FN(mean_float_1_relaxed) \
- FN(mean_float_2) \
- FN(mean_float_2_relaxed) \
- FN(mean_quant8_1) \
- FN(mean_quant8_2) \
- FN(mean_relaxed) \
- FN(mobilenet_224_gender_basic_fixed) \
- FN(mobilenet_224_gender_basic_fixed_relaxed) \
- FN(mobilenet_quantized) \
- FN(mul) \
- FN(mul_broadcast_quant8) \
- FN(mul_quant8) \
- FN(mul_relaxed) \
- FN(mul_relu) \
- FN(mul_relu_relaxed) \
- FN(pad) \
- FN(pad_float_1) \
- FN(pad_float_1_relaxed) \
- FN(pad_relaxed) \
- FN(random_multinomial) \
- FN(relu1_float_1) \
- FN(relu1_float_1_relaxed) \
- FN(relu1_float_2) \
- FN(relu1_float_2_relaxed) \
- FN(relu1_quant8_1) \
- FN(relu1_quant8_2) \
- FN(relu6_float_1) \
- FN(relu6_float_1_relaxed) \
- FN(relu6_float_2) \
- FN(relu6_float_2_relaxed) \
- FN(relu6_quant8_1) \
- FN(relu6_quant8_2) \
- FN(relu_float_1) \
- FN(relu_float_1_relaxed) \
- FN(relu_float_2) \
- FN(relu_float_2_relaxed) \
- FN(relu_quant8_1) \
- FN(relu_quant8_2) \
- FN(reshape) \
- FN(reshape_quant8) \
- FN(reshape_quant8_weights_as_inputs) \
- FN(reshape_relaxed) \
- FN(reshape_weights_as_inputs) \
- FN(reshape_weights_as_inputs_relaxed) \
- FN(resize_bilinear) \
- FN(resize_bilinear_2) \
- FN(resize_bilinear_2_relaxed) \
- FN(resize_bilinear_relaxed) \
- FN(rnn) \
- FN(rnn_relaxed) \
- FN(rnn_state) \
- FN(rnn_state_relaxed) \
- FN(softmax_float_1) \
- FN(softmax_float_1_relaxed) \
- FN(softmax_float_2) \
- FN(softmax_float_2_relaxed) \
- FN(softmax_quant8_1) \
- FN(softmax_quant8_2) \
- FN(space_to_batch) \
- FN(space_to_batch_float_1) \
- FN(space_to_batch_float_1_relaxed) \
- FN(space_to_batch_float_2) \
- FN(space_to_batch_float_2_relaxed) \
- FN(space_to_batch_float_3) \
- FN(space_to_batch_float_3_relaxed) \
- FN(space_to_batch_quant8_1) \
- FN(space_to_batch_quant8_2) \
- FN(space_to_batch_quant8_3) \
- FN(space_to_batch_relaxed) \
- FN(space_to_depth_float_1) \
- FN(space_to_depth_float_1_relaxed) \
- FN(space_to_depth_float_2) \
- FN(space_to_depth_float_2_relaxed) \
- FN(space_to_depth_float_3) \
- FN(space_to_depth_float_3_relaxed) \
- FN(space_to_depth_quant8_1) \
- FN(space_to_depth_quant8_2) \
- FN(squeeze) \
- FN(squeeze_float_1) \
- FN(squeeze_float_1_relaxed) \
- FN(squeeze_quant8_1) \
- FN(squeeze_relaxed) \
- FN(strided_slice) \
- FN(strided_slice_float_1) \
- FN(strided_slice_float_10) \
- FN(strided_slice_float_10_relaxed) \
- FN(strided_slice_float_11) \
- FN(strided_slice_float_11_relaxed) \
- FN(strided_slice_float_1_relaxed) \
- FN(strided_slice_float_2) \
- FN(strided_slice_float_2_relaxed) \
- FN(strided_slice_float_3) \
- FN(strided_slice_float_3_relaxed) \
- FN(strided_slice_float_4) \
- FN(strided_slice_float_4_relaxed) \
- FN(strided_slice_float_5) \
- FN(strided_slice_float_5_relaxed) \
- FN(strided_slice_float_6) \
- FN(strided_slice_float_6_relaxed) \
- FN(strided_slice_float_7) \
- FN(strided_slice_float_7_relaxed) \
- FN(strided_slice_float_8) \
- FN(strided_slice_float_8_relaxed) \
- FN(strided_slice_float_9) \
- FN(strided_slice_float_9_relaxed) \
- FN(strided_slice_qaunt8_10) \
- FN(strided_slice_qaunt8_11) \
- FN(strided_slice_quant8_1) \
- FN(strided_slice_quant8_2) \
- FN(strided_slice_quant8_3) \
- FN(strided_slice_quant8_4) \
- FN(strided_slice_quant8_5) \
- FN(strided_slice_quant8_6) \
- FN(strided_slice_quant8_7) \
- FN(strided_slice_quant8_8) \
- FN(strided_slice_quant8_9) \
- FN(strided_slice_relaxed) \
- FN(sub) \
- FN(sub_broadcast_float) \
- FN(sub_broadcast_float_relaxed) \
- FN(sub_relaxed) \
- FN(svdf) \
- FN(svdf2) \
- FN(svdf2_relaxed) \
- FN(svdf_relaxed) \
- FN(svdf_state) \
- FN(svdf_state_relaxed) \
- FN(tanh) \
- FN(tanh_relaxed) \
- FN(transpose) \
- FN(transpose_float_1) \
- FN(transpose_float_1_relaxed) \
- FN(transpose_quant8_1) \
- FN(transpose_relaxed)
+#define FOR_EACH_TEST_MODEL(FN) FN(random_multinomial)
#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
namespace function { \
diff --git a/neuralnetworks/1.2/vts/functional/ModelsV1_0.h b/neuralnetworks/1.2/vts/functional/ModelsV1_0.h
new file mode 100644
index 0000000..e81e64b
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ModelsV1_0.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_0_H
+#define VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_0_H
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using MixedTypedExample = test_helper::MixedTypedExample;
+
+#define FOR_EACH_TEST_MODEL(FN) \
+ FN(add_broadcast_quant8) \
+ FN(add) \
+ FN(add_quant8) \
+ FN(avg_pool_float_1) \
+ FN(avg_pool_float_2) \
+ FN(avg_pool_float_3) \
+ FN(avg_pool_float_4) \
+ FN(avg_pool_float_5) \
+ FN(avg_pool_quant8_1) \
+ FN(avg_pool_quant8_2) \
+ FN(avg_pool_quant8_3) \
+ FN(avg_pool_quant8_4) \
+ FN(avg_pool_quant8_5) \
+ FN(concat_float_1) \
+ FN(concat_float_2) \
+ FN(concat_float_3) \
+ FN(concat_quant8_1) \
+ FN(concat_quant8_2) \
+ FN(concat_quant8_3) \
+ FN(conv_1_h3_w2_SAME) \
+ FN(conv_1_h3_w2_VALID) \
+ FN(conv_3_h3_w2_SAME) \
+ FN(conv_3_h3_w2_VALID) \
+ FN(conv_float_2) \
+ FN(conv_float_channels) \
+ FN(conv_float_channels_weights_as_inputs) \
+ FN(conv_float_large) \
+ FN(conv_float_large_weights_as_inputs) \
+ FN(conv_float) \
+ FN(conv_float_weights_as_inputs) \
+ FN(conv_quant8_2) \
+ FN(conv_quant8_channels) \
+ FN(conv_quant8_channels_weights_as_inputs) \
+ FN(conv_quant8_large) \
+ FN(conv_quant8_large_weights_as_inputs) \
+ FN(conv_quant8) \
+ FN(conv_quant8_overflow) \
+ FN(conv_quant8_overflow_weights_as_inputs) \
+ FN(conv_quant8_weights_as_inputs) \
+ FN(depth_to_space_float_1) \
+ FN(depth_to_space_float_2) \
+ FN(depth_to_space_float_3) \
+ FN(depth_to_space_quant8_1) \
+ FN(depth_to_space_quant8_2) \
+ FN(depthwise_conv2d_float_2) \
+ FN(depthwise_conv2d_float_large_2) \
+ FN(depthwise_conv2d_float_large_2_weights_as_inputs) \
+ FN(depthwise_conv2d_float_large) \
+ FN(depthwise_conv2d_float_large_weights_as_inputs) \
+ FN(depthwise_conv2d_float) \
+ FN(depthwise_conv2d_float_weights_as_inputs) \
+ FN(depthwise_conv2d_quant8_2) \
+ FN(depthwise_conv2d_quant8_large) \
+ FN(depthwise_conv2d_quant8_large_weights_as_inputs) \
+ FN(depthwise_conv2d_quant8) \
+ FN(depthwise_conv2d_quant8_weights_as_inputs) \
+ FN(depthwise_conv) \
+ FN(dequantize) \
+ FN(embedding_lookup) \
+ FN(floor) \
+ FN(fully_connected_float_2) \
+ FN(fully_connected_float_large) \
+ FN(fully_connected_float_large_weights_as_inputs) \
+ FN(fully_connected_float) \
+ FN(fully_connected_float_weights_as_inputs) \
+ FN(fully_connected_quant8_2) \
+ FN(fully_connected_quant8_large) \
+ FN(fully_connected_quant8_large_weights_as_inputs) \
+ FN(fully_connected_quant8) \
+ FN(fully_connected_quant8_weights_as_inputs) \
+ FN(hashtable_lookup_float) \
+ FN(hashtable_lookup_quant8) \
+ FN(l2_normalization_2) \
+ FN(l2_normalization_large) \
+ FN(l2_normalization) \
+ FN(l2_pool_float_2) \
+ FN(l2_pool_float_large) \
+ FN(l2_pool_float) \
+ FN(local_response_norm_float_1) \
+ FN(local_response_norm_float_2) \
+ FN(local_response_norm_float_3) \
+ FN(local_response_norm_float_4) \
+ FN(logistic_float_1) \
+ FN(logistic_float_2) \
+ FN(logistic_quant8_1) \
+ FN(logistic_quant8_2) \
+ FN(lsh_projection_2) \
+ FN(lsh_projection) \
+ FN(lsh_projection_weights_as_inputs) \
+ FN(lstm2) \
+ FN(lstm2_state2) \
+ FN(lstm2_state) \
+ FN(lstm3) \
+ FN(lstm3_state2) \
+ FN(lstm3_state3) \
+ FN(lstm3_state) \
+ FN(lstm) \
+ FN(lstm_state2) \
+ FN(lstm_state) \
+ FN(max_pool_float_1) \
+ FN(max_pool_float_2) \
+ FN(max_pool_float_3) \
+ FN(max_pool_float_4) \
+ FN(max_pool_quant8_1) \
+ FN(max_pool_quant8_2) \
+ FN(max_pool_quant8_3) \
+ FN(max_pool_quant8_4) \
+ FN(mobilenet_224_gender_basic_fixed) \
+ FN(mobilenet_quantized) \
+ FN(mul_broadcast_quant8) \
+ FN(mul) \
+ FN(mul_quant8) \
+ FN(mul_relu) \
+ FN(relu1_float_1) \
+ FN(relu1_float_2) \
+ FN(relu1_quant8_1) \
+ FN(relu1_quant8_2) \
+ FN(relu6_float_1) \
+ FN(relu6_float_2) \
+ FN(relu6_quant8_1) \
+ FN(relu6_quant8_2) \
+ FN(relu_float_1) \
+ FN(relu_float_2) \
+ FN(relu_quant8_1) \
+ FN(relu_quant8_2) \
+ FN(reshape) \
+ FN(reshape_quant8) \
+ FN(reshape_quant8_weights_as_inputs) \
+ FN(reshape_weights_as_inputs) \
+ FN(resize_bilinear_2) \
+ FN(resize_bilinear) \
+ FN(rnn) \
+ FN(rnn_state) \
+ FN(softmax_float_1) \
+ FN(softmax_float_2) \
+ FN(softmax_quant8_1) \
+ FN(softmax_quant8_2) \
+ FN(space_to_depth_float_1) \
+ FN(space_to_depth_float_2) \
+ FN(space_to_depth_float_3) \
+ FN(space_to_depth_quant8_1) \
+ FN(space_to_depth_quant8_2) \
+ FN(svdf2) \
+ FN(svdf) \
+ FN(svdf_state) \
+ FN(tanh)
+
+#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
+ namespace function { \
+ extern std::vector<MixedTypedExample> examples; \
+ Model createTestModel(); \
+ }
+
+FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
+
+#undef FORWARD_DECLARE_GENERATED_OBJECTS
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_0_H
diff --git a/neuralnetworks/1.2/vts/functional/ModelsV1_1.h b/neuralnetworks/1.2/vts/functional/ModelsV1_1.h
new file mode 100644
index 0000000..eb68de4
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ModelsV1_1.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_1_H
+#define VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_1_H
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "TestHarness.h"
+
+#include <android/hardware/neuralnetworks/1.2/types.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using MixedTypedExample = test_helper::MixedTypedExample;
+
+#define FOR_EACH_TEST_MODEL(FN) \
+ FN(add_relaxed) \
+ FN(avg_pool_float_1_relaxed) \
+ FN(avg_pool_float_2_relaxed) \
+ FN(avg_pool_float_3_relaxed) \
+ FN(avg_pool_float_4_relaxed) \
+ FN(avg_pool_float_5_relaxed) \
+ FN(batch_to_space) \
+ FN(batch_to_space_float_1) \
+ FN(batch_to_space_float_1_relaxed) \
+ FN(batch_to_space_quant8_1) \
+ FN(batch_to_space_relaxed) \
+ FN(concat_float_1_relaxed) \
+ FN(concat_float_2_relaxed) \
+ FN(concat_float_3_relaxed) \
+ FN(conv_1_h3_w2_SAME_relaxed) \
+ FN(conv_1_h3_w2_VALID_relaxed) \
+ FN(conv_3_h3_w2_SAME_relaxed) \
+ FN(conv_3_h3_w2_VALID_relaxed) \
+ FN(conv_float_2_relaxed) \
+ FN(conv_float_channels_relaxed) \
+ FN(conv_float_channels_weights_as_inputs_relaxed) \
+ FN(conv_float_large_relaxed) \
+ FN(conv_float_large_weights_as_inputs_relaxed) \
+ FN(conv_float_relaxed) \
+ FN(conv_float_weights_as_inputs_relaxed) \
+ FN(depth_to_space_float_1_relaxed) \
+ FN(depth_to_space_float_2_relaxed) \
+ FN(depth_to_space_float_3_relaxed) \
+ FN(depthwise_conv2d_float_2_relaxed) \
+ FN(depthwise_conv2d_float_large_2_relaxed) \
+ FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \
+ FN(depthwise_conv2d_float_large_relaxed) \
+ FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \
+ FN(depthwise_conv2d_float_relaxed) \
+ FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \
+ FN(depthwise_conv_relaxed) \
+ FN(dequantize_relaxed) \
+ FN(div) \
+ FN(div_broadcast_float) \
+ FN(div_broadcast_float_relaxed) \
+ FN(div_relaxed) \
+ FN(embedding_lookup_relaxed) \
+ FN(floor_relaxed) \
+ FN(fully_connected_float_2_relaxed) \
+ FN(fully_connected_float_4d_simple) \
+ FN(fully_connected_float_4d_simple_relaxed) \
+ FN(fully_connected_float_large_relaxed) \
+ FN(fully_connected_float_large_weights_as_inputs_relaxed) \
+ FN(fully_connected_float_relaxed) \
+ FN(fully_connected_float_weights_as_inputs_relaxed) \
+ FN(hashtable_lookup_float_relaxed) \
+ FN(l2_normalization_2_relaxed) \
+ FN(l2_normalization_large_relaxed) \
+ FN(l2_normalization_relaxed) \
+ FN(l2_pool_float_2_relaxed) \
+ FN(l2_pool_float_large_relaxed) \
+ FN(l2_pool_float_relaxed) \
+ FN(local_response_norm_float_1_relaxed) \
+ FN(local_response_norm_float_2_relaxed) \
+ FN(local_response_norm_float_3_relaxed) \
+ FN(local_response_norm_float_4_relaxed) \
+ FN(logistic_float_1_relaxed) \
+ FN(logistic_float_2_relaxed) \
+ FN(lsh_projection_2_relaxed) \
+ FN(lsh_projection_relaxed) \
+ FN(lsh_projection_weights_as_inputs_relaxed) \
+ FN(lstm2_relaxed) \
+ FN(lstm2_state2_relaxed) \
+ FN(lstm2_state_relaxed) \
+ FN(lstm3_relaxed) \
+ FN(lstm3_state2_relaxed) \
+ FN(lstm3_state3_relaxed) \
+ FN(lstm3_state_relaxed) \
+ FN(lstm_relaxed) \
+ FN(lstm_state2_relaxed) \
+ FN(lstm_state_relaxed) \
+ FN(max_pool_float_1_relaxed) \
+ FN(max_pool_float_2_relaxed) \
+ FN(max_pool_float_3_relaxed) \
+ FN(max_pool_float_4_relaxed) \
+ FN(mean) \
+ FN(mean_float_1) \
+ FN(mean_float_1_relaxed) \
+ FN(mean_float_2) \
+ FN(mean_float_2_relaxed) \
+ FN(mean_quant8_1) \
+ FN(mean_quant8_2) \
+ FN(mean_relaxed) \
+ FN(mobilenet_224_gender_basic_fixed_relaxed) \
+ FN(mul_relaxed) \
+ FN(mul_relu_relaxed) \
+ FN(pad) \
+ FN(pad_float_1) \
+ FN(pad_float_1_relaxed) \
+ FN(pad_relaxed) \
+ FN(relu1_float_1_relaxed) \
+ FN(relu1_float_2_relaxed) \
+ FN(relu6_float_1_relaxed) \
+ FN(relu6_float_2_relaxed) \
+ FN(relu_float_1_relaxed) \
+ FN(relu_float_2_relaxed) \
+ FN(reshape_relaxed) \
+ FN(reshape_weights_as_inputs_relaxed) \
+ FN(resize_bilinear_2_relaxed) \
+ FN(resize_bilinear_relaxed) \
+ FN(rnn_relaxed) \
+ FN(rnn_state_relaxed) \
+ FN(softmax_float_1_relaxed) \
+ FN(softmax_float_2_relaxed) \
+ FN(space_to_batch) \
+ FN(space_to_batch_float_1) \
+ FN(space_to_batch_float_1_relaxed) \
+ FN(space_to_batch_float_2) \
+ FN(space_to_batch_float_2_relaxed) \
+ FN(space_to_batch_float_3) \
+ FN(space_to_batch_float_3_relaxed) \
+ FN(space_to_batch_quant8_1) \
+ FN(space_to_batch_quant8_2) \
+ FN(space_to_batch_quant8_3) \
+ FN(space_to_batch_relaxed) \
+ FN(space_to_depth_float_1_relaxed) \
+ FN(space_to_depth_float_2_relaxed) \
+ FN(space_to_depth_float_3_relaxed) \
+ FN(squeeze) \
+ FN(squeeze_float_1) \
+ FN(squeeze_float_1_relaxed) \
+ FN(squeeze_quant8_1) \
+ FN(squeeze_relaxed) \
+ FN(strided_slice) \
+ FN(strided_slice_float_1) \
+ FN(strided_slice_float_10) \
+ FN(strided_slice_float_10_relaxed) \
+ FN(strided_slice_float_11) \
+ FN(strided_slice_float_11_relaxed) \
+ FN(strided_slice_float_1_relaxed) \
+ FN(strided_slice_float_2) \
+ FN(strided_slice_float_2_relaxed) \
+ FN(strided_slice_float_3) \
+ FN(strided_slice_float_3_relaxed) \
+ FN(strided_slice_float_4) \
+ FN(strided_slice_float_4_relaxed) \
+ FN(strided_slice_float_5) \
+ FN(strided_slice_float_5_relaxed) \
+ FN(strided_slice_float_6) \
+ FN(strided_slice_float_6_relaxed) \
+ FN(strided_slice_float_7) \
+ FN(strided_slice_float_7_relaxed) \
+ FN(strided_slice_float_8) \
+ FN(strided_slice_float_8_relaxed) \
+ FN(strided_slice_float_9) \
+ FN(strided_slice_float_9_relaxed) \
+ FN(strided_slice_qaunt8_10) \
+ FN(strided_slice_qaunt8_11) \
+ FN(strided_slice_quant8_1) \
+ FN(strided_slice_quant8_2) \
+ FN(strided_slice_quant8_3) \
+ FN(strided_slice_quant8_4) \
+ FN(strided_slice_quant8_5) \
+ FN(strided_slice_quant8_6) \
+ FN(strided_slice_quant8_7) \
+ FN(strided_slice_quant8_8) \
+ FN(strided_slice_quant8_9) \
+ FN(strided_slice_relaxed) \
+ FN(sub) \
+ FN(sub_broadcast_float) \
+ FN(sub_broadcast_float_relaxed) \
+ FN(sub_relaxed) \
+ FN(svdf2_relaxed) \
+ FN(svdf_relaxed) \
+ FN(svdf_state_relaxed) \
+ FN(tanh_relaxed) \
+ FN(transpose) \
+ FN(transpose_float_1) \
+ FN(transpose_float_1_relaxed) \
+ FN(transpose_quant8_1) \
+ FN(transpose_relaxed)
+
+#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \
+ namespace function { \
+ extern std::vector<MixedTypedExample> examples; \
+ Model createTestModel(); \
+ }
+
+FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS)
+
+#undef FORWARD_DECLARE_GENERATED_OBJECTS
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_V1_1_H
diff --git a/neuralnetworks/1.2/vts/functional/ValidationTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/ValidationTestsV1_0.cpp
new file mode 100644
index 0000000..c54ed43
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ValidationTestsV1_0.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "ModelsV1_0.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName) \
+ TEST_F(ValidationTest, TestName) { \
+ const Model model = TestName::createTestModel(); \
+ const std::vector<Request> requests = createRequests(TestName::examples); \
+ validateModel(model); \
+ validateRequests(model, requests); \
+ }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/ValidationTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/ValidationTestsV1_1.cpp
new file mode 100644
index 0000000..95932d5
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ValidationTestsV1_1.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "ModelsV1_1.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+// forward declarations
+std::vector<Request> createRequests(const std::vector<MixedTypedExample>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName) \
+ TEST_F(ValidationTest, TestName) { \
+ const Model model = TestName::createTestModel(); \
+ const std::vector<Request> requests = createRequests(TestName::examples); \
+ validateModel(model); \
+ validateRequests(model, requests); \
+ }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android