Consolidate NNAPI VTS utility code

This CL does additional NNAPI VTS test cleanup, including consolidating
duplicate functionality. Specifically, this CL:
* consolidates createPreparedModel into a single shared helper, removing
  the duplicate definitions (see the usage sketch after this list)
* consolidates the std::ostream operator<< overloads for ErrorStatus and
  DeviceStatus into Utils (see the small sketch before the diff)
* changes non-null constant pointers to constant references
* removes redundant leading namespace specifiers (V1_0::, ::testing, etc.)
* converts the Validation test helpers (validateModel, validateRequest,
  validateEverything) into free functions
* renames device to kDevice and mTestModel to kTestModel
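
A usage sketch of the consolidated helper (the fixture members kDevice
and kTestModel come from this CL; the test name ExampleUsage is
hypothetical):

    // Minimal sketch, not part of the CL itself: call sites now prepare
    // a model through the one shared createPreparedModel helper.
    TEST_P(GeneratedTestBase, ExampleUsage) {
        const Model model = createModel(kTestModel);

        sp<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel);
        // The helper GTEST_SKIPs and leaves *preparedModel null when the
        // driver cannot prepare a model it does not fully support.
        if (preparedModel == nullptr) return;

        // ... execute or validate against preparedModel ...
    }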

Bug: N/A
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest (with sample-all)
Change-Id: Ic401bb1f1760cc10384ac0d30c0c93409b63a9c7
Merged-In: Ic401bb1f1760cc10384ac0d30c0c93409b63a9c7
(cherry picked from commit e16af0a44ba9b76667a598b10a085f808efb8c7c)
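
A small sketch of the relocated pretty-printers (logStatus is
hypothetical; the overloads themselves appear verbatim in the diff):

    // With operator<< declared in include/1.0/Utils.h, ADL resolves
    // streaming for the V1_0 enums wherever Utils.h is included, so
    // gtest failure messages show enum names instead of raw values.
    void logStatus(V1_0::ErrorStatus status) {
        std::cout << status << std::endl;  // prints e.g. "NONE"
    }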
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
index 5727ca4..551ea67 100644
--- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -25,7 +25,7 @@
 
 // status test
 TEST_F(NeuralnetworksHidlTest, StatusTest) {
-    Return<DeviceStatus> status = device->getStatus();
+    Return<DeviceStatus> status = kDevice->getStatus();
     ASSERT_TRUE(status.isOk());
     EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
 }
@@ -33,7 +33,7 @@
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     Return<void> ret =
-            device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+            kDevice->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
                 EXPECT_EQ(ErrorStatus::NONE, status);
                 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
                 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 33a6fa5..1948c05 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -122,9 +122,15 @@
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
+void Execute(const sp<IDevice>& device, const TestModel& testModel) {
+    const Model model = createModel(testModel);
     const Request request = createRequest(testModel);
 
+    // Create IPreparedModel.
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
     // Launch execution.
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
     Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
@@ -143,53 +149,10 @@
 }
 
 // Tag for the generated tests
-class GeneratedTest : public GeneratedTestBase {
-  protected:
-    void Execute(const TestModel& testModel) {
-        Model model = createModel(testModel);
-
-        // see if service can handle model
-        bool fullySupportsModel = false;
-        Return<void> supportedCall = device->getSupportedOperations(
-                model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-                    ASSERT_EQ(ErrorStatus::NONE, status);
-                    ASSERT_NE(0ul, supported.size());
-                    fullySupportsModel = std::all_of(supported.begin(), supported.end(),
-                                                     [](bool valid) { return valid; });
-                });
-        ASSERT_TRUE(supportedCall.isOk());
-
-        // launch prepare model
-        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-        Return<ErrorStatus> prepareLaunchStatus =
-                device->prepareModel(model, preparedModelCallback);
-        ASSERT_TRUE(prepareLaunchStatus.isOk());
-        ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-        // retrieve prepared model
-        preparedModelCallback->wait();
-        ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-        sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-
-        // early termination if vendor service cannot fully prepare model
-        if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-            ASSERT_EQ(nullptr, preparedModel.get());
-            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                         "prepare model that it does not support.";
-            std::cout << "[          ]   Early termination of test because vendor service cannot "
-                         "prepare model that it does not support."
-                      << std::endl;
-            GTEST_SKIP();
-        }
-        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-        ASSERT_NE(nullptr, preparedModel.get());
-
-        EvaluatePreparedModel(preparedModel, testModel);
-    }
-};
+class GeneratedTest : public GeneratedTestBase {};
 
 TEST_P(GeneratedTest, Test) {
-    Execute(*mTestModel);
+    Execute(kDevice, kTestModel);
 }
 
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index a42f271..10e46b7 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -25,32 +25,20 @@
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
-      public ::testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+      public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
   protected:
-    void SetUp() override {
-        NeuralnetworksHidlTest::SetUp();
-        ASSERT_NE(mTestModel, nullptr);
-    }
-
-    const test_helper::TestModel* mTestModel = GetParam().second;
+    const test_helper::TestModel& kTestModel = *GetParam().second;
 };
 
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                          \
-    INSTANTIATE_TEST_SUITE_P(                                                                  \
-            TestGenerated, TestSuite,                                                          \
-            ::testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                        \
+    INSTANTIATE_TEST_SUITE_P(                                                                \
+            TestGenerated, TestSuite,                                                        \
+            testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
             [](const auto& info) { return info.param.first; })
 
 // Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
 // TODO: Clean up the hierarchy for ValidationTest.
-class ValidationTest : public GeneratedTestBase {
-  protected:
-    void validateEverything(const Model& model, const Request& request);
-
-  private:
-    void validateModel(const Model& model);
-    void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
-};
+class ValidationTest : public GeneratedTestBase {};
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 5de99fd..307003c 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -26,6 +26,7 @@
 #include <hidlmemory/mapping.h>
 
 #include <algorithm>
+#include <iostream>
 #include <vector>
 
 namespace android::hardware::neuralnetworks {
@@ -117,3 +118,15 @@
 }
 
 }  // namespace android::hardware::neuralnetworks
+
+namespace android::hardware::neuralnetworks::V1_0 {
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
+    return os << toString(deviceStatus);
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_0
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index 9854395..cc15263 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -27,7 +27,7 @@
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
 static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
-                                           const V1_0::Model& model) {
+                                           const Model& model) {
     SCOPED_TRACE(message + " [getSupportedOperations]");
 
     Return<void> ret =
@@ -38,7 +38,7 @@
 }
 
 static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
-                                 const V1_0::Model& model) {
+                                 const Model& model) {
     SCOPED_TRACE(message + " [prepareModel]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
@@ -57,7 +57,7 @@
 // mutation to it to invalidate the model, then pass it to interface calls that
 // use the model. Note that the model here is passed by value, and any mutation
 // to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, V1_0::Model model,
+static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                      const std::function<void(Model*)>& mutation) {
     mutation(&model);
     validateGetSupportedOperations(device, message, model);
@@ -113,7 +113,7 @@
         static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 
-static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         for (int32_t invalidOperandType : invalidOperandTypes) {
             const std::string message = "mutateOperandTypeTest: operand " +
@@ -143,7 +143,7 @@
     }
 }
 
-static void mutateOperandRankTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
         const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
@@ -172,7 +172,7 @@
     }
 }
 
-static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const float invalidScale = getInvalidScale(model.operands[operand].type);
         const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
@@ -200,7 +200,7 @@
     }
 }
 
-static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
                 getInvalidZeroPoints(model.operands[operand].type);
@@ -257,7 +257,7 @@
     *operand = newOperand;
 }
 
-static bool mutateOperationOperandTypeSkip(size_t operand, const V1_0::Model& model) {
+static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {
     // LSH_PROJECTION's second argument is allowed to have any type. This is the
     // only operation that currently has a type that can be anything independent
     // from any other type. Changing the operand type to any other type will
@@ -271,7 +271,7 @@
     return false;
 }
 
-static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         if (mutateOperationOperandTypeSkip(operand, model)) {
             continue;
@@ -302,7 +302,7 @@
         static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
 };
 
-static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (int32_t invalidOperationType : invalidOperationTypes) {
             const std::string message = "mutateOperationTypeTest: operation " +
@@ -318,8 +318,7 @@
 
 ///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
 
-static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
-                                                 const V1_0::Model& model) {
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const uint32_t invalidOperand = model.operands.size();
         for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
@@ -335,8 +334,7 @@
 
 ///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
 
-static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
-                                                  const V1_0::Model& model) {
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const uint32_t invalidOperand = model.operands.size();
         for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
@@ -374,7 +372,7 @@
     removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
 }
 
-static void removeOperandTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::string message = "removeOperandTest: operand " + std::to_string(operand);
         validate(device, message, model,
@@ -391,7 +389,7 @@
     hidl_vec_removeAt(&model->operations, index);
 }
 
-static void removeOperationTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message = "removeOperationTest: operation " + std::to_string(operation);
         validate(device, message, model,
@@ -401,14 +399,14 @@
 
 ///////////////////////// REMOVE OPERATION INPUT /////////////////////////
 
-static void removeOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
-            const V1_0::Operation& op = model.operations[operation];
+            const Operation& op = model.operations[operation];
             // CONCATENATION has at least 2 inputs, with the last element being
             // INT32. Skip this test if removing one of CONCATENATION's
             // inputs still produces a valid model.
-            if (op.type == V1_0::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+            if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 &&
                 input != op.inputs.size() - 1) {
                 continue;
             }
@@ -426,7 +424,7 @@
 
 ///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
 
-static void removeOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
             const std::string message = "removeOperationOutputTest: operation " +
@@ -447,7 +445,7 @@
 
 ///////////////////////// ADD OPERATION INPUT /////////////////////////
 
-static void addOperationInputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
@@ -460,7 +458,7 @@
 
 ///////////////////////// ADD OPERATION OUTPUT /////////////////////////
 
-static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
+static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message =
                 "addOperationOutputTest: operation " + std::to_string(operation);
@@ -474,7 +472,7 @@
 
 ////////////////////////// ENTRY POINT //////////////////////////////
 
-void ValidationTest::validateModel(const V1_0::Model& model) {
+void validateModel(const sp<IDevice>& device, const Model& model) {
     mutateOperandTypeTest(device, model);
     mutateOperandRankTest(device, model);
     mutateOperandScaleTest(device, model);
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index d8f3e65..05eefd1 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -87,8 +87,7 @@
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
-                                     const Request& request) {
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
     removeInputTest(preparedModel, request);
     removeOutputTest(preparedModel, request);
 }
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 9ee4e37..20b4565 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -28,30 +28,32 @@
 
 using implementation::PreparedModelCallback;
 
-static void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                                sp<IPreparedModel>* preparedModel) {
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
+    *preparedModel = nullptr;
 
     // see if service can handle model
     bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+    const Return<void> supportedCall = device->getSupportedOperations(
             model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                 ASSERT_EQ(ErrorStatus::NONE, status);
                 ASSERT_NE(0ul, supported.size());
                 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                  [](bool valid) { return valid; });
             });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+    ASSERT_TRUE(supportedCall.isOk());
 
     // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    const Return<ErrorStatus> prepareLaunchStatus =
+            device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
     // retrieve prepared model
     preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     *preparedModel = preparedModelCallback->getPreparedModel();
 
     // The getSupportedOperations call returns a list of operations that are
@@ -63,12 +65,12 @@
     // can continue.
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
+                     "model that it does not support.";
+        std::cout << "[          ]   Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
                   << std::endl;
-        return;
+        GTEST_SKIP();
     }
     ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel->get());
@@ -77,7 +79,7 @@
 // A class for test environment setup
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
-    // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
+    // testing::AddGlobalTestEnvironment when the gtest is being torn down
     static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
     return instance;
 }
@@ -88,28 +90,29 @@
 
 // The main test class for NEURALNETWORK HIDL HAL.
 void NeuralnetworksHidlTest::SetUp() {
-    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    testing::VtsHalHidlTargetTestBase::SetUp();
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
             NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
     const std::string sampleDriver = "sample-";
-    if (device == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
+    if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
         GTEST_SKIP();
     }
 #endif  // PRESUBMIT_NOT_VTS
 
-    ASSERT_NE(nullptr, device.get());
+    ASSERT_NE(nullptr, kDevice.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {
-    ::testing::VtsHalHidlTargetTestBase::TearDown();
-}
+// Forward declaration from ValidateModel.cpp
+void validateModel(const sp<IDevice>& device, const Model& model);
+// Forward declaration from ValidateRequest.cpp
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
 
-void ValidationTest::validateEverything(const Model& model, const Request& request) {
-    validateModel(model);
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+    validateModel(device, model);
 
-    // create IPreparedModel
+    // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
     createPreparedModel(device, model, &preparedModel);
     if (preparedModel == nullptr) return;
@@ -118,33 +121,21 @@
 }
 
 TEST_P(ValidationTest, Test) {
-    const Model model = createModel(*mTestModel);
-    const Request request = createRequest(*mTestModel);
-    ASSERT_FALSE(mTestModel->expectFailure);
-    validateEverything(model, request);
+    const Model model = createModel(kTestModel);
+    const Request request = createRequest(kTestModel);
+    ASSERT_FALSE(kTestModel.expectFailure);
+    validateEverything(kDevice, model, request);
 }
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
 }  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
 
 int main(int argc, char** argv) {
-    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
-    ::testing::InitGoogleTest(&argc, argv);
+    testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    testing::InitGoogleTest(&argc, argv);
     NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
 
     int status = RUN_ALL_TESTS();
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index fa9ad3b..48dc237 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -25,15 +25,11 @@
 
 #include <android-base/macros.h>
 #include <gtest/gtest.h>
-#include <iostream>
-#include <vector>
-
-#include "TestHarness.h"
 
 namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
 // A class for test environment setup
-class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment() = default;
 
@@ -43,27 +39,23 @@
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
   public:
     NeuralnetworksHidlTest() = default;
     void SetUp() override;
-    void TearDown() override;
 
   protected:
-    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+    const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
             NeuralnetworksHidlEnvironment::getInstance());
 };
 
+// Create an IPreparedModel object. If the model cannot be prepared,
+// "preparedModel" will be nullptr instead.
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel);
+
 }  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-// pretty-print values for error messages
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 274cb58..1ce751c 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -20,6 +20,7 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <algorithm>
+#include <iosfwd>
 #include <vector>
 #include "TestHarness.h"
 
@@ -52,4 +53,12 @@
 
 }  // namespace android::hardware::neuralnetworks
 
+namespace android::hardware::neuralnetworks::V1_0 {
+
+// pretty-print values for error messages
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
+
+}  // namespace android::hardware::neuralnetworks::V1_0
+
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
index c239c51..2791e80 100644
--- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -28,7 +28,7 @@
 
 // status test
 TEST_F(NeuralnetworksHidlTest, StatusTest) {
-    Return<DeviceStatus> status = device->getStatus();
+    Return<DeviceStatus> status = kDevice->getStatus();
     ASSERT_TRUE(status.isOk());
     EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
 }
@@ -36,7 +36,7 @@
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     Return<void> ret =
-            device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
+            kDevice->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
                 EXPECT_EQ(ErrorStatus::NONE, status);
                 EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
                 EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index 6ed5bc1..fddfc2b 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -130,9 +130,15 @@
 
 // Top level driver for models and examples generated by test_generator.py
 // Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel) {
+void Execute(const sp<IDevice>& device, const TestModel& testModel) {
+    const Model model = createModel(testModel);
     const Request request = createRequest(testModel);
 
+    // Create IPreparedModel.
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
     // Launch execution.
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
     Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(request, executionCallback);
@@ -151,53 +157,10 @@
 }
 
 // Tag for the generated tests
-class GeneratedTest : public GeneratedTestBase {
-  protected:
-    void Execute(const TestModel& testModel) {
-        Model model = createModel(testModel);
-
-        // see if service can handle model
-        bool fullySupportsModel = false;
-        Return<void> supportedCall = device->getSupportedOperations_1_1(
-                model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-                    ASSERT_EQ(ErrorStatus::NONE, status);
-                    ASSERT_NE(0ul, supported.size());
-                    fullySupportsModel = std::all_of(supported.begin(), supported.end(),
-                                                     [](bool valid) { return valid; });
-                });
-        ASSERT_TRUE(supportedCall.isOk());
-
-        // launch prepare model
-        sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-        Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
-                model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
-        ASSERT_TRUE(prepareLaunchStatus.isOk());
-        ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-        // retrieve prepared model
-        preparedModelCallback->wait();
-        ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-        sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
-
-        // early termination if vendor service cannot fully prepare model
-        if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-            ASSERT_EQ(nullptr, preparedModel.get());
-            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                         "prepare model that it does not support.";
-            std::cout << "[          ]   Early termination of test because vendor service cannot "
-                         "prepare model that it does not support."
-                      << std::endl;
-            GTEST_SKIP();
-        }
-        EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-        ASSERT_NE(nullptr, preparedModel.get());
-
-        EvaluatePreparedModel(preparedModel, testModel);
-    }
-};
+class GeneratedTest : public GeneratedTestBase {};
 
 TEST_P(GeneratedTest, Test) {
-    Execute(*mTestModel);
+    Execute(kDevice, kTestModel);
 }
 
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
index 7cb9bdc..273d1ec 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -25,33 +25,20 @@
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
-      public ::testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+      public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
   protected:
-    void SetUp() override {
-        NeuralnetworksHidlTest::SetUp();
-        ASSERT_NE(mTestModel, nullptr);
-    }
-
-    const test_helper::TestModel* mTestModel = GetParam().second;
+    const test_helper::TestModel& kTestModel = *GetParam().second;
 };
 
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                          \
-    INSTANTIATE_TEST_SUITE_P(                                                                  \
-            TestGenerated, TestSuite,                                                          \
-            ::testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                        \
+    INSTANTIATE_TEST_SUITE_P(                                                                \
+            TestGenerated, TestSuite,                                                        \
+            testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
             [](const auto& info) { return info.param.first; })
 
 // Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
 // TODO: Clean up the hierarchy for ValidationTest.
-class ValidationTest : public GeneratedTestBase {
-  protected:
-    void validateEverything(const Model& model, const V1_0::Request& request);
-
-  private:
-    void validateModel(const Model& model);
-    void validateRequest(const sp<V1_0::IPreparedModel>& preparedModel,
-                         const V1_0::Request& request);
-};
+class ValidationTest : public GeneratedTestBase {};
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index e617219..0629a1e 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -33,7 +33,7 @@
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
 static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
-                                           const V1_1::Model& model) {
+                                           const Model& model) {
     SCOPED_TRACE(message + " [getSupportedOperations_1_1]");
 
     Return<void> ret = device->getSupportedOperations_1_1(
@@ -44,7 +44,7 @@
 }
 
 static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
-                                 const V1_1::Model& model, ExecutionPreference preference) {
+                                 const Model& model, ExecutionPreference preference) {
     SCOPED_TRACE(message + " [prepareModel_1_1]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
@@ -70,7 +70,7 @@
 // mutation to it to invalidate the model, then pass it to interface calls that
 // use the model. Note that the model here is passed by value, and any mutation
 // to the model does not leave this function.
-static void validate(const sp<IDevice>& device, const std::string& message, V1_1::Model model,
+static void validate(const sp<IDevice>& device, const std::string& message, Model model,
                      const std::function<void(Model*)>& mutation,
                      ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
     mutation(&model);
@@ -109,7 +109,7 @@
         static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 
-static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         for (int32_t invalidOperandType : invalidOperandTypes) {
             const std::string message = "mutateOperandTypeTest: operand " +
@@ -139,7 +139,7 @@
     }
 }
 
-static void mutateOperandRankTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
         const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
@@ -168,7 +168,7 @@
     }
 }
 
-static void mutateOperandScaleTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const float invalidScale = getInvalidScale(model.operands[operand].type);
         const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
@@ -196,7 +196,7 @@
     }
 }
 
-static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
                 getInvalidZeroPoints(model.operands[operand].type);
@@ -253,7 +253,7 @@
     *operand = newOperand;
 }
 
-static bool mutateOperationOperandTypeSkip(size_t operand, const V1_1::Model& model) {
+static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) {
     // LSH_PROJECTION's second argument is allowed to have any type. This is the
     // only operation that currently has a type that can be anything independent
     // from any other type. Changing the operand type to any other type will
@@ -267,7 +267,7 @@
     return false;
 }
 
-static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         if (mutateOperationOperandTypeSkip(operand, model)) {
             continue;
@@ -298,7 +298,7 @@
         static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
 };
 
-static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (int32_t invalidOperationType : invalidOperationTypes) {
             const std::string message = "mutateOperationTypeTest: operation " +
@@ -314,8 +314,7 @@
 
 ///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
 
-static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device,
-                                                 const V1_1::Model& model) {
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const uint32_t invalidOperand = model.operands.size();
         for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
@@ -331,8 +330,7 @@
 
 ///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
 
-static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device,
-                                                  const V1_1::Model& model) {
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const uint32_t invalidOperand = model.operands.size();
         for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
@@ -370,7 +368,7 @@
     removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
 }
 
-static void removeOperandTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::string message = "removeOperandTest: operand " + std::to_string(operand);
         validate(device, message, model,
@@ -387,7 +385,7 @@
     hidl_vec_removeAt(&model->operations, index);
 }
 
-static void removeOperationTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message = "removeOperationTest: operation " + std::to_string(operation);
         validate(device, message, model,
@@ -397,14 +395,14 @@
 
 ///////////////////////// REMOVE OPERATION INPUT /////////////////////////
 
-static void removeOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
-            const V1_1::Operation& op = model.operations[operation];
+            const Operation& op = model.operations[operation];
             // CONCATENATION has at least 2 inputs, with the last element being
             // INT32. Skip this test if removing one of CONCATENATION's
             // inputs still produces a valid model.
-            if (op.type == V1_1::OperationType::CONCATENATION && op.inputs.size() > 2 &&
+            if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 &&
                 input != op.inputs.size() - 1) {
                 continue;
             }
@@ -422,7 +420,7 @@
 
 ///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
 
-static void removeOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
             const std::string message = "removeOperationOutputTest: operation " +
@@ -443,7 +441,7 @@
 
 ///////////////////////// ADD OPERATION INPUT /////////////////////////
 
-static void addOperationInputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
@@ -456,7 +454,7 @@
 
 ///////////////////////// ADD OPERATION OUTPUT /////////////////////////
 
-static void addOperationOutputTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message =
                 "addOperationOutputTest: operation " + std::to_string(operation);
@@ -475,7 +473,7 @@
         static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
 };
 
-static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const V1_1::Model& model) {
+static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
     for (int32_t preference : invalidExecutionPreferences) {
         const std::string message =
                 "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
@@ -487,7 +485,7 @@
 
 ////////////////////////// ENTRY POINT //////////////////////////////
 
-void ValidationTest::validateModel(const V1_1::Model& model) {
+void validateModel(const sp<IDevice>& device, const Model& model) {
     mutateOperandTypeTest(device, model);
     mutateOperandRankTest(device, model);
     mutateOperandScaleTest(device, model);
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index a4e4ade..9684eb2 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -71,8 +71,7 @@
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
-                                     const Request& request) {
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
     removeInputTest(preparedModel, request);
     removeOutputTest(preparedModel, request);
 }
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 2c1a839..d53d43e 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -31,31 +31,32 @@
 using V1_0::Request;
 using V1_0::implementation::PreparedModelCallback;
 
-static void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                                sp<IPreparedModel>* preparedModel) {
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
+    *preparedModel = nullptr;
 
     // see if service can handle model
     bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+    const Return<void> supportedCall = device->getSupportedOperations_1_1(
             model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                 ASSERT_EQ(ErrorStatus::NONE, status);
                 ASSERT_NE(0ul, supported.size());
                 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                  [](bool valid) { return valid; });
             });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+    ASSERT_TRUE(supportedCall.isOk());
 
     // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
             model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
     // retrieve prepared model
     preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     *preparedModel = preparedModelCallback->getPreparedModel();
 
     // The getSupportedOperations_1_1 call returns a list of operations that are
@@ -67,12 +68,12 @@
     // can continue.
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
+                     "model that it does not support.";
+        std::cout << "[          ]   Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
                   << std::endl;
-        return;
+        GTEST_SKIP();
     }
     ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel->get());
@@ -81,7 +82,7 @@
 // A class for test environment setup
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
-    // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
+    // testing::AddGlobalTestEnvironment when the gtest is being torn down
     static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
     return instance;
 }
@@ -92,65 +93,52 @@
 
 // The main test class for NEURALNETWORK HIDL HAL.
 void NeuralnetworksHidlTest::SetUp() {
-    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    testing::VtsHalHidlTargetTestBase::SetUp();
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
             NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
     const std::string sampleDriver = "sample-";
-    if (device == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
+    if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
         GTEST_SKIP();
     }
 #endif  // PRESUBMIT_NOT_VTS
 
-    ASSERT_NE(nullptr, device.get());
+    ASSERT_NE(nullptr, kDevice.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {
-    ::testing::VtsHalHidlTargetTestBase::TearDown();
-}
+// Forward declaration from ValidateModel.cpp
+void validateModel(const sp<IDevice>& device, const Model& model);
+// Forward declaration from ValidateRequest.cpp
+void validateRequest(const sp<V1_0::IPreparedModel>& preparedModel, const V1_0::Request& request);
 
-void ValidationTest::validateEverything(const Model& model, const Request& request) {
-    validateModel(model);
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+    validateModel(device, model);
 
-    // create IPreparedModel
+    // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
 
     validateRequest(preparedModel, request);
 }
 
 TEST_P(ValidationTest, Test) {
-    const Model model = createModel(*mTestModel);
-    const Request request = createRequest(*mTestModel);
-    ASSERT_FALSE(mTestModel->expectFailure);
-    validateEverything(model, request);
+    const Model model = createModel(kTestModel);
+    const Request request = createRequest(kTestModel);
+    ASSERT_FALSE(kTestModel.expectFailure);
+    validateEverything(kDevice, model, request);
 }
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
 }  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
 
 int main(int argc, char** argv) {
-    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
-    ::testing::InitGoogleTest(&argc, argv);
+    testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    testing::InitGoogleTest(&argc, argv);
     NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
 
     int status = RUN_ALL_TESTS();
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 3d6f2ea..9d6194a 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -26,15 +26,11 @@
 
 #include <android-base/macros.h>
 #include <gtest/gtest.h>
-#include <iostream>
-#include <vector>
-
-#include "TestHarness.h"
 
 namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
 // A class for test environment setup
-class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment() = default;
 
@@ -44,27 +40,23 @@
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
   public:
     NeuralnetworksHidlTest() = default;
     void SetUp() override;
-    void TearDown() override;
 
   protected:
-    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+    const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
             NeuralnetworksHidlEnvironment::getInstance());
 };
 
+// Create an IPreparedModel object. If the model cannot be prepared,
+// "preparedModel" will be nullptr instead.
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<V1_0::IPreparedModel>* preparedModel);
+
 }  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-// pretty-print values for error messages
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 86849d5..8f95b96 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -29,7 +29,7 @@
 
 // status test
 TEST_F(NeuralnetworksHidlTest, StatusTest) {
-    Return<DeviceStatus> status = device->getStatus();
+    Return<DeviceStatus> status = kDevice->getStatus();
     ASSERT_TRUE(status.isOk());
     EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
 }
@@ -37,8 +37,8 @@
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     using OperandPerformance = Capabilities::OperandPerformance;
-    Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
-                                                      const Capabilities& capabilities) {
+    Return<void> ret = kDevice->getCapabilities_1_2([](ErrorStatus status,
+                                                       const Capabilities& capabilities) {
         EXPECT_EQ(ErrorStatus::NONE, status);
 
         auto isPositive = [](const PerformanceInfo& perf) {
@@ -61,16 +61,17 @@
 
 // device version test
 TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
-    Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {
-        EXPECT_EQ(ErrorStatus::NONE, status);
-        EXPECT_LT(0, version.size());
-    });
+    Return<void> ret =
+            kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) {
+                EXPECT_EQ(ErrorStatus::NONE, status);
+                EXPECT_LT(0, version.size());
+            });
     EXPECT_TRUE(ret.isOk());
 }
 
 // device type test
 TEST_F(NeuralnetworksHidlTest, GetDeviceTypeTest) {
-    Return<void> ret = device->getType([](ErrorStatus status, DeviceType type) {
+    Return<void> ret = kDevice->getType([](ErrorStatus status, DeviceType type) {
         EXPECT_EQ(ErrorStatus::NONE, status);
         EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
                     type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
@@ -80,7 +81,7 @@
 
 // device supported extensions test
 TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
-    Return<void> ret = device->getSupportedExtensions(
+    Return<void> ret = kDevice->getSupportedExtensions(
             [](ErrorStatus status, const hidl_vec<Extension>& extensions) {
                 EXPECT_EQ(ErrorStatus::NONE, status);
                 for (auto& extension : extensions) {
@@ -101,7 +102,7 @@
 
 // getNumberOfCacheFilesNeeded test
 TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
-    Return<void> ret = device->getNumberOfCacheFilesNeeded(
+    Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
             [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
                 EXPECT_EQ(ErrorStatus::NONE, status);
                 EXPECT_LE(numModelCache,
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 90872d4..bb46e06 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -223,7 +223,7 @@
 
     void SetUp() override {
         NeuralnetworksHidlTest::SetUp();
-        ASSERT_NE(device.get(), nullptr);
+        ASSERT_NE(kDevice.get(), nullptr);
 
         // Create cache directory. The cache directory and a temporary cache file are always created
         // to test the behavior of prepareModelFromCache, even when caching is not supported.
@@ -233,7 +233,7 @@
         mCacheDir = cacheDir;
         mCacheDir.push_back('/');
 
-        Return<void> ret = device->getNumberOfCacheFilesNeeded(
+        Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
                 [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
                     EXPECT_EQ(ErrorStatus::NONE, status);
                     mNumModelCache = numModelCache;
@@ -267,7 +267,7 @@
 
     void TearDown() override {
         // If the test passes, remove the tmp directory.  Otherwise, keep it for debugging purposes.
-        if (!::testing::Test::HasFailure()) {
+        if (!testing::Test::HasFailure()) {
             // Recursively remove the cache directory specified by mCacheDir.
             auto callback = [](const char* entry, const struct stat*, int, struct FTW*) {
                 return remove(entry);
@@ -300,7 +300,7 @@
     // See if the service can handle the model.
     bool isModelFullySupported(const Model& model) {
         bool fullySupportsModel = false;
-        Return<void> supportedCall = device->getSupportedOperations_1_2(
+        Return<void> supportedCall = kDevice->getSupportedOperations_1_2(
                 model,
                 [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
                     ASSERT_EQ(ErrorStatus::NONE, status);
@@ -321,8 +321,8 @@
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
         Return<ErrorStatus> prepareLaunchStatus =
-                device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
-                                         dataCache, cacheToken, preparedModelCallback);
+                kDevice->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER,
+                                          modelCache, dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
 
@@ -365,7 +365,7 @@
         // Launch prepare model from cache.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
-        Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
+        Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache(
                 modelCache, dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
@@ -405,7 +405,7 @@
 // A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
 // pass running with float32 models and the second pass running with quant8 models.
 class CompilationCachingTest : public CompilationCachingTestBase,
-                               public ::testing::WithParamInterface<OperandType> {
+                               public testing::WithParamInterface<OperandType> {
   protected:
     CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
 };
@@ -1193,13 +1193,13 @@
 }
 
 static const auto kOperandTypeChoices =
-        ::testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+        testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
 
 INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
 
 class CompilationCachingSecurityTest
     : public CompilationCachingTestBase,
-      public ::testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
+      public testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
   protected:
     CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
 
@@ -1339,6 +1339,6 @@
 }
 
 INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
-                        ::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
+                        testing::Combine(kOperandTypeChoices, testing::Range(0U, 10U)));
 
 }  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
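
For illustration: testing::Combine above yields the cross product of its generators, so CompilationCachingSecurityTest is instantiated once per (OperandType, seed) pair, i.e. 2 x 10 = 20 cases here. A self-contained sketch of the same gtest pattern (all names below are illustrative, not from this CL):

    #include <gtest/gtest.h>
    #include <tuple>

    class ExampleSecurityTest : public testing::TestWithParam<std::tuple<int, uint32_t>> {};

    TEST_P(ExampleSecurityTest, ReadsBothParams) {
        const int operandType = std::get<0>(GetParam());  // first combined parameter
        const uint32_t seed = std::get<1>(GetParam());    // second combined parameter
        (void)operandType;
        EXPECT_LT(seed, 10u);
    }

    // 2 operand types x 10 seeds = 20 instantiations.
    INSTANTIATE_TEST_CASE_P(Example, ExampleSecurityTest,
                            testing::Combine(testing::Values(1, 2), testing::Range(0U, 10U)));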
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index b8ca080..a2d3792 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -358,74 +358,31 @@
     }
 }
 
-void PrepareModel(const sp<IDevice>& device, const Model& model,
-                  sp<IPreparedModel>* preparedModel) {
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedCall = device->getSupportedOperations_1_2(
-            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-                ASSERT_EQ(ErrorStatus::NONE, status);
-                ASSERT_NE(0ul, supported.size());
-                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
-                                                 [](bool valid) { return valid; });
-            });
-    ASSERT_TRUE(supportedCall.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
-            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
-    *preparedModel = IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
-
-    // early termination if vendor service cannot fully prepare model
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Early termination of test because vendor service cannot "
-                     "prepare model that it does not support."
-                  << std::endl;
-        return;
+void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+    Model model = createModel(testModel);
+    if (testDynamicOutputShape) {
+        makeOutputDimensionsUnspecified(&model);
     }
-    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
+
+    sp<IPreparedModel> preparedModel;
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
+
+    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
 }
 
 // Tag for the generated tests
-class GeneratedTest : public GeneratedTestBase {
-  protected:
-    void Execute(const TestModel& testModel, bool testDynamicOutputShape) {
-        Model model = createModel(testModel);
-        if (testDynamicOutputShape) {
-            makeOutputDimensionsUnspecified(&model);
-        }
-
-        sp<IPreparedModel> preparedModel = nullptr;
-        PrepareModel(device, model, &preparedModel);
-        if (preparedModel == nullptr) {
-            GTEST_SKIP();
-        }
-        EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
-    }
-};
+class GeneratedTest : public GeneratedTestBase {};
 
 // Tag for the dynamic output shape tests
 class DynamicOutputShapeTest : public GeneratedTest {};
 
 TEST_P(GeneratedTest, Test) {
-    Execute(*mTestModel, /*testDynamicOutputShape=*/false);
+    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
 }
 
 TEST_P(DynamicOutputShapeTest, Test) {
-    Execute(*mTestModel, /*testDynamicOutputShape=*/true);
+    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
 }
 
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
index cb01b91..0b8b917 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -29,36 +29,20 @@
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
-      public ::testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+      public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
   protected:
-    void SetUp() override {
-        NeuralnetworksHidlTest::SetUp();
-        ASSERT_NE(mTestModel, nullptr);
-    }
-
-    const test_helper::TestModel* mTestModel = GetParam().second;
+    const test_helper::TestModel& kTestModel = *GetParam().second;
 };
 
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                          \
-    INSTANTIATE_TEST_SUITE_P(                                                                  \
-            TestGenerated, TestSuite,                                                          \
-            ::testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                        \
+    INSTANTIATE_TEST_SUITE_P(                                                                \
+            TestGenerated, TestSuite,                                                        \
+            testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
             [](const auto& info) { return info.param.first; })
 
 // Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
 // TODO: Clean up the hierarchy for ValidationTest.
-class ValidationTest : public GeneratedTestBase {
-  protected:
-    void validateEverything(const Model& model, const V1_0::Request& request);
-    void validateFailure(const Model& model, const V1_0::Request& request);
-
-  private:
-    void validateModel(const Model& model);
-    void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
-    void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
-                                const V1_0::Request& request);
-    void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
-};
+class ValidationTest : public GeneratedTestBase {};
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 844e879..c02d020 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -391,8 +391,7 @@
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void ValidationTest::validateBurst(const sp<IPreparedModel>& preparedModel,
-                                   const Request& request) {
+void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request) {
     ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request));
     ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
     ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index ea9aa4f..a14b86b 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -692,7 +692,7 @@
 
 ////////////////////////// ENTRY POINT //////////////////////////////
 
-void ValidationTest::validateModel(const Model& model) {
+void validateModel(const sp<IDevice>& device, const Model& model) {
     mutateOperandTypeTest(device, model);
     mutateOperandRankTest(device, model);
     mutateOperandScaleTest(device, model);
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 684b433..5c52de5 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -148,14 +148,12 @@
 
 ///////////////////////////// ENTRY POINT //////////////////////////////////
 
-void ValidationTest::validateRequest(const sp<IPreparedModel>& preparedModel,
-                                     const Request& request) {
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
     removeInputTest(preparedModel, request);
     removeOutputTest(preparedModel, request);
 }
 
-void ValidationTest::validateRequestFailure(const sp<IPreparedModel>& preparedModel,
-                                            const Request& request) {
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
     SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
     Return<void> executeStatus = preparedModel->executeSynchronously(
             request, MeasureTiming::NO,
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index ea9d684..aa4f1f2 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -33,24 +33,25 @@
 using V1_1::ExecutionPreference;
 
 // internal helper function
-static void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                                sp<IPreparedModel>* preparedModel) {
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
+    *preparedModel = nullptr;
 
     // see if service can handle model
     bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_2(
+    const Return<void> supportedCall = device->getSupportedOperations_1_2(
             model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
                 ASSERT_EQ(ErrorStatus::NONE, status);
                 ASSERT_NE(0ul, supported.size());
                 fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                  [](bool valid) { return valid; });
             });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+    ASSERT_TRUE(supportedCall.isOk());
 
     // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+    const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
             model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
             hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -58,7 +59,7 @@
 
     // retrieve prepared model
     preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
     *preparedModel = getPreparedModel_1_2(preparedModelCallback);
 
     // The getSupportedOperations_1_2 call returns a list of operations that are
@@ -70,12 +71,12 @@
     // can continue.
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
+                     "model that it does not support.";
+        std::cout << "[          ]   Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
                   << std::endl;
-        return;
+        GTEST_SKIP();
     }
     ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
     ASSERT_NE(nullptr, preparedModel->get());
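
Note on the GTEST_SKIP() added above: the macro records the skip and returns from the enclosing function, here createPreparedModel itself, not from the calling test. That is why every caller in this CL still null-checks the out-parameter. An illustrative caller (the test name is hypothetical):

    TEST_P(ValidationTest, ExampleUsage) {
        const Model model = createModel(kTestModel);
        sp<IPreparedModel> preparedModel;
        createPreparedModel(kDevice, model, &preparedModel);
        if (preparedModel == nullptr) return;  // skip was already recorded by the helper
        // ... validate preparedModel ...
    }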
@@ -84,7 +85,7 @@
 // A class for test environment setup
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
-    // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
+    // testing::AddGlobalTestEnvironment when the gtest is being torn down
     static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
     return instance;
 }
@@ -95,59 +96,61 @@
 
 // The main test class for NEURALNETWORKS HIDL HAL.
 void NeuralnetworksHidlTest::SetUp() {
-    ::testing::VtsHalHidlTargetTestBase::SetUp();
+    testing::VtsHalHidlTargetTestBase::SetUp();
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
             NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
     const std::string sampleDriver = "sample-";
-    if (device == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
+    if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
         GTEST_SKIP();
     }
 #endif  // PRESUBMIT_NOT_VTS
 
-    ASSERT_NE(nullptr, device.get());
+    ASSERT_NE(nullptr, kDevice.get());
 }
 
-void NeuralnetworksHidlTest::TearDown() {
-    ::testing::VtsHalHidlTargetTestBase::TearDown();
-}
+// Forward declaration from ValidateModel.cpp
+void validateModel(const sp<IDevice>& device, const Model& model);
+// Forward declaration from ValidateRequest.cpp
+void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateRequest.cpp
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateBurst.cpp
+void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
 
-void ValidationTest::validateEverything(const Model& model, const Request& request) {
-    validateModel(model);
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+    validateModel(device, model);
 
-    // create IPreparedModel
+    // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
 
     validateRequest(preparedModel, request);
     validateBurst(preparedModel, request);
 }
 
-void ValidationTest::validateFailure(const Model& model, const Request& request) {
+void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
     // TODO: Should this always succeed?
     //       What if the invalid input is part of the model (i.e., a parameter)?
-    validateModel(model);
+    validateModel(device, model);
 
+    // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
 
     validateRequestFailure(preparedModel, request);
 }
 
 TEST_P(ValidationTest, Test) {
-    const Model model = createModel(*mTestModel);
-    const Request request = createRequest(*mTestModel);
-    if (mTestModel->expectFailure) {
-        validateFailure(model, request);
+    const Model model = createModel(kTestModel);
+    const Request request = createRequest(kTestModel);
+    if (kTestModel.expectFailure) {
+        validateFailure(kDevice, model, request);
     } else {
-        validateEverything(model, request);
+        validateEverything(kDevice, model, request);
     }
 }
 
@@ -160,23 +163,11 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
-    return os << toString(errorStatus);
-}
-
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) {
-    return os << toString(deviceStatus);
-}
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 using android::hardware::neuralnetworks::V1_2::vts::functional::NeuralnetworksHidlEnvironment;
 
 int main(int argc, char** argv) {
-    ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
-    ::testing::InitGoogleTest(&argc, argv);
+    testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
+    testing::InitGoogleTest(&argc, argv);
     NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
 
     int status = RUN_ALL_TESTS();
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 4a6d33b..9981696 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -26,16 +26,12 @@
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <gtest/gtest.h>
 
-#include <iostream>
-#include <vector>
-
 #include "1.2/Callbacks.h"
-#include "TestHarness.h"
 
 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
 // A class for test environment setup
-class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
+class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
     NeuralnetworksHidlEnvironment() = default;
 
@@ -45,30 +41,26 @@
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
   public:
     NeuralnetworksHidlTest() = default;
     void SetUp() override;
-    void TearDown() override;
 
   protected:
-    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+    const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
             NeuralnetworksHidlEnvironment::getInstance());
 };
 
+// Create an IPreparedModel object. If the model cannot be prepared,
+// "preparedModel" will be nullptr instead.
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                         sp<IPreparedModel>* preparedModel);
+
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback);
 
 }  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
 
-namespace android::hardware::neuralnetworks::V1_0 {
-
-// pretty-print values for error messages
-::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
-::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
-
-}  // namespace android::hardware::neuralnetworks::V1_0
-
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
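
For context, getPreparedModel_1_2 is declared above but its body is not part of this diff; presumably it performs the same downcast as the removed PrepareModel code earlier in this CL. A sketch under that assumption:

    // Presumed implementation, mirroring the castFrom/withDefault pattern from
    // the removed PrepareModel body in GeneratedTestHarness.cpp.
    sp<IPreparedModel> getPreparedModel_1_2(
            const sp<implementation::PreparedModelCallback>& callback) {
        sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
        return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
    }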