Clean up NNAPI VTS tests

This CL includes the following cleanups:
* compress nested namespace declarations
* remove "using" declarations from header files
* remove no-op code and default the trivial constructors/destructors
* clang-format the code
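
The namespace and constructor cleanups follow the C++17 patterns sketched
below (a minimal illustration only; ExampleHidlTest is a hypothetical fixture
name, not part of this change):

    #include <gtest/gtest.h>

    // A single nested namespace definition replaces six separate
    // namespace blocks and their matching closing braces.
    namespace android::hardware::neuralnetworks::V1_0::vts::functional {

    // "= default" replaces empty out-of-line constructor/destructor bodies.
    class ExampleHidlTest : public ::testing::Test {
      public:
        ExampleHidlTest() = default;
        ~ExampleHidlTest() override = default;
    };

    }  // namespace android::hardware::neuralnetworks::V1_0::vts::functional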

Bug: N/A
Test: mma
Test: VtsHalNeuralnetworksV1_*TargetTest
Change-Id: I023997d8686ca65223858eed3a0881f5444ed2d6
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
index 945c406..5727ca4 100644
--- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -18,12 +18,7 @@
 
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
 // create device test
 TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -38,19 +33,14 @@
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     Return<void> ret =
-        device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
-        });
+            device->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
+                EXPECT_EQ(ErrorStatus::NONE, status);
+                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+            });
     EXPECT_TRUE(ret.isOk());
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 5f96539..33a6fa5 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -33,23 +33,12 @@
 #include <gtest/gtest.h>
 #include <iostream>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::IDevice;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Model;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
 
 Model createModel(const TestModel& testModel) {
     // Model operands.
@@ -206,9 +195,4 @@
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index f86e8b3..a42f271 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -21,12 +21,7 @@
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
@@ -59,11 +54,6 @@
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 5aa2751..5de99fd 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -28,15 +28,13 @@
 #include <algorithm>
 #include <vector>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
+namespace android::hardware::neuralnetworks {
 
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::DataLocation;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using V1_0::DataLocation;
+using V1_0::Request;
+using V1_0::RequestArgument;
 
 constexpr uint32_t kInputPoolIndex = 0;
 constexpr uint32_t kOutputPoolIndex = 1;
@@ -118,6 +116,4 @@
     return outputBuffers;
 }
 
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index 5845aab..9854395 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -20,15 +20,9 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -37,9 +31,9 @@
     SCOPED_TRACE(message + " [getSupportedOperations]");
 
     Return<void> ret =
-        device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-        });
+            device->getSupportedOperations(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            });
     EXPECT_TRUE(ret.isOk());
 }
 
@@ -48,7 +42,6 @@
     SCOPED_TRACE(message + " [prepareModel]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -94,13 +87,13 @@
 static uint32_t addOperand(Model* model) {
     return hidl_vec_push_back(&model->operands,
                               {
-                                  .type = OperandType::INT32,
-                                  .dimensions = {},
-                                  .numberOfConsumers = 0,
-                                  .scale = 0.0f,
-                                  .zeroPoint = 0,
-                                  .lifetime = OperandLifeTime::MODEL_INPUT,
-                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                                      .type = OperandType::INT32,
+                                      .dimensions = {},
+                                      .numberOfConsumers = 0,
+                                      .scale = 0.0f,
+                                      .zeroPoint = 0,
+                                      .lifetime = OperandLifeTime::MODEL_INPUT,
+                                      .location = {.poolIndex = 0, .offset = 0, .length = 0},
                               });
 }
 
@@ -114,10 +107,10 @@
 ///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
 
 static const int32_t invalidOperandTypes[] = {
-    static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
-    static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
-    static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
-    static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
+        static_cast<int32_t>(OperandType::FLOAT32) - 1,              // lower bound fundamental
+        static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1,  // upper bound fundamental
+        static_cast<int32_t>(OperandType::OEM) - 1,                  // lower bound OEM
+        static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1,      // upper bound OEM
 };
 
 static void mutateOperandTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
@@ -210,7 +203,7 @@
 static void mutateOperandZeroPointTest(const sp<IDevice>& device, const V1_0::Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
-            getInvalidZeroPoints(model.operands[operand].type);
+                getInvalidZeroPoints(model.operands[operand].type);
         for (int32_t invalidZeroPoint : invalidZeroPoints) {
             const std::string message = "mutateOperandZeroPointTest: operand " +
                                         std::to_string(operand) + " has zero point of " +
@@ -242,18 +235,18 @@
             break;
         case OperandType::TENSOR_FLOAT32:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = 0.0f;
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_INT32:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
             break;
         case OperandType::OEM:
@@ -303,10 +296,10 @@
 ///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
 
 static const int32_t invalidOperationTypes[] = {
-    static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
-    static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
-    static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
-    static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
+        static_cast<int32_t>(OperationType::ADD) - 1,            // lower bound fundamental
+        static_cast<int32_t>(OperationType::TANH) + 1,           // upper bound fundamental
+        static_cast<int32_t>(OperationType::OEM_OPERATION) - 1,  // lower bound OEM
+        static_cast<int32_t>(OperationType::OEM_OPERATION) + 1,  // upper bound OEM
 };
 
 static void mutateOperationTypeTest(const sp<IDevice>& device, const V1_0::Model& model) {
@@ -317,7 +310,7 @@
                                         std::to_string(invalidOperationType);
             validate(device, message, model, [operation, invalidOperationType](Model* model) {
                 model->operations[operation].type =
-                    static_cast<OperationType>(invalidOperationType);
+                        static_cast<OperationType>(invalidOperationType);
             });
         }
     }
@@ -470,7 +463,7 @@
 static void addOperationOutputTest(const sp<IDevice>& device, const V1_0::Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message =
-            "addOperationOutputTest: operation " + std::to_string(operation);
+                "addOperationOutputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
             uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
             hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -498,9 +491,4 @@
     addOperationOutputTest(device, model);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 730e054..d8f3e65 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -20,14 +20,9 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
+using implementation::ExecutionCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -41,7 +36,6 @@
     SCOPED_TRACE(message + " [execute]");
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
     Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -99,9 +93,4 @@
     removeOutputTest(preparedModel, request);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index a51f71f..9ee4e37 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -24,16 +24,11 @@
 
 #include <android-base/logging.h>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using implementation::PreparedModelCallback;
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+static void createPreparedModel(const sp<IDevice>& device, const Model& model,
                                 sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
 
@@ -50,7 +45,6 @@
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
@@ -81,10 +75,6 @@
 }
 
 // A class for test environment setup
-NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
-
-NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
-
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -97,14 +87,8 @@
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
-NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
-
-NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
-
 void NeuralnetworksHidlTest::SetUp() {
     ::testing::VtsHalHidlTargetTestBase::SetUp();
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
-            NeuralnetworksHidlEnvironment::getInstance());
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
@@ -119,7 +103,6 @@
 }
 
 void NeuralnetworksHidlTest::TearDown() {
-    device = nullptr;
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
@@ -128,10 +111,8 @@
 
     // create IPreparedModel
     sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
+    createPreparedModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) return;
 
     validateRequest(preparedModel, request);
 }
@@ -145,12 +126,7 @@
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 9638a0e..fa9ad3b 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -30,20 +30,14 @@
 
 #include "TestHarness.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_0 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_0::vts::functional {
 
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
-    NeuralnetworksHidlEnvironment();
-    ~NeuralnetworksHidlEnvironment() override;
+    NeuralnetworksHidlEnvironment() = default;
 
-   public:
+  public:
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
@@ -52,22 +46,17 @@
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
-   public:
-    NeuralnetworksHidlTest();
-    ~NeuralnetworksHidlTest() override;
+  public:
+    NeuralnetworksHidlTest() = default;
     void SetUp() override;
     void TearDown() override;
 
-   protected:
-    sp<IDevice> device;
+  protected:
+    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+            NeuralnetworksHidlEnvironment::getInstance());
 };
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_0
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 2955b6e..274cb58 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -17,14 +17,13 @@
 #ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
 
+#include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <algorithm>
 #include <vector>
 #include "TestHarness.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
+namespace android::hardware::neuralnetworks {
 
 // Create HIDL Request from the TestModel struct.
 V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
@@ -37,23 +36,20 @@
 // resizing the hidl_vec to one less.
 template <typename Type>
 inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
-    if (vec) {
-        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
-        vec->resize(vec->size() - 1);
-    }
+    CHECK(vec != nullptr);
+    std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+    vec->resize(vec->size() - 1);
 }
 
 template <typename Type>
 inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
-    // assume vec is valid
+    CHECK(vec != nullptr);
     const uint32_t index = vec->size();
     vec->resize(index + 1);
     (*vec)[index] = value;
     return index;
 }
 
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_UTILS_H
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
index ed59a2d..c239c51 100644
--- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -18,12 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
+
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
 
 // create device test
 TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -38,21 +36,16 @@
 // initialization
 TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     Return<void> ret =
-        device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
-            EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
-        });
+            device->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
+                EXPECT_EQ(ErrorStatus::NONE, status);
+                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
+                EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
+                EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
+            });
     EXPECT_TRUE(ret.isOk());
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index d8d1a31..6ed5bc1 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -33,28 +33,19 @@
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::DataLocation;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Operand;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::OperandType;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
-using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
-using ::android::hardware::neuralnetworks::V1_1::IDevice;
-using ::android::hardware::neuralnetworks::V1_1::Model;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using V1_0::DataLocation;
+using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+using V1_0::Request;
+using V1_0::implementation::ExecutionCallback;
+using V1_0::implementation::PreparedModelCallback;
 
 Model createModel(const TestModel& testModel) {
     // Model operands.
@@ -212,9 +203,4 @@
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
index 82fc551..7cb9bdc 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -21,12 +21,7 @@
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
@@ -50,20 +45,16 @@
 // TODO: Clean up the hierarchy for ValidationTest.
 class ValidationTest : public GeneratedTestBase {
   protected:
-    void validateEverything(const Model& model, const Request& request);
+    void validateEverything(const Model& model, const V1_0::Request& request);
 
   private:
     void validateModel(const Model& model);
-    void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
+    void validateRequest(const sp<V1_0::IPreparedModel>& preparedModel,
+                         const V1_0::Request& request);
 };
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index d20dcd0..e617219 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -21,18 +21,14 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_0::Operand;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::OperandType;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
+using V1_0::Operand;
+using V1_0::OperandLifeTime;
+using V1_0::OperandType;
+using V1_0::implementation::PreparedModelCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -52,7 +48,6 @@
     SCOPED_TRACE(message + " [prepareModel_1_1]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus =
             device->prepareModel_1_1(model, preference, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -484,8 +479,9 @@
     for (int32_t preference : invalidExecutionPreferences) {
         const std::string message =
                 "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
-        validate(device, message, model, [](Model*) {},
-                 static_cast<ExecutionPreference>(preference));
+        validate(
+                device, message, model, [](Model*) {},
+                static_cast<ExecutionPreference>(preference));
     }
 }
 
@@ -509,9 +505,4 @@
     mutateExecutionPreferenceTest(device, model);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index e0710f1..a4e4ade 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -21,17 +21,12 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_1::IPreparedModel;
+using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
+using V1_0::Request;
+using V1_0::implementation::ExecutionCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -45,7 +40,6 @@
     SCOPED_TRACE(message + " [execute]");
 
     sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-    ASSERT_NE(nullptr, executionCallback.get());
     Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
     ASSERT_TRUE(executeLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
@@ -83,9 +77,4 @@
     removeOutputTest(preparedModel, request);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 9a11b10..2c1a839 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -24,16 +24,14 @@
 
 #include <android-base/logging.h>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
+using V1_0::Request;
+using V1_0::implementation::PreparedModelCallback;
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+static void createPreparedModel(const sp<IDevice>& device, const Model& model,
                                 sp<IPreparedModel>* preparedModel) {
     ASSERT_NE(nullptr, preparedModel);
 
@@ -50,7 +48,6 @@
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
             model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
@@ -82,10 +79,6 @@
 }
 
 // A class for test environment setup
-NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
-
-NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
-
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -98,14 +91,8 @@
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
-NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
-
-NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
-
 void NeuralnetworksHidlTest::SetUp() {
     ::testing::VtsHalHidlTargetTestBase::SetUp();
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
-            NeuralnetworksHidlEnvironment::getInstance());
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
@@ -120,7 +107,6 @@
 }
 
 void NeuralnetworksHidlTest::TearDown() {
-    device = nullptr;
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
@@ -146,12 +132,7 @@
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 8d44deb..3d6f2ea 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -31,28 +31,14 @@
 
 #include "TestHarness.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_1 {
-
-using V1_0::DeviceStatus;
-using V1_0::ErrorStatus;
-using V1_0::IPreparedModel;
-using V1_0::Operand;
-using V1_0::OperandType;
-using V1_0::Request;
-
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_1::vts::functional {
 
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
-    NeuralnetworksHidlEnvironment();
-    ~NeuralnetworksHidlEnvironment() override;
+    NeuralnetworksHidlEnvironment() = default;
 
-   public:
+  public:
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
@@ -61,22 +47,17 @@
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
-   public:
-    NeuralnetworksHidlTest();
-    ~NeuralnetworksHidlTest() override;
+  public:
+    NeuralnetworksHidlTest() = default;
     void SetUp() override;
     void TearDown() override;
 
-   protected:
-    sp<IDevice> device;
+  protected:
+    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+            NeuralnetworksHidlEnvironment::getInstance());
 };
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_1
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 5c269df..86849d5 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -18,13 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
 using V1_0::PerformanceInfo;
 
 // create device test
@@ -113,9 +110,4 @@
             });
     EXPECT_TRUE(ret.isOk());
 }
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/Callbacks.cpp b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
index a607a08..3972ad6 100644
--- a/neuralnetworks/1.2/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
@@ -24,6 +24,8 @@
 
 namespace android::hardware::neuralnetworks::V1_2::implementation {
 
+using V1_0::ErrorStatus;
+
 constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                               .timeInDriver = std::numeric_limits<uint64_t>::max()};
 
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index bde700e..90872d4 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -17,7 +17,6 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
 #include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
 #include <ftw.h>
 #include <gtest/gtest.h>
 #include <hidlmemory/mapping.h>
@@ -45,20 +44,12 @@
 const ::test_helper::TestModel& get_test_model();
 }  // namespace generated_tests::mobilenet_quantized
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
-using ::android::hidl::memory::V1_0::IMemory;
-using ::android::nn::allocateSharedMemory;
+using implementation::PreparedModelCallback;
+using V1_0::ErrorStatus;
+using V1_1::ExecutionPreference;
 
 namespace float32_model {
 
@@ -307,7 +298,7 @@
     }
 
     // See if the service can handle the model.
-    bool isModelFullySupported(const V1_2::Model& model) {
+    bool isModelFullySupported(const Model& model) {
         bool fullySupportsModel = false;
         Return<void> supportedCall = device->getSupportedOperations_1_2(
                 model,
@@ -321,14 +312,13 @@
         return fullySupportsModel;
     }
 
-    void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
+    void saveModelToCache(const Model& model, const hidl_vec<hidl_handle>& modelCache,
                           const hidl_vec<hidl_handle>& dataCache,
                           sp<IPreparedModel>* preparedModel = nullptr) {
         if (preparedModel != nullptr) *preparedModel = nullptr;
 
         // Launch prepare model.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-        ASSERT_NE(nullptr, preparedModelCallback.get());
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
         Return<ErrorStatus> prepareLaunchStatus =
                 device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
@@ -340,9 +330,8 @@
         preparedModelCallback->wait();
         ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
         if (preparedModel != nullptr) {
-            *preparedModel =
-                    V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
-                            .withDefault(nullptr);
+            *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+                                     .withDefault(nullptr);
         }
     }
 
@@ -358,7 +347,7 @@
         return false;
     }
 
-    bool checkEarlyTermination(const V1_2::Model& model) {
+    bool checkEarlyTermination(const Model& model) {
         if (!isModelFullySupported(model)) {
             LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                          "prepare model that it does not support.";
@@ -375,7 +364,6 @@
                                sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
         // Launch prepare model from cache.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-        ASSERT_NE(nullptr, preparedModelCallback.get());
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
         Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
                 modelCache, dataCache, cacheToken, preparedModelCallback);
@@ -389,7 +377,7 @@
         // Retrieve prepared model.
         preparedModelCallback->wait();
         *status = preparedModelCallback->getStatus();
-        *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+        *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
                                  .withDefault(nullptr);
     }
 
@@ -1353,9 +1341,4 @@
 INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
                         ::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 1d302e2..b8ca080 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -44,30 +44,17 @@
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
 using namespace test_helper;
-using ::android::hardware::neuralnetworks::V1_0::DataLocation;
-using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
-using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
-using ::android::hardware::neuralnetworks::V1_0::Request;
-using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
-using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
-using ::android::hardware::neuralnetworks::V1_2::Constant;
-using ::android::hardware::neuralnetworks::V1_2::IDevice;
-using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
-using ::android::hardware::neuralnetworks::V1_2::MeasureTiming;
-using ::android::hardware::neuralnetworks::V1_2::Model;
-using ::android::hardware::neuralnetworks::V1_2::OutputShape;
-using ::android::hardware::neuralnetworks::V1_2::Timing;
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
-using ::android::hidl::memory::V1_0::IMemory;
+using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
+using V1_0::DataLocation;
+using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
+using V1_0::Request;
+using V1_1::ExecutionPreference;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
@@ -447,9 +434,4 @@
 INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
index 27208ce..cb01b91 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -25,12 +25,7 @@
 #include "TestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
 class GeneratedTestBase
     : public NeuralnetworksHidlTest,
@@ -54,29 +49,24 @@
 // TODO: Clean up the hierarchy for ValidationTest.
 class ValidationTest : public GeneratedTestBase {
   protected:
-    void validateEverything(const Model& model, const Request& request);
-    void validateFailure(const Model& model, const Request& request);
+    void validateEverything(const Model& model, const V1_0::Request& request);
+    void validateFailure(const Model& model, const V1_0::Request& request);
 
   private:
     void validateModel(const Model& model);
-    void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request);
-    void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request);
-    void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request);
+    void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+    void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                const V1_0::Request& request);
+    void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
 };
 
 Model createModel(const ::test_helper::TestModel& testModel);
 
-void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
-                  sp<V1_2::IPreparedModel>* preparedModel);
+void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
 
-void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
                            const ::test_helper::TestModel& testModel, bool testDynamicOutputShape);
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index cb801eb..844e879 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -28,17 +28,14 @@
 #include <android-base/logging.h>
 #include <cstring>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
-using ::android::nn::ExecutionBurstController;
-using ::android::nn::RequestChannelSender;
-using ::android::nn::ResultChannelReceiver;
-using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;
+using nn::ExecutionBurstController;
+using nn::RequestChannelSender;
+using nn::ResultChannelReceiver;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
 
 // This constant value represents the length of an FMQ that is large enough to
 // return a result from a burst execution for all of the generated test cases.
@@ -401,9 +398,4 @@
     ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request));
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index 7dfcff2..74840a6 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -21,19 +21,12 @@
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
+using implementation::PreparedModelCallback;
+using V1_0::ErrorStatus;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
-
-namespace vts {
-namespace functional {
-
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -54,7 +47,6 @@
     SCOPED_TRACE(message + " [prepareModel_1_2]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus =
             device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
                                      hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
@@ -692,8 +684,9 @@
     for (int32_t preference : invalidExecutionPreferences) {
         const std::string message =
                 "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
-        validate(device, message, model, [](Model*) {},
-                 static_cast<ExecutionPreference>(preference));
+        validate(
+                device, message, model, [](Model*) {},
+                static_cast<ExecutionPreference>(preference));
     }
 }
 
@@ -717,9 +710,4 @@
     mutateExecutionPreferenceTest(device, model);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 20c740e..684b433 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -24,14 +24,11 @@
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using implementation::ExecutionCallback;
+using V1_0::ErrorStatus;
+using V1_0::Request;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -62,7 +59,6 @@
         SCOPED_TRACE(message + " [execute_1_2]");
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-        ASSERT_NE(nullptr, executionCallback.get());
         Return<ErrorStatus> executeLaunchStatus =
                 preparedModel->execute_1_2(request, measure, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
@@ -171,9 +167,4 @@
     ASSERT_TRUE(executeStatus.isOk());
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index b87384e..ea9d684 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -24,15 +24,12 @@
 
 #include <android-base/logging.h>
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using implementation::PreparedModelCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_0::ErrorStatus;
+using V1_0::Request;
 using V1_1::ExecutionPreference;
 
 // internal helper function
@@ -53,7 +50,6 @@
 
     // launch prepare model
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
     Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
             model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
             hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
@@ -86,10 +82,6 @@
 }
 
 // A class for test environment setup
-NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
-
-NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
-
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -102,14 +94,8 @@
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
-NeuralnetworksHidlTest::NeuralnetworksHidlTest() {}
-
-NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
-
 void NeuralnetworksHidlTest::SetUp() {
     ::testing::VtsHalHidlTargetTestBase::SetUp();
-    device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
-            NeuralnetworksHidlEnvironment::getInstance());
 
 #ifdef PRESUBMIT_NOT_VTS
     const std::string name =
@@ -124,7 +110,6 @@
 }
 
 void NeuralnetworksHidlTest::TearDown() {
-    device = nullptr;
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
@@ -168,18 +153,12 @@
 
 INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
 
-sp<IPreparedModel> getPreparedModel_1_2(
-    const sp<V1_2::implementation::PreparedModelCallback>& callback) {
+sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
-    return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+    return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
 }
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 8729a6f..4a6d33b 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -32,23 +32,12 @@
 #include "1.2/Callbacks.h"
 #include "TestHarness.h"
 
-namespace android {
-namespace hardware {
-namespace neuralnetworks {
-namespace V1_2 {
-
-using V1_0::DeviceStatus;
-using V1_0::ErrorStatus;
-using V1_0::Request;
-
-namespace vts {
-namespace functional {
+namespace android::hardware::neuralnetworks::V1_2::vts::functional {
 
 // A class for test environment setup
 class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
-    NeuralnetworksHidlEnvironment();
-    ~NeuralnetworksHidlEnvironment() override;
+    NeuralnetworksHidlEnvironment() = default;
 
   public:
     static NeuralnetworksHidlEnvironment* getInstance();
@@ -60,25 +49,19 @@
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
   public:
-    NeuralnetworksHidlTest();
-    ~NeuralnetworksHidlTest() override;
+    NeuralnetworksHidlTest() = default;
     void SetUp() override;
     void TearDown() override;
 
   protected:
-    sp<IDevice> device;
+    const sp<IDevice> device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
+            NeuralnetworksHidlEnvironment::getInstance());
 };
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
-sp<IPreparedModel> getPreparedModel_1_2(
-        const sp<V1_2::implementation::PreparedModelCallback>& callback);
+sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback);
 
-}  // namespace functional
-}  // namespace vts
-}  // namespace V1_2
-}  // namespace neuralnetworks
-}  // namespace hardware
-}  // namespace android
+}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
 
 namespace android::hardware::neuralnetworks::V1_0 {
 
diff --git a/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
index 2992c0c..bf4792c 100644
--- a/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
@@ -46,8 +46,6 @@
 
 namespace android::hardware::neuralnetworks::V1_2::implementation {
 
-using V1_0::ErrorStatus;
-
 /**
  * The PreparedModelCallback class is used to receive the error status of
  * preparing a model as well as the prepared model from a task executing
@@ -87,7 +85,8 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify(V1_0::ErrorStatus status,
+                        const sp<V1_0::IPreparedModel>& preparedModel) override;
 
     /**
      * IPreparedModelCallback::notify_1_2 marks the callback object with the
@@ -112,7 +111,7 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    Return<void> notify_1_2(ErrorStatus status,
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
                             const sp<V1_2::IPreparedModel>& preparedModel) override;
 
     /**
@@ -134,7 +133,7 @@
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if the input model is invalid
      */
-    ErrorStatus getStatus() const;
+    V1_0::ErrorStatus getStatus() const;
 
     /**
      * Retrieves the model that has been prepared for execution from the
@@ -152,7 +151,7 @@
     mutable std::mutex mMutex;
     mutable std::condition_variable mCondition;
     bool mNotified GUARDED_BY(mMutex) = false;
-    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     sp<V1_0::IPreparedModel> mPreparedModel;
 };
 
@@ -195,7 +194,7 @@
      *         enough to store the resultant values
      *     - INVALID_ARGUMENT if the input request is invalid
      */
-    Return<void> notify(ErrorStatus status) override;
+    Return<void> notify(V1_0::ErrorStatus status) override;
 
     /**
      * IExecutionCallback::notify_1_2 marks the callback object with the results
@@ -230,11 +229,11 @@
      *     reported as UINT64_MAX. A driver may choose to report any time as
      *     UINT64_MAX, indicating that particular measurement is not available.
      */
-    Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+    Return<void> notify_1_2(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
                             const Timing& timing) override;
 
     // An overload of the latest notify interface to hide the version from ExecutionBuilder.
-    Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+    Return<void> notify(V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
                         const Timing& timing) {
         return notify_1_2(status, outputShapes, timing);
     }
@@ -264,7 +263,7 @@
      *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
      *         invalid
      */
-    ErrorStatus getStatus() const;
+    V1_0::ErrorStatus getStatus() const;
 
     /**
      * Retrieves the output shapes returned from the asynchronous task launched
@@ -309,14 +308,14 @@
      * object before any call to wait or get* return. It then enables all prior
      * and future wait calls on the ExecutionCallback object to proceed.
      */
-    void notifyInternal(ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes,
+    void notifyInternal(V1_0::ErrorStatus errorStatus, const hidl_vec<OutputShape>& outputShapes,
                         const Timing& timing);
 
     // members
     mutable std::mutex mMutex;
     mutable std::condition_variable mCondition;
     bool mNotified GUARDED_BY(mMutex) = false;
-    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     std::vector<OutputShape> mOutputShapes = {};
     Timing mTiming = {};
 };