Merge "Update OWNERS for VtsHalGraphicsComposer tests."
diff --git a/automotive/sv/1.0/vts/functional/OWNERS b/automotive/sv/1.0/vts/functional/OWNERS
new file mode 100644
index 0000000..2ba00a3
--- /dev/null
+++ b/automotive/sv/1.0/vts/functional/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 821659
+tanmayp@google.com
+ankitarora@google.com
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 7fa586e..0c7e58e 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -396,6 +396,7 @@
     </hal>
     <hal format="aidl" optional="true">
         <name>android.hardware.neuralnetworks</name>
+        <version>1-2</version>
         <interface>
             <name>IDevice</name>
             <regex-instance>.*</regex-instance>
diff --git a/contexthub/1.0/vts/functional/OWNERS b/contexthub/1.0/vts/functional/OWNERS
index 1a33a9e..f254cd5 100644
--- a/contexthub/1.0/vts/functional/OWNERS
+++ b/contexthub/1.0/vts/functional/OWNERS
@@ -1,7 +1,5 @@
+# Bug component: 156070
 #Context Hub team
 arthuri@google.com
 bduddie@google.com
 stange@google.com
-
-#VTS team
-dshi@google.com
diff --git a/contexthub/1.1/vts/functional/OWNERS b/contexthub/1.1/vts/functional/OWNERS
index 1a33a9e..2cf5bca 100644
--- a/contexthub/1.1/vts/functional/OWNERS
+++ b/contexthub/1.1/vts/functional/OWNERS
@@ -1,7 +1,2 @@
-#Context Hub team
-arthuri@google.com
-bduddie@google.com
-stange@google.com
-
-#VTS team
-dshi@google.com
+# Bug component: 156070
+include ../../../1.0/vts/functional/OWNERS
diff --git a/contexthub/1.2/vts/functional/OWNERS b/contexthub/1.2/vts/functional/OWNERS
index 1a33a9e..2cf5bca 100644
--- a/contexthub/1.2/vts/functional/OWNERS
+++ b/contexthub/1.2/vts/functional/OWNERS
@@ -1,7 +1,2 @@
-#Context Hub team
-arthuri@google.com
-bduddie@google.com
-stange@google.com
-
-#VTS team
-dshi@google.com
+# Bug component: 156070
+include ../../../1.0/vts/functional/OWNERS
diff --git a/graphics/composer/2.4/IComposerClient.hal b/graphics/composer/2.4/IComposerClient.hal
index 1a59bbd..9e3cf0e 100644
--- a/graphics/composer/2.4/IComposerClient.hal
+++ b/graphics/composer/2.4/IComposerClient.hal
@@ -34,9 +34,7 @@
         /**
          * The configuration group ID (as int32_t) this config is associated to.
          * Switching between configurations within the same group may be done seamlessly
-         * in some conditions via setActiveConfigWithConstraints. Configurations which
-         * share the same config group are similar in all attributes except for the
-         * vsync period.
+         * in some conditions via setActiveConfigWithConstraints.
          */
         CONFIG_GROUP = 7,
     };
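
For illustration, a sketch of what CONFIG_GROUP implies for clients
(hypothetical helper; the ComposerClient wrapper and the
getDisplayAttribute_2_4 signature are the ones used in the VTS test below):

    // Two configs are candidates for seamless switching via
    // setActiveConfigWithConstraints only if they report the same group.
    static bool sameConfigGroup(ComposerClient* client, Display display,
                                Config a, Config b) {
        using Attribute = IComposerClient::Attribute;
        return client->getDisplayAttribute_2_4(display, a, Attribute::CONFIG_GROUP) ==
               client->getDisplayAttribute_2_4(display, b, Attribute::CONFIG_GROUP);
    }
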
diff --git a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
index 5aceda7..b071f71 100644
--- a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
+++ b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
@@ -19,8 +19,6 @@
 #include <algorithm>
 #include <regex>
 #include <thread>
-#include <unordered_map>
-#include <utility>
 
 #include <android-base/logging.h>
 #include <android-base/properties.h>
@@ -317,59 +315,6 @@
     }
 }
 
-TEST_P(GraphicsComposerHidlTest, GetDisplayAttribute_2_4_ConfigsInAGroupDifferOnlyByVsyncPeriod) {
-    struct Resolution {
-        int32_t width, height;
-    };
-    struct Dpi {
-        int32_t x, y;
-    };
-    for (const auto& display : mDisplays) {
-        std::vector<Config> configs = mComposerClient->getDisplayConfigs(display.get());
-        std::unordered_map<int32_t, Resolution> configGroupToResolutionMap;
-        std::unordered_map<int32_t, Dpi> configGroupToDpiMap;
-        for (auto config : configs) {
-            const auto configGroup = mComposerClient->getDisplayAttribute_2_4(
-                    display.get(), config, IComposerClient::Attribute::CONFIG_GROUP);
-            const auto width = mComposerClient->getDisplayAttribute_2_4(
-                    display.get(), config, IComposerClient::Attribute::WIDTH);
-            const auto height = mComposerClient->getDisplayAttribute_2_4(
-                    display.get(), config, IComposerClient::Attribute::HEIGHT);
-            if (configGroupToResolutionMap.find(configGroup) == configGroupToResolutionMap.end()) {
-                configGroupToResolutionMap[configGroup] = {width, height};
-            }
-            EXPECT_EQ(configGroupToResolutionMap[configGroup].width, width);
-            EXPECT_EQ(configGroupToResolutionMap[configGroup].height, height);
-
-            int32_t dpiX = -1;
-            mComposerClient->getRaw()->getDisplayAttribute_2_4(
-                    display.get(), config, IComposerClient::Attribute::DPI_X,
-                    [&](const auto& tmpError, const auto& value) {
-                        if (tmpError == Error::NONE) {
-                            dpiX = value;
-                        }
-                    });
-            int32_t dpiY = -1;
-            mComposerClient->getRaw()->getDisplayAttribute_2_4(
-                    display.get(), config, IComposerClient::Attribute::DPI_Y,
-                    [&](const auto& tmpError, const auto& value) {
-                        if (tmpError == Error::NONE) {
-                            dpiY = value;
-                        }
-                    });
-            if (dpiX == -1 && dpiY == -1) {
-                continue;
-            }
-
-            if (configGroupToDpiMap.find(configGroup) == configGroupToDpiMap.end()) {
-                configGroupToDpiMap[configGroup] = {dpiX, dpiY};
-            }
-            EXPECT_EQ(configGroupToDpiMap[configGroup].x, dpiX);
-            EXPECT_EQ(configGroupToDpiMap[configGroup].y, dpiY);
-        }
-    }
-}
-
 TEST_P(GraphicsComposerHidlTest, getDisplayVsyncPeriod_BadDisplay) {
     VsyncPeriodNanos vsyncPeriodNanos;
     EXPECT_EQ(Error::BAD_DISPLAY,
diff --git a/graphics/mapper/2.0/vts/OWNERS b/graphics/mapper/2.0/vts/OWNERS
index 4177296..62e3f2a 100644
--- a/graphics/mapper/2.0/vts/OWNERS
+++ b/graphics/mapper/2.0/vts/OWNERS
@@ -1,8 +1,5 @@
-# Graphics team
+# Bug component: 25423
 chrisforbes@google.com
 jreck@google.com
 lpy@google.com
-
-# VTS team
-yim@google.com
-zhuoyao@google.com
+sumir@google.com
diff --git a/graphics/mapper/2.0/vts/functional/OWNERS b/graphics/mapper/2.0/vts/functional/OWNERS
deleted file mode 100644
index a2ed8c8..0000000
--- a/graphics/mapper/2.0/vts/functional/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# Bug component: 25423
-sumir@google.com
diff --git a/graphics/mapper/2.1/vts/OWNERS b/graphics/mapper/2.1/vts/OWNERS
index 4177296..43c018a 100644
--- a/graphics/mapper/2.1/vts/OWNERS
+++ b/graphics/mapper/2.1/vts/OWNERS
@@ -1,8 +1,2 @@
-# Graphics team
-chrisforbes@google.com
-jreck@google.com
-lpy@google.com
-
-# VTS team
-yim@google.com
-zhuoyao@google.com
+# Bug component: 25423
+include ../../2.0/vts/OWNERS
diff --git a/graphics/mapper/2.1/vts/functional/OWNERS b/graphics/mapper/2.1/vts/functional/OWNERS
deleted file mode 100644
index a2ed8c8..0000000
--- a/graphics/mapper/2.1/vts/functional/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# Bug component: 25423
-sumir@google.com
diff --git a/graphics/mapper/3.0/vts/OWNERS b/graphics/mapper/3.0/vts/OWNERS
index c9f24d0..43c018a 100644
--- a/graphics/mapper/3.0/vts/OWNERS
+++ b/graphics/mapper/3.0/vts/OWNERS
@@ -1,4 +1,2 @@
-# Graphics team
-chrisforbes@google.com
-jreck@google.com
-lpy@google.com
+# Bug component: 25423
+include ../../2.0/vts/OWNERS
diff --git a/graphics/mapper/4.0/vts/OWNERS b/graphics/mapper/4.0/vts/OWNERS
index c9f24d0..43c018a 100644
--- a/graphics/mapper/4.0/vts/OWNERS
+++ b/graphics/mapper/4.0/vts/OWNERS
@@ -1,4 +1,2 @@
-# Graphics team
-chrisforbes@google.com
-jreck@google.com
-lpy@google.com
+# Bug component: 25423
+include ../../2.0/vts/OWNERS
diff --git a/health/2.0/utils/libhealthhalutils/HealthHalUtils.cpp b/health/2.0/utils/libhealthhalutils/HealthHalUtils.cpp
index 9e1cc70..3c353e6 100644
--- a/health/2.0/utils/libhealthhalutils/HealthHalUtils.cpp
+++ b/health/2.0/utils/libhealthhalutils/HealthHalUtils.cpp
@@ -25,7 +25,26 @@
 namespace V2_0 {
 
 sp<IHealth> get_health_service() {
-    for (auto&& instanceName : {"default", "backup"}) {
+    // For the core and vendor variants, the "backup" instance points to
+    // healthd, which has been removed.
+    // For the recovery variant, the "backup" instance has a different
+    // meaning: it points to android.hardware.health@2.0-impl-default.recovery,
+    // which OEMs have assumed is always installed when a vendor-specific
+    // libhealthd is not necessary, so its behavior is kept. See
+    // health/2.0/README.md.
+    // android.hardware.health@2.0-impl-default.recovery, and subsequently the
+    // special handling of recovery mode below, can be removed once health@2.1
+    // is the minimum required version (i.e. compatibility matrix level 5 is the
+    // minimum supported level). Health 2.1 requires OEMs to install the
+    // implementation to the recovery partition when it is necessary (i.e. on
+    // non-A/B devices, where IsBatteryOk() is needed in recovery).
+    for (auto&& instanceName :
+#ifdef __ANDROID_RECOVERY__
+         { "default", "backup" }
+#else
+         {"default"}
+#endif
+    ) {
         auto ret = IHealth::getService(instanceName);
         if (ret != nullptr) {
             return ret;
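
For reference, a minimal sketch of a caller (hypothetical client code; the
getHealthInfo callback signature follows the IHealth 2.0 interface, and the
batteryLevel field is from the 1.0 HealthInfo embedded as `legacy`):

    #include <android-base/logging.h>
    #include <healthhalutils/HealthHalUtils.h>

    using android::sp;
    using namespace android::hardware::health::V2_0;

    int main() {
        // Resolves "default", plus "backup" when built for recovery.
        sp<IHealth> health = get_health_service();
        if (health == nullptr) return 1;  // no health HAL registered
        health->getHealthInfo([](Result result, const HealthInfo& info) {
            if (result == Result::SUCCESS) {
                LOG(INFO) << "battery level: " << int(info.legacy.batteryLevel);
            }
        });
        return 0;
    }
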
diff --git a/health/aidl/OWNERS b/health/aidl/OWNERS
new file mode 100644
index 0000000..cd06415
--- /dev/null
+++ b/health/aidl/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 30545
+elsk@google.com
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
index 1baabdf..360b338 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
@@ -50,8 +50,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
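
With this change, compliantVersion reports failures as a plain nn::Result, so
call sites can NN_TRY it directly instead of wrapping it in an adapter. An
illustrative call site (hypothetical function; assumes nn::validate is
overloaded for the type being checked):

    nn::Result<void> checkOperand(const nn::Operand& operand) {
        NN_TRY(compliantVersion(operand));  // fails if a newer HAL is required
        return {};
    }
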
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 00970c0..3060c65 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -63,12 +63,11 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
 
     return executeInternal(hidlRequest, relocation);
 }
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
index a8cf8cf..09d9fe8 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
@@ -52,8 +52,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 09691b6..5c3b8a7 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -61,8 +61,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index 9f54bb1..01b5e12 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -75,8 +75,7 @@
                << "execution failed with " << toString(status);
     }
     HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
-    return hal::utils::makeExecutionFailure(
-            convertExecutionGeneralResultsHelper(outputShapes, timing));
+    return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 29945b7..6a80b42 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -120,9 +120,8 @@
             NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
 
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index b4b6f68..2746965 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -176,7 +176,10 @@
         std::lock_guard guard(mMutex);
         const int32_t slot = mMemoryIdToSlot.at(memory);
         if (mBurstContext) {
-            mBurstContext->freeMemory(slot);
+            const auto ret = mBurstContext->freeMemory(slot);
+            if (!ret.isOk()) {
+                LOG(ERROR) << "IBurstContext::freeMemory failed: " << ret.description();
+            }
         }
         mMemoryIdToSlot.erase(memory);
         mMemoryCache[slot] = {};
@@ -317,8 +320,7 @@
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     }
@@ -326,17 +328,15 @@
     // ensure that request is ready for IPC
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
             .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
-    auto hidlRequest = NN_TRY(
-            hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     std::vector<int32_t> slots;
     std::vector<OptionalCacheHold> holds;
@@ -364,8 +364,7 @@
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
-        version > nn::Version::ANDROID_Q) {
+    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
     }
@@ -427,8 +426,7 @@
     }
 
     // get result packet
-    const auto [status, outputShapes, timing] =
-            NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+    const auto [status, outputShapes, timing] = NN_TRY(mResultChannelReceiver->getBlocking());
 
     if (relocation.output) {
         relocation.output->flush();
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
index c67159e..65ec7f5 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
@@ -45,8 +45,6 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 namespace {
 
-using neuralnetworks::utils::makeExecutionFailure;
-
 constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
                                     std::numeric_limits<uint64_t>::max()};
 
@@ -241,28 +239,25 @@
                  "ExecutionBurstServer getting memory, executing, and returning results");
 
     // ensure executor with cache has required memory
-    const auto cacheEntries =
-            NN_TRY(makeExecutionFailure(mMemoryCache.getCacheEntries(slotsOfPools)));
+    const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));
 
     // convert request, populating its pools
     // This code performs an unvalidated convert because the request object without its pools is
     // invalid because it is incomplete. Instead, the validation is performed after the memory pools
     // have been added to the request.
-    auto canonicalRequest =
-            NN_TRY(makeExecutionFailure(nn::unvalidatedConvert(requestWithoutPools)));
+    auto canonicalRequest = NN_TRY(nn::unvalidatedConvert(requestWithoutPools));
     CHECK(canonicalRequest.pools.empty());
     std::transform(cacheEntries.begin(), cacheEntries.end(),
                    std::back_inserter(canonicalRequest.pools),
                    [](const auto& cacheEntry) { return cacheEntry.first; });
-    NN_TRY(makeExecutionFailure(validate(canonicalRequest)));
+    NN_TRY(validate(canonicalRequest));
 
-    nn::MeasureTiming canonicalMeasure = NN_TRY(makeExecutionFailure(nn::convert(measure)));
+    nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
 
     const auto [outputShapes, timing] =
             NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
-    return std::make_pair(NN_TRY(makeExecutionFailure(convert(outputShapes))),
-                          NN_TRY(makeExecutionFailure(convert(timing))));
+    return std::make_pair(NN_TRY(convert(outputShapes)), NN_TRY(convert(timing)));
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index d0ef36e..c261184 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -95,13 +95,12 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlMeasure = NN_TRY(convert(measure));
 
     return executeInternal(hidlRequest, hidlMeasure, relocation);
 }
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
index 1d76caa..28525bd 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
@@ -61,8 +61,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
index 8e9fb83..156216f 100644
--- a/neuralnetworks/1.3/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -91,8 +91,7 @@
                << "execution failed with " << toString(status);
     }
     HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
-    return hal::utils::makeExecutionFailure(
-            convertExecutionGeneralResultsHelper(outputShapes, timing));
+    return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 11225cf..b35b2cd 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -131,9 +131,8 @@
     }
 
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp
index 3d17cc3..4dc0ddf 100644
--- a/neuralnetworks/1.3/utils/src/Execution.cpp
+++ b/neuralnetworks/1.3/utils/src/Execution.cpp
@@ -65,7 +65,7 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
                                            kRelocation);
 }
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 1623de5..d5dee9d 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -62,8 +62,7 @@
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
         auto sharedHandle = NN_TRY(nn::convert(syncFence));
-        resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure(
-                nn::SyncFence::create(std::move(sharedHandle)), nn::ErrorStatus::GENERAL_FAILURE));
+        resultSyncFence = NN_TRY(nn::SyncFence::create(std::move(sharedHandle)));
     }
 
     if (callback == nullptr) {
@@ -141,16 +140,14 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto hidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto hidlRequest = NN_TRY(convert(requestInShared));
+    const auto hidlMeasure = NN_TRY(convert(measure));
+    const auto hidlDeadline = NN_TRY(convert(deadline));
+    const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
 
     return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
                            relocation);
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index 1382bdb..ab0a018 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -66,7 +66,7 @@
         "VtsHalNeuralNetworksV1_0_utils",
         "VtsHalNeuralNetworksV1_2_utils",
         "VtsHalNeuralNetworksV1_3_utils",
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
index de3b438..2eff11b 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
@@ -136,4 +136,6 @@
   HARD_SWISH = 99,
   FILL = 100,
   RANK = 101,
+  BATCH_MATMUL = 102,
+  PACK = 103,
 }
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
index 52d2d70..2ec91ac 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
@@ -1471,6 +1471,7 @@
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+     * * {@link OperandType::TENSOR_INT32} (since NNAPI feature level 6)
      *
      * Supported tensor rank: up to 4.
      *
@@ -5236,4 +5237,89 @@
      *      of the input tensor.
      */
     RANK = 101,
+
+    /**
+     * Performs multiplication of two tensors in batches.
+     *
+     * Multiplies all slices of two input tensors and arranges the individual
+     * results in a single output tensor of the same batch size. Each pair of
+     * slices in the same batch has identical {@link OperandType}. Each
+     * slice can optionally be adjointed (transposed and conjugated) before
+     * multiplication.
+     *
+     * The two input tensors and the output tensor must be 2-D or higher and
+     * have the same batch size.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+     * * {@link OperandType::TENSOR_INT32}
+     *
+     * Supported tensor rank: at least 2 and up to 4
+     *
+     * Inputs:
+     * * 0: A tensor with 2-D or higher shape [..., r_x, c_x].
+     * * 1: A tensor with 2-D or higher shape [..., r_y, c_y]. It has the same
+     *      {@link OperandType} and batch size as input0.
+     * * 2: An optional {@link OperandType::BOOL} scalar adj_x, defaulting
+     *      to false. Set to true to adjoint the slices of input0.
+     * * 3: An optional {@link OperandType::BOOL} scalar adj_y, defaulting
+     *      to false. Set to true to adjoint the slices of input1.
+     *
+     * Outputs:
+     * * 0: A tensor with 2-D or higher shape [..., r_o, c_o], where
+     *      r_o = c_x if adj_x else r_x
+     *      c_o = r_y if adj_y else c_y
+     */
+    BATCH_MATMUL = 102,
+
+    /**
+     * Packs N input tensors (N >= 1) of rank R into one output tensor of rank R+1.
+     * The tensors are packed along a given axis.
+     *
+     * The input tensors must have identical {@link OperandType} and dimensions.
+     *
+     * For example, suppose there are N input tensors of shape (A, B, C).
+     * If axis is 0, the output tensor will have shape (N, A, B, C).
+     * If axis is 1, the output tensor will have shape (A, N, B, C).
+     *
+     * All output dimensions up to and including the axis dimension determine
+     * the tile count; the remaining dimensions determine the tile shape.
+     *
+     * Returning to the example of N input tensors of shape (A, B, C):
+     * if axis is 0, there are N tiles in the output, each of shape (A, B, C);
+     * if axis is 1, there are A*N tiles in the output, each of shape (B, C).
+     *
+     * The coordinates of a tile within the output tensor are (t[0],...,t[axis]).
+     * The coordinates of a tile within an input tensor are (t[0],...,t[axis-1]).
+     * (If axis is 0, an input tensor consists of a single tile.)
+     * If we index input tensors starting with 0 (rather than by operand number),
+     * then output_tile[t[0],...,t[axis]] = input_tile[t[axis]][t[0],...,t[axis-1]].
+     * That is, all output tile coordinates except for the axis coordinate select
+     * the corresponding location within some input tensor; and the axis coordinate
+     * selects the input tensor.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+     * * {@link OperandType::TENSOR_INT32}
+     *
+     * Supported input tensor rank: at least 1
+     *
+     * Inputs:
+     * * 0: A scalar of type {@link OperandType::INT32}, specifying
+     *      the axis along which to pack.  The valid range is [0, R+1).
+     * * 1 ~ N: Input tensors to be packed together.
+     *          For {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *          {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensors,
+     *          the scales and zeroPoint must be the same for all input tensors,
+     *          and will be the same for the output tensor.
+     *
+     * Outputs:
+     * * 0: The packed tensor.
+     */
+    PACK = 103,
 }
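
To make the two new operations concrete, reference sketches in plain C++
(illustrative only, not the NNAPI implementation; float-only, row-major, and
rank-3 inputs for the matmul case):

    #include <cstddef>
    #include <vector>

    // BATCH_MATMUL: out[b] = op(x[b]) * op(y[b]) for each batch b, where op()
    // transposes when the adj flag is set (conjugation is a no-op for floats).
    // x is [batch, rx, cx] and y is [batch, ry, cy]; per the spec above, the
    // output is [batch, r_o, c_o] with r_o = adjX ? cx : rx, c_o = adjY ? ry : cy.
    std::vector<float> batchMatmul(const std::vector<float>& x, size_t rx, size_t cx,
                                   const std::vector<float>& y, size_t ry, size_t cy,
                                   size_t batch, bool adjX, bool adjY) {
        const size_t ro = adjX ? cx : rx;
        const size_t k = adjX ? rx : cx;  // contraction length
        const size_t co = adjY ? ry : cy;
        std::vector<float> out(batch * ro * co, 0.0f);
        for (size_t b = 0; b < batch; ++b)
            for (size_t i = 0; i < ro; ++i)
                for (size_t j = 0; j < co; ++j)
                    for (size_t p = 0; p < k; ++p) {
                        const float xv = adjX ? x[(b * rx + p) * cx + i]
                                              : x[(b * rx + i) * cx + p];
                        const float yv = adjY ? y[(b * ry + j) * cy + p]
                                              : y[(b * ry + p) * cy + j];
                        out[(b * ro + i) * co + j] += xv * yv;
                    }
        return out;
    }

    // PACK: N tensors of identical `shape` packed along `axis`, producing rank
    // R+1. The tile count is the product of the dimensions before the axis
    // (times N), and the tile shape is the dimensions from the axis onward,
    // matching the description above.
    std::vector<float> pack(const std::vector<std::vector<float>>& inputs,
                            const std::vector<size_t>& shape, size_t axis) {
        size_t outer = 1, inner = 1;
        for (size_t d = 0; d < axis; ++d) outer *= shape[d];
        for (size_t d = axis; d < shape.size(); ++d) inner *= shape[d];
        std::vector<float> out;
        out.reserve(inputs.size() * outer * inner);
        for (size_t o = 0; o < outer; ++o)    // tile coordinates before the axis
            for (const auto& input : inputs)  // axis coordinate = input index
                out.insert(out.end(), input.begin() + o * inner,
                           input.begin() + (o + 1) * inner);
        return out;
    }
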
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 508b1ea..cb67b84 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -38,7 +38,7 @@
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "libbinder_ndk",
         "libhidlbase",
     ],
@@ -58,7 +58,7 @@
     static_libs: [
         "android.hardware.common-V2-ndk",
         "android.hardware.graphics.common-V2-ndk",
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "libaidlcommonsupport",
         "libgmock",
         "libneuralnetworks_common",
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index 316d34f..1b149e4 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -50,9 +50,8 @@
 }
 
 template <typename Type>
-nn::GeneralResult<void> compliantVersion(const Type& canonical) {
-    const auto version = NN_TRY(::android::hardware::neuralnetworks::utils::makeGeneralFailure(
-            nn::validate(canonical)));
+nn::Result<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(nn::validate(canonical));
     if (version > kVersion) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index 800ac32..c59c10b 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -176,16 +176,14 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto aidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto aidlRequest = NN_TRY(convert(requestInShared));
+    const auto aidlMeasure = NN_TRY(convert(measure));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
 
     std::vector<int64_t> memoryIdentifierTokens;
     std::vector<OptionalCacheHold> holds;
@@ -233,8 +231,8 @@
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
-    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
-            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+    auto [outputShapes, timing] =
+            NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
 
     if (relocation.output) {
         relocation.output->flush();
@@ -308,7 +306,7 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
     return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
                                    kLoopTimeoutDuration, kRelocation);
 }
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index f087156..ddff3f2 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -178,9 +178,8 @@
     }
 
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
 
     return Capabilities{
             .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index 2aee8a6..13d4f32 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -60,7 +60,7 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
         const nn::OptionalTimePoint& deadline) const {
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
                                            kRelocation);
 }
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index f861d74..0769016 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -78,16 +78,14 @@
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
-    const nn::Request& requestInShared =
-            NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
-                    &maybeRequestInShared, &relocation)));
+    const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
-    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
-    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
-    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
-    const auto aidlLoopTimeoutDuration =
-            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+    const auto aidlRequest = NN_TRY(convert(requestInShared));
+    const auto aidlMeasure = NN_TRY(convert(measure));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+    const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
     return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
                            relocation);
 }
@@ -110,8 +108,8 @@
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
-    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
-            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+    auto [outputShapes, timing] =
+            NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
 
     if (relocation.output) {
         relocation.output->flush();
diff --git a/neuralnetworks/aidl/vts/OWNERS b/neuralnetworks/aidl/vts/OWNERS
index 6719a5b..f1a757a 100644
--- a/neuralnetworks/aidl/vts/OWNERS
+++ b/neuralnetworks/aidl/vts/OWNERS
@@ -2,11 +2,8 @@
 butlermichael@google.com
 dgross@google.com
+ianhua@google.com
 jeanluc@google.com
-levp@google.com
 miaowang@google.com
 mikie@google.com
-mks@google.com
 pszczepaniak@google.com
-slavash@google.com
-vddang@google.com
 xusongw@google.com
diff --git a/neuralnetworks/aidl/vts/functional/Android.bp b/neuralnetworks/aidl/vts/functional/Android.bp
index 8fa9756..4dc2ed0 100644
--- a/neuralnetworks/aidl/vts/functional/Android.bp
+++ b/neuralnetworks/aidl/vts/functional/Android.bp
@@ -51,7 +51,7 @@
     static_libs: [
         "android.hardware.common-V2-ndk",
         "android.hardware.graphics.common-V2-ndk",
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libaidlcommonsupport",
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
index 8968c2c..7397def 100644
--- a/neuralnetworks/utils/adapter/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
@@ -57,6 +57,15 @@
     return result;
 }
 
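+// Same as nn::validateRequestForModel, but remaps validation failures to
+// INVALID_ARGUMENT, the status the adapter must report for a bad request.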
+nn::GeneralResult<nn::Version> validateRequestForModel(const nn::Request& request,
+                                                       const nn::Model& model) {
+    nn::GeneralResult<nn::Version> version = nn::validateRequestForModel(request, model);
+    if (!version.ok()) {
+        version.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return version;
+}
+
 class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
   public:
     explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
@@ -148,8 +157,7 @@
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
@@ -175,8 +183,7 @@
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
@@ -206,8 +213,7 @@
     const std::any resource = preparedModel->getUnderlyingResource();
     if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
         CHECK(*model != nullptr);
-        NN_TRY(utils::makeGeneralFailure(nn::validateRequestForModel(nnRequest, **model),
-                                         nn::ErrorStatus::INVALID_ARGUMENT));
+        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
@@ -224,14 +230,14 @@
 nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously(
         const nn::SharedPreparedModel& preparedModel, const V1_0::Request& request,
         V1_2::MeasureTiming measure) {
-    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
-    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
 
     const auto [outputShapes, timing] =
             NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
 
-    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(outputShapes)));
-    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_2::utils::convert(timing)));
+    auto hidlOutputShapes = NN_TRY(V1_2::utils::convert(outputShapes));
+    const auto hidlTiming = NN_TRY(V1_2::utils::convert(timing));
     return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
 }
 
@@ -239,29 +245,30 @@
         const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
         V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
         const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
-    const auto nnRequest = NN_TRY(utils::makeExecutionFailure(convertInput(request)));
-    const auto nnMeasure = NN_TRY(utils::makeExecutionFailure(convertInput(measure)));
-    const auto nnDeadline = NN_TRY(utils::makeExecutionFailure(convertInput(deadline)));
-    const auto nnLoopTimeoutDuration =
-            NN_TRY(utils::makeExecutionFailure(convertInput(loopTimeoutDuration)));
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasure = NN_TRY(convertInput(measure));
+    const auto nnDeadline = NN_TRY(convertInput(deadline));
+    const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
 
     const auto [outputShapes, timing] =
             NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
 
-    auto hidlOutputShapes = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(outputShapes)));
-    const auto hidlTiming = NN_TRY(utils::makeExecutionFailure(V1_3::utils::convert(timing)));
+    auto hidlOutputShapes = NN_TRY(V1_3::utils::convert(outputShapes));
+    const auto hidlTiming = NN_TRY(V1_3::utils::convert(timing));
     return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
 }
 
 nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
         const hidl_vec<hidl_handle>& handles) {
+    auto nnHandles = NN_TRY(convertInput(handles));
     std::vector<nn::SyncFence> syncFences;
     syncFences.reserve(handles.size());
-    for (const auto& handle : handles) {
-        auto nativeHandle = NN_TRY(convertInput(handle));
-        auto syncFence = NN_TRY(utils::makeGeneralFailure(
-                nn::SyncFence::create(std::move(nativeHandle)), nn::ErrorStatus::INVALID_ARGUMENT));
-        syncFences.push_back(std::move(syncFence));
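+    // SyncFence::create rejects invalid handles; report that as
+    // INVALID_ARGUMENT rather than the default GENERAL_FAILURE.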
+    for (auto&& handle : nnHandles) {
+        if (auto syncFence = nn::SyncFence::create(std::move(handle)); !syncFence.ok()) {
+            return nn::error(nn::ErrorStatus::INVALID_ARGUMENT) << std::move(syncFence).error();
+        } else {
+            syncFences.push_back(std::move(syncFence).value());
+        }
     }
     return syncFences;
 }
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index 431885c..f88e407 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -35,7 +35,7 @@
         "neuralnetworks_types",
     ],
     shared_libs: [
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "libhidlbase",
         "libbinder_ndk",
     ],
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
index 209b663..e51f916 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
@@ -52,38 +52,6 @@
         std::move(result).value();                                                           \
     })
 
-template <typename Type>
-nn::GeneralResult<Type> makeGeneralFailure(
-        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
-    if (!result.has_value()) {
-        return nn::error(status) << std::move(result).error();
-    }
-    if constexpr (!std::is_same_v<Type, void>) {
-        return std::move(result).value();
-    } else {
-        return {};
-    }
-}
-
-template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(nn::GeneralResult<Type> result) {
-    if (!result.has_value()) {
-        const auto [message, status] = std::move(result).error();
-        return nn::error(status) << message;
-    }
-    if constexpr (!std::is_same_v<Type, void>) {
-        return std::move(result).value();
-    } else {
-        return {};
-    }
-}
-
-template <typename Type>
-nn::ExecutionResult<Type> makeExecutionFailure(
-        nn::Result<Type> result, nn::ErrorStatus status = nn::ErrorStatus::GENERAL_FAILURE) {
-    return makeExecutionFailure(makeGeneralFailure(result, status));
-}
-
 #define HANDLE_HAL_STATUS(status)                                       \
     if (const auto canonical = ::android::nn::convert(status).value_or( \
                 ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
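
With these adapters gone, conversion sites feed NN_TRY directly and the error
type is unified. A self-contained sketch of the underlying pattern (an
illustrative stand-in; the real nn::Result is an android::base::expected and
NN_TRY is the statement-expression macro shown above):

    #include <optional>
    #include <string>
    #include <utility>

    template <typename T>
    struct MyResult {               // stand-in for nn::Result<T>
        std::optional<T> value;     // engaged on success
        std::string error;          // message on failure
        bool ok() const { return value.has_value(); }
    };

    // Unwrap on success, early-return the error otherwise. Uses the same GNU
    // statement-expression form as NN_TRY; assumes the enclosing function
    // returns a MyResult<U>.
    #define MY_TRY(expr)                                                        \
        ({                                                                      \
            auto _result = (expr);                                              \
            if (!_result.ok()) return {std::nullopt, std::move(_result.error)}; \
            std::move(*_result.value);                                          \
        })

    MyResult<int> parseDigit(char c) {
        if (c < '0' || c > '9') return {std::nullopt, std::string("not a digit: ") + c};
        return {c - '0', {}};
    }

    MyResult<int> addDigits(char a, char b) {
        const int x = MY_TRY(parseDigit(a));  // propagates the error, no adapter
        const int y = MY_TRY(parseDigit(b));
        return {x + y, {}};
    }
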
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index ae02c88..235ba29 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -333,7 +333,7 @@
 
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
-    return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
+    return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
 nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index 653e51a..fbb8679 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -39,7 +39,7 @@
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "android.hardware.neuralnetworks-V1-ndk",
+        "android.hardware.neuralnetworks-V2-ndk",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
diff --git a/oemlock/1.0/vts/functional/OWNERS b/oemlock/1.0/vts/functional/OWNERS
new file mode 100644
index 0000000..ec8c304
--- /dev/null
+++ b/oemlock/1.0/vts/functional/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 186411
+chengyouho@google.com
+frankwoo@google.com
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index e07d2ba..d3f0cf9 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -32,23 +32,17 @@
     EXPECT_EQ(std::cv_status::no_timeout, wait());
     EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
     EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
-
-    if (getRadioHalCapabilities()) {
-        ASSERT_TRUE(CheckAnyOfErrors(
-                radioRsp_v1_6->rspInfo.error,
-                {::android::hardware::radio::V1_6::RadioError::REQUEST_NOT_SUPPORTED}));
-    } else {
-        ASSERT_TRUE(CheckAnyOfErrors(
-                radioRsp_v1_6->rspInfo.error,
-                {::android::hardware::radio::V1_6::RadioError::NONE,
-                 ::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
-                 ::android::hardware::radio::V1_6::RadioError::OPERATION_NOT_ALLOWED,
-                 ::android::hardware::radio::V1_6::RadioError::MODE_NOT_SUPPORTED,
-                 ::android::hardware::radio::V1_6::RadioError::INTERNAL_ERR,
-                 ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS,
-                 ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
-                 ::android::hardware::radio::V1_6::RadioError::NO_RESOURCES}));
-    }
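+    // REQUEST_NOT_SUPPORTED is now accepted alongside the other errors,
+    // with or without radio HAL capabilities, instead of being the only
+    // acceptable error when getRadioHalCapabilities() is true.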
+    ASSERT_TRUE(
+            CheckAnyOfErrors(radioRsp_v1_6->rspInfo.error,
+                             {::android::hardware::radio::V1_6::RadioError::NONE,
+                              ::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+                              ::android::hardware::radio::V1_6::RadioError::OPERATION_NOT_ALLOWED,
+                              ::android::hardware::radio::V1_6::RadioError::MODE_NOT_SUPPORTED,
+                              ::android::hardware::radio::V1_6::RadioError::INTERNAL_ERR,
+                              ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS,
+                              ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+                              ::android::hardware::radio::V1_6::RadioError::REQUEST_NOT_SUPPORTED,
+                              ::android::hardware::radio::V1_6::RadioError::NO_RESOURCES}));
 }
 
 /*
@@ -74,23 +68,17 @@
         EXPECT_EQ(std::cv_status::no_timeout, wait());
         EXPECT_EQ(RadioResponseType::SOLICITED, radioRsp_v1_6->rspInfo.type);
         EXPECT_EQ(serial, radioRsp_v1_6->rspInfo.serial);
-
-        if (getRadioHalCapabilities()) {
-            ASSERT_TRUE(CheckAnyOfErrors(
-                    radioRsp_v1_6->rspInfo.error,
-                    {::android::hardware::radio::V1_6::RadioError::REQUEST_NOT_SUPPORTED}));
-        } else {
-            ASSERT_TRUE(CheckAnyOfErrors(
-                    radioRsp_v1_6->rspInfo.error,
-                    {::android::hardware::radio::V1_6::RadioError::NONE,
-                     ::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
-                     ::android::hardware::radio::V1_6::RadioError::OPERATION_NOT_ALLOWED,
-                     ::android::hardware::radio::V1_6::RadioError::MODE_NOT_SUPPORTED,
-                     ::android::hardware::radio::V1_6::RadioError::INTERNAL_ERR,
-                     ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS,
-                     ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
-                     ::android::hardware::radio::V1_6::RadioError::NO_RESOURCES}));
-        }
+        ASSERT_TRUE(CheckAnyOfErrors(
+                radioRsp_v1_6->rspInfo.error,
+                {::android::hardware::radio::V1_6::RadioError::NONE,
+                 ::android::hardware::radio::V1_6::RadioError::RADIO_NOT_AVAILABLE,
+                 ::android::hardware::radio::V1_6::RadioError::OPERATION_NOT_ALLOWED,
+                 ::android::hardware::radio::V1_6::RadioError::MODE_NOT_SUPPORTED,
+                 ::android::hardware::radio::V1_6::RadioError::INTERNAL_ERR,
+                 ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS,
+                 ::android::hardware::radio::V1_6::RadioError::MODEM_ERR,
+                 ::android::hardware::radio::V1_6::RadioError::REQUEST_NOT_SUPPORTED,
+                 ::android::hardware::radio::V1_6::RadioError::NO_RESOURCES}));
     }
 }
 
diff --git a/vibrator/1.0/vts/OWNERS b/vibrator/1.0/vts/OWNERS
new file mode 100644
index 0000000..75b9a4b
--- /dev/null
+++ b/vibrator/1.0/vts/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 345036
+michaelwr@google.com
+leungv@google.com
diff --git a/vibrator/1.1/vts/OWNERS b/vibrator/1.1/vts/OWNERS
new file mode 100644
index 0000000..44bfe56
--- /dev/null
+++ b/vibrator/1.1/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 345036
+include ../../1.0/vts/OWNERS
diff --git a/vibrator/1.2/vts/OWNERS b/vibrator/1.2/vts/OWNERS
new file mode 100644
index 0000000..44bfe56
--- /dev/null
+++ b/vibrator/1.2/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 345036
+include ../../1.0/vts/OWNERS
diff --git a/vibrator/1.3/vts/OWNERS b/vibrator/1.3/vts/OWNERS
new file mode 100644
index 0000000..44bfe56
--- /dev/null
+++ b/vibrator/1.3/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 345036
+include ../../1.0/vts/OWNERS
diff --git a/vibrator/aidl/OWNERS b/vibrator/aidl/OWNERS
index e3d7e6b..ae10db6 100644
--- a/vibrator/aidl/OWNERS
+++ b/vibrator/aidl/OWNERS
@@ -1,3 +1,4 @@
+# Bug component: 345036
 include platform/frameworks/base:/services/core/java/com/android/server/vibrator/OWNERS
 chasewu@google.com
 leungv@google.com
diff --git a/wifi/1.0/vts/OWNERS b/wifi/1.0/vts/OWNERS
index cf81c79..287152d 100644
--- a/wifi/1.0/vts/OWNERS
+++ b/wifi/1.0/vts/OWNERS
@@ -1,2 +1,3 @@
+# Bug component: 33618
 arabawy@google.com
 etancohen@google.com
diff --git a/wifi/1.0/vts/functional/Android.bp b/wifi/1.0/vts/functional/Android.bp
index e4948b4..6c0ebf7 100644
--- a/wifi/1.0/vts/functional/Android.bp
+++ b/wifi/1.0/vts/functional/Android.bp
@@ -107,8 +107,10 @@
     static_libs: [
         "VtsHalWifiV1_0TargetTestUtil",
         "android.hardware.wifi@1.0",
+        "android.hardware.wifi.hostapd@1.0",
         "libwifi-system-iface",
     ],
+    disable_framework: true,
     test_suites: [
         "general-tests",
         "vts",
diff --git a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
index 96b4501..28b1616 100644
--- a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -15,9 +15,9 @@
  */
 
 #include <android-base/logging.h>
-
 #include <android/hardware/wifi/1.0/IWifi.h>
 #include <android/hardware/wifi/1.0/IWifiApIface.h>
+#include <android/hardware/wifi/hostapd/1.0/IHostapd.h>
 #include <gtest/gtest.h>
 #include <hidl/GtestPrinter.h>
 #include <hidl/ServiceManagement.h>
@@ -26,6 +26,7 @@
 #include "wifi_hidl_test_utils.h"
 
 using ::android::sp;
+using ::android::hardware::wifi::hostapd::V1_0::IHostapd;
 using ::android::hardware::wifi::V1_0::IfaceType;
 using ::android::hardware::wifi::V1_0::IWifi;
 using ::android::hardware::wifi::V1_0::IWifiApIface;
@@ -38,6 +39,10 @@
 class WifiApIfaceHidlTest : public ::testing::TestWithParam<std::string> {
    public:
     virtual void SetUp() override {
+        if (android::hardware::getAllHalInstanceNames(IHostapd::descriptor)
+                .empty()) {
+            GTEST_SKIP() << "Device does not support AP";
+        }
         // Make sure test starts with a clean state
         stopWifi(GetInstanceName());
 
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
index 2e6ad32..66e1a80 100644
--- a/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
@@ -15,9 +15,9 @@
  */
 
 #include <android-base/logging.h>
-
 #include <android/hardware/wifi/1.0/IWifi.h>
 #include <android/hardware/wifi/1.0/IWifiChip.h>
+#include <android/hardware/wifi/hostapd/1.0/IHostapd.h>
 #include <gtest/gtest.h>
 #include <hidl/GtestPrinter.h>
 #include <hidl/ServiceManagement.h>
@@ -26,6 +26,7 @@
 #include "wifi_hidl_test_utils.h"
 
 using ::android::sp;
+using ::android::hardware::wifi::hostapd::V1_0::IHostapd;
 using ::android::hardware::wifi::V1_0::ChipModeId;
 using ::android::hardware::wifi::V1_0::IfaceType;
 using ::android::hardware::wifi::V1_0::IWifi;
@@ -41,6 +42,10 @@
 class WifiChipHidlApTest : public ::testing::TestWithParam<std::string> {
    public:
     virtual void SetUp() override {
+        if (android::hardware::getAllHalInstanceNames(IHostapd::descriptor)
+                .empty()) {
+            GTEST_SKIP() << "Device does not support AP";
+        }
         // Make sure test starts with a clean state
         stopWifi(GetInstanceName());
 
diff --git a/wifi/1.1/vts/OWNERS b/wifi/1.1/vts/OWNERS
index cf81c79..294fc82 100644
--- a/wifi/1.1/vts/OWNERS
+++ b/wifi/1.1/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.0/vts/OWNERS
diff --git a/wifi/1.1/vts/functional/Android.bp b/wifi/1.1/vts/functional/Android.bp
index 8048642..a8f3470 100644
--- a/wifi/1.1/vts/functional/Android.bp
+++ b/wifi/1.1/vts/functional/Android.bp
@@ -39,6 +39,7 @@
         "android.hardware.wifi@1.5",
         "libwifi-system-iface",
     ],
+    disable_framework: true,
     test_suites: [
         "general-tests",
         "vts",
diff --git a/wifi/1.2/vts/OWNERS b/wifi/1.2/vts/OWNERS
index cf81c79..294fc82 100644
--- a/wifi/1.2/vts/OWNERS
+++ b/wifi/1.2/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.0/vts/OWNERS
diff --git a/wifi/1.3/vts/OWNERS b/wifi/1.3/vts/OWNERS
index cf81c79..294fc82 100644
--- a/wifi/1.3/vts/OWNERS
+++ b/wifi/1.3/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.0/vts/OWNERS
diff --git a/wifi/1.4/vts/OWNERS b/wifi/1.4/vts/OWNERS
index cf81c79..294fc82 100644
--- a/wifi/1.4/vts/OWNERS
+++ b/wifi/1.4/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.0/vts/OWNERS
diff --git a/wifi/1.4/vts/functional/Android.bp b/wifi/1.4/vts/functional/Android.bp
index 14ebbe3..f86869b 100644
--- a/wifi/1.4/vts/functional/Android.bp
+++ b/wifi/1.4/vts/functional/Android.bp
@@ -14,7 +14,6 @@
 // limitations under the License.
 //
 
-// SoftAP-specific tests, similar to VtsHalWifiApV1_0TargetTest.
 package {
     // See: http://go/android-license-faq
     // A large-scale-change added 'default_applicable_licenses' to import
@@ -25,10 +24,9 @@
 }
 
 cc_test {
-    name: "VtsHalWifiApV1_4TargetTest",
+    name: "VtsHalWifiV1_4TargetTest",
     defaults: ["VtsHalTargetTestDefaults"],
     srcs: [
-        "wifi_ap_iface_hidl_test.cpp",
         "wifi_chip_hidl_test.cpp",
     ],
     static_libs: [
@@ -46,6 +44,30 @@
     ],
 }
 
+// SoftAP-specific tests, similar to VtsHalWifiApV1_0TargetTest.
+cc_test {
+    name: "VtsHalWifiApV1_4TargetTest",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: [
+        "wifi_ap_iface_hidl_test.cpp",
+    ],
+    static_libs: [
+        "VtsHalWifiV1_0TargetTestUtil",
+        "android.hardware.wifi@1.0",
+        "android.hardware.wifi@1.1",
+        "android.hardware.wifi@1.2",
+        "android.hardware.wifi@1.3",
+        "android.hardware.wifi@1.4",
+        "android.hardware.wifi.hostapd@1.0",
+        "libwifi-system-iface",
+    ],
+    disable_framework: true,
+    test_suites: [
+        "general-tests",
+        "vts",
+    ],
+}
+
 // These tests are split out so that they can be conditioned on presence of the
 // "android.hardware.wifi.aware" feature.
 cc_test {
diff --git a/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
index 5b0f173..756afa5 100644
--- a/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
+++ b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -16,6 +16,7 @@
 
 #include <android/hardware/wifi/1.4/IWifi.h>
 #include <android/hardware/wifi/1.4/IWifiApIface.h>
+#include <android/hardware/wifi/hostapd/1.0/IHostapd.h>
 #include <gtest/gtest.h>
 #include <hidl/GtestPrinter.h>
 #include <hidl/ServiceManagement.h>
@@ -25,6 +26,7 @@
 
 using ::android::sp;
 using ::android::hardware::hidl_array;
+using ::android::hardware::wifi::hostapd::V1_0::IHostapd;
 using ::android::hardware::wifi::V1_0::WifiStatus;
 using ::android::hardware::wifi::V1_0::WifiStatusCode;
 using ::android::hardware::wifi::V1_4::IWifi;
@@ -36,6 +38,10 @@
 class WifiApIfaceHidlTest : public ::testing::TestWithParam<std::string> {
    public:
     virtual void SetUp() override {
+        if (android::hardware::getAllHalInstanceNames(IHostapd::descriptor)
+                .empty()) {
+            GTEST_SKIP() << "Device does not support AP";
+        }
         // Make sure to start with a clean state
         stopWifi(GetInstanceName());
 
diff --git a/wifi/1.5/vts/OWNERS b/wifi/1.5/vts/OWNERS
index cf81c79..294fc82 100644
--- a/wifi/1.5/vts/OWNERS
+++ b/wifi/1.5/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.0/vts/OWNERS
diff --git a/wifi/offload/1.0/vts/OWNERS b/wifi/offload/1.0/vts/OWNERS
new file mode 100644
index 0000000..287152d
--- /dev/null
+++ b/wifi/offload/1.0/vts/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 33618
+arabawy@google.com
+etancohen@google.com
diff --git a/wifi/supplicant/1.0/vts/OWNERS b/wifi/supplicant/1.0/vts/OWNERS
new file mode 100644
index 0000000..b16dc11
--- /dev/null
+++ b/wifi/supplicant/1.0/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 33618
+include ../../1.3/vts/OWNERS
diff --git a/wifi/supplicant/1.1/vts/OWNERS b/wifi/supplicant/1.1/vts/OWNERS
new file mode 100644
index 0000000..b16dc11
--- /dev/null
+++ b/wifi/supplicant/1.1/vts/OWNERS
@@ -0,0 +1,2 @@
+# Bug component: 33618
+include ../../1.3/vts/OWNERS
diff --git a/wifi/supplicant/1.2/vts/OWNERS b/wifi/supplicant/1.2/vts/OWNERS
index cf81c79..b16dc11 100644
--- a/wifi/supplicant/1.2/vts/OWNERS
+++ b/wifi/supplicant/1.2/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.3/vts/OWNERS
diff --git a/wifi/supplicant/1.3/vts/OWNERS b/wifi/supplicant/1.3/vts/OWNERS
index cf81c79..287152d 100644
--- a/wifi/supplicant/1.3/vts/OWNERS
+++ b/wifi/supplicant/1.3/vts/OWNERS
@@ -1,2 +1,3 @@
+# Bug component: 33618
 arabawy@google.com
 etancohen@google.com
diff --git a/wifi/supplicant/1.4/vts/OWNERS b/wifi/supplicant/1.4/vts/OWNERS
index cf81c79..b16dc11 100644
--- a/wifi/supplicant/1.4/vts/OWNERS
+++ b/wifi/supplicant/1.4/vts/OWNERS
@@ -1,2 +1,2 @@
-arabawy@google.com
-etancohen@google.com
+# Bug component: 33618
+include ../../1.3/vts/OWNERS