Merge "Consolidate OWNERS files"
diff --git a/cas/1.0/vts/functional/OWNERS b/cas/1.0/vts/functional/OWNERS
index aec93b0..7d8c2ee 100644
--- a/cas/1.0/vts/functional/OWNERS
+++ b/cas/1.0/vts/functional/OWNERS
@@ -1,2 +1,2 @@
 # Bug component: 1344
-quxiangfang@google.com
+include ../../../1.2/vts/functional/OWNERS
diff --git a/cas/1.1/vts/functional/OWNERS b/cas/1.1/vts/functional/OWNERS
index 29246ed..7d8c2ee 100644
--- a/cas/1.1/vts/functional/OWNERS
+++ b/cas/1.1/vts/functional/OWNERS
@@ -1,3 +1,2 @@
-nchalko@google.com
-chz@google.com
-quxiangfang@google.com
+# Bug component: 1344
+include ../../../1.2/vts/functional/OWNERS
diff --git a/cas/1.2/vts/functional/OWNERS b/cas/1.2/vts/functional/OWNERS
index 29246ed..4c55752 100644
--- a/cas/1.2/vts/functional/OWNERS
+++ b/cas/1.2/vts/functional/OWNERS
@@ -1,3 +1,3 @@
-nchalko@google.com
-chz@google.com
+# Bug component: 1344
 quxiangfang@google.com
+hgchen@google.com
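
Note: Gerrit's OWNERS `include` directive pulls in another OWNERS file by relative path, so after this change the 1.0 and 1.1 directories defer to the consolidated 1.2 file. The effective owner set for all three cas VTS directories (taken from the consolidated file above) is:

```
# Bug component: 1344
quxiangfang@google.com
hgchen@google.com
```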
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index b33c581..29b31d2 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -50,7 +50,7 @@
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
+        "libneuralnetworks_common",
     ],
     header_libs: [
         "libneuralnetworks_headers",
@@ -81,7 +81,7 @@
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
+        "libneuralnetworks_common",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index c001112..e9d4b76 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -48,7 +48,7 @@
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
+        "libneuralnetworks_common",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
similarity index 83%
rename from neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
rename to neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
index 8078693..ac9411c 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
 
-#include "ExecutionBurstUtils.h"
+#include "nnapi/hal/1.2/BurstUtils.h"
 
 #include <android-base/thread_annotations.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
@@ -49,13 +49,11 @@
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
 /**
- * The ExecutionBurstController class manages both the serialization and deserialization of data
- * across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
- * this class manages the burst's memory cache.
+ * The Burst class manages both the serialization and deserialization of data across FMQ, making it
+ * appear to the runtime as a regular synchronous inference. Additionally, this class manages the
+ * burst's memory cache.
  */
-class ExecutionBurstController final
-    : public nn::IBurst,
-      public std::enable_shared_from_this<ExecutionBurstController> {
+class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
     struct PrivateConstructorTag {};
 
   public:
@@ -150,21 +148,21 @@
      * Creates a burst controller on a prepared model.
      *
      * @param preparedModel Model prepared for execution to execute on.
-     * @param pollingTimeWindow How much time (in microseconds) the ExecutionBurstController is
-     *     allowed to poll the FMQ before waiting on the blocking futex. Polling may result in lower
-     *     latencies at the potential cost of more power usage.
-     * @return ExecutionBurstController Execution burst controller object.
+     * @param pollingTimeWindow How much time (in microseconds) the Burst is allowed to poll the FMQ
+     *     before waiting on the blocking futex. Polling may result in lower latencies at the
+     *     potential cost of more power usage.
+     * @return Burst Execution burst controller object.
      */
-    static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
+    static nn::GeneralResult<std::shared_ptr<const Burst>> create(
             nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
             std::chrono::microseconds pollingTimeWindow);
 
-    ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
-                             std::unique_ptr<RequestChannelSender> requestChannelSender,
-                             std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
-                             sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
-                             std::shared_ptr<MemoryCache> memoryCache,
-                             neuralnetworks::utils::DeathHandler deathHandler);
+    Burst(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
+          std::unique_ptr<RequestChannelSender> requestChannelSender,
+          std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
+          sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
+          std::shared_ptr<MemoryCache> memoryCache,
+          neuralnetworks::utils::DeathHandler deathHandler);
 
     // See IBurst::cacheMemory for information on this method.
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
@@ -202,4 +200,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_CONTROLLER_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_H
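
The renamed header keeps the same factory-based construction (private tag plus static `create`). A minimal caller-side sketch, assuming the AOSP NNAPI utility headers and valid prepared-model handles; all values below are illustrative, not from this patch:

```cpp
// Illustrative sketch only: assumes <nnapi/hal/1.2/Burst.h> and its dependencies.
#include <chrono>
#include <nnapi/hal/1.2/Burst.h>

namespace nn = ::android::nn;
using android::hardware::neuralnetworks::V1_2::utils::Burst;

void runOnce(nn::SharedPreparedModel preparedModel,
             const android::sp<android::hardware::neuralnetworks::V1_2::IPreparedModel>& hidlModel,
             const nn::Request& request) {
    // The 50us polling window is an arbitrary example value; see create() above.
    auto burst = Burst::create(std::move(preparedModel), hidlModel,
                               std::chrono::microseconds(50));
    if (!burst.has_value()) return;  // GeneralResult carries the error on failure
    // To the caller this looks like a plain synchronous inference; the FMQ
    // serialization and memory-cache bookkeeping happen inside Burst.
    auto result = (*burst)->execute(request, nn::MeasureTiming::NO,
                                    /*deadline=*/{}, /*loopTimeoutDuration=*/{});
    (void)result;
}
```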
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/BurstUtils.h
similarity index 98%
rename from neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h
rename to neuralnetworks/1.2/utils/include/nnapi/hal/1.2/BurstUtils.h
index c081305..7a6a241 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/BurstUtils.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
 
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
@@ -298,4 +298,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_UTILS_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_BURST_UTILS_H
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp
similarity index 79%
rename from neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
rename to neuralnetworks/1.2/utils/src/Burst.cpp
index a8ded9e..e0a23f1 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/Burst.cpp
@@ -14,10 +14,8 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstController"
-
-#include "ExecutionBurstController.h"
-#include "ExecutionBurstUtils.h"
+#include "Burst.h"
+#include "BurstUtils.h"
 
 #include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
@@ -57,14 +55,13 @@
 
   public:
     static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
-            std::shared_ptr<const ExecutionBurstController> controller,
-            std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-            std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+            std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
+            hal::utils::RequestRelocation relocation,
+            std::vector<Burst::OptionalCacheHold> cacheHolds);
 
-    BurstExecution(PrivateConstructorTag tag,
-                   std::shared_ptr<const ExecutionBurstController> controller,
+    BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> controller,
                    std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-                   std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+                   std::vector<Burst::OptionalCacheHold> cacheHolds);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
             const nn::OptionalTimePoint& deadline) const override;
@@ -74,10 +71,10 @@
             const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
   private:
-    const std::shared_ptr<const ExecutionBurstController> kController;
+    const std::shared_ptr<const Burst> kController;
     const std::vector<FmqRequestDatum> kRequest;
     const hal::utils::RequestRelocation kRelocation;
-    const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
+    const std::vector<Burst::OptionalCacheHold> kCacheHolds;
 };
 
 nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
@@ -92,8 +89,7 @@
 }
 
 nn::GeneralResult<hidl_vec<hidl_memory>> getMemoriesHelper(
-        const hidl_vec<int32_t>& slots,
-        const std::shared_ptr<ExecutionBurstController::MemoryCache>& memoryCache) {
+        const hidl_vec<int32_t>& slots, const std::shared_ptr<Burst::MemoryCache>& memoryCache) {
     hidl_vec<hidl_memory> memories(slots.size());
     for (size_t i = 0; i < slots.size(); ++i) {
         const int32_t slot = slots[i];
@@ -110,7 +106,7 @@
 
 // MemoryCache methods
 
-ExecutionBurstController::MemoryCache::MemoryCache() {
+Burst::MemoryCache::MemoryCache() {
     constexpr size_t kPreallocatedCount = 1024;
     std::vector<int32_t> freeSlotsSpace;
     freeSlotsSpace.reserve(kPreallocatedCount);
@@ -119,13 +115,13 @@
     mCacheCleaner.reserve(kPreallocatedCount);
 }
 
-void ExecutionBurstController::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
+void Burst::MemoryCache::setBurstContext(sp<IBurstContext> burstContext) {
     std::lock_guard guard(mMutex);
     mBurstContext = std::move(burstContext);
 }
 
-std::pair<int32_t, ExecutionBurstController::MemoryCache::SharedCleanup>
-ExecutionBurstController::MemoryCache::cacheMemory(const nn::SharedMemory& memory) {
+std::pair<int32_t, Burst::MemoryCache::SharedCleanup> Burst::MemoryCache::cacheMemory(
+        const nn::SharedMemory& memory) {
     std::unique_lock lock(mMutex);
     base::ScopedLockAssertion lockAssert(mMutex);
 
@@ -163,7 +159,7 @@
     return std::make_pair(slot, std::move(cleaner));
 }
 
-nn::GeneralResult<nn::SharedMemory> ExecutionBurstController::MemoryCache::getMemory(int32_t slot) {
+nn::GeneralResult<nn::SharedMemory> Burst::MemoryCache::getMemory(int32_t slot) {
     std::lock_guard guard(mMutex);
     if (slot < 0 || static_cast<size_t>(slot) >= mMemoryCache.size()) {
         return NN_ERROR() << "Invalid slot: " << slot << " vs " << mMemoryCache.size();
@@ -171,7 +167,7 @@
     return mMemoryCache[slot];
 }
 
-void ExecutionBurstController::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
+void Burst::MemoryCache::freeMemory(const nn::SharedMemory& memory) {
     {
         std::lock_guard guard(mMutex);
         const int32_t slot = mMemoryIdToSlot.at(memory);
@@ -189,7 +185,7 @@
     mCond.notify_all();
 }
 
-int32_t ExecutionBurstController::MemoryCache::allocateSlotLocked() {
+int32_t Burst::MemoryCache::allocateSlotLocked() {
     constexpr size_t kMaxNumberOfSlots = std::numeric_limits<int32_t>::max();
 
     // If there is a free slot, use it.
@@ -210,18 +206,18 @@
 
 // ExecutionBurstCallback methods
 
-ExecutionBurstController::ExecutionBurstCallback::ExecutionBurstCallback(
+Burst::ExecutionBurstCallback::ExecutionBurstCallback(
         const std::shared_ptr<MemoryCache>& memoryCache)
     : kMemoryCache(memoryCache) {
     CHECK(memoryCache != nullptr);
 }
 
-Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories(
-        const hidl_vec<int32_t>& slots, getMemories_cb cb) {
+Return<void> Burst::ExecutionBurstCallback::getMemories(const hidl_vec<int32_t>& slots,
+                                                        getMemories_cb cb) {
     const auto memoryCache = kMemoryCache.lock();
     if (memoryCache == nullptr) {
-        LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories called after "
-                      "the MemoryCache has been freed";
+        LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories called after the MemoryCache has "
+                      "been freed";
         cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
         return Void();
     }
@@ -229,8 +225,8 @@
     const auto maybeMemories = getMemoriesHelper(slots, memoryCache);
     if (!maybeMemories.has_value()) {
         const auto& [message, code] = maybeMemories.error();
-        LOG(ERROR) << "ExecutionBurstController::ExecutionBurstCallback::getMemories failed with "
-                   << code << ": " << message;
+        LOG(ERROR) << "Burst::ExecutionBurstCallback::getMemories failed with " << code << ": "
+                   << message;
         cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {});
         return Void();
     }
@@ -239,14 +235,14 @@
     return Void();
 }
 
-// ExecutionBurstController methods
+// Burst methods
 
-nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
+nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
         nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
         std::chrono::microseconds pollingTimeWindow) {
     // check inputs
     if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
-        return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
+        return NN_ERROR() << "Burst::create passed a nullptr";
     }
 
     // create FMQ objects
@@ -282,18 +278,18 @@
     deathHandler.protectCallbackForLifetimeOfDeathHandler(resultChannelReceiver.get());
 
     // make and return controller
-    return std::make_shared<const ExecutionBurstController>(
+    return std::make_shared<const Burst>(
             PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
             std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
             std::move(memoryCache), std::move(deathHandler));
 }
 
-ExecutionBurstController::ExecutionBurstController(
-        PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
-        std::unique_ptr<RequestChannelSender> requestChannelSender,
-        std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
-        sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
-        std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
+Burst::Burst(PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
+             std::unique_ptr<RequestChannelSender> requestChannelSender,
+             std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
+             sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
+             std::shared_ptr<MemoryCache> memoryCache,
+             neuralnetworks::utils::DeathHandler deathHandler)
     : kPreparedModel(std::move(preparedModel)),
       mRequestChannelSender(std::move(requestChannelSender)),
       mResultChannelReceiver(std::move(resultChannelReceiver)),
@@ -302,21 +298,20 @@
       mMemoryCache(std::move(memoryCache)),
       kDeathHandler(std::move(deathHandler)) {}
 
-ExecutionBurstController::OptionalCacheHold ExecutionBurstController::cacheMemory(
-        const nn::SharedMemory& memory) const {
+Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) const {
     auto [slot, hold] = mMemoryCache->cacheMemory(memory);
     return hold;
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming measure,
-                                  const nn::OptionalTimePoint& deadline,
-                                  const nn::OptionalDuration& loopTimeoutDuration) const {
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
     // This is the first point when we know an execution is occurring, so begin to collect
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
     // ExecutionBurstServer collects systraces at different points in the code.
-    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::execute");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
@@ -357,10 +352,10 @@
 }
 
 // See IBurst::createReusableExecution for information on this method.
-nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalDuration& loopTimeoutDuration) const {
-    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
+    NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
@@ -397,12 +392,10 @@
                                   std::move(relocation), std::move(holds));
 }
 
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
-                                          const hal::utils::RequestRelocation& relocation,
-                                          FallbackFunction fallback) const {
-    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
-                 "ExecutionBurstController::executeInternal");
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
+        const std::vector<FmqRequestDatum>& requestPacket,
+        const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const {
+    NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "Burst::executeInternal");
 
     // Ensure that at most one execution is in flight at any given time.
     const bool alreadyInFlight = mExecutionInFlight.test_and_set();
@@ -435,9 +428,9 @@
 }
 
 nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
-        std::shared_ptr<const ExecutionBurstController> controller,
-        std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
-        std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
+        std::shared_ptr<const Burst> controller, std::vector<FmqRequestDatum> request,
+        hal::utils::RequestRelocation relocation,
+        std::vector<Burst::OptionalCacheHold> cacheHolds) {
     if (controller == nullptr) {
         return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
     }
@@ -448,10 +441,10 @@
 }
 
 BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
-                               std::shared_ptr<const ExecutionBurstController> controller,
+                               std::shared_ptr<const Burst> controller,
                                std::vector<FmqRequestDatum> request,
                                hal::utils::RequestRelocation relocation,
-                               std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
+                               std::vector<Burst::OptionalCacheHold> cacheHolds)
     : kController(std::move(controller)),
       kRequest(std::move(request)),
       kRelocation(std::move(relocation)),
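
`Burst::MemoryCache` combines two idioms worth calling out: a free-list of integer slots (`allocateSlotLocked`) and a shared RAII cleanup object so that the last holder of a cache entry frees its slot. A self-contained, simplified analogue of that pattern; the class and method names here are invented for illustration, and the real class also deduplicates memories and notifies `IBurstContext`:

```cpp
#include <cstdint>
#include <memory>
#include <mutex>
#include <stack>
#include <utility>
#include <vector>

// Simplified stand-in for the slot management in Burst::MemoryCache.
class SlotCache {
  public:
    // When the last copy of a Hold is destroyed, the slot returns to the free
    // list, mirroring MemoryCache::SharedCleanup.
    using Hold = std::shared_ptr<const int32_t>;

    std::pair<int32_t, Hold> acquire() {
        std::lock_guard guard(mMutex);
        int32_t slot;
        if (!mFreeSlots.empty()) {
            slot = mFreeSlots.top();
            mFreeSlots.pop();
        } else {
            slot = mNextSlot++;
        }
        // NOTE: capturing `this` assumes the cache outlives all holds; the real
        // code keeps the cache alive through shared_ptr ownership instead.
        Hold hold(new int32_t(slot), [this](const int32_t* s) {
            release(*s);
            delete s;
        });
        return {slot, std::move(hold)};
    }

  private:
    void release(int32_t slot) {
        std::lock_guard guard(mMutex);
        mFreeSlots.push(slot);
    }

    std::mutex mMutex;
    std::stack<int32_t, std::vector<int32_t>> mFreeSlots;
    int32_t mNextSlot = 0;
};
```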
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp b/neuralnetworks/1.2/utils/src/BurstUtils.cpp
similarity index 99%
rename from neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp
rename to neuralnetworks/1.2/utils/src/BurstUtils.cpp
index e0d029a..b589c46 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp
+++ b/neuralnetworks/1.2/utils/src/BurstUtils.cpp
@@ -14,9 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstUtils"
-
-#include "ExecutionBurstUtils.h"
+#include "BurstUtils.h"
 
 #include <android-base/logging.h>
 #include <android-base/properties.h>
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index b8a5ae0..6df3df3 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -16,11 +16,11 @@
 
 #include "PreparedModel.h"
 
+#include "Burst.h"
+#include "BurstUtils.h"
 #include "Callbacks.h"
 #include "Conversions.h"
 #include "Execution.h"
-#include "ExecutionBurstController.h"
-#include "ExecutionBurstUtils.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/types.h>
@@ -150,16 +150,8 @@
 }
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](
-                            const nn::Request& request, nn::MeasureTiming measure,
-                            const nn::OptionalTimePoint& deadline,
-                            const nn::OptionalDuration& loopTimeoutDuration)
-            -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
-    };
     const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
-    return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
+    return Burst::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index e313b47..52d51e2 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -71,7 +71,7 @@
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
+        "libneuralnetworks_common",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 2c81cb2..ce977e5 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -32,9 +32,9 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/Burst.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
 #include <nnapi/hal/1.2/Conversions.h>
-#include <nnapi/hal/1.2/ExecutionBurstController.h>
-#include <nnapi/hal/1.2/ExecutionBurstUtils.h>
 #include <nnapi/hal/CommonUtils.h>
 
 #include <memory>
@@ -246,17 +246,8 @@
 }
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](
-                            const nn::Request& request, nn::MeasureTiming measure,
-                            const nn::OptionalTimePoint& deadline,
-                            const nn::OptionalDuration& loopTimeoutDuration)
-            -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
-    };
     const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
-    return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
-                                                         pollingTimeWindow);
+    return V1_2::utils::Burst::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index ab0a018..8951760 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -66,7 +66,6 @@
         "VtsHalNeuralNetworksV1_0_utils",
         "VtsHalNeuralNetworksV1_2_utils",
         "VtsHalNeuralNetworksV1_3_utils",
-        "android.hardware.neuralnetworks-V2-ndk",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
@@ -76,7 +75,7 @@
         "libgmock",
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
+        "libneuralnetworks_common",
         "libsync",
     ],
     whole_static_libs: [
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 2ff7534..bf8fc2d 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -23,8 +23,8 @@
     default_applicable_licenses: ["hardware_interfaces_license"],
 }
 
-cc_library_static {
-    name: "neuralnetworks_utils_hal_aidl",
+cc_defaults {
+    name: "neuralnetworks_utils_hal_aidl_defaults",
     defaults: ["neuralnetworks_utils_defaults"],
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal/aidl/"],
@@ -38,7 +38,6 @@
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "android.hardware.neuralnetworks-V2-ndk",
         "libbinder_ndk",
     ],
     target: {
@@ -48,21 +47,49 @@
     },
 }
 
-cc_test {
-    name: "neuralnetworks_utils_hal_aidl_test",
-    defaults: ["neuralnetworks_utils_defaults"],
-    srcs: [
-        "test/*.cpp",
+cc_library_static {
+    name: "neuralnetworks_utils_hal_aidl_v1",
+    defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
+    shared_libs: [
+        "android.hardware.neuralnetworks-V1-ndk",
     ],
+}
+
+cc_library_static {
+    name: "neuralnetworks_utils_hal_aidl",
+    defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
+    shared_libs: [
+        "android.hardware.neuralnetworks-V2-ndk",
+    ],
+}
+
+// A cc_defaults that includes the latest non-experimental AIDL utilities and other AIDL libraries
+// that are commonly used together. Modules that always depend on the latest non-experimental
+// AIDL features can include this cc_defaults to avoid managing dependency versions explicitly.
+cc_defaults {
+    name: "neuralnetworks_use_latest_utils_hal_aidl",
     static_libs: [
         "android.hardware.common-V2-ndk",
         "android.hardware.graphics.common-V2-ndk",
         "android.hardware.neuralnetworks-V2-ndk",
+        "neuralnetworks_utils_hal_aidl",
+    ],
+}
+
+cc_test {
+    name: "neuralnetworks_utils_hal_aidl_test",
+    defaults: [
+        "neuralnetworks_use_latest_utils_hal_aidl",
+        "neuralnetworks_utils_defaults",
+    ],
+    srcs: [
+        "test/*.cpp",
+    ],
+    static_libs: [
         "libaidlcommonsupport",
         "libgmock",
         "libneuralnetworks_common",
         "neuralnetworks_types",
-        "neuralnetworks_utils_hal_aidl",
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
diff --git a/neuralnetworks/aidl/utils/include/AidlBufferTracker.h b/neuralnetworks/aidl/utils/include/AidlBufferTracker.h
new file mode 100644
index 0000000..4421876
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/AidlBufferTracker.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnapi/hal/aidl/BufferTracker.h"
diff --git a/neuralnetworks/aidl/utils/include/AidlHalInterfaces.h b/neuralnetworks/aidl/utils/include/AidlHalInterfaces.h
new file mode 100644
index 0000000..3777969
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/AidlHalInterfaces.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnapi/hal/aidl/HalInterfaces.h"
diff --git a/neuralnetworks/aidl/utils/include/AidlHalUtils.h b/neuralnetworks/aidl/utils/include/AidlHalUtils.h
new file mode 100644
index 0000000..98bfbad
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/AidlHalUtils.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnapi/hal/aidl/HalUtils.h"
diff --git a/neuralnetworks/aidl/utils/include/AidlValidateHal.h b/neuralnetworks/aidl/utils/include/AidlValidateHal.h
new file mode 100644
index 0000000..5aaa74d
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/AidlValidateHal.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnapi/hal/aidl/ValidateHal.h"
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/BufferTracker.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/BufferTracker.h
new file mode 100644
index 0000000..18d01e2
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/BufferTracker.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H
+
+#include <android-base/macros.h>
+#include <android-base/thread_annotations.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "nnapi/hal/aidl/HalInterfaces.h"
+#include "nnapi/hal/aidl/ValidateHal.h"
+
+namespace android::nn {
+
+// This class manages a CPU buffer allocated on heap and provides validation methods.
+class AidlManagedBuffer {
+  public:
+    static std::shared_ptr<AidlManagedBuffer> create(uint32_t size,
+                                                     std::set<AidlHalPreparedModelRole> roles,
+                                                     const Operand& operand);
+
+    // Prefer AidlManagedBuffer::create.
+    AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+                      std::set<AidlHalPreparedModelRole> roles, const Operand& operand);
+
+    uint8_t* getPointer() const { return kBuffer.get(); }
+    uint32_t getSize() const { return kSize; }
+
+    // "poolIndex" is the index of this buffer in the request.pools.
+    ErrorStatus validateRequest(uint32_t poolIndex, const Request& request,
+                                const aidl_hal::IPreparedModel* preparedModel) const;
+
+    // "size" is the byte size of the Memory provided to the copyFrom or copyTo method.
+    ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const;
+    ErrorStatus validateCopyTo(uint32_t size) const;
+
+    bool updateDimensions(const std::vector<uint32_t>& dimensions);
+    void setInitialized(bool initialized);
+
+  private:
+    mutable std::mutex mMutex;
+    const std::unique_ptr<uint8_t[]> kBuffer;
+    const uint32_t kSize;
+    const std::set<AidlHalPreparedModelRole> kRoles;
+    const OperandType kOperandType;
+    const std::vector<uint32_t> kInitialDimensions;
+    std::vector<uint32_t> mUpdatedDimensions GUARDED_BY(mMutex);
+    bool mInitialized GUARDED_BY(mMutex) = false;
+};
+
+// Keep track of all AidlManagedBuffers and assign each with a unique token.
+class AidlBufferTracker : public std::enable_shared_from_this<AidlBufferTracker> {
+    DISALLOW_COPY_AND_ASSIGN(AidlBufferTracker);
+
+  public:
+    // A RAII class to help manage the lifetime of the token.
+    // It is only supposed to be constructed in AidlBufferTracker::add.
+    class Token {
+        DISALLOW_COPY_AND_ASSIGN(Token);
+
+      public:
+        Token(uint32_t token, std::shared_ptr<AidlBufferTracker> tracker)
+            : kToken(token), kBufferTracker(std::move(tracker)) {}
+        ~Token() { kBufferTracker->free(kToken); }
+        uint32_t get() const { return kToken; }
+
+      private:
+        const uint32_t kToken;
+        const std::shared_ptr<AidlBufferTracker> kBufferTracker;
+    };
+
+    // The factory of AidlBufferTracker. This ensures that the AidlBufferTracker is always managed
+    // by a shared_ptr.
+    static std::shared_ptr<AidlBufferTracker> create() {
+        return std::make_shared<AidlBufferTracker>();
+    }
+
+    // Prefer AidlBufferTracker::create.
+    AidlBufferTracker() : mTokenToBuffers(1) {}
+
+    std::unique_ptr<Token> add(std::shared_ptr<AidlManagedBuffer> buffer);
+    std::shared_ptr<AidlManagedBuffer> get(uint32_t token) const;
+
+  private:
+    void free(uint32_t token);
+
+    mutable std::mutex mMutex;
+    std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex);
+
+    // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping.
+    // The index of the vector is the token. When the token gets freed, the corresponding entry is
+    // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token.
+    std::vector<std::shared_ptr<AidlManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex);
+};
+
+}  // namespace android::nn
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BUFFER_TRACKER_H
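
A sketch of the token lifecycle this header defines, assuming the header above is available; `operand` and `roles` are placeholders a real caller would derive from an allocation request:

```cpp
// Illustrative sketch only; not part of the patch.
#include <memory>
#include <set>

#include "nnapi/hal/aidl/BufferTracker.h"

void example(const android::nn::Operand& operand,
             std::set<android::nn::AidlHalPreparedModelRole> roles) {
    auto tracker = android::nn::AidlBufferTracker::create();
    auto buffer = android::nn::AidlManagedBuffer::create(/*size=*/1024,
                                                         std::move(roles), operand);
    if (buffer == nullptr) return;  // allocation failed or extension operand

    // add() hands back an RAII Token; token 0 is never issued (it is invalid).
    std::unique_ptr<android::nn::AidlBufferTracker::Token> token = tracker->add(buffer);
    auto same = tracker->get(token->get());  // same underlying AidlManagedBuffer
    (void)same;
}  // `token` destroyed here: its slot returns to the tracker's free list
```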
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
index 1457646..d558f66 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
@@ -43,11 +43,12 @@
 
   public:
     static nn::GeneralResult<std::shared_ptr<const Device>> create(
-            std::string name, std::shared_ptr<aidl_hal::IDevice> device);
+            std::string name, std::shared_ptr<aidl_hal::IDevice> device, nn::Version featureLevel);
 
     Device(PrivateConstructorTag tag, std::string name, std::string versionString,
-           nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
-           nn::Capabilities capabilities, std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
+           nn::Version featureLevel, nn::DeviceType deviceType,
+           std::vector<nn::Extension> extensions, nn::Capabilities capabilities,
+           std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
            std::shared_ptr<aidl_hal::IDevice> device, DeathHandler deathHandler);
 
     const std::string& getName() const override;
@@ -84,6 +85,7 @@
   private:
     const std::string kName;
     const std::string kVersionString;
+    const nn::Version kFeatureLevel;
     const nn::DeviceType kDeviceType;
     const std::vector<nn::Extension> kExtensions;
     const nn::Capabilities kCapabilities;
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
new file mode 100644
index 0000000..3fb443c
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
+#include <aidl/android/hardware/neuralnetworks/DataLocation.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h>
+#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h>
+#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h>
+#include <aidl/android/hardware/neuralnetworks/IBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
+#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Operand.h>
+#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h>
+#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h>
+#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h>
+#include <aidl/android/hardware/neuralnetworks/OperandType.h>
+#include <aidl/android/hardware/neuralnetworks/Operation.h>
+#include <aidl/android/hardware/neuralnetworks/OperationType.h>
+#include <aidl/android/hardware/neuralnetworks/OutputShape.h>
+#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <aidl/android/hardware/neuralnetworks/RequestArgument.h>
+#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
+#include <aidl/android/hardware/neuralnetworks/Subgraph.h>
+#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
+#include <aidl/android/hardware/neuralnetworks/Timing.h>
+
+namespace android::nn {
+
+namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
+
+inline constexpr aidl_hal::Priority kDefaultPriorityAidl = aidl_hal::Priority::MEDIUM;
+
+}  // namespace android::nn
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_INTERFACES_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalUtils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalUtils.h
new file mode 100644
index 0000000..89552bc
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalUtils.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H
+
+#include <vector>
+
+#include "nnapi/hal/aidl/HalInterfaces.h"
+
+namespace android {
+namespace nn {
+
+// Return a vector with one entry for each non-extension OperandType except
+// SUBGRAPH, set to the specified PerformanceInfo value.  The vector will be
+// sorted by OperandType.
+//
+// Control flow (OperandType::SUBGRAPH) operation performance is specified
+// separately using Capabilities::ifPerformance and
+// Capabilities::whilePerformance.
+std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
+        aidl_hal::PerformanceInfo perf);
+
+// Update the vector entry corresponding to the specified OperandType with the
+// specified PerformanceInfo value.  The vector must already have an entry for
+// that OperandType, and must be sorted by OperandType.
+void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
+            aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf);
+
+// Returns true if an operand type is an extension type.
+bool isExtensionOperandType(aidl_hal::OperandType type);
+
+// Returns true if an operand type is a scalar type.
+bool isNonExtensionScalar(aidl_hal::OperandType type);
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_HAL_UTILS_H
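
A sketch of how a driver might combine these helpers when reporting capabilities; the PerformanceInfo values and field order are illustrative assumptions, not taken from this patch:

```cpp
// Illustrative sketch only; assumes the header above and the generated AIDL types.
#include "nnapi/hal/aidl/HalUtils.h"

namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;

aidl_hal::Capabilities makeCapabilities() {
    aidl_hal::Capabilities capabilities;
    // One entry per non-extension, non-SUBGRAPH OperandType, sorted by type...
    capabilities.operandPerformance = android::nn::nonExtensionOperandPerformance(
            {/*execTime=*/1.0f, /*powerUsage=*/1.0f});
    // ...then tighten the entry for a type this hypothetical driver accelerates.
    android::nn::update(&capabilities.operandPerformance,
                        aidl_hal::OperandType::TENSOR_FLOAT32,
                        {/*execTime=*/0.5f, /*powerUsage=*/0.5f});
    return capabilities;
}
```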
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ValidateHal.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ValidateHal.h
new file mode 100644
index 0000000..62aba31
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ValidateHal.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H
+
+#include "nnapi/hal/aidl/HalInterfaces.h"
+
+#include <memory>
+#include <set>
+#include <tuple>
+#include <vector>
+
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Validation.h>
+
+namespace android {
+namespace nn {
+
+using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>;
+
+bool validateMemoryDesc(
+        const aidl_hal::BufferDesc& desc,
+        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
+        const std::vector<aidl_hal::BufferRole>& inputRoles,
+        const std::vector<aidl_hal::BufferRole>& outputRoles,
+        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
+                getModel,
+        std::set<AidlHalPreparedModelRole>* preparedModelRoles, aidl_hal::Operand* combinedOperand);
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_VALIDATE_HAL_H
diff --git a/neuralnetworks/aidl/utils/src/AidlBufferTracker.cpp b/neuralnetworks/aidl/utils/src/AidlBufferTracker.cpp
new file mode 100644
index 0000000..15d0810
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/AidlBufferTracker.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AidlBufferTracker.h"
+
+#include <android-base/macros.h>
+
+#include <memory>
+#include <mutex>
+#include <set>
+#include <stack>
+#include <utility>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android::nn {
+
+std::shared_ptr<AidlManagedBuffer> AidlManagedBuffer::create(
+        uint32_t size, std::set<AidlHalPreparedModelRole> roles, const Operand& operand) {
+    std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]);
+    if (buffer == nullptr) {
+        return nullptr;
+    }
+    if (isExtension(operand.type)) {
+        LOG(ERROR) << "AidlManagedBuffer cannot handle extension operands.";
+        return nullptr;
+    }
+    return std::make_shared<AidlManagedBuffer>(std::move(buffer), size, std::move(roles), operand);
+}
+
+AidlManagedBuffer::AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size,
+                                     std::set<AidlHalPreparedModelRole> roles,
+                                     const Operand& operand)
+    : kBuffer(std::move(buffer)),
+      kSize(size),
+      kRoles(std::move(roles)),
+      kOperandType(operand.type),
+      kInitialDimensions(operand.dimensions),
+      mUpdatedDimensions(operand.dimensions) {
+    CHECK(!isExtension(kOperandType));
+}
+
+ErrorStatus AidlManagedBuffer::validateRequest(
+        uint32_t poolIndex, const Request& request,
+        const aidl_hal::IPreparedModel* preparedModel) const {
+    CHECK_LT(poolIndex, request.pools.size());
+    CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex]));
+    std::lock_guard<std::mutex> guard(mMutex);
+
+    bool usedAsInput = false, usedAsOutput = false;
+    for (uint32_t i = 0; i < request.inputs.size(); i++) {
+        if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
+        if (request.inputs[i].location.poolIndex != poolIndex) continue;
+        // Validate if the input role is specified during allocation.
+        if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {
+            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        if (!mInitialized) {
+            LOG(ERROR)
+                    << "AidlManagedBuffer::validateRequest -- using uninitialized buffer as input "
+                       "request.";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+        auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions);
+        if (!combined.has_value()) {
+            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
+                       << toString(mUpdatedDimensions) << " vs "
+                       << toString(request.inputs[i].dimensions) << ")";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        usedAsInput = true;
+    }
+    for (uint32_t i = 0; i < request.outputs.size(); i++) {
+        if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue;
+        if (request.outputs[i].location.poolIndex != poolIndex) continue;
+        if (usedAsInput || usedAsOutput) {
+            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- using the same device memory for "
+                          "input/output or multiple outputs";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        // Validate if the output role is specified during allocation.
+        if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {
+            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role.";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions);
+        if (!combined.has_value()) {
+            LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions ("
+                       << toString(kInitialDimensions) << " vs "
+                       << toString(request.outputs[i].dimensions) << ")";
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        usedAsOutput = true;
+    }
+    return ErrorStatus::NONE;
+}
+
+ErrorStatus AidlManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions,
+                                                uint32_t size) const {
+    if (size != kSize) {
+        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize
+                   << " vs " << size;
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+
+    if (isNonExtensionScalar(kOperandType)) {
+        if (!dimensions.empty()) {
+            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid dimensions for scalar "
+                          "operand: "
+                       << toString(dimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+        return ErrorStatus::NONE;
+    }
+
+    if (dimensions.empty()) {
+        if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) {
+            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the initial dimensions are not "
+                          "fully "
+                          "specified and no dimension update is provided: "
+                       << toString(kInitialDimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+    } else {
+        if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) {
+            LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the updated dimensions are not "
+                          "fully "
+                          "specified: "
+                       << toString(dimensions);
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+    }
+
+    const auto combined = combineDimensions(kInitialDimensions, dimensions);
+    if (!combined.has_value()) {
+        LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- incompatible dimensions ("
+                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+    return ErrorStatus::NONE;
+}
+
+ErrorStatus AidlManagedBuffer::validateCopyTo(uint32_t size) const {
+    if (size != kSize) {
+        LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- invalid memory size: " << kSize
+                   << " vs " << size;
+        return ErrorStatus::INVALID_ARGUMENT;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    if (!mInitialized) {
+        LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- using uninitialized buffer as source.";
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    return ErrorStatus::NONE;
+}
+
+bool AidlManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) {
+    auto combined = combineDimensions(kInitialDimensions, dimensions);
+    if (!combined.has_value()) {
+        LOG(ERROR) << "AidlManagedBuffer::updateDimensions -- incompatible dimensions ("
+                   << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")";
+        return false;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    mUpdatedDimensions = std::move(combined).value();
+    return true;
+}
+
+void AidlManagedBuffer::setInitialized(bool initialized) {
+    std::lock_guard<std::mutex> guard(mMutex);
+    mInitialized = initialized;
+}
+
+std::unique_ptr<AidlBufferTracker::Token> AidlBufferTracker::add(
+        std::shared_ptr<AidlManagedBuffer> buffer) {
+    if (buffer == nullptr) {
+        return nullptr;
+    }
+    std::lock_guard<std::mutex> guard(mMutex);
+    uint32_t token = 0;
+    if (mFreeTokens.empty()) {
+        token = mTokenToBuffers.size();
+        mTokenToBuffers.push_back(std::move(buffer));
+    } else {
+        token = mFreeTokens.top();
+        mFreeTokens.pop();
+        mTokenToBuffers[token] = std::move(buffer);
+    }
+    VLOG(MEMORY) << "AidlBufferTracker::add -- new token = " << token;
+    return std::make_unique<Token>(token, shared_from_this());
+}
+
+std::shared_ptr<AidlManagedBuffer> AidlBufferTracker::get(uint32_t token) const {
+    std::lock_guard<std::mutex> guard(mMutex);
+    if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) {
+        LOG(ERROR) << "AidlBufferTracker::get -- unknown token " << token;
+        return nullptr;
+    }
+    return mTokenToBuffers[token];
+}
+
+void AidlBufferTracker::free(uint32_t token) {
+    std::lock_guard<std::mutex> guard(mMutex);
+    CHECK_LT(token, mTokenToBuffers.size());
+    CHECK(mTokenToBuffers[token] != nullptr);
+    VLOG(MEMORY) << "AidlBufferTracker::free -- release token = " << token;
+    mTokenToBuffers[token] = nullptr;
+    mFreeTokens.push(token);
+}
+
+}  // namespace android::nn
diff --git a/neuralnetworks/aidl/utils/src/AidlHalUtils.cpp b/neuralnetworks/aidl/utils/src/AidlHalUtils.cpp
new file mode 100644
index 0000000..6fc46ab
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/AidlHalUtils.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// This file contains pre-canonical-types utility code that works directly with
+// the HAL types. LegacyUtils.h holds the subset of these utilities that does
+// not touch the HAL.
+
+#include "AidlHalUtils.h"
+
+#include <android-base/logging.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+#include "AidlHalInterfaces.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android::nn {
+
+std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance(
+        aidl_hal::PerformanceInfo perf) {
+    static constexpr ndk::enum_range<aidl_hal::OperandType> kOperandTypeRange;
+    std::vector<aidl_hal::OperandPerformance> ret;
+    ret.reserve(std::distance(kOperandTypeRange.begin(), kOperandTypeRange.end()));
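+    // SUBGRAPH gets no entry here: control-flow performance is reported separately in
+    // Capabilities (ifPerformance/whilePerformance).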
+    for (aidl_hal::OperandType type : kOperandTypeRange) {
+        if (type != aidl_hal::OperandType::SUBGRAPH) {
+            ret.push_back(aidl_hal::OperandPerformance{type, perf});
+        }
+    }
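+    // Keep the vector sorted by operand type so that update() below can use binary search.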
+    std::sort(ret.begin(), ret.end(),
+              [](const aidl_hal::OperandPerformance& a, const aidl_hal::OperandPerformance& b) {
+                  return a.type < b.type;
+              });
+
+    return ret;
+}
+
+void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance,
+            aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf) {
+    CHECK(operandPerformance != nullptr);
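+    // The vector is sorted by type (see nonExtensionOperandPerformance), so binary search works.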
+    const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+                                     [](const aidl_hal::OperandPerformance& perf,
+                                        aidl_hal::OperandType type) { return perf.type < type; });
+    CHECK(it != operandPerformance->end())
+            << toString(type) << " not in operand performance vector";
+    it->info = perf;
+}
+
+bool isExtensionOperandType(aidl_hal::OperandType type) {
+    return isExtension(convert(type).value());
+}
+
+bool isNonExtensionScalar(aidl_hal::OperandType type) {
+    return isNonExtensionScalar(convert(type).value());
+}
+
+}  // namespace android::nn
diff --git a/neuralnetworks/aidl/utils/src/AidlValidateHal.cpp b/neuralnetworks/aidl/utils/src/AidlValidateHal.cpp
new file mode 100644
index 0000000..a1c496a
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/AidlValidateHal.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AidlValidateHal"
+
+#include "AidlValidateHal.h"
+
+#include <android-base/logging.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "AidlHalUtils.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android {
+namespace nn {
+
+bool validateMemoryDesc(
+        const aidl_hal::BufferDesc& desc,
+        const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels,
+        const std::vector<aidl_hal::BufferRole>& inputRoles,
+        const std::vector<aidl_hal::BufferRole>& outputRoles,
+        std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)>
+                getModel,
+        std::set<AidlHalPreparedModelRole>* preparedModelRoles,
+        aidl_hal::Operand* combinedOperand) {
+    NN_RET_CHECK(!preparedModels.empty());
+    NN_RET_CHECK(!inputRoles.empty() || !outputRoles.empty());
+
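+    // Each (preparedModel, ioType, ioIndex) role may appear at most once; roles.emplace() below
+    // enforces this.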
+    std::set<AidlHalPreparedModelRole> roles;
+    std::vector<aidl_hal::Operand> operands;
+    operands.reserve(inputRoles.size() + outputRoles.size());
+    for (const auto& role : inputRoles) {
+        NN_RET_CHECK_GE(role.modelIndex, 0);
+        NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
+        const auto& preparedModel = preparedModels[role.modelIndex];
+        NN_RET_CHECK(preparedModel != nullptr);
+        const auto* model = getModel(preparedModel);
+        NN_RET_CHECK(model != nullptr);
+        const auto& inputIndexes = model->main.inputIndexes;
+        NN_RET_CHECK_GE(role.ioIndex, 0);
+        NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), inputIndexes.size());
+        NN_RET_CHECK_GT(role.probability, 0.0f);
+        NN_RET_CHECK_LE(role.probability, 1.0f);
+        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
+        NN_RET_CHECK(success);
+        operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
+    }
+    for (const auto& role : outputRoles) {
+        NN_RET_CHECK_GE(role.modelIndex, 0);
+        NN_RET_CHECK_LT(static_cast<size_t>(role.modelIndex), preparedModels.size());
+        const auto& preparedModel = preparedModels[role.modelIndex];
+        NN_RET_CHECK(preparedModel != nullptr);
+        const auto* model = getModel(preparedModel);
+        NN_RET_CHECK(model != nullptr);
+        const auto& outputIndexes = model->main.outputIndexes;
+        NN_RET_CHECK_GE(role.ioIndex, 0);
+        NN_RET_CHECK_LT(static_cast<size_t>(role.ioIndex), outputIndexes.size());
+        NN_RET_CHECK_GT(role.probability, 0.0f);
+        NN_RET_CHECK_LE(role.probability, 1.0f);
+        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
+        NN_RET_CHECK(success);
+        operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]);
+    }
+
+    CHECK(!operands.empty());
+    const auto opType = operands[0].type;
+    const auto canonicalOperandType = convert(opType);
+    NN_RET_CHECK(canonicalOperandType.has_value()) << canonicalOperandType.error().message;
+    const bool isExtensionOperand = isExtension(canonicalOperandType.value());
+
+    auto maybeDimensions = toUnsigned(desc.dimensions);
+    NN_RET_CHECK(maybeDimensions.has_value()) << maybeDimensions.error().message;
+    std::vector<uint32_t> dimensions = std::move(maybeDimensions).value();
+
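+    // Fold each operand's dimensions into the descriptor's dimensions, rejecting any
+    // incompatibility.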
+    for (const auto& operand : operands) {
+        NN_RET_CHECK(operand.type == operands[0].type)
+                << toString(operand.type) << " vs " << toString(operands[0].type);
+        NN_RET_CHECK_EQ(operand.scale, operands[0].scale);
+        NN_RET_CHECK_EQ(operand.zeroPoint, operands[0].zeroPoint);
+        // NOTE: validateMemoryDesc cannot validate extra parameters for an extension operand
+        // type.
+        if (!isExtensionOperand) {
+            const auto& lhsExtraParams = operand.extraParams;
+            const auto& rhsExtraParams = operands[0].extraParams;
+            NN_RET_CHECK(lhsExtraParams == rhsExtraParams)
+                    << (lhsExtraParams.has_value() ? lhsExtraParams.value().toString()
+                                                   : "std::nullopt")
+                    << " vs "
+                    << (rhsExtraParams.has_value() ? rhsExtraParams.value().toString()
+                                                   : "std::nullopt");
+        }
+        const auto maybeRhsDimensions = toUnsigned(operand.dimensions);
+        NN_RET_CHECK(maybeRhsDimensions.has_value()) << maybeRhsDimensions.error().message;
+        const auto combined = combineDimensions(dimensions, maybeRhsDimensions.value());
+        NN_RET_CHECK(combined.has_value());
+        dimensions = combined.value();
+    }
+
+    // NOTE: validateMemoryDesc cannot validate scalar dimensions for an extension operand type.
+    if (!isExtensionOperand) {
+        NN_RET_CHECK(!isNonExtensionScalar(opType) || dimensions.empty())
+                << "invalid dimensions with scalar operand type.";
+    }
+
+    if (preparedModelRoles != nullptr) {
+        *preparedModelRoles = std::move(roles);
+    }
+    if (combinedOperand != nullptr) {
+        *combinedOperand = operands[0];
+        // No need to check that the values fit in int32_t here, since the original values were
+        // obtained from int32_t.
+        combinedOperand->dimensions = aidl_hal::utils::toSigned(dimensions).value();
+    }
+    return true;
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index e80de0b..5b7ec4e 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -125,7 +125,7 @@
 }  // namespace
 
 nn::GeneralResult<std::shared_ptr<const Device>> Device::create(
-        std::string name, std::shared_ptr<aidl_hal::IDevice> device) {
+        std::string name, std::shared_ptr<aidl_hal::IDevice> device, nn::Version featureLevel) {
     if (name.empty()) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "aidl_hal::utils::Device::create must have non-empty name";
@@ -143,18 +143,19 @@
 
     auto deathHandler = NN_TRY(DeathHandler::create(device));
     return std::make_shared<const Device>(
-            PrivateConstructorTag{}, std::move(name), std::move(versionString), deviceType,
-            std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
+            PrivateConstructorTag{}, std::move(name), std::move(versionString), featureLevel,
+            deviceType, std::move(extensions), std::move(capabilities), numberOfCacheFilesNeeded,
             std::move(device), std::move(deathHandler));
 }
 
 Device::Device(PrivateConstructorTag /*tag*/, std::string name, std::string versionString,
-               nn::DeviceType deviceType, std::vector<nn::Extension> extensions,
-               nn::Capabilities capabilities,
+               nn::Version featureLevel, nn::DeviceType deviceType,
+               std::vector<nn::Extension> extensions, nn::Capabilities capabilities,
                std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded,
                std::shared_ptr<aidl_hal::IDevice> device, DeathHandler deathHandler)
     : kName(std::move(name)),
       kVersionString(std::move(versionString)),
+      kFeatureLevel(featureLevel),
       kDeviceType(deviceType),
       kExtensions(std::move(extensions)),
       kCapabilities(std::move(capabilities)),
@@ -171,7 +172,7 @@
 }
 
 nn::Version Device::getFeatureLevel() const {
-    return nn::Version::ANDROID_S;
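+    // The feature level is now derived from the service's reported AIDL version (see
+    // Service.cpp).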
+    return kFeatureLevel;
 }
 
 nn::DeviceType Device::getType() const {
diff --git a/neuralnetworks/aidl/utils/src/Service.cpp b/neuralnetworks/aidl/utils/src/Service.cpp
index ac182a2..01772ee 100644
--- a/neuralnetworks/aidl/utils/src/Service.cpp
+++ b/neuralnetworks/aidl/utils/src/Service.cpp
@@ -17,6 +17,7 @@
 #include "Service.h"
 
 #include <AndroidVersionUtil.h>
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
 #include <android/binder_auto_utils.h>
 #include <android/binder_manager.h>
 #include <android/binder_process.h>
@@ -28,8 +29,33 @@
 #include <string>
 
 #include "Device.h"
+#include "Utils.h"
 
 namespace aidl::android::hardware::neuralnetworks::utils {
+namespace {
+
+// Map the AIDL version of an IDevice to the NNAPI canonical feature level.
+nn::GeneralResult<nn::Version> getAidlServiceFeatureLevel(IDevice* service) {
+    CHECK(service != nullptr);
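+    // getInterfaceVersion is generated by the AIDL compiler and reports the remote service's
+    // interface version.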
+    int aidlVersion;
+    const auto ret = service->getInterfaceVersion(&aidlVersion);
+    HANDLE_ASTATUS(ret) << "getInterfaceVersion failed";
+
+    // If the service's AIDL version is newer than the AIDL library version the runtime was built
+    // against, clamp it to the runtime's AIDL library version.
+    aidlVersion = std::min(aidlVersion, IDevice::version);
+
+    // Map stable AIDL versions to canonical versions.
+    switch (aidlVersion) {
+        case 1:
+            return nn::Version::ANDROID_S;
+        case 2:
+            return nn::Version::FEATURE_LEVEL_6;
+    }
+    return NN_ERROR() << "Unknown AIDL service version: " << aidlVersion;
+}
+
+}  // namespace
 
 nn::GeneralResult<nn::SharedDevice> getDevice(const std::string& instanceName) {
     auto fullName = std::string(IDevice::descriptor) + "/" + instanceName;
@@ -55,7 +81,8 @@
                    << " returned nullptr";
         }
         ABinderProcess_startThreadPool();
-        return Device::create(instanceName, std::move(service));
+        const auto featureLevel = NN_TRY(getAidlServiceFeatureLevel(service.get()));
+        return Device::create(instanceName, std::move(service), featureLevel);
     };
 
     return hal::utils::ResilientDevice::create(std::move(makeDevice));
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index f121aca..79abe1b 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -146,28 +146,45 @@
     return ndk::ScopedAStatus::fromStatus(STATUS_DEAD_OBJECT);
 };
 
+class DeviceTest : public ::testing::TestWithParam<nn::Version> {
+  protected:
+    const nn::Version kVersion = GetParam();
+};
+
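+// Generates a readable test-name suffix for each parameterized version, e.g. "v1" for ANDROID_S.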
+std::string printDeviceTest(const testing::TestParamInfo<nn::Version>& info) {
+    switch (info.param) {
+        case nn::Version::ANDROID_S:
+            return "v1";
+        case nn::Version::FEATURE_LEVEL_6:
+            return "v2";
+        default:
+            LOG(FATAL) << "Invalid AIDL version: " << info.param;
+            return "invalid";
+    }
+}
+
 }  // namespace
 
-TEST(DeviceTest, invalidName) {
+TEST_P(DeviceTest, invalidName) {
     // run test
     const auto device = MockDevice::create();
-    const auto result = Device::create(kInvalidName, device);
+    const auto result = Device::create(kInvalidName, device, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
 }
 
-TEST(DeviceTest, invalidDevice) {
+TEST_P(DeviceTest, invalidDevice) {
     // run test
-    const auto result = Device::create(kName, kInvalidDevice);
+    const auto result = Device::create(kName, kInvalidDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
 }
 
-TEST(DeviceTest, getVersionStringError) {
+TEST_P(DeviceTest, getVersionStringError) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getVersionString(_))
@@ -175,14 +192,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getVersionStringTransportFailure) {
+TEST_P(DeviceTest, getVersionStringTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getVersionString(_))
@@ -190,14 +207,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getVersionStringDeadObject) {
+TEST_P(DeviceTest, getVersionStringDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getVersionString(_))
@@ -205,27 +222,27 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, getTypeError) {
+TEST_P(DeviceTest, getTypeError) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getType(_)).Times(1).WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getTypeTransportFailure) {
+TEST_P(DeviceTest, getTypeTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getType(_))
@@ -233,14 +250,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getTypeDeadObject) {
+TEST_P(DeviceTest, getTypeDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getType(_))
@@ -248,14 +265,14 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, getSupportedExtensionsError) {
+TEST_P(DeviceTest, getSupportedExtensionsError) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getSupportedExtensions(_))
@@ -263,14 +280,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getSupportedExtensionsTransportFailure) {
+TEST_P(DeviceTest, getSupportedExtensionsTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getSupportedExtensions(_))
@@ -278,14 +295,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getSupportedExtensionsDeadObject) {
+TEST_P(DeviceTest, getSupportedExtensionsDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getSupportedExtensions(_))
@@ -293,20 +310,20 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, getNumberOfCacheFilesNeeded) {
+TEST_P(DeviceTest, getNumberOfCacheFilesNeeded) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(1);
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_TRUE(result.has_value());
@@ -315,7 +332,7 @@
     EXPECT_EQ(result.value()->getNumberOfCacheFilesNeeded(), kNumberOfCacheFilesPair);
 }
 
-TEST(DeviceTest, getNumberOfCacheFilesNeededError) {
+TEST_P(DeviceTest, getNumberOfCacheFilesNeededError) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_))
@@ -323,14 +340,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, dataCacheFilesExceedsSpecifiedMax) {
+TEST_P(DeviceTest, dataCacheFilesExceedsSpecifiedMax) {
     // setup test
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_))
@@ -341,14 +358,14 @@
                             InvokeWithoutArgs(makeStatusOk)));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, modelCacheFilesExceedsSpecifiedMax) {
+TEST_P(DeviceTest, modelCacheFilesExceedsSpecifiedMax) {
     // setup test
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_))
@@ -359,14 +376,14 @@
                             InvokeWithoutArgs(makeStatusOk)));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getNumberOfCacheFilesNeededTransportFailure) {
+TEST_P(DeviceTest, getNumberOfCacheFilesNeededTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_))
@@ -374,14 +391,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getNumberOfCacheFilesNeededDeadObject) {
+TEST_P(DeviceTest, getNumberOfCacheFilesNeededDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_))
@@ -389,14 +406,14 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, getCapabilitiesError) {
+TEST_P(DeviceTest, getCapabilitiesError) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getCapabilities(_))
@@ -404,14 +421,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getCapabilitiesTransportFailure) {
+TEST_P(DeviceTest, getCapabilitiesTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getCapabilities(_))
@@ -419,14 +436,14 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getCapabilitiesDeadObject) {
+TEST_P(DeviceTest, getCapabilitiesDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getCapabilities(_))
@@ -434,17 +451,17 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
 
     // verify result
     ASSERT_FALSE(result.has_value());
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, getName) {
+TEST_P(DeviceTest, getName) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
 
     // run test
     const auto& name = device->getName();
@@ -453,19 +470,19 @@
     EXPECT_EQ(name, kName);
 }
 
-TEST(DeviceTest, getFeatureLevel) {
+TEST_P(DeviceTest, getFeatureLevel) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
 
     // run test
     const auto featureLevel = device->getFeatureLevel();
 
     // verify result
-    EXPECT_EQ(featureLevel, nn::Version::ANDROID_S);
+    EXPECT_EQ(featureLevel, kVersion);
 }
 
-TEST(DeviceTest, getCachedData) {
+TEST_P(DeviceTest, getCachedData) {
     // setup call
     const auto mockDevice = createMockDevice();
     EXPECT_CALL(*mockDevice, getVersionString(_)).Times(1);
@@ -474,7 +491,7 @@
     EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(1);
     EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(1);
 
-    const auto result = Device::create(kName, mockDevice);
+    const auto result = Device::create(kName, mockDevice, kVersion);
     ASSERT_TRUE(result.has_value())
             << "Failed with " << result.error().code << ": " << result.error().message;
     const auto& device = result.value();
@@ -487,10 +504,10 @@
     EXPECT_EQ(device->getCapabilities(), device->getCapabilities());
 }
 
-TEST(DeviceTest, getSupportedOperations) {
+TEST_P(DeviceTest, getSupportedOperations) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, getSupportedOperations(_, _))
             .Times(1)
             .WillOnce(DoAll(
@@ -508,10 +525,10 @@
     EXPECT_THAT(supportedOperations, Each(testing::IsTrue()));
 }
 
-TEST(DeviceTest, getSupportedOperationsError) {
+TEST_P(DeviceTest, getSupportedOperationsError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, getSupportedOperations(_, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
@@ -524,10 +541,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getSupportedOperationsTransportFailure) {
+TEST_P(DeviceTest, getSupportedOperationsTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, getSupportedOperations(_, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -540,10 +557,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, getSupportedOperationsDeadObject) {
+TEST_P(DeviceTest, getSupportedOperationsDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, getSupportedOperations(_, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -556,10 +573,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, prepareModel) {
+TEST_P(DeviceTest, prepareModel) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     const auto mockPreparedModel = MockPreparedModel::create();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
@@ -576,10 +593,10 @@
     EXPECT_NE(result.value(), nullptr);
 }
 
-TEST(DeviceTest, prepareModelLaunchError) {
+TEST_P(DeviceTest, prepareModelLaunchError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Invoke(makePreparedModelReturn(ErrorStatus::GENERAL_FAILURE,
@@ -594,10 +611,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelReturnError) {
+TEST_P(DeviceTest, prepareModelReturnError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Invoke(makePreparedModelReturn(ErrorStatus::NONE,
@@ -612,10 +629,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelNullptrError) {
+TEST_P(DeviceTest, prepareModelNullptrError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(
@@ -630,10 +647,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelTransportFailure) {
+TEST_P(DeviceTest, prepareModelTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -647,10 +664,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelDeadObject) {
+TEST_P(DeviceTest, prepareModelDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -664,10 +681,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, prepareModelAsyncCrash) {
+TEST_P(DeviceTest, prepareModelAsyncCrash) {
     // setup test
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     const auto ret = [&device]() {
         DeathMonitor::serviceDied(device->getDeathMonitor());
         return ndk::ScopedAStatus::ok();
@@ -685,10 +702,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, prepareModelFromCache) {
+TEST_P(DeviceTest, prepareModelFromCache) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     const auto mockPreparedModel = MockPreparedModel::create();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
@@ -704,10 +721,10 @@
     EXPECT_NE(result.value(), nullptr);
 }
 
-TEST(DeviceTest, prepareModelFromCacheLaunchError) {
+TEST_P(DeviceTest, prepareModelFromCacheLaunchError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
             .WillOnce(Invoke(makePreparedModelFromCacheReturn(
@@ -721,10 +738,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelFromCacheReturnError) {
+TEST_P(DeviceTest, prepareModelFromCacheReturnError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
             .WillOnce(Invoke(makePreparedModelFromCacheReturn(
@@ -738,10 +755,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelFromCacheNullptrError) {
+TEST_P(DeviceTest, prepareModelFromCacheNullptrError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
             .WillOnce(Invoke(makePreparedModelFromCacheReturn(ErrorStatus::NONE, ErrorStatus::NONE,
@@ -755,10 +772,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelFromCacheTransportFailure) {
+TEST_P(DeviceTest, prepareModelFromCacheTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -771,10 +788,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, prepareModelFromCacheDeadObject) {
+TEST_P(DeviceTest, prepareModelFromCacheDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, prepareModelFromCache(_, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -787,10 +804,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, prepareModelFromCacheAsyncCrash) {
+TEST_P(DeviceTest, prepareModelFromCacheAsyncCrash) {
     // setup test
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     const auto ret = [&device]() {
         DeathMonitor::serviceDied(device->getDeathMonitor());
         return ndk::ScopedAStatus::ok();
@@ -807,10 +824,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-TEST(DeviceTest, allocate) {
+TEST_P(DeviceTest, allocate) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     const auto mockBuffer = DeviceBuffer{.buffer = MockBuffer::create(), .token = 1};
     EXPECT_CALL(*mockDevice, allocate(_, _, _, _, _))
             .Times(1)
@@ -825,10 +842,10 @@
     EXPECT_NE(result.value(), nullptr);
 }
 
-TEST(DeviceTest, allocateError) {
+TEST_P(DeviceTest, allocateError) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, allocate(_, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
@@ -841,10 +858,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, allocateTransportFailure) {
+TEST_P(DeviceTest, allocateTransportFailure) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, allocate(_, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
@@ -857,10 +874,10 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-TEST(DeviceTest, allocateDeadObject) {
+TEST_P(DeviceTest, allocateDeadObject) {
     // setup call
     const auto mockDevice = createMockDevice();
-    const auto device = Device::create(kName, mockDevice).value();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
     EXPECT_CALL(*mockDevice, allocate(_, _, _, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
@@ -873,4 +890,8 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
+INSTANTIATE_TEST_SUITE_P(TestDevice, DeviceTest,
+                         ::testing::Values(nn::Version::ANDROID_S, nn::Version::FEATURE_LEVEL_6),
+                         printDeviceTest);
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/vts/functional/Android.bp b/neuralnetworks/aidl/vts/functional/Android.bp
index 3558d12..a102fe0 100644
--- a/neuralnetworks/aidl/vts/functional/Android.bp
+++ b/neuralnetworks/aidl/vts/functional/Android.bp
@@ -26,6 +26,7 @@
 cc_test {
     name: "VtsHalNeuralnetworksTargetTest",
     defaults: [
+        "neuralnetworks_use_latest_utils_hal_aidl",
         "neuralnetworks_vts_functional_defaults",
         "use_libaidlvintf_gtest_helper_static",
     ],
@@ -49,18 +50,14 @@
         "libvndksupport",
     ],
     static_libs: [
-        "android.hardware.common-V2-ndk",
-        "android.hardware.graphics.common-V2-ndk",
-        "android.hardware.neuralnetworks-V2-ndk",
         "android.hidl.allocator@1.0",
         "android.hidl.memory@1.0",
         "libaidlcommonsupport",
         "libgmock",
         "libhidlmemory",
+        "libneuralnetworks_common",
         "libneuralnetworks_generated_test_harness",
-        "libneuralnetworks_utils",
         "libsync",
-        "neuralnetworks_utils_hal_aidl",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_AIDL_V2_example",
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h b/neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h
similarity index 66%
rename from neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h
rename to neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h
index 500aa0c..a3aa706 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h
+++ b/neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h
@@ -14,14 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
-
-#include "ExecutionBurstUtils.h"
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
 
 #include <android-base/thread_annotations.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.2/IBurstCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IBurstContext.h>
 #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <fmq/MessageQueue.h>
@@ -30,6 +29,7 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
 
 #include <atomic>
 #include <chrono>
@@ -39,13 +39,13 @@
 #include <tuple>
 #include <vector>
 
-namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace android::hardware::neuralnetworks::adapter {
 
 /**
- * The ExecutionBurstServer class is responsible for waiting for and deserializing a request object
- * from a FMQ, performing the inference, and serializing the result back across another FMQ.
+ * The Burst class is responsible for waiting for and deserializing a request object from a FMQ,
+ * performing the inference, and serializing the result back across another FMQ.
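+ *
+ * Typical flow (sketch): a client passes the two FMQ descriptors to create(), after which a
+ * worker thread loops in task(), executing each received request until this object is destroyed.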
  */
-class ExecutionBurstServer : public IBurstContext {
+class Burst : public V1_2::IBurstContext {
     struct PrivateConstructorTag {};
 
   public:
@@ -58,13 +58,13 @@
       public:
         // Precondition: burstExecutor != nullptr
         // Precondition: burstCallback != nullptr
-        MemoryCache(nn::SharedBurst burstExecutor, sp<IBurstCallback> burstCallback);
+        MemoryCache(nn::SharedBurst burstExecutor, sp<V1_2::IBurstCallback> burstCallback);
 
         /**
          * Get the cached memory objects corresponding to provided slot identifiers.
          *
-         * If the slot entry is not present in the cache, this class will use IBurstCallback to
-         * retrieve those entries that are not present in the cache, then cache them.
+         * If the slot entry is not present in the cache, this class will use V1_2::IBurstCallback
+         * to retrieve those entries that are not present in the cache, then cache them.
          *
          * @param slots Identifiers of memory objects to be retrieved.
          * @return A vector where each element is the memory object and a ref-counted cache "hold"
@@ -93,7 +93,7 @@
         std::map<int32_t, std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>> mCache
                 GUARDED_BY(mMutex);
         nn::SharedBurst kBurstExecutor;
-        const sp<IBurstCallback> kBurstCallback;
+        const sp<V1_2::IBurstCallback> kBurstCallback;
     };
 
     /**
@@ -111,45 +111,45 @@
      *     execution.
      * @param burstExecutor Object which maintains a local cache of the memory pools and executes
      *     using the cached memory pools.
-     * @param pollingTimeWindow How much time (in microseconds) the ExecutionBurstServer is allowed
-     *     to poll the FMQ before waiting on the blocking futex. Polling may result in lower
-     *     latencies at the potential cost of more power usage.
-     * @return IBurstContext Handle to the burst context.
+     * @param pollingTimeWindow How much time (in microseconds) the Burst is allowed to poll the FMQ
+     *     before waiting on the blocking futex. Polling may result in lower latencies at the
+     *     potential cost of more power usage.
+     * @return V1_2::IBurstContext Handle to the burst context.
      */
-    static nn::GeneralResult<sp<ExecutionBurstServer>> create(
-            const sp<IBurstCallback>& callback,
-            const MQDescriptorSync<FmqRequestDatum>& requestChannel,
-            const MQDescriptorSync<FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
+    static nn::GeneralResult<sp<Burst>> create(
+            const sp<V1_2::IBurstCallback>& callback,
+            const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+            const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
+            nn::SharedBurst burstExecutor,
             std::chrono::microseconds pollingTimeWindow = std::chrono::microseconds{0});
 
-    ExecutionBurstServer(PrivateConstructorTag tag, const sp<IBurstCallback>& callback,
-                         std::unique_ptr<RequestChannelReceiver> requestChannel,
-                         std::unique_ptr<ResultChannelSender> resultChannel,
-                         nn::SharedBurst burstExecutor);
-    ~ExecutionBurstServer();
+    Burst(PrivateConstructorTag tag, const sp<V1_2::IBurstCallback>& callback,
+          std::unique_ptr<V1_2::utils::RequestChannelReceiver> requestChannel,
+          std::unique_ptr<V1_2::utils::ResultChannelSender> resultChannel,
+          nn::SharedBurst burstExecutor);
+    ~Burst();
 
     // Used by the NN runtime to preemptively remove any stored memory. See
-    // IBurstContext::freeMemory for more information.
+    // V1_2::IBurstContext::freeMemory for more information.
     Return<void> freeMemory(int32_t slot) override;
 
   private:
-    // Work loop that will continue processing execution requests until the ExecutionBurstServer
-    // object is freed.
+    // Work loop that will continue processing execution requests until the Burst object is freed.
     void task();
 
-    nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> execute(
+    nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> execute(
             const V1_0::Request& requestWithoutPools, const std::vector<int32_t>& slotsOfPools,
-            MeasureTiming measure);
+            V1_2::MeasureTiming measure);
 
     std::thread mWorker;
     std::atomic<bool> mTeardown{false};
-    const sp<IBurstCallback> mCallback;
-    const std::unique_ptr<RequestChannelReceiver> mRequestChannelReceiver;
-    const std::unique_ptr<ResultChannelSender> mResultChannelSender;
+    const sp<V1_2::IBurstCallback> mCallback;
+    const std::unique_ptr<V1_2::utils::RequestChannelReceiver> mRequestChannelReceiver;
+    const std::unique_ptr<V1_2::utils::ResultChannelSender> mResultChannelSender;
     const nn::SharedBurst mBurstExecutor;
     MemoryCache mMemoryCache;
 };
 
-}  // namespace android::hardware::neuralnetworks::V1_2::utils
+}  // namespace android::hardware::neuralnetworks::adapter
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_BURST_SERVER_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_BURST_H
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/utils/adapter/src/Burst.cpp
similarity index 67%
rename from neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
rename to neuralnetworks/utils/adapter/src/Burst.cpp
index f30b662..8b2e1dd 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
+++ b/neuralnetworks/utils/adapter/src/Burst.cpp
@@ -14,11 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "ExecutionBurstServer"
-
-#include "ExecutionBurstServer.h"
-#include "Conversions.h"
-#include "ExecutionBurstUtils.h"
+#include "Burst.h"
 
 #include <android-base/logging.h>
 #include <nnapi/IBurst.h>
@@ -29,6 +25,8 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
+#include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <algorithm>
@@ -42,11 +40,11 @@
 
 #include "Tracing.h"
 
-namespace android::hardware::neuralnetworks::V1_2::utils {
+namespace android::hardware::neuralnetworks::adapter {
 namespace {
 
-constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
-                                    std::numeric_limits<uint64_t>::max()};
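+// Both fields set to UINT64_MAX is the HAL sentinel meaning "no timing information available".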
+constexpr V1_2::Timing kTiming = {std::numeric_limits<uint64_t>::max(),
+                                  std::numeric_limits<uint64_t>::max()};
 
 nn::GeneralResult<std::vector<nn::SharedMemory>> getMemoriesCallback(
         V1_0::ErrorStatus status, const hidl_vec<hidl_memory>& memories) {
@@ -61,15 +59,15 @@
 
 }  // anonymous namespace
 
-ExecutionBurstServer::MemoryCache::MemoryCache(nn::SharedBurst burstExecutor,
-                                               sp<IBurstCallback> burstCallback)
+Burst::MemoryCache::MemoryCache(nn::SharedBurst burstExecutor,
+                                sp<V1_2::IBurstCallback> burstCallback)
     : kBurstExecutor(std::move(burstExecutor)), kBurstCallback(std::move(burstCallback)) {
     CHECK(kBurstExecutor != nullptr);
     CHECK(kBurstCallback != nullptr);
 }
 
 nn::GeneralResult<std::vector<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>>
-ExecutionBurstServer::MemoryCache::getCacheEntries(const std::vector<int32_t>& slots) {
+Burst::MemoryCache::getCacheEntries(const std::vector<int32_t>& slots) {
     std::lock_guard guard(mMutex);
     NN_TRY(ensureCacheEntriesArePresentLocked(slots));
 
@@ -82,7 +80,7 @@
     return results;
 }
 
-nn::GeneralResult<void> ExecutionBurstServer::MemoryCache::ensureCacheEntriesArePresentLocked(
+nn::GeneralResult<void> Burst::MemoryCache::ensureCacheEntriesArePresentLocked(
         const std::vector<int32_t>& slots) {
     const auto slotIsKnown = [this](int32_t slot)
                                      REQUIRES(mMutex) { return mCache.count(slot) > 0; };
@@ -107,11 +105,10 @@
     auto returnedMemories = NN_TRY(cb.take());
 
     if (returnedMemories.size() != unknownSlots.size()) {
-        return NN_ERROR()
-               << "ExecutionBurstServer::MemoryCache::ensureCacheEntriesArePresentLocked: Error "
-                  "retrieving memories -- count mismatch between requested memories ("
-               << unknownSlots.size() << ") and returned memories (" << returnedMemories.size()
-               << ")";
+        return NN_ERROR() << "Burst::MemoryCache::ensureCacheEntriesArePresentLocked: Error "
+                             "retrieving memories -- count mismatch between requested memories ("
+                          << unknownSlots.size() << ") and returned memories ("
+                          << returnedMemories.size() << ")";
     }
 
     // add memories to unknown slots
@@ -123,56 +120,54 @@
 }
 
 nn::GeneralResult<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>
-ExecutionBurstServer::MemoryCache::getCacheEntryLocked(int32_t slot) {
+Burst::MemoryCache::getCacheEntryLocked(int32_t slot) {
     if (const auto iter = mCache.find(slot); iter != mCache.end()) {
         return iter->second;
     }
-    return NN_ERROR()
-           << "ExecutionBurstServer::MemoryCache::getCacheEntryLocked failed because slot " << slot
-           << " is not present in the cache";
+    return NN_ERROR() << "Burst::MemoryCache::getCacheEntryLocked failed because slot " << slot
+                      << " is not present in the cache";
 }
 
-void ExecutionBurstServer::MemoryCache::addCacheEntryLocked(int32_t slot, nn::SharedMemory memory) {
+void Burst::MemoryCache::addCacheEntryLocked(int32_t slot, nn::SharedMemory memory) {
     auto hold = kBurstExecutor->cacheMemory(memory);
     mCache.emplace(slot, std::make_pair(std::move(memory), std::move(hold)));
 }
 
-void ExecutionBurstServer::MemoryCache::removeCacheEntry(int32_t slot) {
+void Burst::MemoryCache::removeCacheEntry(int32_t slot) {
     std::lock_guard guard(mMutex);
     mCache.erase(slot);
 }
 
-// ExecutionBurstServer methods
+// Burst methods
 
-nn::GeneralResult<sp<ExecutionBurstServer>> ExecutionBurstServer::create(
-        const sp<IBurstCallback>& callback, const MQDescriptorSync<FmqRequestDatum>& requestChannel,
-        const MQDescriptorSync<FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
+nn::GeneralResult<sp<Burst>> Burst::create(
+        const sp<V1_2::IBurstCallback>& callback,
+        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
         std::chrono::microseconds pollingTimeWindow) {
     // check inputs
     if (callback == nullptr || burstExecutor == nullptr) {
-        return NN_ERROR() << "ExecutionBurstServer::create passed a nullptr";
+        return NN_ERROR() << "Burst::create passed a nullptr";
     }
 
     // create FMQ objects
     auto requestChannelReceiver =
-            NN_TRY(RequestChannelReceiver::create(requestChannel, pollingTimeWindow));
-    auto resultChannelSender = NN_TRY(ResultChannelSender::create(resultChannel));
+            NN_TRY(V1_2::utils::RequestChannelReceiver::create(requestChannel, pollingTimeWindow));
+    auto resultChannelSender = NN_TRY(V1_2::utils::ResultChannelSender::create(resultChannel));
 
     // check FMQ objects
     CHECK(requestChannelReceiver != nullptr);
     CHECK(resultChannelSender != nullptr);
 
     // make and return context
-    return sp<ExecutionBurstServer>::make(PrivateConstructorTag{}, callback,
-                                          std::move(requestChannelReceiver),
-                                          std::move(resultChannelSender), std::move(burstExecutor));
+    return sp<Burst>::make(PrivateConstructorTag{}, callback, std::move(requestChannelReceiver),
+                           std::move(resultChannelSender), std::move(burstExecutor));
 }
 
-ExecutionBurstServer::ExecutionBurstServer(PrivateConstructorTag /*tag*/,
-                                           const sp<IBurstCallback>& callback,
-                                           std::unique_ptr<RequestChannelReceiver> requestChannel,
-                                           std::unique_ptr<ResultChannelSender> resultChannel,
-                                           nn::SharedBurst burstExecutor)
+Burst::Burst(PrivateConstructorTag /*tag*/, const sp<V1_2::IBurstCallback>& callback,
+             std::unique_ptr<V1_2::utils::RequestChannelReceiver> requestChannel,
+             std::unique_ptr<V1_2::utils::ResultChannelSender> resultChannel,
+             nn::SharedBurst burstExecutor)
     : mCallback(callback),
       mRequestChannelReceiver(std::move(requestChannel)),
       mResultChannelSender(std::move(resultChannel)),
@@ -182,7 +177,7 @@
     mWorker = std::thread([this] { task(); });
 }
 
-ExecutionBurstServer::~ExecutionBurstServer() {
+Burst::~Burst() {
     // set teardown flag
     mTeardown = true;
     mRequestChannelReceiver->invalidate();
@@ -191,12 +186,12 @@
     mWorker.join();
 }
 
-Return<void> ExecutionBurstServer::freeMemory(int32_t slot) {
+Return<void> Burst::freeMemory(int32_t slot) {
     mMemoryCache.removeCacheEntry(slot);
     return Void();
 }
 
-void ExecutionBurstServer::task() {
+void Burst::task() {
     // loop until the burst object is being destroyed
     while (!mTeardown) {
         // receive request
@@ -208,12 +203,12 @@
         // if the burst is being torn down, skip the execution so the "task" function can end
         if (!arguments.has_value()) {
             if (!mTeardown) {
-                mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming);
+                mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kTiming);
             }
             continue;
         }
 
-        // unpack the arguments; types are Request, std::vector<int32_t>, and MeasureTiming,
+        // unpack the arguments; types are Request, std::vector<int32_t>, and V1_2::MeasureTiming,
         // respectively
         const auto [requestWithoutPools, slotsOfPools, measure] = std::move(arguments).value();
 
@@ -226,17 +221,17 @@
         } else {
             const auto& [message, code, outputShapes] = result.error();
             LOG(ERROR) << "IBurst::execute failed with " << code << ": " << message;
-            mResultChannelSender->send(convert(code).value(), convert(outputShapes).value(),
-                                       kNoTiming);
+            mResultChannelSender->send(V1_2::utils::convert(code).value(),
+                                       V1_2::utils::convert(outputShapes).value(), kTiming);
         }
     }
 }
 
-nn::ExecutionResult<std::pair<hidl_vec<OutputShape>, Timing>> ExecutionBurstServer::execute(
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> Burst::execute(
         const V1_0::Request& requestWithoutPools, const std::vector<int32_t>& slotsOfPools,
-        MeasureTiming measure) {
+        V1_2::MeasureTiming measure) {
     NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
-                 "ExecutionBurstServer getting memory, executing, and returning results");
+                 "Burst getting memory, executing, and returning results");
 
     // ensure executor with cache has required memory
     const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));
@@ -257,7 +252,8 @@
     const auto [outputShapes, timing] =
             NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
-    return std::make_pair(NN_TRY(convert(outputShapes)), NN_TRY(convert(timing)));
+    return std::make_pair(NN_TRY(V1_2::utils::convert(outputShapes)),
+                          NN_TRY(V1_2::utils::convert(timing)));
 }
 
-}  // namespace android::hardware::neuralnetworks::V1_2::utils
+}  // namespace android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
index 40c0888..a14e782 100644
--- a/neuralnetworks/utils/adapter/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
@@ -16,7 +16,8 @@
 
 #include "PreparedModel.h"
 
-#include <ExecutionBurstServer.h>
+#include "Burst.h"
+
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
@@ -272,6 +273,15 @@
     return syncFences;
 }
 
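+// Local helper: obtains the canonical burst from the prepared model and wraps
+// it in the adapter Burst so it can be returned as a V1_2::IBurstContext.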
+nn::GeneralResult<sp<V1_2::IBurstContext>> configureExecutionBurst(
+        const nn::SharedPreparedModel& preparedModel, const sp<V1_2::IBurstCallback>& callback,
+        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel) {
+    auto burstExecutor = NN_TRY(preparedModel->configureExecutionBurst());
+    return Burst::create(callback, requestChannel, resultChannel, std::move(burstExecutor),
+                         V1_2::utils::getBurstServerPollingTimeWindow());
+}
+
 nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> executeFenced(
         const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
         const hidl_vec<hidl_handle>& waitFor, V1_2::MeasureTiming measure,
@@ -388,14 +398,17 @@
         const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
         const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
         configureExecutionBurst_cb cb) {
-    const sp<V1_2::IBurstContext> burst = nn::ExecutionBurstServer::create(
-            callback, requestChannel, resultChannel, this, std::chrono::microseconds{0});
-
-    if (burst == nullptr) {
-        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
-    } else {
-        cb(V1_0::ErrorStatus::NONE, burst);
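+    // Delegate to the helper above; on failure, log and convert the canonical
+    // error code into the HIDL error status expected by the callback.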
+    auto result = adapter::configureExecutionBurst(kPreparedModel, callback, requestChannel,
+                                                   resultChannel);
+    if (!result.has_value()) {
+        auto [message, code] = std::move(result).error();
+        LOG(ERROR) << "adapter::PreparedModel::configureExecutionBurst failed with " << code << ": "
+                   << message;
+        cb(V1_2::utils::convert(code).value(), nullptr);
+        return Void();
     }
+    auto burstContext = std::move(result).value();
+    cb(V1_0::ErrorStatus::NONE, std::move(burstContext));
     return Void();
 }
 
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index e02a202..39927a3 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -31,7 +31,6 @@
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: ["neuralnetworks_types"],
-    shared_libs: ["libbinder_ndk"],
 }
 
 cc_test {
diff --git a/neuralnetworks/utils/service/Android.bp b/neuralnetworks/utils/service/Android.bp
index fbb8679..c3272ae 100644
--- a/neuralnetworks/utils/service/Android.bp
+++ b/neuralnetworks/utils/service/Android.bp
@@ -25,7 +25,10 @@
 
 cc_library_static {
     name: "neuralnetworks_utils_hal_service",
-    defaults: ["neuralnetworks_utils_defaults"],
+    defaults: [
+        "neuralnetworks_use_latest_utils_hal_aidl",
+        "neuralnetworks_utils_defaults",
+    ],
     srcs: ["src/*"],
     local_include_dirs: ["include/nnapi/hal"],
     export_include_dirs: ["include"],
@@ -35,15 +38,12 @@
         "neuralnetworks_utils_hal_1_1",
         "neuralnetworks_utils_hal_1_2",
         "neuralnetworks_utils_hal_1_3",
-        "neuralnetworks_utils_hal_aidl",
         "neuralnetworks_utils_hal_common",
     ],
     shared_libs: [
-        "android.hardware.neuralnetworks-V2-ndk",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
         "android.hardware.neuralnetworks@1.3",
-        "libbinder_ndk",
     ],
 }
diff --git a/tv/cec/1.0/default/Android.bp b/tv/cec/1.0/default/Android.bp
index b4053df..e4c226d 100644
--- a/tv/cec/1.0/default/Android.bp
+++ b/tv/cec/1.0/default/Android.bp
@@ -15,6 +15,7 @@
     srcs: [
         "HdmiCec.cpp",
         "HdmiCecDefault.cpp",
+        "HdmiCecPort.cpp",
     ],
 
     shared_libs: [
diff --git a/tv/cec/1.0/default/HdmiCecDefault.cpp b/tv/cec/1.0/default/HdmiCecDefault.cpp
index 299bcf0..26ccb7d 100644
--- a/tv/cec/1.0/default/HdmiCecDefault.cpp
+++ b/tv/cec/1.0/default/HdmiCecDefault.cpp
@@ -16,19 +16,22 @@
 
 #define LOG_TAG "android.hardware.tv.cec@1.0-impl"
 #include <android-base/logging.h>
+#include <android-base/properties.h>
 
 #include <cutils/properties.h>
+#include <dirent.h>
 #include <errno.h>
 #include <fcntl.h>
-#include <linux/cec.h>
 #include <linux/ioctl.h>
 #include <poll.h>
-#include <pthread.h>
-#include <sys/eventfd.h>
+#include <unistd.h>
 #include <algorithm>
 
 #include "HdmiCecDefault.h"
 
+#define PROPERTY_DEVICE_TYPE "ro.hdmi.device_type"
+#define MIN_PORT_ID 0
+#define MAX_PORT_ID 15
+#define INVALID_PHYSICAL_ADDRESS 0xFFFF
+
 namespace android {
 namespace hardware {
 namespace tv {
@@ -36,24 +39,14 @@
 namespace V1_0 {
 namespace implementation {
 
-// When set to false, all the CEC commands are discarded. True by default after initialization.
-bool mCecEnabled;
-/*
- * When set to false, HAL does not wake up the system upon receiving <Image View On> or
- * <Text View On>. True by default after initialization.
- */
-bool mWakeupEnabled;
-
-int mCecFd;
-int mExitFd;
-pthread_t mEventThread;
-sp<IHdmiCecCallback> mCallback;
+using android::base::GetUintProperty;
+using std::stoi;
+using std::string;
 
 HdmiCecDefault::HdmiCecDefault() {
-    mCecFd = -1;
-    mExitFd = -1;
     mCecEnabled = false;
     mWakeupEnabled = false;
+    mCecControlEnabled = false;
     mCallback = nullptr;
 }
 
@@ -68,8 +61,8 @@
         return Result::FAILURE_INVALID_ARGS;
     }
 
-    struct cec_log_addrs cecLogAddrs;
-    int ret = ioctl(mCecFd, CEC_ADAP_G_LOG_ADDRS, &cecLogAddrs);
+    cec_log_addrs cecLogAddrs;
+    int ret = ioctl(mHdmiCecPorts[MIN_PORT_ID]->mCecFd, CEC_ADAP_G_LOG_ADDRS, &cecLogAddrs);
     if (ret) {
         LOG(ERROR) << "Add logical address failed, Error = " << strerror(errno);
         return Result::FAILURE_BUSY;
@@ -135,27 +128,36 @@
     cecLogAddrs.features[logAddrIndex][0] = 0;
     cecLogAddrs.features[logAddrIndex][1] = 0;
 
-    ret = ioctl(mCecFd, CEC_ADAP_S_LOG_ADDRS, &cecLogAddrs);
-    if (ret) {
-        LOG(ERROR) << "Add logical address failed, Error = " << strerror(errno);
-        return Result::FAILURE_BUSY;
+    // Return failure only if add logical address fails for all the ports
+    Return<Result> result = Result::FAILURE_BUSY;
+    for (int i = 0; i < mHdmiCecPorts.size(); i++) {
+        ret = ioctl(mHdmiCecPorts[i]->mCecFd, CEC_ADAP_S_LOG_ADDRS, &cecLogAddrs);
+        if (ret) {
+            LOG(ERROR) << "Add logical address failed for port " << mHdmiCecPorts[i]->mPortId
+                       << ", Error = " << strerror(errno);
+        } else {
+            result = Result::SUCCESS;
+        }
     }
-    return Result::SUCCESS;
+    return result;
 }
 
 Return<void> HdmiCecDefault::clearLogicalAddress() {
-    struct cec_log_addrs cecLogAddrs;
+    cec_log_addrs cecLogAddrs;
     memset(&cecLogAddrs, 0, sizeof(cecLogAddrs));
-    int ret = ioctl(mCecFd, CEC_ADAP_S_LOG_ADDRS, &cecLogAddrs);
-    if (ret) {
-        LOG(ERROR) << "Clear logical Address failed, Error = " << strerror(errno);
+    for (int i = 0; i < mHdmiCecPorts.size(); i++) {
+        int ret = ioctl(mHdmiCecPorts[i]->mCecFd, CEC_ADAP_S_LOG_ADDRS, &cecLogAddrs);
+        if (ret) {
+            LOG(ERROR) << "Clear logical Address failed for port " << mHdmiCecPorts[i]->mPortId
+                       << ", Error = " << strerror(errno);
+        }
     }
     return Void();
 }
 
 Return<void> HdmiCecDefault::getPhysicalAddress(getPhysicalAddress_cb callback) {
     uint16_t addr;
-    int ret = ioctl(mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
+    int ret = ioctl(mHdmiCecPorts[MIN_PORT_ID]->mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
     if (ret) {
         LOG(ERROR) << "Get physical address failed, Error = " << strerror(errno);
         callback(Result::FAILURE_INVALID_STATE, addr);
@@ -170,7 +172,7 @@
         return SendMessageResult::FAIL;
     }
 
-    struct cec_msg cecMsg;
+    cec_msg cecMsg;
     memset(&cecMsg, 0, sizeof(cec_msg));
 
     int initiator = static_cast<cec_logical_address_t>(message.initiator);
@@ -182,27 +184,25 @@
     }
     cecMsg.len = message.body.size() + 1;
 
-    int ret = ioctl(mCecFd, CEC_TRANSMIT, &cecMsg);
+    // Return failure only if send message fails for all the ports
+    Return<SendMessageResult> result = SendMessageResult::FAIL;
+    for (int i = 0; i < mHdmiCecPorts.size(); i++) {
+        int ret = ioctl(mHdmiCecPorts[i]->mCecFd, CEC_TRANSMIT, &cecMsg);
 
-    if (ret) {
-        LOG(ERROR) << "Send message failed, Error = " << strerror(errno);
-        return SendMessageResult::FAIL;
-    }
+        if (ret) {
+            LOG(ERROR) << "Send message failed, Error = " << strerror(errno);
+            continue;
+        }
 
-    if (cecMsg.tx_status != CEC_TX_STATUS_OK) {
-        LOG(ERROR) << "Send message tx_status = " << cecMsg.tx_status;
-    }
+        if (cecMsg.tx_status != CEC_TX_STATUS_OK) {
+            LOG(ERROR) << "Send message tx_status = " << cecMsg.tx_status;
+        }
 
-    switch (cecMsg.tx_status) {
-        case CEC_TX_STATUS_OK:
-            return SendMessageResult::SUCCESS;
-        case CEC_TX_STATUS_ARB_LOST:
-            return SendMessageResult::BUSY;
-        case CEC_TX_STATUS_NACK:
-            return SendMessageResult::NACK;
-        default:
-            return SendMessageResult::FAIL;
+        if (result != SendMessageResult::SUCCESS) {
+            result = getSendMessageResult(cecMsg.tx_status);
+        }
     }
+    return result;
 }
 
 Return<void> HdmiCecDefault::setCallback(const sp<IHdmiCecCallback>& callback) {
@@ -227,19 +227,25 @@
 }
 
 Return<void> HdmiCecDefault::getPortInfo(getPortInfo_cb callback) {
-    uint16_t addr;
-    int ret = ioctl(mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
-    if (ret) {
-        LOG(ERROR) << "Get port info failed, Error = " << strerror(errno);
+    hidl_vec<HdmiPortInfo> portInfos(mHdmiCecPorts.size());
+    for (int i = 0; i < mHdmiCecPorts.size(); i++) {
+        uint16_t addr = INVALID_PHYSICAL_ADDRESS;
+        int ret = ioctl(mHdmiCecPorts[i]->mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
+        if (ret) {
+            LOG(ERROR) << "Get port info failed for port : " << mHdmiCecPorts[i]->mPortId
+                       << ", Error = " << strerror(errno);
+        }
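+        // On non-TV devices the first port is reported as the HDMI output; all
+        // remaining ports (and every port on a TV panel) are inputs.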
+        HdmiPortType type = HdmiPortType::INPUT;
+        uint32_t deviceType = GetUintProperty<uint32_t>(PROPERTY_DEVICE_TYPE, CEC_DEVICE_PLAYBACK);
+        if (deviceType != CEC_DEVICE_TV && i == MIN_PORT_ID) {
+            type = HdmiPortType::OUTPUT;
+        }
+        portInfos[i] = {.type = type,
+                        .portId = mHdmiCecPorts[i]->mPortId,
+                        .cecSupported = true,
+                        .arcSupported = false,
+                        .physicalAddress = addr};
     }
-
-    unsigned int type = property_get_int32("ro.hdmi.device_type", CEC_DEVICE_PLAYBACK);
-    hidl_vec<HdmiPortInfo> portInfos(1);
-    portInfos[0] = {.type = (type == CEC_DEVICE_TV ? HdmiPortType::INPUT : HdmiPortType::OUTPUT),
-                    .portId = 1,
-                    .cecSupported = true,
-                    .arcSupported = false,
-                    .physicalAddress = addr};
     callback(portInfos);
     return Void();
 }
@@ -254,7 +260,9 @@
             LOG(DEBUG) << "setOption: WAKEUP: " << value;
             mWakeupEnabled = value;
             break;
-        default:
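+        // Toggled as the system enters or leaves standby; while false, only
+        // messages transferable in sleep are forwarded to the callback.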
+        case OptionKey::SYSTEM_CEC_CONTROL:
+            LOG(DEBUG) << "setOption: SYSTEM_CEC_CONTROL: " << value;
+            mCecControlEnabled = value;
             break;
     }
     return Void();
@@ -268,9 +276,9 @@
     return Void();
 }
 
-Return<bool> HdmiCecDefault::isConnected(int32_t /*portId*/) {
+Return<bool> HdmiCecDefault::isConnected(int32_t portId) {
+    // portId indexes mHdmiCecPorts, so reject ids outside the discovered range
+    // before dereferencing.
+    if (portId < 0 || portId >= static_cast<int32_t>(mHdmiCecPorts.size())) {
+        LOG(ERROR) << "Is connected failed, invalid port id " << portId;
+        return false;
+    }
     uint16_t addr;
-    int ret = ioctl(mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
+    int ret = ioctl(mHdmiCecPorts[portId]->mCecFd, CEC_ADAP_G_PHYS_ADDR, &addr);
     if (ret) {
         LOG(ERROR) << "Is connected failed, Error = " << strerror(errno);
         return false;
@@ -281,79 +289,70 @@
     return true;
 }
 
-// Initialise the cec file descriptor
+int getPortId(string cecFilename) {
+    // Filenames are expected to be "cecN". Validate the suffix before calling
+    // stoi, which would abort on non-numeric input with exceptions disabled.
+    string suffix = cecFilename.substr(3);
+    if (suffix.empty() || suffix.size() > 2 ||
+        suffix.find_first_not_of("0123456789") != string::npos) {
+        return -1;
+    }
+    int portId = stoi(suffix);
+    if (portId >= MIN_PORT_ID && portId <= MAX_PORT_ID) {
+        return portId;
+    }
+    return -1;
+}
+
+// Initialise the cec file descriptors
 Return<Result> HdmiCecDefault::init() {
-    const char* path = "/dev/cec0";
-    mCecFd = open(path, O_RDWR);
-    if (mCecFd < 0) {
-        LOG(ERROR) << "Failed to open " << path << ", Error = " << strerror(errno);
-        return Result::FAILURE_NOT_SUPPORTED;
-    }
-    mExitFd = eventfd(0, EFD_NONBLOCK);
-    if (mExitFd < 0) {
-        LOG(ERROR) << "Failed to open eventfd, Error = " << strerror(errno);
-        release();
-        return Result::FAILURE_NOT_SUPPORTED;
+    const char* parentPath = "/dev/";
+    DIR* dir = opendir(parentPath);
+    if (dir == nullptr) {
+        LOG(ERROR) << "Failed to open " << parentPath << ", Error = " << strerror(errno);
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+    const char* cecFilename = "cec";
+
+    // Scan /dev for cecN nodes; each usable node gets a port object and a thread.
+    while (struct dirent* dirEntry = readdir(dir)) {
+        string filename = dirEntry->d_name;
+        if (filename.compare(0, 3, cecFilename, 0, 3) == 0) {
+            int portId = getPortId(filename);
+            if (portId == -1) {
+                continue;
+            }
+            shared_ptr<HdmiCecPort> hdmiCecPort = std::make_shared<HdmiCecPort>(portId);
+            string filepath = parentPath + filename;
+            Result result = hdmiCecPort->init(filepath.c_str());
+            if (result != Result::SUCCESS) {
+                continue;
+            }
+            thread eventThread(&HdmiCecDefault::event_thread, this, hdmiCecPort.get());
+            mEventThreads.push_back(std::move(eventThread));
+            mHdmiCecPorts.push_back(std::move(hdmiCecPort));
+        }
     }
 
-    // Ensure the CEC device supports required capabilities
-    struct cec_caps caps = {};
-    int ret = ioctl(mCecFd, CEC_ADAP_G_CAPS, &caps);
-    if (ret) {
-        LOG(ERROR) << "Unable to query cec adapter capabilities, Error = " << strerror(errno);
-        release();
-        return Result::FAILURE_NOT_SUPPORTED;
-    }
-
-    if (!(caps.capabilities & (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | CEC_CAP_PASSTHROUGH))) {
-        LOG(ERROR) << "Wrong cec adapter capabilities " << caps.capabilities;
-        release();
-        return Result::FAILURE_NOT_SUPPORTED;
-    }
-
-    uint32_t mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
-    ret = ioctl(mCecFd, CEC_S_MODE, &mode);
-    if (ret) {
-        LOG(ERROR) << "Unable to set initiator mode, Error = " << strerror(errno);
-        release();
-        return Result::FAILURE_NOT_SUPPORTED;
-    }
-
-    /* thread loop for receiving cec messages and hotplug events*/
-    if (pthread_create(&mEventThread, NULL, event_thread, NULL)) {
-        LOG(ERROR) << "Can't create event thread: " << strerror(errno);
-        release();
+    if (mHdmiCecPorts.empty()) {
         return Result::FAILURE_NOT_SUPPORTED;
     }
 
     mCecEnabled = true;
     mWakeupEnabled = true;
+    mCecControlEnabled = true;
     return Result::SUCCESS;
 }
 
 Return<void> HdmiCecDefault::release() {
-    if (mExitFd > 0) {
-        uint64_t tmp = 1;
-        write(mExitFd, &tmp, sizeof(tmp));
-        pthread_join(mEventThread, NULL);
-    }
-    if (mExitFd > 0) {
-        close(mExitFd);
-    }
-    if (mCecFd > 0) {
-        close(mCecFd);
-    }
     mCecEnabled = false;
     mWakeupEnabled = false;
+    mCecControlEnabled = false;
+    // Signal each port's exit eventfd so its thread leaves poll() before join.
+    for (shared_ptr<HdmiCecPort>& hdmiCecPort : mHdmiCecPorts) {
+        uint64_t tmp = 1;
+        write(hdmiCecPort->mExitFd, &tmp, sizeof(tmp));
+    }
+    for (thread& eventThread : mEventThreads) {
+        if (eventThread.joinable()) {
+            eventThread.join();
+        }
+    }
     setCallback(nullptr);
+    mHdmiCecPorts.clear();
+    mEventThreads.clear();
     return Void();
 }
 
-void* HdmiCecDefault::event_thread(void*) {
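+// Per-port worker loop: polls the port's CEC fd for messages and events, plus
+// the port's exit eventfd so the thread can be shut down on release().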
+void HdmiCecDefault::event_thread(HdmiCecPort* hdmiCecPort) {
     struct pollfd ufds[3] = {
-            {mCecFd, POLLIN, 0},
-            {mCecFd, POLLERR, 0},
-            {mExitFd, POLLIN, 0},
+            {hdmiCecPort->mCecFd, POLLIN, 0},
+            {hdmiCecPort->mCecFd, POLLERR, 0},
+            {hdmiCecPort->mExitFd, POLLIN, 0},
     };
 
     while (1) {
@@ -372,23 +371,23 @@
         }
 
         if (ufds[1].revents == POLLERR) { /* CEC Event */
-            struct cec_event ev;
-            ret = ioctl(mCecFd, CEC_DQEVENT, &ev);
-
-            if (!mCecEnabled) {
-                continue;
-            }
+            cec_event ev;
+            ret = ioctl(hdmiCecPort->mCecFd, CEC_DQEVENT, &ev);
 
             if (ret) {
                 LOG(ERROR) << "CEC_DQEVENT failed, Error = " << strerror(errno);
                 continue;
             }
 
+            if (!mCecEnabled) {
+                continue;
+            }
+
             if (ev.event == CEC_EVENT_STATE_CHANGE) {
                 if (mCallback != nullptr) {
                     HotplugEvent hotplugEvent{
                             .connected = (ev.state_change.phys_addr != CEC_PHYS_ADDR_INVALID),
-                            .portId = 1};
+                            .portId = hdmiCecPort->mPortId};
                     mCallback->onHotplugEvent(hotplugEvent);
                 } else {
                     LOG(ERROR) << "No event callback for hotplug";
@@ -397,12 +396,8 @@
         }
 
         if (ufds[0].revents == POLLIN) { /* CEC Driver */
-            struct cec_msg msg = {};
-            ret = ioctl(mCecFd, CEC_RECEIVE, &msg);
-
-            if (!mCecEnabled) {
-                continue;
-            }
+            cec_msg msg = {};
+            ret = ioctl(hdmiCecPort->mCecFd, CEC_RECEIVE, &msg);
 
             if (ret) {
                 LOG(ERROR) << "CEC_RECEIVE failed, Error = " << strerror(errno);
@@ -414,11 +409,20 @@
                 continue;
             }
 
+            if (!mCecEnabled) {
+                continue;
+            }
+
             if (!mWakeupEnabled && isWakeupMessage(msg)) {
                 LOG(DEBUG) << "Filter wakeup message";
                 continue;
             }
 
+            if (!mCecControlEnabled && !isTransferableInSleep(msg)) {
+                LOG(DEBUG) << "Filter message in standby mode";
+                continue;
+            }
+
             if (mCallback != nullptr) {
                 size_t length = std::min(msg.len - 1, (uint32_t)MaxLength::MESSAGE_BODY);
                 CecMessage cecMessage{
@@ -435,14 +439,13 @@
             }
         }
     }
-    return NULL;
 }
 
-int HdmiCecDefault::getOpcode(struct cec_msg message) {
-    return (static_cast<uint8_t>(message.msg[1]) & 0xff);
+int HdmiCecDefault::getOpcode(cec_msg message) {
+    return static_cast<uint8_t>(message.msg[1]);
 }
 
-bool HdmiCecDefault::isWakeupMessage(struct cec_msg message) {
+bool HdmiCecDefault::isWakeupMessage(cec_msg message) {
     int opcode = getOpcode(message);
     switch (opcode) {
         case CEC_MESSAGE_TEXT_VIEW_ON:
@@ -453,6 +456,61 @@
     }
 }
 
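+// Opcodes that must still reach Android while SYSTEM_CEC_CONTROL is disabled,
+// i.e. while the system is in standby and the microprocessor handles CEC.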
+bool HdmiCecDefault::isTransferableInSleep(cec_msg message) {
+    int opcode = getOpcode(message);
+    switch (opcode) {
+        case CEC_MESSAGE_ABORT:
+        case CEC_MESSAGE_DEVICE_VENDOR_ID:
+        case CEC_MESSAGE_GET_CEC_VERSION:
+        case CEC_MESSAGE_GET_MENU_LANGUAGE:
+        case CEC_MESSAGE_GIVE_DEVICE_POWER_STATUS:
+        case CEC_MESSAGE_GIVE_DEVICE_VENDOR_ID:
+        case CEC_MESSAGE_GIVE_OSD_NAME:
+        case CEC_MESSAGE_GIVE_PHYSICAL_ADDRESS:
+        case CEC_MESSAGE_REPORT_PHYSICAL_ADDRESS:
+        case CEC_MESSAGE_REPORT_POWER_STATUS:
+        case CEC_MESSAGE_SET_OSD_NAME:
+        case CEC_MESSAGE_DECK_CONTROL:
+        case CEC_MESSAGE_PLAY:
+        case CEC_MESSAGE_IMAGE_VIEW_ON:
+        case CEC_MESSAGE_TEXT_VIEW_ON:
+        case CEC_MESSAGE_SYSTEM_AUDIO_MODE_REQUEST:
+            return true;
+        case CEC_MESSAGE_USER_CONTROL_PRESSED:
+            return isPowerUICommand(message);
+        default:
+            return false;
+    }
+}
+
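+// Returns the first operand byte of a CEC message (for <User Control Pressed>,
+// this is the UI command code).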
+int HdmiCecDefault::getFirstParam(cec_msg message) {
+    return static_cast<uint8_t>(message.msg[2]);
+}
+
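+// <User Control Pressed> is transferable in sleep only for power-related UI
+// commands, identified by the message's first operand byte.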
+bool HdmiCecDefault::isPowerUICommand(cec_msg message) {
+    int uiCommand = getFirstParam(message);
+    switch (uiCommand) {
+        case CEC_OP_UI_CMD_POWER:
+        case CEC_OP_UI_CMD_DEVICE_ROOT_MENU:
+        case CEC_OP_UI_CMD_POWER_ON_FUNCTION:
+            return true;
+        default:
+            return false;
+    }
+}
+
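+// Maps the kernel's cec_msg tx_status to the corresponding SendMessageResult.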
+Return<SendMessageResult> HdmiCecDefault::getSendMessageResult(int tx_status) {
+    switch (tx_status) {
+        case CEC_TX_STATUS_OK:
+            return SendMessageResult::SUCCESS;
+        case CEC_TX_STATUS_ARB_LOST:
+            return SendMessageResult::BUSY;
+        case CEC_TX_STATUS_NACK:
+            return SendMessageResult::NACK;
+        default:
+            return SendMessageResult::FAIL;
+    }
+}
 }  // namespace implementation
 }  // namespace V1_0
 }  // namespace cec
diff --git a/tv/cec/1.0/default/HdmiCecDefault.h b/tv/cec/1.0/default/HdmiCecDefault.h
index c1bb2c7..6574429 100644
--- a/tv/cec/1.0/default/HdmiCecDefault.h
+++ b/tv/cec/1.0/default/HdmiCecDefault.h
@@ -13,9 +13,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#include <android/hardware/tv/cec/1.0/IHdmiCec.h>
 #include <hardware/hdmi_cec.h>
+#include <linux/cec.h>
+#include <memory>
+#include <thread>
+#include <vector>
+#include "HdmiCecPort.h"
 
 namespace android {
 namespace hardware {
@@ -24,7 +26,12 @@
 namespace V1_0 {
 namespace implementation {
 
-struct HdmiCecDefault : public IHdmiCec, public hidl_death_recipient {
+using std::shared_ptr;
+using std::thread;
+using std::vector;
+
+class HdmiCecDefault : public IHdmiCec, public hidl_death_recipient {
+  public:
     HdmiCecDefault();
     ~HdmiCecDefault();
     // Methods from ::android::hardware::tv::cec::V1_0::IHdmiCec follow.
@@ -47,11 +54,35 @@
 
     Return<Result> init();
     Return<void> release();
-    static void* event_thread(void*);
-    static int getOpcode(struct cec_msg message);
-    static bool isWakeupMessage(struct cec_msg message);
-};
 
+  private:
+    void event_thread(HdmiCecPort* hdmiCecPort);
+    static int getOpcode(cec_msg message);
+    static int getFirstParam(cec_msg message);
+    static bool isWakeupMessage(cec_msg message);
+    static bool isTransferableInSleep(cec_msg message);
+    static bool isPowerUICommand(cec_msg message);
+    static Return<SendMessageResult> getSendMessageResult(int tx_status);
+
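+    // One worker thread per discovered /dev/cecN node; threads are spawned in
+    // init() and joined in release().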
+    vector<thread> mEventThreads;
+    vector<shared_ptr<HdmiCecPort>> mHdmiCecPorts;
+
+    // When set to false, all the CEC commands are discarded. True by default after initialization.
+    bool mCecEnabled;
+    /*
+     * When set to false, HAL does not wake up the system upon receiving <Image View On> or
+     * <Text View On>. True by default after initialization.
+     */
+    bool mWakeupEnabled;
+    /*
+     * Updated when system goes into or comes out of standby mode.
+     * When set to true, Android system is handling CEC commands.
+     * When set to false, microprocessor is handling CEC commands.
+     * True by default after initialization.
+     */
+    bool mCecControlEnabled;
+    sp<IHdmiCecCallback> mCallback;
+};
 }  // namespace implementation
 }  // namespace V1_0
 }  // namespace cec
diff --git a/tv/cec/1.0/default/HdmiCecPort.cpp b/tv/cec/1.0/default/HdmiCecPort.cpp
new file mode 100644
index 0000000..73dda12
--- /dev/null
+++ b/tv/cec/1.0/default/HdmiCecPort.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "android.hardware.tv.cec@1.0-impl"
+
+#include <android-base/logging.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/cec.h>
+#include <linux/ioctl.h>
+#include <sys/eventfd.h>
+#include <unistd.h>
+
+#include "HdmiCecPort.h"
+
+namespace android {
+namespace hardware {
+namespace tv {
+namespace cec {
+namespace V1_0 {
+namespace implementation {
+
+HdmiCecPort::HdmiCecPort(unsigned int portId) {
+    mPortId = portId;
+    mCecFd = -1;
+    mExitFd = -1;
+}
+
+HdmiCecPort::~HdmiCecPort() {
+    release();
+}
+
+// Initialise the cec file descriptor
+Return<Result> HdmiCecPort::init(const char* path) {
+    mCecFd = open(path, O_RDWR);
+    if (mCecFd < 0) {
+        LOG(ERROR) << "Failed to open " << path << ", Error = " << strerror(errno);
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+    mExitFd = eventfd(0, EFD_NONBLOCK);
+    if (mExitFd < 0) {
+        LOG(ERROR) << "Failed to open eventfd, Error = " << strerror(errno);
+        release();
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+
+    // Ensure the CEC device supports required capabilities
+    struct cec_caps caps = {};
+    int ret = ioctl(mCecFd, CEC_ADAP_G_CAPS, &caps);
+    if (ret) {
+        LOG(ERROR) << "Unable to query cec adapter capabilities, Error = " << strerror(errno);
+        release();
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+
+    if (!(caps.capabilities & (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | CEC_CAP_PASSTHROUGH))) {
+        LOG(ERROR) << "Wrong cec adapter capabilities " << caps.capabilities;
+        release();
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+
+    uint32_t mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+    ret = ioctl(mCecFd, CEC_S_MODE, &mode);
+    if (ret) {
+        LOG(ERROR) << "Unable to set initiator mode, Error = " << strerror(errno);
+        release();
+        return Result::FAILURE_NOT_SUPPORTED;
+    }
+    return Result::SUCCESS;
+}
+
+Return<void> HdmiCecPort::release() {
+    if (mExitFd > 0) {
+        // Unblock any poller on this port before closing the fds.
+        uint64_t tmp = 1;
+        write(mExitFd, &tmp, sizeof(tmp));
+        close(mExitFd);
+    }
+    if (mCecFd > 0) {
+        close(mCecFd);
+    }
+    return Void();
+}
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace cec
+}  // namespace tv
+}  // namespace hardware
+}  // namespace android
diff --git a/tv/cec/1.0/default/HdmiCecPort.h b/tv/cec/1.0/default/HdmiCecPort.h
new file mode 100644
index 0000000..2a2fdef
--- /dev/null
+++ b/tv/cec/1.0/default/HdmiCecPort.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <android/hardware/tv/cec/1.0/IHdmiCec.h>
+
+namespace android {
+namespace hardware {
+namespace tv {
+namespace cec {
+namespace V1_0 {
+namespace implementation {
+
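+// Wraps a single /dev/cecN adapter: owns the CEC fd and an eventfd used to
+// unblock the poll() in HdmiCecDefault::event_thread during shutdown.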
+class HdmiCecPort {
+  public:
+    HdmiCecPort(unsigned int portId);
+    ~HdmiCecPort();
+    Return<Result> init(const char* path);
+    Return<void> release();
+
+    unsigned int mPortId;
+    int mCecFd;
+    int mExitFd;
+};
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace cec
+}  // namespace tv
+}  // namespace hardware
+}  // namespace android