Merge "Support the location injection in AIDL HAL"
diff --git a/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp b/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
index c61693e..829b0ff 100644
--- a/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
+++ b/graphics/composer/aidl/android/hardware/graphics/composer3/vts/functional/VtsHalGraphicsComposer3_TargetTest.cpp
@@ -1426,8 +1426,7 @@
         presentFence2->waitForever(LOG_TAG);
 
         const auto actualPresentTime = presentFence2->getSignalTime();
-        const auto presentError = std::abs(expectedPresentTime - actualPresentTime);
-        EXPECT_LE(presentError, vsyncPeriod / 2);
+        EXPECT_GE(actualPresentTime, expectedPresentTime - vsyncPeriod / 2);
 
         ASSERT_TRUE(mComposerClient->setPowerMode(mPrimaryDisplay, PowerMode::OFF).isOk());
     }
@@ -1499,7 +1498,7 @@
     execute();
     {
         const auto errors = mReader.takeErrors();
-        EXPECT_EQ(1, errors.size());
+        ASSERT_EQ(1, errors.size());
         EXPECT_EQ(IComposerClient::EX_BAD_PARAMETER, errors[0].errorCode);
     }
 
@@ -1507,7 +1506,7 @@
     execute();
     {
         const auto errors = mReader.takeErrors();
-        EXPECT_EQ(1, errors.size());
+        ASSERT_EQ(1, errors.size());
         EXPECT_EQ(IComposerClient::EX_BAD_PARAMETER, errors[0].errorCode);
     }
 }
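A minimal sketch (illustration only, not part of this patch) of the behavioral
difference in the composer test change above: the old two-sided check failed
whenever the actual present time drifted more than half a vsync period in
either direction, while the relaxed one-sided check only fails when the frame
is presented too early.

    #include <cstdint>
    #include <cstdlib>

    // Old check: |expected - actual| <= vsyncPeriod / 2 (rejects late presents).
    bool oldPresentTimeCheck(int64_t expected, int64_t actual, int64_t vsyncPeriod) {
        return std::abs(expected - actual) <= vsyncPeriod / 2;
    }

    // New check: actual >= expected - vsyncPeriod / 2 (late presents are allowed).
    bool newPresentTimeCheck(int64_t expected, int64_t actual, int64_t vsyncPeriod) {
        return actual >= expected - vsyncPeriod / 2;
    }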
diff --git a/health/2.1/vts/OWNERS b/health/2.1/vts/OWNERS
index 20450ba..a6803cd 100644
--- a/health/2.1/vts/OWNERS
+++ b/health/2.1/vts/OWNERS
@@ -1,3 +1,2 @@
 elsk@google.com
-hridya@google.com
 sspatil@google.com
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 78433a7..477b311 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -112,11 +112,15 @@
 GeneralResult<Request> convert(const aidl_hal::Request& request);
 GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
 GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle);
+GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc);
 
 GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
 GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
 GeneralResult<std::vector<OutputShape>> convert(
         const std::vector<aidl_hal::OutputShape>& outputShapes);
+GeneralResult<std::vector<SharedHandle>> convert(
+        const std::vector<ndk::ScopedFileDescriptor>& handles);
+GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles);
 
 GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);
 
@@ -129,6 +133,7 @@
 nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken);
 nn::GeneralResult<BufferDesc> unvalidatedConvert(const nn::BufferDesc& bufferDesc);
 nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRole);
+nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType);
 nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
 nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory);
 nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
@@ -154,14 +159,16 @@
 nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
 nn::GeneralResult<RequestMemoryPool> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool);
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
-nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration);
 nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration);
 nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalTimePoint& optionalTimePoint);
 nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SyncFence& syncFence);
 nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SharedHandle& handle);
+nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);
 
 nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
 nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
 nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming);
 nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory);
 nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
@@ -172,6 +179,8 @@
 nn::GeneralResult<Timing> convert(const nn::Timing& timing);
 nn::GeneralResult<int64_t> convert(const nn::OptionalDuration& optionalDuration);
 nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& optionalTimePoint);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Extension> convert(const nn::Extension& extension);
 
 nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
 nn::GeneralResult<std::vector<OutputShape>> convert(
@@ -180,6 +189,7 @@
         const std::vector<nn::SharedHandle>& handles);
 nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
         const std::vector<nn::SyncFence>& syncFences);
+nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);
 
 nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
 
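A minimal sketch (hypothetical caller; toCanonical is not part of this patch)
showing the new AIDL-to-canonical overloads declared above; NN_TRY comes from
nnapi/Result.h:

    #include <nnapi/Result.h>
    #include <nnapi/hal/aidl/Conversions.h>

    #include <utility>
    #include <vector>

    namespace android::nn {

    // Converts an incoming AIDL BufferDesc together with its roles.
    GeneralResult<std::pair<BufferDesc, std::vector<BufferRole>>> toCanonical(
            const aidl_hal::BufferDesc& desc,
            const std::vector<aidl_hal::BufferRole>& roles) {
        auto nnDesc = NN_TRY(convert(desc));
        auto nnRoles = NN_TRY(convert(roles));
        return std::make_pair(std::move(nnDesc), std::move(nnRoles));
    }

    }  // namespace android::nn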
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 45628c8..113d2da 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -551,6 +551,10 @@
     return validatedConvert(handle);
 }
 
+GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc) {
+    return validatedConvert(bufferDesc);
+}
+
 GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension) {
     return validatedConvert(extension);
 }
@@ -564,6 +568,15 @@
     return validatedConvert(outputShapes);
 }
 
+GeneralResult<std::vector<SharedHandle>> convert(
+        const std::vector<ndk::ScopedFileDescriptor>& handles) {
+    return validatedConvert(handles);
+}
+
+GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles) {
+    return validatedConvert(roles);
+}
+
 GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
     if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
         return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
@@ -576,42 +589,7 @@
 namespace aidl::android::hardware::neuralnetworks::utils {
 namespace {
 
-template <typename Input>
-using UnvalidatedConvertOutput =
-        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
-        const std::vector<Type>& arguments) {
-    std::vector<UnvalidatedConvertOutput<Type>> halObject;
-    halObject.reserve(arguments.size());
-    for (const auto& argument : arguments) {
-        halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
-    }
-    return halObject;
-}
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
-        const std::vector<Type>& arguments) {
-    return unvalidatedConvertVec(arguments);
-}
-
-template <typename Type>
-nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
-    NN_TRY(compliantVersion(canonical));
-    return utils::unvalidatedConvert(canonical);
-}
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
-        const std::vector<Type>& arguments) {
-    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
-    for (size_t i = 0; i < arguments.size(); ++i) {
-        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
-    }
-    return halObject;
-}
+using utils::unvalidatedConvert;
 
 // Helper template for std::visit
 template <class... Ts>
@@ -721,6 +699,74 @@
             operator nn::GeneralResult<Memory>();
 }
 
+nn::GeneralResult<PerformanceInfo> unvalidatedConvert(
+        const nn::Capabilities::PerformanceInfo& info) {
+    return PerformanceInfo{.execTime = info.execTime, .powerUsage = info.powerUsage};
+}
+
+nn::GeneralResult<OperandPerformance> unvalidatedConvert(
+        const nn::Capabilities::OperandPerformance& operandPerformance) {
+    return OperandPerformance{.type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
+                              .info = NN_TRY(unvalidatedConvert(operandPerformance.info))};
+}
+
+nn::GeneralResult<std::vector<OperandPerformance>> unvalidatedConvert(
+        const nn::Capabilities::OperandPerformanceTable& table) {
+    std::vector<OperandPerformance> operandPerformances;
+    operandPerformances.reserve(table.asVector().size());
+    for (const auto& operandPerformance : table.asVector()) {
+        operandPerformances.push_back(NN_TRY(unvalidatedConvert(operandPerformance)));
+    }
+    return operandPerformances;
+}
+
+nn::GeneralResult<ExtensionOperandTypeInformation> unvalidatedConvert(
+        const nn::Extension::OperandTypeInformation& info) {
+    return ExtensionOperandTypeInformation{.type = info.type,
+                                           .isTensor = info.isTensor,
+                                           .byteSize = static_cast<int32_t>(info.byteSize)};
+}
+
+nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
+    if (duration < nn::Duration::zero()) {
+        return NN_ERROR() << "Unable to convert invalid (negative) duration";
+    }
+    constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
+    const auto count = duration.count();
+    return static_cast<int64_t>(std::min(count, kIntMax));
+}
+
+template <typename Input>
+using UnvalidatedConvertOutput =
+        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
+
+template <typename Type>
+nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
+        const std::vector<Type>& arguments) {
+    std::vector<UnvalidatedConvertOutput<Type>> halObject;
+    halObject.reserve(arguments.size());
+    for (const auto& argument : arguments) {
+        halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
+    }
+    return halObject;
+}
+
+template <typename Type>
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+    NN_TRY(compliantVersion(canonical));
+    return utils::unvalidatedConvert(canonical);
+}
+
+template <typename Type>
+nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
+        const std::vector<Type>& arguments) {
+    std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
+    for (size_t i = 0; i < arguments.size(); ++i) {
+        halObject[i] = NN_TRY(validatedConvert(arguments[i]));
+    }
+    return halObject;
+}
+
 }  // namespace
 
 nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken) {
@@ -743,6 +789,19 @@
     };
 }
 
+nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType) {
+    switch (deviceType) {
+        case nn::DeviceType::UNKNOWN:
+            break;
+        case nn::DeviceType::OTHER:
+        case nn::DeviceType::CPU:
+        case nn::DeviceType::GPU:
+        case nn::DeviceType::ACCELERATOR:
+            return static_cast<DeviceType>(deviceType);
+    }
+    return NN_ERROR() << "Invalid DeviceType " << deviceType;
+}
+
 nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming) {
     return measureTiming == nn::MeasureTiming::YES;
 }
@@ -956,15 +1015,6 @@
     };
 }
 
-nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
-    if (duration < nn::Duration::zero()) {
-        return NN_ERROR() << "Unable to convert invalid (negative) duration";
-    }
-    constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
-    const auto count = duration.count();
-    return static_cast<int64_t>(std::min(count, kIntMax));
-}
-
 nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration) {
     if (!optionalDuration.has_value()) {
         return kNoTiming;
@@ -989,6 +1039,23 @@
     return ndk::ScopedFileDescriptor(duplicatedFd.release());
 }
 
+nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
+    return Capabilities{
+            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
+                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
+                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
+            .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)),
+            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
+            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+    };
+}
+
+nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
+    return Extension{.name = extension.name,
+                     .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
+}
+
 nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
     return validatedConvert(cacheToken);
 }
@@ -997,6 +1064,10 @@
     return validatedConvert(bufferDesc);
 }
 
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
+    return validatedConvert(deviceType);
+}
+
 nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming) {
     return validatedConvert(measureTiming);
 }
@@ -1037,6 +1108,14 @@
     return validatedConvert(outputShapes);
 }
 
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
+    return validatedConvert(capabilities);
+}
+
+nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
+    return validatedConvert(extension);
+}
+
 nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
     return validatedConvert(bufferRoles);
 }
@@ -1056,6 +1135,10 @@
     return validatedConvert(syncFences);
 }
 
+nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
+    return validatedConvert(extensions);
+}
+
 nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
     if (!std::all_of(vec.begin(), vec.end(),
                      [](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {
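A corresponding sketch (hypothetical, not in this patch) of the
canonical-to-AIDL direction, mirroring how Device.cpp below uses these
overloads; the validatedConvert helpers check version compliance before
converting:

    #include <nnapi/IDevice.h>
    #include <nnapi/hal/aidl/Conversions.h>

    namespace aidl::android::hardware::neuralnetworks::utils {

    // Assumes `device` is a valid ::android::nn::SharedDevice.
    void describeDevice(const ::android::nn::SharedDevice& device) {
        auto capabilities = convert(device->getCapabilities());  // GeneralResult<Capabilities>
        auto type = convert(device->getType());                  // GeneralResult<DeviceType>
        auto extensions = convert(device->getSupportedExtensions());
        (void)capabilities;
        (void)type;
        (void)extensions;
    }

    }  // namespace aidl::android::hardware::neuralnetworks::utils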
diff --git a/neuralnetworks/utils/adapter/aidl/Android.bp b/neuralnetworks/utils/adapter/aidl/Android.bp
new file mode 100644
index 0000000..8269a3d
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/Android.bp
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "hardware_interfaces_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["hardware_interfaces_license"],
+}
+
+cc_library_static {
+    name: "neuralnetworks_utils_hal_adapter_aidl",
+    defaults: [
+        "neuralnetworks_use_latest_utils_hal_aidl",
+        "neuralnetworks_utils_defaults",
+    ],
+    srcs: ["src/*"],
+    local_include_dirs: ["include/nnapi/hal/aidl/"],
+    export_include_dirs: ["include"],
+    static_libs: [
+        "neuralnetworks_types",
+        "neuralnetworks_utils_hal_common",
+    ],
+    shared_libs: [
+        "libbinder_ndk",
+    ],
+}
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
similarity index 63%
copy from neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
copy to neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
index da00a09..4c0b328 100644
--- a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,20 +14,20 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
 
-#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
 #include <nnapi/IDevice.h>
 #include <nnapi/Types.h>
-#include <sys/types.h>
+
 #include <functional>
 #include <memory>
 
-// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
-// lifetimes across processes and for protecting asynchronous calls across HIDL.
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
 
-namespace android::hardware::neuralnetworks::adapter {
+namespace aidl::android::hardware::neuralnetworks::adapter {
 
 /**
  * A self-contained unit of work to be executed.
@@ -37,25 +37,26 @@
 /**
  * A type-erased executor which executes a task asynchronously.
  *
- * This executor is also provided with an Application ID (Android User ID) and an optional deadline
- * for when the caller expects is the upper bound for the amount of time to complete the task.
+ * This executor is also provided an optional deadline, which the caller expects to be the upper
+ * bound on the time needed to complete the task. If needed, the Executor can retrieve the
+ * Application ID (Android User ID) by calling AIBinder_getCallingUid in android/binder_ibinder.h.
  */
-using Executor = std::function<void(Task, uid_t, nn::OptionalTimePoint)>;
+using Executor = std::function<void(Task, ::android::nn::OptionalTimePoint)>;
 
 /**
- * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
+ * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
  *
  * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
  * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
  * @param executor Type-erased executor to handle executing tasks asynchronously.
- * @return HIDL NN HAL IDevice interface object.
+ * @return AIDL NN HAL IDevice interface object.
  */
-sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor);
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor);
 
 /**
- * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
+ * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
  *
  * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
  * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
@@ -63,10 +64,10 @@
  * This function uses a default executor, which will execute tasks from a detached thread.
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
- * @return HIDL NN HAL IDevice interface object.
+ * @return AIDL NN HAL IDevice interface object.
  */
-sp<V1_3::IDevice> adapt(nn::SharedDevice device);
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device);
 
-}  // namespace android::hardware::neuralnetworks::adapter
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_ADAPTER_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_ADAPTER_H
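A hypothetical usage sketch (not part of this patch) of adapt() with a custom
executor; Task is the callable alias declared earlier in this header, and
service registration is only indicated in the trailing comment:

    #include <nnapi/IDevice.h>
    #include <nnapi/Types.h>
    #include <nnapi/hal/aidl/Adapter.h>

    #include <utility>

    void serveDevice(::android::nn::SharedDevice device) {
        using namespace aidl::android::hardware::neuralnetworks::adapter;

        // Run tasks inline rather than on a detached thread; this executor
        // ignores the optional deadline.
        Executor inlineExecutor = [](Task task,
                                     ::android::nn::OptionalTimePoint /*deadline*/) {
            task();
        };
        auto aidlDevice = adapt(std::move(device), std::move(inlineExecutor));
        // aidlDevice->asBinder() could then be registered with the service
        // manager (e.g. via AServiceManager_addService).
    }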
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h
new file mode 100644
index 0000000..701e43e
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Buffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBuffer.h>
+
+#include <memory>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IBuffer to BnBuffer.
+class Buffer : public BnBuffer {
+  public:
+    explicit Buffer(::android::nn::SharedBuffer buffer);
+
+    ndk::ScopedAStatus copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) override;
+    ndk::ScopedAStatus copyTo(const Memory& dst) override;
+
+  private:
+    const ::android::nn::SharedBuffer kBuffer;
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BUFFER_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
new file mode 100644
index 0000000..f2687c4
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android-base/thread_annotations.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::Burst to BnBurst.
+class Burst : public BnBurst {
+  public:
+    // Precondition: burst != nullptr
+    explicit Burst(::android::nn::SharedBurst burst);
+
+    ndk::ScopedAStatus executeSynchronously(const Request& request,
+                                            const std::vector<int64_t>& memoryIdentifierTokens,
+                                            bool measureTiming, int64_t deadlineNs,
+                                            int64_t loopTimeoutDurationNs,
+                                            ExecutionResult* executionResult) override;
+    ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override;
+
+    class ThreadSafeMemoryCache {
+      public:
+        using Value =
+                std::pair<::android::nn::SharedMemory, ::android::nn::IBurst::OptionalCacheHold>;
+
+        Value add(int64_t token, const ::android::nn::SharedMemory& memory,
+                  const ::android::nn::IBurst& burst) const;
+        void remove(int64_t token) const;
+
+      private:
+        mutable std::mutex mMutex;
+        mutable std::unordered_map<int64_t, Value> mCache GUARDED_BY(mMutex);
+    };
+
+  private:
+    const ::android::nn::SharedBurst kBurst;
+    const ThreadSafeMemoryCache kMemoryCache;
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_BURST_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
new file mode 100644
index 0000000..aa29d63
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
+
+#include "nnapi/hal/aidl/Adapter.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IDevice.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IDevice to BnDevice.
+class Device : public BnDevice {
+  public:
+    Device(::android::nn::SharedDevice device, Executor executor);
+
+    ndk::ScopedAStatus allocate(const BufferDesc& desc,
+                                const std::vector<IPreparedModelParcel>& preparedModels,
+                                const std::vector<BufferRole>& inputRoles,
+                                const std::vector<BufferRole>& outputRoles,
+                                DeviceBuffer* buffer) override;
+    ndk::ScopedAStatus getCapabilities(Capabilities* capabilities) override;
+    ndk::ScopedAStatus getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) override;
+    ndk::ScopedAStatus getSupportedExtensions(std::vector<Extension>* extensions) override;
+    ndk::ScopedAStatus getSupportedOperations(const Model& model,
+                                              std::vector<bool>* supported) override;
+    ndk::ScopedAStatus getType(DeviceType* deviceType) override;
+    ndk::ScopedAStatus getVersionString(std::string* version) override;
+    ndk::ScopedAStatus prepareModel(
+            const Model& model, ExecutionPreference preference, Priority priority,
+            int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+            const std::vector<uint8_t>& token,
+            const std::shared_ptr<IPreparedModelCallback>& callback) override;
+    ndk::ScopedAStatus prepareModelFromCache(
+            int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+            const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+            const std::vector<uint8_t>& token,
+            const std::shared_ptr<IPreparedModelCallback>& callback) override;
+
+  protected:
+    const ::android::nn::SharedDevice kDevice;
+    const Executor kExecutor;
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_DEVICE_H
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
new file mode 100644
index 0000000..93e0427
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
+
+#include "nnapi/hal/aidl/Adapter.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+// Class that adapts nn::IPreparedModel to BnPreparedModel.
+class PreparedModel : public BnPreparedModel {
+  public:
+    explicit PreparedModel(::android::nn::SharedPreparedModel preparedModel);
+
+    ndk::ScopedAStatus executeSynchronously(const Request& request, bool measureTiming,
+                                            int64_t deadlineNs, int64_t loopTimeoutDurationNs,
+                                            ExecutionResult* executionResult) override;
+    ndk::ScopedAStatus executeFenced(const Request& request,
+                                     const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+                                     bool measureTiming, int64_t deadlineNs,
+                                     int64_t loopTimeoutDurationNs, int64_t durationNs,
+                                     FencedExecutionResult* executionResult) override;
+    ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;
+
+    ::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;
+
+  protected:
+    const ::android::nn::SharedPreparedModel kPreparedModel;
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_ADAPTER_AIDL_PREPARED_MODEL_H
diff --git a/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp b/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp
new file mode 100644
index 0000000..d0b56e8
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Adapter.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Adapter.h"
+
+#include "Device.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <android/binder_interface_utils.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <thread>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor executor) {
+    return ndk::SharedRefBase::make<Device>(std::move(device), std::move(executor));
+}
+
+std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device) {
+    Executor defaultExecutor = [](Task task, ::android::nn::OptionalTimePoint /*deadline*/) {
+        std::thread(std::move(task)).detach();
+    };
+    return adapt(std::move(device), std::move(defaultExecutor));
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp b/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp
new file mode 100644
index 0000000..c15ab65
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Buffer.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/Memory.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+nn::GeneralResult<std::vector<uint32_t>> inputToUnsigned(const std::vector<int32_t>& dims) {
+    auto result = nn::toUnsigned(dims);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+nn::GeneralResult<void> copyTo(const nn::IBuffer& buffer, const Memory& dst) {
+    const auto nnDst = NN_TRY(convertInput(dst));
+    return buffer.copyTo(nnDst);
+}
+
+nn::GeneralResult<void> copyFrom(const nn::IBuffer& buffer, const Memory& src,
+                                 const std::vector<int32_t>& dimensions) {
+    const auto nnSrc = NN_TRY(convertInput(src));
+    const auto nnDims = NN_TRY(inputToUnsigned(dimensions));
+    return buffer.copyFrom(nnSrc, nnDims);
+}
+
+}  // namespace
+
+Buffer::Buffer(nn::SharedBuffer buffer) : kBuffer(std::move(buffer)) {
+    CHECK(kBuffer != nullptr);
+}
+
+ndk::ScopedAStatus Buffer::copyTo(const Memory& dst) {
+    const auto result = adapter::copyTo(*kBuffer, dst);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Buffer::copyFrom(const Memory& src, const std::vector<int32_t>& dimensions) {
+    const auto result = adapter::copyFrom(*kBuffer, src, dimensions);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
new file mode 100644
index 0000000..4fabb20
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Burst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <algorithm>
+#include <chrono>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+#include <utility>
+#include <variant>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+using Value = Burst::ThreadSafeMemoryCache::Value;
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+nn::Duration makeDuration(int64_t durationNs) {
+    return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
+    if (durationNs < -1) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
+    }
+    return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+    if (durationNs < -1) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+    }
+    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+std::vector<nn::IBurst::OptionalCacheHold> ensureAllMemoriesAreCached(
+        nn::Request* request, const std::vector<int64_t>& memoryIdentifierTokens,
+        const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache) {
+    std::vector<nn::IBurst::OptionalCacheHold> holds;
+    holds.reserve(memoryIdentifierTokens.size());
+
+    for (size_t i = 0; i < memoryIdentifierTokens.size(); ++i) {
+        const auto& pool = request->pools[i];
+        const auto token = memoryIdentifierTokens[i];
+        constexpr int64_t kNoToken = -1;
+        if (token == kNoToken || !std::holds_alternative<nn::SharedMemory>(pool)) {
+            continue;
+        }
+
+        const auto& memory = std::get<nn::SharedMemory>(pool);
+        auto [storedMemory, hold] = cache.add(token, memory, burst);
+
+        request->pools[i] = std::move(storedMemory);
+        holds.push_back(std::move(hold));
+    }
+
+    return holds;
+}
+
+nn::ExecutionResult<ExecutionResult> executeSynchronously(
+        const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache, const Request& request,
+        const std::vector<int64_t>& memoryIdentifierTokens, bool measureTiming, int64_t deadlineNs,
+        int64_t loopTimeoutDurationNs) {
+    if (request.pools.size() != memoryIdentifierTokens.size()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+               << "request.pools.size() != memoryIdentifierTokens.size()";
+    }
+    if (!std::all_of(memoryIdentifierTokens.begin(), memoryIdentifierTokens.end(),
+                     [](int64_t token) { return token >= -1; })) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid memoryIdentifierTokens";
+    }
+
+    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+
+    const auto hold = ensureAllMemoriesAreCached(&nnRequest, memoryIdentifierTokens, burst, cache);
+
+    const auto result =
+            burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+
+    if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        const auto& [message, code, outputShapes] = result.error();
+        return ExecutionResult{.outputSufficientSize = false,
+                               .outputShapes = utils::convert(outputShapes).value(),
+                               .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
+    }
+
+    const auto& [outputShapes, timing] = NN_TRY(result);
+    return ExecutionResult{.outputSufficientSize = true,
+                           .outputShapes = utils::convert(outputShapes).value(),
+                           .timing = utils::convert(timing).value()};
+}
+
+}  // namespace
+
+Value Burst::ThreadSafeMemoryCache::add(int64_t token, const nn::SharedMemory& memory,
+                                        const nn::IBurst& burst) const {
+    std::lock_guard guard(mMutex);
+    if (const auto it = mCache.find(token); it != mCache.end()) {
+        return it->second;
+    }
+    auto hold = burst.cacheMemory(memory);
+    auto [it, _] = mCache.emplace(token, std::make_pair(memory, std::move(hold)));
+    return it->second;
+}
+
+void Burst::ThreadSafeMemoryCache::remove(int64_t token) const {
+    std::lock_guard guard(mMutex);
+    mCache.erase(token);
+}
+
+Burst::Burst(nn::SharedBurst burst) : kBurst(std::move(burst)) {
+    CHECK(kBurst != nullptr);
+}
+
+ndk::ScopedAStatus Burst::executeSynchronously(const Request& request,
+                                               const std::vector<int64_t>& memoryIdentifierTokens,
+                                               bool measureTiming, int64_t deadlineNs,
+                                               int64_t loopTimeoutDurationNs,
+                                               ExecutionResult* executionResult) {
+    auto result =
+            adapter::executeSynchronously(*kBurst, kMemoryCache, request, memoryIdentifierTokens,
+                                          measureTiming, deadlineNs, loopTimeoutDurationNs);
+    if (!result.has_value()) {
+        auto [message, code, _] = std::move(result).error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Burst::releaseMemoryResource(int64_t memoryIdentifierToken) {
+    if (memoryIdentifierToken < -1) {
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(ErrorStatus::INVALID_ARGUMENT),
+                "Invalid memoryIdentifierToken");
+    }
+    kMemoryCache.remove(memoryIdentifierToken);
+    return ndk::ScopedAStatus::ok();
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
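A hypothetical client-side sketch (not in this patch) of the token protocol
that executeSynchronously() above expects: pools[i] is cached under
memoryIdentifierTokens[i], and a token of -1 opts out of caching:

    #include <aidl/android/hardware/neuralnetworks/IBurst.h>

    #include <memory>
    #include <vector>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    void runOnce(const std::shared_ptr<nnhal::IBurst>& burst,
                 const nnhal::Request& request) {
        // One token per request pool; -1 means "do not cache this memory".
        std::vector<int64_t> tokens(request.pools.size(), -1);
        if (!tokens.empty()) {
            tokens[0] = 42;  // cache pool 0 under token 42 for reuse
        }
        nnhal::ExecutionResult result;
        burst->executeSynchronously(request, tokens, /*measureTiming=*/false,
                                    /*deadlineNs=*/-1,
                                    /*loopTimeoutDurationNs=*/-1, &result);
        burst->releaseMemoryResource(42);  // drop the cached entry when done
    }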
diff --git a/neuralnetworks/utils/adapter/aidl/src/Device.cpp b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
new file mode 100644
index 0000000..763be7f
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Adapter.h"
+#include "Buffer.h"
+#include "PreparedModel.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnDevice.h>
+#include <aidl/android/hardware/neuralnetworks/BufferDesc.h>
+#include <aidl/android/hardware/neuralnetworks/BufferRole.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h>
+#include <aidl/android/hardware/neuralnetworks/DeviceType.h>
+#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h>
+#include <aidl/android/hardware/neuralnetworks/Extension.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
+#include <aidl/android/hardware/neuralnetworks/Model.h>
+#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/Priority.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <android/binder_interface_utils.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/aidl/Conversions.h>
+
+#include <chrono>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
+nn::Duration makeDuration(int64_t durationNs) {
+    return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+    if (durationNs < -1) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+    }
+    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+nn::GeneralResult<nn::CacheToken> convertCacheToken(const std::vector<uint8_t>& token) {
+    nn::CacheToken nnToken;
+    if (token.size() != nnToken.size()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid token";
+    }
+    std::copy(token.begin(), token.end(), nnToken.begin());
+    return nnToken;
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> downcast(const IPreparedModelParcel& preparedModel) {
+    if (preparedModel.preparedModel == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
+    }
+    if (preparedModel.preparedModel->isRemote()) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
+    }
+
+    // This static_cast is safe because adapter::PreparedModel is the only class that implements
+    // the IPreparedModel interface in the adapter service code.
+    const auto* casted = static_cast<const PreparedModel*>(preparedModel.preparedModel.get());
+    return casted->getUnderlyingPreparedModel();
+}
+
+nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
+        const std::vector<IPreparedModelParcel>& preparedModels) {
+    std::vector<nn::SharedPreparedModel> canonical;
+    canonical.reserve(preparedModels.size());
+    for (const auto& preparedModel : preparedModels) {
+        canonical.push_back(NN_TRY(downcast(preparedModel)));
+    }
+    return canonical;
+}
+
+nn::GeneralResult<DeviceBuffer> allocate(const nn::IDevice& device, const BufferDesc& desc,
+                                         const std::vector<IPreparedModelParcel>& preparedModels,
+                                         const std::vector<BufferRole>& inputRoles,
+                                         const std::vector<BufferRole>& outputRoles) {
+    auto nnDesc = NN_TRY(convertInput(desc));
+    auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
+    auto nnInputRoles = NN_TRY(convertInput(inputRoles));
+    auto nnOutputRoles = NN_TRY(convertInput(outputRoles));
+
+    auto buffer = NN_TRY(device.allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
+    CHECK(buffer != nullptr);
+
+    const nn::Request::MemoryDomainToken token = buffer->getToken();
+    auto aidlBuffer = ndk::SharedRefBase::make<Buffer>(std::move(buffer));
+    return DeviceBuffer{.buffer = std::move(aidlBuffer), .token = static_cast<int32_t>(token)};
+}
+
+nn::GeneralResult<std::vector<bool>> getSupportedOperations(const nn::IDevice& device,
+                                                            const Model& model) {
+    const auto nnModel = NN_TRY(convertInput(model));
+    return device.getSupportedOperations(nnModel);
+}
+
+using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
+
+std::shared_ptr<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
+    if (preparedModel == nullptr) {
+        return nullptr;
+    }
+    return ndk::SharedRefBase::make<PreparedModel>(std::move(preparedModel));
+}
+
+void notify(IPreparedModelCallback* callback, PrepareModelResult result) {
+    if (!result.has_value()) {
+        const auto& [message, status] = result.error();
+        LOG(ERROR) << message;
+        const auto aidlCode = utils::convert(status).value_or(ErrorStatus::GENERAL_FAILURE);
+        callback->notify(aidlCode, nullptr);
+    } else {
+        auto preparedModel = std::move(result).value();
+        auto aidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
+        callback->notify(ErrorStatus::NONE, std::move(aidlPreparedModel));
+    }
+}
+
+nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
+                                     const Model& model, ExecutionPreference preference,
+                                     Priority priority, int64_t deadlineNs,
+                                     const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+                                     const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+                                     const std::vector<uint8_t>& token,
+                                     const std::shared_ptr<IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    auto nnModel = NN_TRY(convertInput(model));
+    const auto nnPreference = NN_TRY(convertInput(preference));
+    const auto nnPriority = NN_TRY(convertInput(priority));
+    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = NN_TRY(convertCacheToken(token));
+
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
+                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+                 nnToken, callback] {
+        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
+                                           nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result));
+    };
+    executor(std::move(task), nnDeadline);
+
+    return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache(
+        const nn::SharedDevice& device, const Executor& executor, int64_t deadlineNs,
+        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+        const std::shared_ptr<IPreparedModelCallback>& callback) {
+    if (callback.get() == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+    }
+
+    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+    auto nnModelCache = NN_TRY(convertInput(modelCache));
+    auto nnDataCache = NN_TRY(convertInput(dataCache));
+    const auto nnToken = NN_TRY(convertCacheToken(token));
+
+    auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
+        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
+        notify(callback.get(), std::move(result));
+    };
+    executor(std::move(task), nnDeadline);
+
+    return {};
+}
+
+}  // namespace
+
+Device::Device(::android::nn::SharedDevice device, Executor executor)
+    : kDevice(std::move(device)), kExecutor(std::move(executor)) {
+    CHECK(kDevice != nullptr);
+    CHECK(kExecutor != nullptr);
+}
+
+ndk::ScopedAStatus Device::allocate(const BufferDesc& desc,
+                                    const std::vector<IPreparedModelParcel>& preparedModels,
+                                    const std::vector<BufferRole>& inputRoles,
+                                    const std::vector<BufferRole>& outputRoles,
+                                    DeviceBuffer* buffer) {
+    auto result = adapter::allocate(*kDevice, desc, preparedModels, inputRoles, outputRoles);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *buffer = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getCapabilities(Capabilities* capabilities) {
+    *capabilities = utils::convert(kDevice->getCapabilities()).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) {
+    const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
+    *numberOfCacheFiles = NumberOfCacheFiles{.numModelCache = static_cast<int32_t>(numModelCache),
+                                             .numDataCache = static_cast<int32_t>(numDataCache)};
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getSupportedExtensions(std::vector<Extension>* extensions) {
+    *extensions = utils::convert(kDevice->getSupportedExtensions()).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getSupportedOperations(const Model& model,
+                                                  std::vector<bool>* supported) {
+    auto result = adapter::getSupportedOperations(*kDevice, model);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *supported = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getType(DeviceType* deviceType) {
+    *deviceType = utils::convert(kDevice->getType()).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::getVersionString(std::string* version) {
+    *version = kDevice->getVersionString();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::prepareModel(const Model& model, ExecutionPreference preference,
+                                        Priority priority, int64_t deadlineNs,
+                                        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+                                        const std::vector<ndk::ScopedFileDescriptor>& dataCache,
+                                        const std::vector<uint8_t>& token,
+                                        const std::shared_ptr<IPreparedModelCallback>& callback) {
+    const auto result = adapter::prepareModel(kDevice, kExecutor, model, preference, priority,
+                                              deadlineNs, modelCache, dataCache, token, callback);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        callback->notify(aidlCode, nullptr);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Device::prepareModelFromCache(
+        int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+        const std::shared_ptr<IPreparedModelCallback>& callback) {
+    const auto result = adapter::prepareModelFromCache(kDevice, kExecutor, deadlineNs, modelCache,
+                                                       dataCache, token, callback);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        callback->notify(aidlCode, nullptr);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
new file mode 100644
index 0000000..71ed1a8
--- /dev/null
+++ b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Burst.h"
+
+#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
+#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/FencedExecutionResult.h>
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/aidl/Conversions.h>
+#include <nnapi/hal/aidl/Utils.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace aidl::android::hardware::neuralnetworks::adapter {
+namespace {
+
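+// Adapts an nn::ExecuteFencedInfoCallback, which reports the timing of a completed fenced
+// execution, to the AIDL IFencedExecutionCallback interface.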
+class FencedExecutionCallback : public BnFencedExecutionCallback {
+  public:
+    FencedExecutionCallback(nn::ExecuteFencedInfoCallback callback)
+        : kCallback(std::move(callback)) {}
+
+    ndk::ScopedAStatus getExecutionInfo(Timing* timingLaunched, Timing* timingFenced,
+                                        ErrorStatus* errorStatus) override {
+        const auto result = kCallback();
+        if (result.ok()) {
+            const auto& [nnTimingLaunched, nnTimingFenced] = result.value();
+            *timingLaunched = utils::convert(nnTimingLaunched).value();
+            *timingFenced = utils::convert(nnTimingFenced).value();
+            *errorStatus = ErrorStatus::NONE;
+        } else {
+            constexpr auto kNoTiming = Timing{.timeOnDeviceNs = -1, .timeInDriverNs = -1};
+            const auto& [message, code] = result.error();
+            LOG(ERROR) << "getExecutionInfo failed with " << code << ": " << message;
+            const auto aidlStatus = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+            *timingLaunched = kNoTiming;
+            *timingFenced = kNoTiming;
+            *errorStatus = aidlStatus;
+        }
+        return ndk::ScopedAStatus::ok();
+    }
+
+  private:
+    const nn::ExecuteFencedInfoCallback kCallback;
+};
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+    auto result = nn::convert(object);
+    if (!result.has_value()) {
+        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+    }
+    return result;
+}
+
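+// Converts the AIDL wait-for fence file descriptors into canonical SyncFence objects,
+// rejecting any handle that is null or does not hold a valid file descriptor.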
+nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
+        const std::vector<ndk::ScopedFileDescriptor>& waitFor) {
+    auto handles = NN_TRY(convertInput(waitFor));
+
+    constexpr auto valid = [](const nn::SharedHandle& handle) {
+        return handle != nullptr && handle->ok();
+    };
+    if (!std::all_of(handles.begin(), handles.end(), valid)) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid sync fence";
+    }
+
+    std::vector<nn::SyncFence> syncFences;
+    syncFences.reserve(waitFor.size());
+    for (auto& handle : handles) {
+        syncFences.push_back(nn::SyncFence::create(std::move(handle)).value());
+    }
+    return syncFences;
+}
+
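+// In the AIDL HAL, durations and deadlines are passed as int64_t nanoseconds, where -1
+// denotes "no value" and anything below -1 is invalid.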
+nn::Duration makeDuration(int64_t durationNs) {
+    return nn::Duration(std::chrono::nanoseconds(durationNs));
+}
+
+nn::GeneralResult<nn::OptionalDuration> makeOptionalDuration(int64_t durationNs) {
+    if (durationNs < -1) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid duration " << durationNs;
+    }
+    return durationNs < 0 ? nn::OptionalDuration{} : makeDuration(durationNs);
+}
+
+nn::GeneralResult<nn::OptionalTimePoint> makeOptionalTimePoint(int64_t durationNs) {
+    if (durationNs < -1) {
+        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid time point " << durationNs;
+    }
+    return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
+}
+
+nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IPreparedModel& preparedModel,
+                                                          const Request& request,
+                                                          bool measureTiming, int64_t deadlineNs,
+                                                          int64_t loopTimeoutDurationNs) {
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+
+    const auto result =
+            preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+
+    if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        const auto& [message, code, outputShapes] = result.error();
+        LOG(ERROR) << "executeSynchronously failed with " << code << ": " << message;
+        return ExecutionResult{.outputSufficientSize = false,
+                               .outputShapes = utils::convert(outputShapes).value(),
+                               .timing = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}};
+    }
+
+    const auto& [outputShapes, timing] = NN_TRY(result);
+    return ExecutionResult{.outputSufficientSize = true,
+                           .outputShapes = utils::convert(outputShapes).value(),
+                           .timing = utils::convert(timing).value()};
+}
+
+nn::GeneralResult<FencedExecutionResult> executeFenced(
+        const nn::IPreparedModel& preparedModel, const Request& request,
+        const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measureTiming,
+        int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
+    const auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
+    const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
+    const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
+    const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+    const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));
+
+    auto [syncFence, executeFencedInfoCallback] = NN_TRY(preparedModel.executeFenced(
+            nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+
+    ndk::ScopedFileDescriptor fileDescriptor;
+    if (syncFence.hasFd()) {
+        auto uniqueFd = NN_TRY(nn::dupFd(syncFence.getFd()));
+        fileDescriptor = ndk::ScopedFileDescriptor(uniqueFd.release());
+    }
+
+    return FencedExecutionResult{.callback = ndk::SharedRefBase::make<FencedExecutionCallback>(
+                                         std::move(executeFencedInfoCallback)),
+                                 .syncFence = std::move(fileDescriptor)};
+}
+
+}  // namespace
+
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
+    : kPreparedModel(std::move(preparedModel)) {
+    CHECK(kPreparedModel != nullptr);
+}
+
+ndk::ScopedAStatus PreparedModel::executeSynchronously(const Request& request, bool measureTiming,
+                                                       int64_t deadlineNs,
+                                                       int64_t loopTimeoutDurationNs,
+                                                       ExecutionResult* executionResult) {
+    auto result = adapter::executeSynchronously(*kPreparedModel, request, measureTiming, deadlineNs,
+                                                loopTimeoutDurationNs);
+    if (!result.has_value()) {
+        const auto& [message, code, _] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeFenced(
+        const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+        bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
+        FencedExecutionResult* executionResult) {
+    auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, measureTiming,
+                                         deadlineNs, loopTimeoutDurationNs, durationNs);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::configureExecutionBurst(std::shared_ptr<IBurst>* burst) {
+    auto result = kPreparedModel->configureExecutionBurst();
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *burst = ndk::SharedRefBase::make<Burst>(std::move(result).value());
+    return ndk::ScopedAStatus::ok();
+}
+
+nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
+    return kPreparedModel;
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/Android.bp b/neuralnetworks/utils/adapter/hidl/Android.bp
similarity index 100%
rename from neuralnetworks/utils/adapter/Android.bp
rename to neuralnetworks/utils/adapter/hidl/Android.bp
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
similarity index 86%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
index da00a09..6fba4ab 100644
--- a/neuralnetworks/utils/adapter/include/nnapi/hal/Adapter.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
@@ -20,7 +20,6 @@
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
 #include <nnapi/IDevice.h>
 #include <nnapi/Types.h>
-#include <sys/types.h>
 #include <functional>
 #include <memory>
 
@@ -37,10 +36,12 @@
 /**
  * A type-erased executor which executes a task asynchronously.
  *
- * This executor is also provided with an Application ID (Android User ID) and an optional deadline
- * for when the caller expects is the upper bound for the amount of time to complete the task.
+ * This executor is also provided an optional deadline, which the caller expects to be the
+ * upper bound on the amount of time needed to complete the task. If needed, the Executor can
+ * retrieve the Application ID (Android User ID) by calling
+ * IPCThreadState::self()->getCallingUid(), declared in hwbinder/IPCThreadState.h.
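+ *
+ * A minimal sketch of a conforming Executor (illustrative only; the default executor
+ * created in Adapter.cpp detaches a new thread per task instead):
+ *
+ *     Executor inlineExecutor = [](Task task, nn::OptionalTimePoint) {
+ *         task();  // run the task synchronously; this sketch ignores the deadline
+ *     };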
  */
-using Executor = std::function<void(Task, uid_t, nn::OptionalTimePoint)>;
+using Executor = std::function<void(Task, nn::OptionalTimePoint)>;
 
 /**
  * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Buffer.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Buffer.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Buffer.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Burst.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Burst.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Burst.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/Device.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Device.h
similarity index 100%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/Device.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Device.h
diff --git a/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
similarity index 98%
rename from neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h
rename to neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
index 65763b8..9482b0d 100644
--- a/neuralnetworks/utils/adapter/include/nnapi/hal/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
@@ -39,7 +39,7 @@
 // Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
 class PreparedModel final : public V1_3::IPreparedModel {
   public:
-    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId);
+    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor);
 
     Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
                                       const sp<V1_0::IExecutionCallback>& callback) override;
@@ -71,7 +71,6 @@
   private:
     const nn::SharedPreparedModel kPreparedModel;
     const Executor kExecutor;
-    const uid_t kUserId;
 };
 
 }  // namespace android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/src/Adapter.cpp b/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
similarity index 92%
rename from neuralnetworks/utils/adapter/src/Adapter.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
index d6f53f0..782e815 100644
--- a/neuralnetworks/utils/adapter/src/Adapter.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
@@ -21,7 +21,6 @@
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
 #include <nnapi/IDevice.h>
 #include <nnapi/Types.h>
-#include <sys/types.h>
 
 #include <functional>
 #include <memory>
@@ -37,7 +36,7 @@
 }
 
 sp<V1_3::IDevice> adapt(nn::SharedDevice device) {
-    Executor defaultExecutor = [](Task task, uid_t /*uid*/, nn::OptionalTimePoint /*deadline*/) {
+    Executor defaultExecutor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
         std::thread(std::move(task)).detach();
     };
     return adapt(std::move(device), std::move(defaultExecutor));
diff --git a/neuralnetworks/utils/adapter/src/Buffer.cpp b/neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Buffer.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
diff --git a/neuralnetworks/utils/adapter/src/Burst.cpp b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
similarity index 100%
rename from neuralnetworks/utils/adapter/src/Burst.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Burst.cpp
diff --git a/neuralnetworks/utils/adapter/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
similarity index 92%
rename from neuralnetworks/utils/adapter/src/Device.cpp
rename to neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 96142c3..4993a80 100644
--- a/neuralnetworks/utils/adapter/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -28,7 +28,6 @@
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
-#include <hwbinder/IPCThreadState.h>
 #include <nnapi/IBuffer.h>
 #include <nnapi/IDevice.h>
 #include <nnapi/IPreparedModel.h>
@@ -43,7 +42,6 @@
 #include <nnapi/hal/1.2/Utils.h>
 #include <nnapi/hal/1.3/Conversions.h>
 #include <nnapi/hal/1.3/Utils.h>
-#include <sys/types.h>
 
 #include <memory>
 
@@ -64,12 +62,11 @@
 
 using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
 
-sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor,
-                                     uid_t userId) {
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
     if (preparedModel == nullptr) {
         return nullptr;
     }
-    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor), userId);
+    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
 }
 
 void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -108,15 +105,14 @@
 }
 
 template <typename CallbackType>
-void notify(CallbackType* callback, PrepareModelResult result, Executor executor, uid_t userId) {
+void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
     if (!result.has_value()) {
         const auto [message, status] = std::move(result).error();
         LOG(ERROR) << message;
         notify(callback, status, nullptr);
     } else {
         auto preparedModel = std::move(result).value();
-        auto hidlPreparedModel =
-                adaptPreparedModel(std::move(preparedModel), std::move(executor), userId);
+        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
         notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
     }
 }
@@ -137,13 +133,12 @@
 
     auto nnModel = NN_TRY(convertInput(model));
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
-    Task task = [device, nnModel = std::move(nnModel), userId, executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
         auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                            nn::Priority::DEFAULT, {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
@@ -159,13 +154,12 @@
     auto nnModel = NN_TRY(convertInput(model));
     const auto nnPreference = NN_TRY(convertInput(preference));
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
-    Task task = [device, nnModel = std::move(nnModel), nnPreference, userId, executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
         auto result =
                 device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
@@ -187,15 +181,14 @@
     auto nnDataCache = NN_TRY(convertInput(dataCache));
     const auto nnToken = nn::CacheToken(token);
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
     Task task = [device, nnModel = std::move(nnModel), nnPreference,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, userId, executor, callback] {
+                 nnToken, executor, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                            nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
@@ -218,15 +211,14 @@
     auto nnDataCache = NN_TRY(convertInput(dataCache));
     const auto nnToken = nn::CacheToken(token);
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
     Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, userId, executor, callback] {
+                 nnToken, executor, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                            nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, nnDeadline);
+    executor(std::move(task), nnDeadline);
 
     return {};
 }
@@ -245,13 +237,12 @@
     auto nnDataCache = NN_TRY(convertInput(dataCache));
     const auto nnToken = nn::CacheToken(token);
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
     Task task = [device, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
         auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
@@ -270,13 +261,12 @@
     auto nnDataCache = NN_TRY(convertInput(dataCache));
     const auto nnToken = nn::CacheToken(token);
 
-    const uid_t userId = hardware::IPCThreadState::self()->getCallingUid();
     auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, userId, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
         auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor, userId);
+        notify(callback.get(), std::move(result), executor);
     };
-    executor(std::move(task), userId, nnDeadline);
+    executor(std::move(task), nnDeadline);
 
     return {};
 }
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
similarity index 96%
rename from neuralnetworks/utils/adapter/src/PreparedModel.cpp
rename to neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
index a14e782..71060d5 100644
--- a/neuralnetworks/utils/adapter/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -28,7 +28,6 @@
 #include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
-#include <hwbinder/IPCThreadState.h>
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
@@ -37,7 +36,6 @@
 #include <nnapi/hal/1.2/Utils.h>
 #include <nnapi/hal/1.3/Conversions.h>
 #include <nnapi/hal/1.3/Utils.h>
-#include <sys/types.h>
 
 #include <memory>
 #include <thread>
@@ -145,7 +143,7 @@
     }
 }
 
-nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
                                 const Executor& executor, const V1_0::Request& request,
                                 const sp<V1_0::IExecutionCallback>& callback) {
     if (callback.get() == nullptr) {
@@ -164,12 +162,12 @@
         auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
         notify(callback.get(), std::move(result));
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
 
-nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
                                     const Executor& executor, const V1_0::Request& request,
                                     V1_2::MeasureTiming measure,
                                     const sp<V1_2::IExecutionCallback>& callback) {
@@ -190,12 +188,12 @@
         auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
         notify(callback.get(), std::move(result));
     };
-    executor(std::move(task), userId, {});
+    executor(std::move(task), {});
 
     return {};
 }
 
-nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel, uid_t userId,
+nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
                                     const Executor& executor, const V1_3::Request& request,
                                     V1_2::MeasureTiming measure,
                                     const V1_3::OptionalTimePoint& deadline,
@@ -222,7 +220,7 @@
                 preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
         notify(callback.get(), std::move(result));
     };
-    executor(std::move(task), userId, nnDeadline);
+    executor(std::move(task), nnDeadline);
 
     return {};
 }
@@ -305,8 +303,8 @@
 
 }  // namespace
 
-PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor, uid_t userId)
-    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)), kUserId(userId) {
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
+    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
     CHECK(kPreparedModel != nullptr);
     CHECK(kExecutor != nullptr);
 }
@@ -317,7 +315,7 @@
 
 Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
                                                  const sp<V1_0::IExecutionCallback>& callback) {
-    auto result = adapter::execute(kPreparedModel, kUserId, kExecutor, request, callback);
+    auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
@@ -330,8 +328,7 @@
 Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
                                                      V1_2::MeasureTiming measure,
                                                      const sp<V1_2::IExecutionCallback>& callback) {
-    auto result =
-            adapter::execute_1_2(kPreparedModel, kUserId, kExecutor, request, measure, callback);
+    auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
@@ -346,8 +343,8 @@
         const V1_3::OptionalTimePoint& deadline,
         const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
         const sp<V1_3::IExecutionCallback>& callback) {
-    auto result = adapter::execute_1_3(kPreparedModel, kUserId, kExecutor, request, measure,
-                                       deadline, loopTimeoutDuration, callback);
+    auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
+                                       loopTimeoutDuration, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute_1_3 failed with " << code << ": " << message;
diff --git a/sensors/aidl/vts/Android.bp b/sensors/aidl/vts/Android.bp
index 5a519a5..b5a5f15 100644
--- a/sensors/aidl/vts/Android.bp
+++ b/sensors/aidl/vts/Android.bp
@@ -34,6 +34,7 @@
     shared_libs: [
         "libbinder",
         "libbinder_ndk",
+        "libvndksupport",
         "libfmq",
         "android.hardware.common-V2-ndk",
         "android.hardware.common.fmq-V1-ndk",
@@ -41,6 +42,7 @@
     static_libs: [
         "android.hardware.sensors-V1-ndk",
         "VtsHalSensorsTargetTestUtils",
+        "libaidlcommonsupport",
     ],
     test_suites: [
         "general-tests",
diff --git a/sensors/aidl/vts/SensorsAidlTestSharedMemory.h b/sensors/aidl/vts/SensorsAidlTestSharedMemory.h
new file mode 100644
index 0000000..4b5916a
--- /dev/null
+++ b/sensors/aidl/vts/SensorsAidlTestSharedMemory.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SENSORS_AIDL_TEST_SHARED_MEMORY_H
+#define ANDROID_SENSORS_AIDL_TEST_SHARED_MEMORY_H
+
+#include "sensors-vts-utils/GrallocWrapper.h"
+
+#include <aidlcommonsupport/NativeHandle.h>
+#include <android-base/macros.h>
+#include <log/log.h>
+
+#include <sys/mman.h>
+#include <cinttypes>
+
+#include <cutils/ashmem.h>
+
+using ::aidl::android::hardware::sensors::BnSensors;
+using ::aidl::android::hardware::sensors::Event;
+using ::aidl::android::hardware::sensors::ISensors;
+using ::aidl::android::hardware::sensors::SensorType;
+
+template <class SensorType, class Event>
+class SensorsAidlTestSharedMemory {
+  public:
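+    // Factory method; returns nullptr when the requested size is out of range or the
+    // underlying memory (ashmem or gralloc) could not be allocated and mapped.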
+    static SensorsAidlTestSharedMemory* create(ISensors::SharedMemInfo::SharedMemType type,
+                                               size_t size) {
+        constexpr size_t kMaxSize =
+                128 * 1024 * 1024;  // sensor test should not need more than 128M
+        if (size == 0 || size >= kMaxSize) {
+            return nullptr;
+        }
+
+        auto m = new SensorsAidlTestSharedMemory<SensorType, Event>(type, size);
+        if (m->mSize != size || m->mBuffer == nullptr) {
+            delete m;
+            m = nullptr;
+        }
+        return m;
+    }
+
+    ISensors::SharedMemInfo getSharedMemInfo() const {
+        ISensors::SharedMemInfo mem = {
+                .type = mType,
+                .format = ISensors::SharedMemInfo::SharedMemFormat::SENSORS_EVENT,
+                .size = static_cast<int32_t>(mSize),
+                .memoryHandle = android::dupToAidl(mNativeHandle)};
+        return mem;
+    }
+    char* getBuffer() const { return mBuffer; }
+    size_t getSize() const { return mSize; }
+    std::vector<Event> parseEvents(int64_t lastCounter = -1, size_t offset = 0) const {
+        constexpr size_t kEventSize =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_TOTAL_LENGTH);
+        constexpr size_t kOffsetSize =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_FIELD);
+        constexpr size_t kOffsetToken =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_REPORT_TOKEN);
+        constexpr size_t kOffsetType =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_SENSOR_TYPE);
+        constexpr size_t kOffsetAtomicCounter = static_cast<size_t>(
+                BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_ATOMIC_COUNTER);
+        constexpr size_t kOffsetTimestamp =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_TIMESTAMP);
+        constexpr size_t kOffsetData =
+                static_cast<size_t>(BnSensors::DIRECT_REPORT_SENSOR_EVENT_OFFSET_SIZE_DATA);
+
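+        // Each event is a fixed-size record written by the HAL into the shared memory region.
+        // The atomic counter increases with every new event, so parsing stops at the first
+        // record whose counter is not newer than lastCounter.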
+        std::vector<Event> events;
+        std::vector<float> data(16);
+
+        while (offset + kEventSize <= mSize) {
+            int64_t atomicCounter =
+                    *reinterpret_cast<uint32_t*>(mBuffer + offset + kOffsetAtomicCounter);
+            if (atomicCounter <= lastCounter) {
+                ALOGV("atomicCounter = %" PRId64 ", lastCounter = %" PRId64, atomicCounter,
+                      lastCounter);
+                break;
+            }
+
+            int32_t size = *reinterpret_cast<int32_t*>(mBuffer + offset + kOffsetSize);
+            if (size != kEventSize) {
+                // Unexpected event size: parsed events may be corrupt, so discard them all.
+                events.clear();
+                break;
+            }
+
+            int32_t token = *reinterpret_cast<int32_t*>(mBuffer + offset + kOffsetToken);
+            int32_t type = *reinterpret_cast<int32_t*>(mBuffer + offset + kOffsetType);
+            int64_t timestamp = *reinterpret_cast<int64_t*>(mBuffer + offset + kOffsetTimestamp);
+
+            ALOGV("offset = %zu, cnt %" PRId64 ", token %" PRId32 ", type %" PRId32
+                  ", timestamp %" PRId64,
+                  offset, atomicCounter, token, type, timestamp);
+
+            Event event = {
+                    .timestamp = timestamp,
+                    .sensorHandle = token,
+                    .sensorType = static_cast<SensorType>(type),
+            };
+
+            event.set<Event::Data>(reinterpret_cast<float*>(mBuffer + offset + kOffsetData));
+
+            events.push_back(event);
+
+            lastCounter = atomicCounter;
+            offset += kEventSize;
+        }
+
+        return events;
+    }
+
+    virtual ~SensorsAidlTestSharedMemory() {
+        switch (mType) {
+            case ISensors::SharedMemInfo::SharedMemType::ASHMEM: {
+                if (mSize != 0) {
+                    ::munmap(mBuffer, mSize);
+                    mBuffer = nullptr;
+
+                    ::native_handle_close(mNativeHandle);
+                    ::native_handle_delete(mNativeHandle);
+
+                    mNativeHandle = nullptr;
+                    mSize = 0;
+                }
+                break;
+            }
+            case ISensors::SharedMemInfo::SharedMemType::GRALLOC: {
+                if (mSize != 0) {
+                    mGrallocWrapper->freeBuffer(mNativeHandle);
+                    mNativeHandle = nullptr;
+                    mSize = 0;
+                }
+                break;
+            }
+            default: {
+                if (mNativeHandle != nullptr || mSize != 0 || mBuffer != nullptr) {
+                    ALOGE("SensorsAidlTestSharedMemory %p not properly destructed: "
+                          "type %d, native handle %p, size %zu, buffer %p",
+                          this, static_cast<int>(mType), mNativeHandle, mSize, mBuffer);
+                }
+                break;
+            }
+        }
+    }
+
+  private:
+    SensorsAidlTestSharedMemory(ISensors::SharedMemInfo::SharedMemType type, size_t size)
+        : mType(type), mSize(0), mBuffer(nullptr) {
+        native_handle_t* handle = nullptr;
+        char* buffer = nullptr;
+        switch (type) {
+            case ISensors::SharedMemInfo::SharedMemType::ASHMEM: {
+                int fd;
+                handle = ::native_handle_create(1 /*nFds*/, 0 /*nInts*/);
+                if (handle != nullptr) {
+                    handle->data[0] = fd =
+                            ::ashmem_create_region("SensorsAidlTestSharedMemory", size);
+                    if (handle->data[0] > 0) {
+                        // memory is pinned by default
+                        buffer = static_cast<char*>(
+                                ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
+                        if (buffer != reinterpret_cast<char*>(MAP_FAILED)) {
+                            break;
+                        }
+                        ::native_handle_close(handle);
+                    }
+                    ::native_handle_delete(handle);
+                    handle = nullptr;
+                }
+                break;
+            }
+            case ISensors::SharedMemInfo::SharedMemType::GRALLOC: {
+                mGrallocWrapper = std::make_unique<::android::GrallocWrapper>();
+                if (!mGrallocWrapper->isInitialized()) {
+                    break;
+                }
+
+                std::pair<native_handle_t*, void*> buf = mGrallocWrapper->allocate(size);
+                handle = buf.first;
+                buffer = static_cast<char*>(buf.second);
+                break;
+            }
+            default:
+                break;
+        }
+
+        if (buffer != nullptr) {
+            mNativeHandle = handle;
+            mSize = size;
+            mBuffer = buffer;
+        }
+    }
+
+    ISensors::SharedMemInfo::SharedMemType mType;
+    native_handle_t* mNativeHandle;
+    size_t mSize;
+    char* mBuffer;
+    std::unique_ptr<::android::GrallocWrapper> mGrallocWrapper;
+
+    DISALLOW_COPY_AND_ASSIGN(SensorsAidlTestSharedMemory);
+};
+
+#endif  // ANDROID_SENSORS_AIDL_TEST_SHARED_MEMORY_H
diff --git a/sensors/aidl/vts/VtsAidlHalSensorsTargetTest.cpp b/sensors/aidl/vts/VtsAidlHalSensorsTargetTest.cpp
index 33645b2..608a4b0 100644
--- a/sensors/aidl/vts/VtsAidlHalSensorsTargetTest.cpp
+++ b/sensors/aidl/vts/VtsAidlHalSensorsTargetTest.cpp
@@ -26,6 +26,7 @@
 #include <utils/SystemClock.h>
 
 #include "SensorsAidlEnvironment.h"
+#include "SensorsAidlTestSharedMemory.h"
 #include "sensors-vts-utils/SensorsVtsEnvironmentBase.h"
 
 #include <cinttypes>
@@ -43,6 +44,9 @@
 using android::ProcessState;
 using std::chrono::duration_cast;
 
+constexpr size_t kEventSize =
+        static_cast<size_t>(ISensors::DIRECT_REPORT_SENSOR_EVENT_TOTAL_LENGTH);
+
 namespace {
 
 static void assertTypeMatchStringType(SensorType type, const std::string& stringType) {
@@ -97,6 +101,24 @@
     }
 }
 
+bool isDirectChannelTypeSupported(SensorInfo sensor, ISensors::SharedMemInfo::SharedMemType type) {
+    switch (type) {
+        case ISensors::SharedMemInfo::SharedMemType::ASHMEM:
+            return (sensor.flags & SensorInfo::SENSOR_FLAG_BITS_DIRECT_CHANNEL_ASHMEM) != 0;
+        case ISensors::SharedMemInfo::SharedMemType::GRALLOC:
+            return (sensor.flags & SensorInfo::SENSOR_FLAG_BITS_DIRECT_CHANNEL_GRALLOC) != 0;
+        default:
+            return false;
+    }
+}
+
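+// The maximum supported direct report rate level is encoded in the sensor's flag bits; a
+// rate level is supported when it does not exceed that maximum. For example, an encoded
+// maximum of FAST supports NORMAL and FAST requests but not VERY_FAST.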
+bool isDirectReportRateSupported(SensorInfo sensor, ISensors::RateLevel rate) {
+    unsigned int r = static_cast<unsigned int>(sensor.flags &
+                                               SensorInfo::SENSOR_FLAG_BITS_MASK_DIRECT_REPORT) >>
+                     static_cast<unsigned int>(SensorInfo::SENSOR_FLAG_SHIFT_DIRECT_REPORT);
+    return r >= static_cast<unsigned int>(rate);
+}
+
 int expectedReportModeForType(SensorType type) {
     switch (type) {
         case SensorType::ACCELEROMETER:
@@ -286,6 +308,24 @@
     std::vector<SensorInfo> getOneShotSensors();
     std::vector<SensorInfo> getInjectEventSensors();
 
+    void verifyDirectChannel(ISensors::SharedMemInfo::SharedMemType memType);
+
+    void verifyRegisterDirectChannel(
+            std::shared_ptr<SensorsAidlTestSharedMemory<SensorType, Event>> mem,
+            int32_t* directChannelHandle, bool supportsSharedMemType,
+            bool supportsAnyDirectChannel);
+
+    void verifyConfigure(const SensorInfo& sensor, ISensors::SharedMemInfo::SharedMemType memType,
+                         int32_t directChannelHandle, bool supportsAnyDirectChannel);
+
+    void queryDirectChannelSupport(ISensors::SharedMemInfo::SharedMemType memType,
+                                   bool* supportsSharedMemType, bool* supportsAnyDirectChannel);
+
+    void verifyUnregisterDirectChannel(int32_t* directChannelHandle, bool supportsAnyDirectChannel);
+
+    void checkRateLevel(const SensorInfo& sensor, int32_t directChannelHandle,
+                        ISensors::RateLevel rateLevel, int32_t* reportToken);
+
     inline std::shared_ptr<ISensors>& getSensors() { return mEnvironment->mSensors; }
 
     inline SensorsAidlEnvironment* getEnvironment() { return mEnvironment; }
@@ -313,6 +353,18 @@
 
     ndk::ScopedAStatus flush(int32_t sensorHandle) { return getSensors()->flush(sensorHandle); }
 
+    ndk::ScopedAStatus registerDirectChannel(const ISensors::SharedMemInfo& mem,
+                                             int32_t* aidlReturn);
+
+    ndk::ScopedAStatus unregisterDirectChannel(int32_t* channelHandle) {
+        return getSensors()->unregisterDirectChannel(*channelHandle);
+    }
+
+    ndk::ScopedAStatus configDirectReport(int32_t sensorHandle, int32_t channelHandle,
+                                          ISensors::RateLevel rate, int32_t* reportToken) {
+        return getSensors()->configDirectReport(sensorHandle, channelHandle, rate, reportToken);
+    }
+
     void runSingleFlushTest(const std::vector<SensorInfo>& sensors, bool activateSensor,
                             int32_t expectedFlushCount, bool expectedResult);
 
@@ -334,6 +386,19 @@
     SensorsAidlEnvironment* mEnvironment;
 };
 
+ndk::ScopedAStatus SensorsAidlTest::registerDirectChannel(const ISensors::SharedMemInfo& mem,
+                                                          int32_t* aidlReturn) {
+    // If registration of a channel succeeds, add the channel's handle to a set so that it can
+    // be unregistered when the test fails. Unregistering a channel intentionally does not
+    // remove the handle from the set: unregistering more than once should have no negative
+    // effect.
+
+    ndk::ScopedAStatus status = getSensors()->registerDirectChannel(mem, aidlReturn);
+    if (status.isOk()) {
+        mDirectChannelHandles.insert(*aidlReturn);
+    }
+    return status;
+}
+
 std::vector<SensorInfo> SensorsAidlTest::getSensorsList() {
     std::vector<SensorInfo> sensorInfoList;
     checkIsOk(getSensors()->getSensorsList(&sensorInfoList));
@@ -850,12 +915,141 @@
     }
 }
 
+void SensorsAidlTest::checkRateLevel(const SensorInfo& sensor, int32_t directChannelHandle,
+                                     ISensors::RateLevel rateLevel, int32_t* reportToken) {
+    ndk::ScopedAStatus status =
+            configDirectReport(sensor.sensorHandle, directChannelHandle, rateLevel, reportToken);
+
+    SCOPED_TRACE(::testing::Message()
+                 << " handle=0x" << std::hex << std::setw(8) << std::setfill('0')
+                 << sensor.sensorHandle << std::dec << " type=" << static_cast<int>(sensor.type)
+                 << " name=" << sensor.name);
+
+    if (isDirectReportRateSupported(sensor, rateLevel)) {
+        ASSERT_TRUE(status.isOk());
+        if (rateLevel != ISensors::RateLevel::STOP) {
+            ASSERT_GT(*reportToken, 0);
+        }
+    } else {
+        ASSERT_EQ(status.getExceptionCode(), EX_ILLEGAL_ARGUMENT);
+    }
+}
+
+void SensorsAidlTest::queryDirectChannelSupport(ISensors::SharedMemInfo::SharedMemType memType,
+                                                bool* supportsSharedMemType,
+                                                bool* supportsAnyDirectChannel) {
+    *supportsSharedMemType = false;
+    *supportsAnyDirectChannel = false;
+    for (const SensorInfo& curSensor : getSensorsList()) {
+        if (isDirectChannelTypeSupported(curSensor, memType)) {
+            *supportsSharedMemType = true;
+        }
+        if (isDirectChannelTypeSupported(curSensor,
+                                         ISensors::SharedMemInfo::SharedMemType::ASHMEM) ||
+            isDirectChannelTypeSupported(curSensor,
+                                         ISensors::SharedMemInfo::SharedMemType::GRALLOC)) {
+            *supportsAnyDirectChannel = true;
+        }
+
+        if (*supportsSharedMemType && *supportsAnyDirectChannel) {
+            break;
+        }
+    }
+}
+
+void SensorsAidlTest::verifyRegisterDirectChannel(
+        std::shared_ptr<SensorsAidlTestSharedMemory<SensorType, Event>> mem,
+        int32_t* directChannelHandle, bool supportsSharedMemType, bool supportsAnyDirectChannel) {
+    char* buffer = mem->getBuffer();
+    size_t size = mem->getSize();
+
+    if (supportsSharedMemType) {
+        memset(buffer, 0xff, size);
+    }
+
+    int32_t channelHandle = -1;
+
+    ::ndk::ScopedAStatus status = registerDirectChannel(mem->getSharedMemInfo(), &channelHandle);
+    if (supportsSharedMemType) {
+        ASSERT_TRUE(status.isOk());
+        ASSERT_GT(channelHandle, 0);
+    } else {
+        int32_t error = supportsAnyDirectChannel ? EX_ILLEGAL_ARGUMENT : EX_UNSUPPORTED_OPERATION;
+        ASSERT_EQ(status.getExceptionCode(), error);
+        ASSERT_EQ(channelHandle, -1);
+    }
+    *directChannelHandle = channelHandle;
+}
+
+void SensorsAidlTest::verifyUnregisterDirectChannel(int32_t* channelHandle,
+                                                    bool supportsAnyDirectChannel) {
+    int result = supportsAnyDirectChannel ? EX_NONE : EX_UNSUPPORTED_OPERATION;
+    ndk::ScopedAStatus status = unregisterDirectChannel(channelHandle);
+    ASSERT_EQ(status.getExceptionCode(), result);
+}
+
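+// Exercises the direct channel life cycle against every sensor: register a channel backed by
+// the given shared memory type, verify configuration at each rate level, then unregister.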
+void SensorsAidlTest::verifyDirectChannel(ISensors::SharedMemInfo::SharedMemType memType) {
+    constexpr size_t kNumEvents = 1;
+    constexpr size_t kMemSize = kNumEvents * kEventSize;
+
+    std::shared_ptr<SensorsAidlTestSharedMemory<SensorType, Event>> mem(
+            SensorsAidlTestSharedMemory<SensorType, Event>::create(memType, kMemSize));
+    ASSERT_NE(mem, nullptr);
+
+    bool supportsSharedMemType;
+    bool supportsAnyDirectChannel;
+    queryDirectChannelSupport(memType, &supportsSharedMemType, &supportsAnyDirectChannel);
+
+    for (const SensorInfo& sensor : getSensorsList()) {
+        int32_t directChannelHandle = 0;
+        verifyRegisterDirectChannel(mem, &directChannelHandle, supportsSharedMemType,
+                                    supportsAnyDirectChannel);
+        verifyConfigure(sensor, memType, directChannelHandle, supportsAnyDirectChannel);
+        verifyUnregisterDirectChannel(&directChannelHandle, supportsAnyDirectChannel);
+    }
+}
+
+void SensorsAidlTest::verifyConfigure(const SensorInfo& sensor,
+                                      ISensors::SharedMemInfo::SharedMemType memType,
+                                      int32_t directChannelHandle, bool supportsAnyDirectChannel) {
+    SCOPED_TRACE(::testing::Message()
+                 << " handle=0x" << std::hex << std::setw(8) << std::setfill('0')
+                 << sensor.sensorHandle << std::dec << " type=" << static_cast<int>(sensor.type)
+                 << " name=" << sensor.name);
+
+    int32_t reportToken = 0;
+    if (isDirectChannelTypeSupported(sensor, memType)) {
+        // Verify that each rate level is properly supported
+        checkRateLevel(sensor, directChannelHandle, ISensors::RateLevel::NORMAL, &reportToken);
+        checkRateLevel(sensor, directChannelHandle, ISensors::RateLevel::FAST, &reportToken);
+        checkRateLevel(sensor, directChannelHandle, ISensors::RateLevel::VERY_FAST, &reportToken);
+        checkRateLevel(sensor, directChannelHandle, ISensors::RateLevel::STOP, &reportToken);
+
+        // Verify that a sensor handle of -1 is only acceptable when using RateLevel::STOP
+        ndk::ScopedAStatus status = configDirectReport(-1 /* sensorHandle */, directChannelHandle,
+                                                       ISensors::RateLevel::NORMAL, &reportToken);
+        ASSERT_EQ(status.getServiceSpecificError(), android::BAD_VALUE);
+
+        status = configDirectReport(-1 /* sensorHandle */, directChannelHandle,
+                                    ISensors::RateLevel::STOP, &reportToken);
+        ASSERT_TRUE(status.isOk());
+    } else {
+        // directChannelHandle will be -1 here. The HAL should reject it as a bad value if some
+        // level of direct channel reporting is supported, or return EX_UNSUPPORTED_OPERATION
+        // if direct channel is not supported at all.
+        int error = supportsAnyDirectChannel ? EX_ILLEGAL_ARGUMENT : EX_UNSUPPORTED_OPERATION;
+        ndk::ScopedAStatus status = configDirectReport(sensor.sensorHandle, directChannelHandle,
+                                                       ISensors::RateLevel::NORMAL, &reportToken);
+        ASSERT_EQ(status.getExceptionCode(), error);
+    }
+}
+
 TEST_P(SensorsAidlTest, DirectChannelAshmem) {
-    // TODO(b/195593357): Implement this
+    verifyDirectChannel(ISensors::SharedMemInfo::SharedMemType::ASHMEM);
 }
 
 TEST_P(SensorsAidlTest, DirectChannelGralloc) {
-    // TODO(b/195593357): Implement this
+    verifyDirectChannel(ISensors::SharedMemInfo::SharedMemType::GRALLOC);
 }
 
 GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SensorsAidlTest);
diff --git a/tv/tuner/1.0/default/Lnb.cpp b/tv/tuner/1.0/default/Lnb.cpp
index 6025339..c770e91 100644
--- a/tv/tuner/1.0/default/Lnb.cpp
+++ b/tv/tuner/1.0/default/Lnb.cpp
@@ -33,9 +33,10 @@
 
 Lnb::~Lnb() {}
 
-Return<Result> Lnb::setCallback(const sp<ILnbCallback>& /* callback */) {
+Return<Result> Lnb::setCallback(const sp<ILnbCallback>& callback) {
     ALOGV("%s", __FUNCTION__);
 
+    mCallback = callback;
     return Result::SUCCESS;
 }
 
@@ -57,9 +58,16 @@
     return Result::SUCCESS;
 }
 
-Return<Result> Lnb::sendDiseqcMessage(const hidl_vec<uint8_t>& /* diseqcMessage */) {
+Return<Result> Lnb::sendDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
     ALOGV("%s", __FUNCTION__);
 
+    if (mCallback != nullptr) {
+        // A correct implementation would return the device's response via
+        // onDiseqcMessage(). The implementation below exists only to enable
+        // testing of LnbCallbacks.
+        ALOGV("[hidl] %s - this is for test purpose only, and must be replaced!", __FUNCTION__);
+        mCallback->onDiseqcMessage(diseqcMessage);
+    }
     return Result::SUCCESS;
 }
 
diff --git a/tv/tuner/1.0/default/Lnb.h b/tv/tuner/1.0/default/Lnb.h
index 1e97214..c14bbd8 100644
--- a/tv/tuner/1.0/default/Lnb.h
+++ b/tv/tuner/1.0/default/Lnb.h
@@ -57,6 +57,7 @@
   private:
     int mId;
     virtual ~Lnb();
+    sp<ILnbCallback> mCallback;
 };
 
 }  // namespace implementation
diff --git a/tv/tuner/1.1/default/Lnb.cpp b/tv/tuner/1.1/default/Lnb.cpp
index 044727f..5dd0147 100644
--- a/tv/tuner/1.1/default/Lnb.cpp
+++ b/tv/tuner/1.1/default/Lnb.cpp
@@ -33,9 +33,10 @@
 
 Lnb::~Lnb() {}
 
-Return<Result> Lnb::setCallback(const sp<ILnbCallback>& /* callback */) {
+Return<Result> Lnb::setCallback(const sp<ILnbCallback>& callback) {
     ALOGV("%s", __FUNCTION__);
 
+    mCallback = callback;
     return Result::SUCCESS;
 }
 
@@ -57,9 +58,16 @@
     return Result::SUCCESS;
 }
 
-Return<Result> Lnb::sendDiseqcMessage(const hidl_vec<uint8_t>& /* diseqcMessage */) {
+Return<Result> Lnb::sendDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) {
     ALOGV("%s", __FUNCTION__);
 
+    if (mCallback != nullptr) {
+        // The correct implementation should return the response from the
+        // device via onDiseqcMessage(). The implementation below only exists
+        // to enable testing of LnbCallback.
+        ALOGV("[hidl] %s - this is for test purposes only, and must be replaced!", __FUNCTION__);
+        mCallback->onDiseqcMessage(diseqcMessage);
+    }
     return Result::SUCCESS;
 }
 
diff --git a/tv/tuner/1.1/default/Lnb.h b/tv/tuner/1.1/default/Lnb.h
index 70a8e41..b34ca39 100644
--- a/tv/tuner/1.1/default/Lnb.h
+++ b/tv/tuner/1.1/default/Lnb.h
@@ -51,6 +51,7 @@
   private:
     int mId;
     virtual ~Lnb();
+    sp<ILnbCallback> mCallback;
 };
 
 }  // namespace implementation
diff --git a/tv/tuner/aidl/default/Lnb.cpp b/tv/tuner/aidl/default/Lnb.cpp
index 35d2da6..f9343ae 100644
--- a/tv/tuner/aidl/default/Lnb.cpp
+++ b/tv/tuner/aidl/default/Lnb.cpp
@@ -34,9 +34,11 @@
 
 Lnb::~Lnb() {}
 
-::ndk::ScopedAStatus Lnb::setCallback(const std::shared_ptr<ILnbCallback>& /* in_callback */) {
+::ndk::ScopedAStatus Lnb::setCallback(const std::shared_ptr<ILnbCallback>& in_callback) {
     ALOGV("%s", __FUNCTION__);
 
+    mCallback = in_callback;
+
     return ::ndk::ScopedAStatus::ok();
 }
 
@@ -58,9 +60,17 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
-::ndk::ScopedAStatus Lnb::sendDiseqcMessage(const std::vector<uint8_t>& /* in_diseqcMessage */) {
+::ndk::ScopedAStatus Lnb::sendDiseqcMessage(const std::vector<uint8_t>& in_diseqcMessage) {
     ALOGV("%s", __FUNCTION__);
 
+    if (mCallback != nullptr) {
+        // The correct implementation should return the response from the
+        // device via onDiseqcMessage(). The implementation below only exists
+        // to enable testing of LnbCallback.
+        ALOGV("[aidl] %s - this is for test purposes only, and must be replaced!", __FUNCTION__);
+        mCallback->onDiseqcMessage(in_diseqcMessage);
+    }
+
     return ::ndk::ScopedAStatus::ok();
 }
 
diff --git a/tv/tuner/aidl/default/Lnb.h b/tv/tuner/aidl/default/Lnb.h
index bfe3097..464f9a4 100644
--- a/tv/tuner/aidl/default/Lnb.h
+++ b/tv/tuner/aidl/default/Lnb.h
@@ -44,6 +44,7 @@
   private:
     int mId;
     virtual ~Lnb();
+    std::shared_ptr<ILnbCallback> mCallback;
 };
 
 }  // namespace tuner
diff --git a/wifi/hostapd/aidl/aidl_api/android.hardware.wifi.hostapd/current/android/hardware/wifi/hostapd/NetworkParams.aidl b/wifi/hostapd/aidl/aidl_api/android.hardware.wifi.hostapd/current/android/hardware/wifi/hostapd/NetworkParams.aidl
index ffe2f33..4554223 100644
--- a/wifi/hostapd/aidl/aidl_api/android.hardware.wifi.hostapd/current/android/hardware/wifi/hostapd/NetworkParams.aidl
+++ b/wifi/hostapd/aidl/aidl_api/android.hardware.wifi.hostapd/current/android/hardware/wifi/hostapd/NetworkParams.aidl
@@ -39,4 +39,5 @@
   android.hardware.wifi.hostapd.EncryptionType encryptionType;
   String passphrase;
   boolean isMetered;
+  byte[] vendorElements;
 }
diff --git a/wifi/hostapd/aidl/android/hardware/wifi/hostapd/NetworkParams.aidl b/wifi/hostapd/aidl/android/hardware/wifi/hostapd/NetworkParams.aidl
index df84eca..47d9e6f 100644
--- a/wifi/hostapd/aidl/android/hardware/wifi/hostapd/NetworkParams.aidl
+++ b/wifi/hostapd/aidl/android/hardware/wifi/hostapd/NetworkParams.aidl
@@ -44,4 +44,12 @@
      * CHARGEABLE_PUBLIC_NETWORK when set to true.
      */
     boolean isMetered;
+    /**
+     * Additional vendor-specific elements for Beacon and Probe Response frames.
+     * This parameter can be used to append additional vendor-specific element(s)
+     * to the end of the Beacon and Probe Response frames. The format for these
+     * element(s) is a binary dump of the raw information elements (id + len +
+     * payload for one or more elements). Example: byte[]{ 221, 4, 17, 34, 51, 1 }
+     */
+    byte[] vendorElements;
 }
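To make the id + len + payload layout concrete, a small sketch assembling the documented
example (221/0xDD is the vendor-specific element ID; the OUI and payload bytes here are
illustrative only):

    #include <cstdint>
    #include <vector>

    // One vendor-specific IE: id=221 (0xDD), len=4, then a 3-byte OUI
    // (17, 34, 51) followed by one byte of vendor payload (1).
    std::vector<uint8_t> makeVendorElements() {
        return {221, 4, 17, 34, 51, 1};
    }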
diff --git a/wifi/hostapd/aidl/vts/functional/VtsHalHostapdTargetTest.cpp b/wifi/hostapd/aidl/vts/functional/VtsHalHostapdTargetTest.cpp
index dad7085..cd7ff82 100644
--- a/wifi/hostapd/aidl/vts/functional/VtsHalHostapdTargetTest.cpp
+++ b/wifi/hostapd/aidl/vts/functional/VtsHalHostapdTargetTest.cpp
@@ -431,6 +431,7 @@
     EXPECT_TRUE(status.isOk());
 }
 
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(HostapdAidl);
 INSTANTIATE_TEST_SUITE_P(
     Hostapd, HostapdAidl,
     testing::ValuesIn(android::getAidlHalInstanceNames(IHostapd::descriptor)),
diff --git a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
index ed435e2..826d916 100644
--- a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
+++ b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
@@ -50,4 +50,5 @@
   oneway void onServiceDiscoveryResponse(in byte[] srcAddress, in char updateIndicator, in byte[] tlvs);
   oneway void onStaAuthorized(in byte[] srcAddress, in byte[] p2pDeviceAddress);
   oneway void onStaDeauthorized(in byte[] srcAddress, in byte[] p2pDeviceAddress);
+  oneway void onGroupFrequencyChanged(in String groupIfname, in int frequency);
 }
diff --git a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
index 4f7584d..6276a35 100644
--- a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
+++ b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
@@ -38,4 +38,5 @@
   oneway void onNetworkEapSimGsmAuthRequest(in android.hardware.wifi.supplicant.NetworkRequestEapSimGsmAuthParams params);
   oneway void onNetworkEapSimUmtsAuthRequest(in android.hardware.wifi.supplicant.NetworkRequestEapSimUmtsAuthParams params);
   oneway void onTransitionDisable(in android.hardware.wifi.supplicant.TransitionDisableIndication ind);
+  oneway void onServerCertificateAvailable(in int depth, in byte[] subject, in byte[] certHash, in byte[] certBlob);
 }
diff --git a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
index 43772af..9a0a924 100644
--- a/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
+++ b/wifi/supplicant/aidl/aidl_api/android.hardware.wifi.supplicant/current/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
@@ -38,4 +38,5 @@
   OCE = 2,
   SAE_PK = 4,
   WFD_R2 = 8,
+  TRUST_ON_FIRST_USE = 16,
 }
diff --git a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
index f0cabd6..2b58cc2 100644
--- a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
+++ b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantP2pIfaceCallback.aidl
@@ -205,4 +205,12 @@
      * @param p2pDeviceAddress P2P device address.
      */
     oneway void onStaDeauthorized(in byte[] srcAddress, in byte[] p2pDeviceAddress);
+
+    /**
+     * Used to indicate that the operating frequency has changed for this P2P group interface.
+     *
+     * @param groupIfname Interface name of the group (e.g., p2p-p2p0-1).
+     * @param frequency New operating frequency in MHz.
+     */
+    oneway void onGroupFrequencyChanged(in String groupIfname, in int frequency);
 }
diff --git a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
index c28b494..4024c35 100644
--- a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
+++ b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/ISupplicantStaNetworkCallback.aidl
@@ -62,4 +62,13 @@
      * Used to notify WPA3 transition disable.
      */
     oneway void onTransitionDisable(in TransitionDisableIndication ind);
+
+    /**
+     * Used to notify of an EAP server certificate event.
+     *
+     * On receiving a server certificate during the TLS handshake, send the
+     * certificate to the framework for Trust On First Use.
+     */
+    oneway void onServerCertificateAvailable(
+            in int depth, in byte[] subject, in byte[] certHash, in byte[] certBlob);
 }
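A hedged sketch of a framework-side handler for this event (the pinnedHashFor()/pinHash()/
rejectConnection() store helpers are hypothetical; the method signature matches the VTS
stub added below):

    ::ndk::ScopedAStatus onServerCertificateAvailable(
            int32_t depth, const std::vector<uint8_t>& subject,
            const std::vector<uint8_t>& certHash,
            const std::vector<uint8_t>& certBlob) override {
        // Trust On First Use: pin the end-entity certificate on first contact,
        // then reject any later server whose hash deviates from the pin.
        if (auto pinned = pinnedHashFor(subject)) {
            if (*pinned != certHash) rejectConnection();
        } else if (depth == 0) {
            pinHash(subject, certHash, certBlob);
        }
        return ndk::ScopedAStatus::ok();
    }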
diff --git a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
index e174199..08006cf 100644
--- a/wifi/supplicant/aidl/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
+++ b/wifi/supplicant/aidl/android/hardware/wifi/supplicant/WpaDriverCapabilitiesMask.aidl
@@ -38,4 +38,8 @@
      * Wi-Fi Display R2
      */
     WFD_R2 = 1 << 3,
+    /**
+     * Trust On First Use
+     */
+    TRUST_ON_FIRST_USE = 1 << 4,
 }
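Because WpaDriverCapabilitiesMask is a bitmask, callers probe the new capability with a
bitwise AND. A brief sketch, assuming the supplicant STA iface exposes a
getWpaDriverCapabilities() accessor as in earlier HAL versions:

    WpaDriverCapabilitiesMask caps;
    if (sta_iface->getWpaDriverCapabilities(&caps).isOk() &&
        (static_cast<int32_t>(caps) &
         static_cast<int32_t>(WpaDriverCapabilitiesMask::TRUST_ON_FIRST_USE))) {
        // Driver supports Trust On First Use; enable the
        // onServerCertificateAvailable() flow described above.
    }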
diff --git a/wifi/supplicant/aidl/vts/functional/supplicant_p2p_iface_aidl_test.cpp b/wifi/supplicant/aidl/vts/functional/supplicant_p2p_iface_aidl_test.cpp
index 2f4f06d..10aab4d 100644
--- a/wifi/supplicant/aidl/vts/functional/supplicant_p2p_iface_aidl_test.cpp
+++ b/wifi/supplicant/aidl/vts/functional/supplicant_p2p_iface_aidl_test.cpp
@@ -159,6 +159,10 @@
         const std::vector<uint8_t>& /* p2pDeviceAddress */) override {
         return ndk::ScopedAStatus::ok();
     }
+    ::ndk::ScopedAStatus onGroupFrequencyChanged(const std::string& /* groupIfname */,
+                                                 int32_t /* frequency */) override {
+        return ndk::ScopedAStatus::ok();
+    }
 };
 
 class SupplicantP2pIfaceAidlTest : public testing::TestWithParam<std::string> {
@@ -620,6 +624,7 @@
         p2p_iface_->removeUpnpService(0 /* version */, upnpServiceName).isOk());
 }
 
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SupplicantP2pIfaceAidlTest);
 INSTANTIATE_TEST_SUITE_P(Supplicant, SupplicantP2pIfaceAidlTest,
                          testing::ValuesIn(android::getAidlHalInstanceNames(
                              ISupplicant::descriptor)),
diff --git a/wifi/supplicant/aidl/vts/functional/supplicant_sta_iface_aidl_test.cpp b/wifi/supplicant/aidl/vts/functional/supplicant_sta_iface_aidl_test.cpp
index f51c07f..6e6955f 100644
--- a/wifi/supplicant/aidl/vts/functional/supplicant_sta_iface_aidl_test.cpp
+++ b/wifi/supplicant/aidl/vts/functional/supplicant_sta_iface_aidl_test.cpp
@@ -769,6 +769,7 @@
     EXPECT_TRUE(sta_iface_->removeDppUri(peer_id).isOk());
 }
 
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SupplicantStaIfaceAidlTest);
 INSTANTIATE_TEST_SUITE_P(Supplicant, SupplicantStaIfaceAidlTest,
                          testing::ValuesIn(android::getAidlHalInstanceNames(
                              ISupplicant::descriptor)),
diff --git a/wifi/supplicant/aidl/vts/functional/supplicant_sta_network_aidl_test.cpp b/wifi/supplicant/aidl/vts/functional/supplicant_sta_network_aidl_test.cpp
index 66c8807..0a35f66 100644
--- a/wifi/supplicant/aidl/vts/functional/supplicant_sta_network_aidl_test.cpp
+++ b/wifi/supplicant/aidl/vts/functional/supplicant_sta_network_aidl_test.cpp
@@ -92,6 +92,12 @@
         TransitionDisableIndication /* ind */) override {
         return ndk::ScopedAStatus::ok();
     }
+    ::ndk::ScopedAStatus onServerCertificateAvailable(
+            int32_t /* depth */, const std::vector<uint8_t>& /* subject */,
+            const std::vector<uint8_t>& /* certHash */,
+            const std::vector<uint8_t>& /* certBlob */) override {
+        return ndk::ScopedAStatus::ok();
+    }
 };
 
 class SupplicantStaNetworkAidlTest
@@ -778,6 +784,7 @@
     EXPECT_NE(retrievedToken.size(), 0);
 }
 
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(SupplicantStaNetworkAidlTest);
 INSTANTIATE_TEST_SUITE_P(Supplicant, SupplicantStaNetworkAidlTest,
                          testing::ValuesIn(android::getAidlHalInstanceNames(
                              ISupplicant::descriptor)),