Add HAL interfaces for compilation and execution hints

The following AIDL types are added:
 - TokenValuePair
 - PrepareModelConfig
 - ExecutionConfig

The following AIDL methods are added:
 - IDevice::prepareModelWithConfig
 - IPreparedModel::executeSynchronouslyWithConfig
 - IPreparedModel::executeFencedWithConfig
 - IBurst::executeSynchronouslyWithConfig

The compilation and execution hints are stored as a list of
token-value pairs inside the PrepareModelConfig / ExecutionConfig
parcelables. These parcelables are introduced so that future
extensions to the compilation- and execution-related interfaces are
easier to make.
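
For illustration, a minimal sketch of how a caller of the canonical C++
utility layer (assuming namespace nn = ::android::nn; as in the files
below) could pass compilation hints through the extended prepareModel()
signature. The field names of nn::TokenValuePair ({token, value}) and
nn::ExtensionNameAndPrefix ({name, prefix}) are assumptions based on the
canonical types; the hint token, payload and extension name are
hypothetical.

    // Sketch only: the hint token, payload and extension name are made up.
    nn::GeneralResult<nn::SharedPreparedModel> prepareWithHints(
            const nn::SharedDevice& device, const nn::Model& model) {
        const std::vector<nn::TokenValuePair> hints = {
                {.token = 0x10000, .value = {0x01}},  // hypothetical vendor hint
        };
        const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix = {
                {.name = "com.example.vendor_hints", .prefix = 0x0001},  // hypothetical
        };
        // Extended prepareModel() signature added by this change. In the
        // 1.0-1.3 utility adapters below, the two new arguments are ignored.
        return device->prepareModel(model, nn::ExecutionPreference::DEFAULT,
                                    nn::Priority::DEFAULT, /*deadline=*/{},
                                    /*modelCache=*/{}, /*dataCache=*/{},
                                    /*token=*/{}, hints, extensionNameToPrefix);
    }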

It is the driver's responsibility to verify the hints, and the driver
is allowed to ignore them.
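
On the service side, a sketch of how a driver might act on this: it
verifies the hints it recognizes and silently skips the rest. The
helpers isKnownVendorHint(), hasValidPayload() and applyHint() are
hypothetical and only illustrate the expected behaviour.

    // Sketch of driver-side hint handling (hypothetical helpers).
    nn::GeneralResult<void> checkAndApplyHints(
            const std::vector<nn::TokenValuePair>& hints) {
        for (const auto& hint : hints) {
            if (!isKnownVendorHint(hint.token)) {
                continue;  // unknown hints may be ignored
            }
            if (!hasValidPayload(hint)) {
                return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                       << "Malformed value for hint token " << hint.token;
            }
            applyHint(hint);  // e.g. tune compilation for this model
        }
        return {};
    }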

Bug: 203248587
Test: neuralnetworks_utils_hal_aidl_test
Change-Id: I98240fd75089fc85cdfcaa0be28aab8a6f0dfca5
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index 8bd2fbe..cef76c6 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -45,12 +45,15 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
   private:
     const nn::SharedPreparedModel kPreparedModel;
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index 0a6ca3e..d7c43ef 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -65,8 +65,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index bdb5b54..337c132 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -49,18 +49,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index 1284721..3642bc6 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -50,15 +50,20 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
-    return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, hints,
+                                   extensionNameToPrefix);
 }
 
 nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
-    return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, hints,
+                                                   extensionNameToPrefix);
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index b0c236e..620d040 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -143,7 +143,9 @@
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/,
         nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
-        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 00e7d22..b8055fc 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -59,7 +59,9 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -94,19 +96,22 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& /*request*/,
-                             const std::vector<nn::SyncFence>& /*waitFor*/,
-                             nn::MeasureTiming /*measure*/,
-                             const nn::OptionalTimePoint& /*deadline*/,
-                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
-                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
 }
 
 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming /*measure*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
diff --git a/neuralnetworks/1.0/utils/test/DeviceTest.cpp b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
index 83e555f..9e9db16 100644
--- a/neuralnetworks/1.0/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
@@ -380,7 +380,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -399,7 +399,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -417,7 +417,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -435,7 +435,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -452,7 +452,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -469,7 +469,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -488,7 +488,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
index 7820c06..e03a98d 100644
--- a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
@@ -121,7 +121,7 @@
             .WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -138,7 +138,7 @@
                                          V1_0::ErrorStatus::GENERAL_FAILURE)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -155,7 +155,7 @@
                     makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -171,7 +171,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -187,7 +187,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -205,7 +205,7 @@
     EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -218,7 +218,7 @@
     const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -235,7 +235,7 @@
             .WillRepeatedly(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -258,7 +258,7 @@
                                          V1_0::ErrorStatus::GENERAL_FAILURE)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -279,7 +279,7 @@
                     makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -299,7 +299,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -319,7 +319,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -341,7 +341,7 @@
     EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -358,7 +358,7 @@
     const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index d6bd36a..38ca138 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -64,8 +64,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 3effa84..28f3276 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -143,7 +143,9 @@
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
         nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
-        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.1/utils/test/DeviceTest.cpp b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
index 2248da6..8ab87bc 100644
--- a/neuralnetworks/1.1/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
@@ -390,7 +390,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -409,7 +409,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -427,7 +427,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -445,7 +445,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -462,7 +462,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -479,7 +479,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -498,7 +498,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
index ac9411c..1b28476 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
@@ -170,13 +170,16 @@
     // See IBurst::execute for information on this method.
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     // See IBurst::createReusableExecution for information on this method.
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     // If fallback is not nullptr, this method will invoke the fallback function to try another
     // execution path if the packet could not be sent. Otherwise, failing to send the packet will
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index c3348aa..4f13adc 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -37,7 +37,7 @@
 GeneralResult<Operand::ExtraParams> unvalidatedConvert(
         const hal::V1_2::Operand::ExtraParams& extraParams);
 GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model);
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
         const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
 GeneralResult<OutputShape> unvalidatedConvert(const hal::V1_2::OutputShape& outputShape);
 GeneralResult<MeasureTiming> unvalidatedConvert(const hal::V1_2::MeasureTiming& measureTiming);
@@ -78,7 +78,7 @@
         const nn::Operand::ExtraParams& extraParams);
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
 nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix);
 nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
 nn::GeneralResult<MeasureTiming> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index e7ac172..d92cf50 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -83,8 +83,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 1150e5e..72a5b2f 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -49,18 +49,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
diff --git a/neuralnetworks/1.2/utils/src/Burst.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp
index 911fbfa..23e8070 100644
--- a/neuralnetworks/1.2/utils/src/Burst.cpp
+++ b/neuralnetworks/1.2/utils/src/Burst.cpp
@@ -305,8 +305,9 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // This is the first point when we know an execution is occurring, so begin to collect
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
@@ -317,7 +318,7 @@
     // fall back to another execution path
     if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
-        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
     }
 
     // ensure that request is ready for IPC
@@ -346,7 +347,7 @@
     // send request packet
     const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
     const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
-        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
     };
     return executeInternal(requestPacket, relocation, fallback);
 }
@@ -354,14 +355,17 @@
 // See IBurst::createReusableExecution for information on this method.
 nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
     if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
-        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, {},
+                                                       {});
     }
 
     // ensure that request is ready for IPC
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 838d9c4..62ec2ed 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -212,9 +212,9 @@
     };
 }
 
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
         const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
-    return Model::ExtensionNameAndPrefix{
+    return ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
             .prefix = extensionNameAndPrefix.prefix,
     };
@@ -495,7 +495,7 @@
 }
 
 nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return Model::ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
             .prefix = extensionNameAndPrefix.prefix,
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index e7acecd..3a58d2c 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -236,7 +236,9 @@
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
         nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 6df3df3..feb3951 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -91,7 +91,9 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -123,19 +125,22 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& /*request*/,
-                             const std::vector<nn::SyncFence>& /*waitFor*/,
-                             nn::MeasureTiming /*measure*/,
-                             const nn::OptionalTimePoint& /*deadline*/,
-                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
-                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
 }
 
 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
diff --git a/neuralnetworks/1.2/utils/test/DeviceTest.cpp b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
index 1dc6285..0d8c141 100644
--- a/neuralnetworks/1.2/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
@@ -636,7 +636,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -655,7 +655,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -673,7 +673,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -691,7 +691,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -708,7 +708,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -725,7 +725,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -746,7 +746,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index 5e2ad79..a5ec9d3 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -154,7 +154,7 @@
             .WillOnce(Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -172,7 +172,7 @@
                     makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -189,7 +189,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -206,7 +206,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -224,7 +224,7 @@
                                                        V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -243,7 +243,7 @@
                                                        kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -261,7 +261,7 @@
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -278,7 +278,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -295,7 +295,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -314,7 +314,7 @@
     EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -328,7 +328,7 @@
             PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -347,7 +347,7 @@
                     Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -371,7 +371,7 @@
                     makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -392,7 +392,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -413,7 +413,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -436,7 +436,7 @@
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -461,7 +461,7 @@
                                                        kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -483,7 +483,7 @@
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -504,7 +504,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -525,7 +525,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -548,7 +548,7 @@
     EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -566,7 +566,7 @@
             PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index c3c6fc4..cf5e5ea0 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -66,8 +66,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index 480438d..124cc43 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -48,18 +48,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index a1d414c..09e9d80 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -396,7 +396,7 @@
 }
 
 nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return V1_2::utils::unvalidatedConvert(extensionNameAndPrefix);
 }
 
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index 9517fda..824cec6 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -187,7 +187,9 @@
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index ce977e5..b92f877 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -135,8 +135,9 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -174,10 +175,13 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
-                             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-                             const nn::OptionalDuration& loopTimeoutDuration,
-                             const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const nn::OptionalDuration& timeoutDurationAfterFence,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -230,7 +234,9 @@
 
 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
diff --git a/neuralnetworks/1.3/utils/test/DeviceTest.cpp b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
index 7eba4bc..6f48837 100644
--- a/neuralnetworks/1.3/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
@@ -658,7 +658,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -677,7 +677,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -695,7 +695,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -713,7 +713,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -730,7 +730,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -747,7 +747,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -768,7 +768,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
index 6dbbd6b..51b5d29 100644
--- a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
@@ -182,7 +182,7 @@
             .WillOnce(Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -200,7 +200,7 @@
                     makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -217,7 +217,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -234,7 +234,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -252,7 +252,7 @@
                                                        V1_3::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -271,7 +271,7 @@
                                                        kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -289,7 +289,7 @@
                     V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -306,7 +306,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -323,7 +323,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -344,7 +344,7 @@
             .WillOnce(InvokeWithoutArgs(ret));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -366,7 +366,7 @@
             .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -396,7 +396,7 @@
             .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -422,7 +422,7 @@
                     makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -439,7 +439,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -456,7 +456,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -475,7 +475,7 @@
                     Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -499,7 +499,7 @@
                     makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -520,7 +520,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -541,7 +541,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -564,7 +564,7 @@
                     V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -589,7 +589,7 @@
                                                        kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -611,7 +611,7 @@
                     V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -628,7 +628,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -649,7 +649,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -674,7 +674,7 @@
             .WillOnce(InvokeWithoutArgs(ret));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -702,7 +702,7 @@
                     Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -738,7 +738,7 @@
             .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -768,7 +768,7 @@
                     makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -789,7 +789,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -810,7 +810,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl
new file mode 100644
index 0000000..cb85743
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/ExecutionConfig.aidl
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+//     the interface (from the latest frozen version), the build system will
+//     prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable ExecutionConfig {
+  boolean measureTiming;
+  long loopTimeoutDurationNs;
+  android.hardware.neuralnetworks.TokenValuePair[] executionHints;
+  android.hardware.neuralnetworks.ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
index eb3d0b0..461fdfa 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IBurst.aidl
@@ -36,4 +36,5 @@
 interface IBurst {
   android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
   void releaseMemoryResource(in long memoryIdentifierToken);
+  android.hardware.neuralnetworks.ExecutionResult executeSynchronouslyWithConfig(in android.hardware.neuralnetworks.Request request, in long[] memoryIdentifierTokens, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs);
 }
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
index c9c67f2..c0fba47 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IDevice.aidl
@@ -43,6 +43,7 @@
   String getVersionString();
   void prepareModel(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.ExecutionPreference preference, in android.hardware.neuralnetworks.Priority priority, in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
   void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache, in ParcelFileDescriptor[] dataCache, in byte[] token, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
+  void prepareModelWithConfig(in android.hardware.neuralnetworks.Model model, in android.hardware.neuralnetworks.PrepareModelConfig config, in android.hardware.neuralnetworks.IPreparedModelCallback callback);
   const int BYTE_SIZE_OF_CACHE_TOKEN = 32;
   const int MAX_NUMBER_OF_CACHE_FILES = 32;
   const int EXTENSION_TYPE_HIGH_BITS_PREFIX = 15;
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
index f899567..fb0c372 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/IPreparedModel.aidl
@@ -37,7 +37,9 @@
   android.hardware.neuralnetworks.ExecutionResult executeSynchronously(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs);
   android.hardware.neuralnetworks.FencedExecutionResult executeFenced(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in boolean measureTiming, in long deadlineNs, in long loopTimeoutDurationNs, in long durationNs);
   android.hardware.neuralnetworks.IBurst configureExecutionBurst();
-  android.hardware.neuralnetworks.IExecution createReusableExecution(in android.hardware.neuralnetworks.Request request, in boolean measureTiming, in long loopTimeoutDurationNs);
+  android.hardware.neuralnetworks.IExecution createReusableExecution(in android.hardware.neuralnetworks.Request request, in android.hardware.neuralnetworks.ExecutionConfig config);
+  android.hardware.neuralnetworks.ExecutionResult executeSynchronouslyWithConfig(in android.hardware.neuralnetworks.Request request, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs);
+  android.hardware.neuralnetworks.FencedExecutionResult executeFencedWithConfig(in android.hardware.neuralnetworks.Request request, in ParcelFileDescriptor[] waitFor, in android.hardware.neuralnetworks.ExecutionConfig config, in long deadlineNs, in long durationNs);
   const long DEFAULT_LOOP_TIMEOUT_DURATION_NS = 2000000000;
   const long MAXIMUM_LOOP_TIMEOUT_DURATION_NS = 15000000000;
 }
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl
new file mode 100644
index 0000000..85c924f
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/PrepareModelConfig.aidl
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+//     the interface (from the latest frozen version), the build system will
+//     prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable PrepareModelConfig {
+  android.hardware.neuralnetworks.ExecutionPreference preference;
+  android.hardware.neuralnetworks.Priority priority;
+  long deadlineNs;
+  ParcelFileDescriptor[] modelCache;
+  ParcelFileDescriptor[] dataCache;
+  byte[] cacheToken;
+  android.hardware.neuralnetworks.TokenValuePair[] compilationHints;
+  android.hardware.neuralnetworks.ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl
new file mode 100644
index 0000000..e477d6e
--- /dev/null
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/TokenValuePair.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE.                          //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a snapshot of an AIDL file. Do not edit it manually. There are
+// two cases:
+// 1). this is a frozen version file - do not edit this in any case.
+// 2). this is a 'current' file. If you make a backwards compatible change to
+//     the interface (from the latest frozen version), the build system will
+//     prompt you to update this file with `m <name>-update-api`.
+//
+// You must not make a backward incompatible change to any AIDL file built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.hardware.neuralnetworks;
+@VintfStability
+parcelable TokenValuePair {
+  int token;
+  byte[] value;
+}
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl
new file mode 100644
index 0000000..00f1e11
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExecutionConfig.aidl
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+import android.hardware.neuralnetworks.ExtensionNameAndPrefix;
+import android.hardware.neuralnetworks.TokenValuePair;
+
+/**
+ * A type that is used to represent all configuration related to
+ * an Execution.
+ */
+@VintfStability
+parcelable ExecutionConfig {
+    /**
+     * Specifies whether or not to measure duration of the execution.
+     * For {@link IPreparedModel::executeSynchronouslyWithConfig}, the duration runs from the time
+     * the driver sees the corresponding call to the execute function to the time the driver returns
+     * from the function. For {@link IPreparedModel::executeFencedWithConfig}, please refer to
+     * {@link IFencedExecutionCallback} for details.
+     */
+    boolean measureTiming;
+    /**
+     * The maximum amount of time in nanoseconds that should be spent
+     * executing a {@link OperationType::WHILE} operation. If a loop
+     * condition model does not output false within this duration,
+     * the execution must be aborted. If -1 is provided, the maximum
+     * amount of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}.
+     * Other negative values are invalid. When provided, the duration
+     * must not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
+     */
+    long loopTimeoutDurationNs;
+    /**
+     * A vector of token / value pairs that represent vendor-specific
+     * execution hints or metadata. The provided TokenValuePairs must not
+     * contain the same token twice. The driver must validate the
+     * data and ignore invalid hints. It is up to the driver to
+     * decide whether to respect the provided hints or not.
+     */
+    TokenValuePair[] executionHints;
+    /**
+     * The mapping between extension names and prefixes of token values.
+     * The driver must ignore the corresponding execution hint if
+     * the extension is not supported.
+     */
+    ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
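
For illustration only, a minimal C++ sketch of populating the ExecutionConfig parcelable above,
assuming the standard NDK-backend bindings generated from this AIDL; makeExecutionConfig and the
0x7AAA0001 token/payload are made-up values, not part of the HAL:

    #include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>
    #include <aidl/android/hardware/neuralnetworks/TokenValuePair.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Measure timing, keep the default WHILE-loop timeout, and attach one vendor hint.
    nnhal::ExecutionConfig makeExecutionConfig() {
        nnhal::TokenValuePair hint;
        hint.token = 0x7AAA0001;   // illustrative extension token (prefix 0x7AAA, hint 0x0001)
        hint.value = {0x01};       // raw vendor-defined payload

        nnhal::ExecutionConfig config;
        config.measureTiming = true;
        config.loopTimeoutDurationNs = -1;  // -1 selects DEFAULT_LOOP_TIMEOUT_DURATION_NS
        config.executionHints.push_back(hint);
        // extensionNameToPrefix would map prefix 0x7AAA to the extension name; left empty here.
        return config;
    }
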
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
index 20109bd..9f70a53 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/Extension.aidl
@@ -20,6 +20,10 @@
 
 /**
  * Information about an extension.
+ *
+ * The extension can provide zero or more operation types (which are not enumerated), zero or more
+ * operand types (which are enumerated in {@link Extension::operandTypes}), and compilation and
+ * execution hints (which are not enumerated).
  */
 @VintfStability
 parcelable Extension {
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
index 29be93f..6c296e0 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.aidl
@@ -17,7 +17,8 @@
 package android.hardware.neuralnetworks;
 
 /**
- * The mapping between extension names and prefixes of operand and operation type values.
+ * The mapping between extension names and prefixes of values such as operand and operation
+ * types, and of tokens in {@link TokenValuePair}.
  *
  * An operand or operation whose numeric type value is above {@link IDevice::OPERAND_TYPE_BASE_MAX}
  * or {@link IDevice::OPERATION_TYPE_BASE_MAX} respectively should be interpreted as an extension
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
index b089c49..a05a7fb 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IBurst.aidl
@@ -17,6 +17,7 @@
 package android.hardware.neuralnetworks;
 
 import android.hardware.neuralnetworks.ErrorStatus;
+import android.hardware.neuralnetworks.ExecutionConfig;
 import android.hardware.neuralnetworks.ExecutionResult;
 import android.hardware.neuralnetworks.Request;
 
@@ -68,6 +69,8 @@
      *
      * Only a single execution on a given burst object may be active at any time.
      *
+     * Also see {@link IBurst::executeSynchronouslyWithConfig}.
+     *
      * @param request The input and output information on which the prepared model is to be
      *                executed.
      * @param memoryIdentifierTokens A list of tokens where each token is a non-negative number
@@ -117,4 +120,13 @@
      *     - INVALID_ARGUMENT if one of the input arguments is invalid
      */
     void releaseMemoryResource(in long memoryIdentifierToken);
+
+    /**
+     * For detailed specification, please refer to {@link IBurst::executeSynchronously}. The
+     * difference between the two methods is that executeSynchronouslyWithConfig takes {@link
+     * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+     * more configuration parameters than are passed to executeSynchronously.
+     */
+    ExecutionResult executeSynchronouslyWithConfig(in Request request,
+            in long[] memoryIdentifierTokens, in ExecutionConfig config, in long deadlineNs);
 }
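
A hedged caller-side sketch of the new burst entry point, also assuming the NDK-backend bindings;
runBurstOnce is an illustrative helper, and memoryIdentifierTokens must follow the same contract
documented for executeSynchronously:

    #include <cstdint>
    #include <memory>
    #include <optional>
    #include <vector>

    #include <aidl/android/hardware/neuralnetworks/IBurst.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // One synchronous burst execution through the config-based method.
    std::optional<nnhal::ExecutionResult> runBurstOnce(
            const std::shared_ptr<nnhal::IBurst>& burst, const nnhal::Request& request,
            const std::vector<int64_t>& memoryIdentifierTokens,
            const nnhal::ExecutionConfig& config, int64_t deadlineNs) {
        nnhal::ExecutionResult result;
        const ndk::ScopedAStatus status = burst->executeSynchronouslyWithConfig(
                request, memoryIdentifierTokens, config, deadlineNs, &result);
        if (!status.isOk()) return std::nullopt;  // transport error or ServiceSpecificException
        return result;
    }
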
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
index 72e2623..821b9fe 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IDevice.aidl
@@ -28,6 +28,7 @@
 import android.hardware.neuralnetworks.IPreparedModelParcel;
 import android.hardware.neuralnetworks.Model;
 import android.hardware.neuralnetworks.NumberOfCacheFiles;
+import android.hardware.neuralnetworks.PrepareModelConfig;
 import android.hardware.neuralnetworks.Priority;
 
 /**
@@ -148,7 +149,7 @@
      *
      * If the device reports that caching is not supported, the user may avoid calling
      * IDevice::prepareModelFromCache or providing cache file descriptors to
-     * IDevice::prepareModel.
+     * IDevice::prepareModel or IDevice::prepareModelWithConfig.
      *
      * @return NumberOfCacheFiles structure indicating how many files for model and data cache the
      *     driver needs to cache a single prepared model. It must be less than or equal to
@@ -302,6 +303,8 @@
      *
      * Multiple threads may call prepareModel on the same model concurrently.
      *
+     * Also see {@link IDevice::prepareModelWithConfig}.
+     *
      * @param model The model to be prepared for execution.
      * @param preference Indicates the intended execution behavior of a prepared model.
      * @param priority The priority of the prepared model relative to other prepared models owned by
@@ -403,17 +406,17 @@
      * @param modelCache A vector of file descriptors for the security-sensitive cache. The length
      *                   of the vector must match the numModelCache returned from
      *                   getNumberOfCacheFilesNeeded. The cache file descriptors will be provided in
-     *                   the same order as with prepareModel.
+     *                   the same order as with prepareModel or prepareModelWithConfig.
      * @param dataCache A vector of file descriptors for the constants' cache. The length of the
      *                  vector must match the numDataCache returned from
      *                  getNumberOfCacheFilesNeeded. The cache file descriptors will be provided in
-     *                  the same order as with prepareModel.
+     *                  the same order as with prepareModel or prepareModelWithConfig.
      * @param token A caching token of length BYTE_SIZE_OF_CACHE_TOKEN identifying the prepared
      *              model. It is the same token provided when saving the cache files with
-     *              prepareModel. Tokens should be chosen to have a low rate of collision for a
-     *              particular application. The driver cannot detect a collision; a collision will
-     *              result in a failed execution or in a successful execution that produces
-     *              incorrect output values.
+     *              prepareModel or prepareModelWithConfig. Tokens should be chosen to have a low
+     *              rate of collision for a particular application. The driver cannot detect a
+     *              collision; a collision will result in a failed execution or in a successful
+     *              execution that produces incorrect output values.
      * @param callback A callback object used to return the error status of preparing the model for
      *                 execution and the prepared model if successful, nullptr otherwise. The
      *                 callback object's notify function must be called exactly once, even if the
@@ -429,4 +432,28 @@
     void prepareModelFromCache(in long deadlineNs, in ParcelFileDescriptor[] modelCache,
             in ParcelFileDescriptor[] dataCache, in byte[] token,
             in IPreparedModelCallback callback);
+
+    /**
+     * For detailed specification, please refer to {@link IDevice::prepareModel}. The only
+     * difference between the two methods is that prepareModelWithConfig takes {@link
+     * PrepareModelConfig} instead of standalone configuration parameters, which allows
+     * vendor-specific compilation metadata to be passed.
+     *
+     * @param model The model to be prepared for execution.
+     * @param config Configuration parameters to prepare the model.
+     * @param callback A callback object used to return the error status of preparing the model for
+     *                 execution and the prepared model if successful, nullptr otherwise. The
+     *                 callback object's notify function must be called exactly once, even if the
+     *                 model could not be prepared.
+     * @throws ServiceSpecificException with one of the following ErrorStatus values:
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if one of the input arguments related to preparing the model is
+     *       invalid
+     *     - MISSED_DEADLINE_* if the preparation is aborted because the model cannot be prepared by
+     *       the deadline
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+     */
+    void prepareModelWithConfig(
+            in Model model, in PrepareModelConfig config, in IPreparedModelCallback callback);
 }
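
A hypothetical driver-side sketch of the hint handling prepareModelWithConfig permits (keep the
hints you understand, ignore the rest); filterSupportedHints and supportedPrefix are illustrative
names only:

    #include <cstdint>
    #include <vector>

    #include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    // Keeps only the compilation hints whose token carries the one extension prefix this
    // driver recognizes; dropping the others is allowed by the spec.
    std::vector<nnhal::TokenValuePair> filterSupportedHints(
            const nnhal::PrepareModelConfig& config, int32_t supportedPrefix) {
        std::vector<nnhal::TokenValuePair> supported;
        for (const nnhal::TokenValuePair& hint : config.compilationHints) {
            // The high bits of the token encode the extension prefix (see TokenValuePair).
            if ((hint.token >> 16) == supportedPrefix) {  // 16 == EXTENSION_TYPE_LOW_BITS_TYPE
                supported.push_back(hint);
            }
        }
        return supported;
    }
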
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
index 79053e5..949804e 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/IPreparedModel.aidl
@@ -18,6 +18,7 @@
 
 import android.hardware.common.NativeHandle;
 import android.hardware.neuralnetworks.ErrorStatus;
+import android.hardware.neuralnetworks.ExecutionConfig;
 import android.hardware.neuralnetworks.ExecutionResult;
 import android.hardware.neuralnetworks.FencedExecutionResult;
 import android.hardware.neuralnetworks.IBurst;
@@ -68,6 +69,8 @@
      * Any number of calls to the execute* functions, in any combination, may be made concurrently,
      * even on the same IPreparedModel object.
      *
+     * Also see {@link IPreparedModel::executeSynchronouslyWithConfig}.
+     *
      * @param request The input and output information on which the prepared model is to be
      *                executed.
      * @param measure Specifies whether or not to measure duration of the execution. The duration
@@ -134,6 +137,8 @@
      * Any number of calls to the execute* functions, in any combination, may be made concurrently,
      * even on the same IPreparedModel object.
      *
+     * Also see {@link IPreparedModel::executeFencedWithConfig}.
+     *
      * @param request The input and output information on which the prepared model is to be
      *                executed. The outputs in the request must have fully specified dimensions.
      * @param waitFor A vector of sync fence file descriptors. Execution must not start until all
@@ -201,15 +206,7 @@
      *
      * @param request The input and output information on which the prepared model is to be
      *                executed.
-     * @param measure Specifies whether or not to measure duration of the execution.
-     * @param loopTimeoutDurationNs The maximum amount of time in nanoseconds that should be spent
-     *                              executing a {@link OperationType::WHILE} operation. If a loop
-     *                              condition model does not output false within this duration, the
-     *                              computation performed on the returned reusable execution object
-     *                              must be aborted. If -1 is provided, the maximum amount
-     *                              of time is {@link DEFAULT_LOOP_TIMEOUT_DURATION_NS}. Other
-     *                              negative values are invalid. When provided, the duration must
-     *                              not exceed {@link MAXIMUM_LOOP_TIMEOUT_DURATION_NS}.
+     * @param config Specifies the execution configuration parameters.
      * @return execution An IExecution object representing a reusable execution that has been
      *                   specialized for a fixed request.
      * @throws ServiceSpecificException with one of the following ErrorStatus values:
@@ -218,6 +215,64 @@
      *     - INVALID_ARGUMENT if one of the input arguments is invalid
      *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
      */
-    IExecution createReusableExecution(
-            in Request request, in boolean measureTiming, in long loopTimeoutDurationNs);
+    IExecution createReusableExecution(in Request request, in ExecutionConfig config);
+
+    /**
+     * For detailed specification, please refer to {@link IPreparedModel::executeSynchronously}. The
+     * difference between the two methods is that executeSynchronouslyWithConfig takes {@link
+     * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+     * more configuration parameters than are passed to executeSynchronously.
+     *
+     * @param request The input and output information on which the prepared model is to be
+     *                executed.
+     * @param config Specifies the execution configuration parameters.
+     * @param deadlineNs The time by which the execution is expected to complete. The time is
+     *                   measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
+     *                   &ts) or ::android::base::boot_clock). If the execution cannot be finished
+     *                   by the deadline, the execution may be aborted. Passing -1 means the
+     *                   deadline is omitted. Other negative values are invalid.
+     * @return ExecutionResult parcelable, containing the status of the execution, output shapes and
+     *     timing information.
+     *     - MISSED_DEADLINE_* if the execution is aborted because it cannot be completed by the
+     *       deadline
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+     */
+    ExecutionResult executeSynchronouslyWithConfig(
+            in Request request, in ExecutionConfig config, in long deadlineNs);
+
+    /**
+     * For detailed specification, please refer to {@link IPreparedModel::executeFenced}. The
+     * difference between the two methods is that executeFencedWithConfig takes {@link
+     * ExecutionConfig} instead of a list of configuration parameters, and ExecutionConfig contains
+     * more configuration parameters than are passed to executeFenced.
+     *
+     * @param request The input and output information on which the prepared model is to be
+     *                executed. The outputs in the request must have fully specified dimensions.
+     * @param waitFor A vector of sync fence file descriptors. Execution must not start until all
+     *                sync fences have been signaled.
+     * @param config Specifies the execution configuration parameters.
+     * @param deadlineNs The time by which the execution is expected to complete. The time is
+     *                   measured in nanoseconds since boot (as from clock_gettime(CLOCK_BOOTTIME,
+     *                   &ts) or ::android::base::boot_clock). If the execution cannot be finished
+     *                   by the deadline, the execution may be aborted. Passing -1 means the
+     *                   deadline is omitted. Other negative values are invalid.
+     * @param durationNs The length of time in nanoseconds within which the execution is expected to
+     *                   complete after all sync fences in waitFor are signaled. If the execution
+     *                   cannot be finished within the duration, the execution may be aborted.
+     *                   Passing -1 means the duration is omitted. Other negative values are
+     *                   invalid.
+     * @return The FencedExecutionResult parcelable, containing IFencedExecutionCallback and the
+     *         sync fence.
+     * @throws ServiceSpecificException with one of the following ErrorStatus values:
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if one of the input arguments is invalid, including fences in error
+     *       states.
+     *     - MISSED_DEADLINE_* if the execution is aborted because it cannot be completed by the
+     *       deadline
+     *     - RESOURCE_EXHAUSTED_* if the task was aborted by the driver
+     */
+    FencedExecutionResult executeFencedWithConfig(in Request request,
+            in ParcelFileDescriptor[] waitFor, in ExecutionConfig config, in long deadlineNs,
+            in long durationNs);
 }
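
A hedged caller-side sketch of executeFencedWithConfig, again assuming the NDK-backend bindings;
runFenced is an illustrative helper that starts an execution gated on no fences, with neither a
deadline nor a post-fence duration limit:

    #include <memory>
    #include <vector>

    #include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
    #include <android/binder_auto_utils.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    ndk::ScopedAStatus runFenced(const std::shared_ptr<nnhal::IPreparedModel>& preparedModel,
                                 const nnhal::Request& request,
                                 const nnhal::ExecutionConfig& config,
                                 nnhal::FencedExecutionResult* outResult) {
        // On success, *outResult carries the sync fence to wait on and a callback that
        // reports the final error status and timing (see FencedExecutionResult).
        std::vector<ndk::ScopedFileDescriptor> waitFor;  // empty: execution may start immediately
        return preparedModel->executeFencedWithConfig(request, waitFor, config,
                                                      /*deadlineNs=*/-1, /*durationNs=*/-1,
                                                      outResult);
    }
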
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl
new file mode 100644
index 0000000..96df968
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/PrepareModelConfig.aidl
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+import android.hardware.neuralnetworks.ExecutionPreference;
+import android.hardware.neuralnetworks.ExtensionNameAndPrefix;
+import android.hardware.neuralnetworks.Priority;
+import android.hardware.neuralnetworks.TokenValuePair;
+
+/**
+ * A type that is used to represent all configuration needed to
+ * prepare a model.
+ */
+@VintfStability
+parcelable PrepareModelConfig {
+    /**
+     * Indicates the intended execution behavior of a prepared model.
+     */
+    ExecutionPreference preference;
+    /**
+     * The priority of the prepared model relative to other prepared
+     * models owned by the client.
+     */
+    Priority priority;
+    /**
+     * The time by which the model is expected to be prepared. The
+     * time is measured in nanoseconds since boot (as from
+     * clock_gettime(CLOCK_BOOTTIME, &ts) or
+     * ::android::base::boot_clock). If the model cannot be prepared
+     * by the deadline, the preparation may be aborted. Passing -1
+     * means the deadline is omitted. Other negative values are
+     * invalid.
+     */
+    long deadlineNs;
+    /**
+     * A vector of file descriptors for the security-sensitive cache.
+     * The length of the vector must either be 0 indicating that
+     * caching information is not provided, or match the
+     * numModelCache returned from IDevice::getNumberOfCacheFilesNeeded. The
+     * cache file descriptors will be provided in the same order when
+     * retrieving the preparedModel from cache files with
+     * IDevice::prepareModelFromCache.
+     */
+    ParcelFileDescriptor[] modelCache;
+    /**
+     * A vector of file descriptors for the constants' cache. The
+     * length of the vector must either be 0 indicating that caching
+     * information is not provided, or match the numDataCache
+     * returned from IDevice::getNumberOfCacheFilesNeeded. The cache file
+     * descriptors will be provided in the same order when retrieving
+     * the preparedModel from cache files with IDevice::prepareModelFromCache.
+     */
+    ParcelFileDescriptor[] dataCache;
+    /**
+     * A caching token of length IDevice::BYTE_SIZE_OF_CACHE_TOKEN identifying
+     * the prepared model. The same token will be provided when
+     * retrieving the prepared model from the cache files with
+     * IDevice::prepareModelFromCache.  Tokens should be chosen to have a low
+     * rate of collision for a particular application. The driver
+     * cannot detect a collision; a collision will result in a failed
+     * execution or in a successful execution that produces incorrect
+     * output values. If both modelCache and dataCache are empty
+     * indicating that caching information is not provided, this
+     * token must be ignored.
+     */
+    byte[] cacheToken;
+    /**
+     * A vector of token / value pairs that represent vendor-specific
+     * compilation hints or metadata. The provided TokenValuePairs must not
+     * contain the same token twice. The driver must validate the
+     * data and ignore invalid hints. It is up to the driver to
+     * decide whether to respect the provided hints or not.
+     */
+    TokenValuePair[] compilationHints;
+    /**
+     * The mapping between extension names and prefixes of token values.
+     * The driver must ignore the corresponding compilation hint if
+     * the extension is not supported.
+     */
+    ExtensionNameAndPrefix[] extensionNameToPrefix;
+}
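
A minimal sketch of filling in PrepareModelConfig, assuming the NDK-backend bindings;
makePrepareModelConfig and the hint token are illustrative, and the empty modelCache / dataCache
mean the cacheToken is ignored, as documented above:

    #include <cstdint>
    #include <vector>

    #include <aidl/android/hardware/neuralnetworks/IDevice.h>
    #include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>

    namespace nnhal = aidl::android::hardware::neuralnetworks;

    nnhal::PrepareModelConfig makePrepareModelConfig() {
        nnhal::PrepareModelConfig config;
        config.preference = nnhal::ExecutionPreference::SUSTAINED_SPEED;
        config.priority = nnhal::Priority::MEDIUM;
        config.deadlineNs = -1;  // no preparation deadline
        config.cacheToken = std::vector<uint8_t>(nnhal::IDevice::BYTE_SIZE_OF_CACHE_TOKEN, 0);

        nnhal::TokenValuePair hint;
        hint.token = 0x7AAA0001;  // illustrative compilation hint token
        hint.value = {0x01};
        config.compilationHints.push_back(hint);
        return config;
    }
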
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl
new file mode 100644
index 0000000..ec665b4
--- /dev/null
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/TokenValuePair.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks;
+
+/**
+ * A type that is used to represent a token / byte array data pair.
+ */
+@VintfStability
+parcelable TokenValuePair {
+    /**
+     * A 32-bit integer token. The token is created by combining the
+     * extension prefix and enum defined within the extension.
+     * The low {@link IDevice::EXTENSION_TYPE_LOW_BITS_TYPE} bits of the value
+     * correspond to the hint within the extension and the high
+     * {@link IDevice::EXTENSION_TYPE_HIGH_BITS_PREFIX} bits encode the "prefix", which maps
+     * uniquely to the extension name. The sign bit is always 0.
+     *
+     * For example, if a token value is 0x7AAA000B and the corresponding
+     * {@link ExtensionNameAndPrefix} contains an entry with prefix=0x7AAA and
+     * name="vendor.test.test_extension", then the token should be interpreted as the hint
+     * 0x000B of the extension named vendor.test.test_extension.
+     */
+    int token;
+    /**
+     * A byte array containing the raw data.
+     */
+    byte[] value;
+}
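
A worked example of the token layout described above, assuming the usual split of 16 low bits for
the hint id (IDevice::EXTENSION_TYPE_LOW_BITS_TYPE) and 15 high bits for the extension prefix;
makeExtensionToken is an illustrative helper:

    #include <cstdint>

    // Mirrors IDevice::EXTENSION_TYPE_LOW_BITS_TYPE.
    constexpr int32_t kExtensionTypeLowBitsType = 16;

    // Prefix 0x7AAA combined with hint 0x000B yields the token 0x7AAA000B from the example above.
    constexpr int32_t makeExtensionToken(int32_t prefix, int32_t hintId) {
        return (prefix << kExtensionTypeLowBitsType) | hintId;
    }
    static_assert(makeExtensionToken(0x7AAA, 0x000B) == 0x7AAA000B, "matches the documented example");
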
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
index 0cc78d4..f2e6e75 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -86,10 +86,12 @@
                 GUARDED_BY(mMutex);
     };
 
+    // featureLevel is for testing purposes.
     static nn::GeneralResult<std::shared_ptr<const Burst>> create(
-            std::shared_ptr<aidl_hal::IBurst> burst);
+            std::shared_ptr<aidl_hal::IBurst> burst, nn::Version featureLevel);
 
-    Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst);
+    Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst,
+          nn::Version featureLevel);
 
     // See IBurst::cacheMemory for information.
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
@@ -97,23 +99,29 @@
     // See IBurst::execute for information.
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     // See IBurst::createReusableExecution for information.
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
             const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
             bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
             const hal::utils::RequestRelocation& relocation) const;
 
   private:
     mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
     const std::shared_ptr<aidl_hal::IBurst> kBurst;
     const std::shared_ptr<MemoryCache> kMemoryCache;
+    const nn::Version kFeatureLevel;
 };
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 477b311..af58715 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -46,6 +46,10 @@
 #include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
 #include <aidl/android/hardware/neuralnetworks/Timing.h>
 
+#ifdef NN_AIDL_V4_OR_ABOVE
+#include <aidl/android/hardware/neuralnetworks/TokenValuePair.h>
+#endif  // NN_AIDL_V4_OR_ABOVE
+
 #include <android/binder_auto_utils.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
@@ -74,7 +78,7 @@
         const aidl_hal::SymmPerChannelQuantParams& symmPerChannelQuantParams);
 GeneralResult<Operation> unvalidatedConvert(const aidl_hal::Operation& operation);
 GeneralResult<Model> unvalidatedConvert(const aidl_hal::Model& model);
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
         const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix);
 GeneralResult<Model::OperandValues> unvalidatedConvert(const std::vector<uint8_t>& operandValues);
 GeneralResult<Model::Subgraph> unvalidatedConvert(const aidl_hal::Subgraph& subgraph);
@@ -97,6 +101,10 @@
         const aidl_hal::ExtensionOperandTypeInformation& operandTypeInformation);
 GeneralResult<SharedHandle> unvalidatedConvert(const ndk::ScopedFileDescriptor& handle);
 
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<TokenValuePair> unvalidatedConvert(const aidl_hal::TokenValuePair& tokenValuePair);
+#endif  // NN_AIDL_V4_OR_ABOVE
+
 GeneralResult<std::vector<Operation>> unvalidatedConvert(
         const std::vector<aidl_hal::Operation>& operations);
 
@@ -116,6 +124,14 @@
 
 GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
 GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
+GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+        const std::vector<aidl_hal::ExtensionNameAndPrefix>& extensionNameAndPrefix);
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<std::vector<TokenValuePair>> convert(
+        const std::vector<aidl_hal::TokenValuePair>& metaData);
+#endif  // NN_AIDL_V4_OR_ABOVE
+
 GeneralResult<std::vector<OutputShape>> convert(
         const std::vector<aidl_hal::OutputShape>& outputShapes);
 GeneralResult<std::vector<SharedHandle>> convert(
@@ -152,7 +168,7 @@
 nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(
         const nn::Model::OperandValues& operandValues);
 nn::GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameToPrefix);
+        const nn::ExtensionNameAndPrefix& extensionNameToPrefix);
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
 nn::GeneralResult<Priority> unvalidatedConvert(const nn::Priority& priority);
 nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request);
@@ -166,6 +182,10 @@
 nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
 nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);
 
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<TokenValuePair> unvalidatedConvert(const nn::TokenValuePair& tokenValuePair);
+#endif  // NN_AIDL_V4_OR_ABOVE
+
 nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
 nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
 nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
@@ -190,6 +210,13 @@
 nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
         const std::vector<nn::SyncFence>& syncFences);
 nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);
+nn::GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix);
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<std::vector<TokenValuePair>> convert(
+        const std::vector<nn::TokenValuePair>& metaData);
+#endif  // NN_AIDL_V4_OR_ABOVE
 
 nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
 
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
index d558f66..615c6de 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Device.h
@@ -42,6 +42,7 @@
     struct PrivateConstructorTag {};
 
   public:
+    // featureLevel is exposed as a parameter so tests can target a specific HAL feature level.
     static nn::GeneralResult<std::shared_ptr<const Device>> create(
             std::string name, std::shared_ptr<aidl_hal::IDevice> device, nn::Version featureLevel);
 
@@ -67,8 +68,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
index 205d428..cacdc26 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
@@ -63,7 +63,9 @@
 
 #ifdef NN_AIDL_V4_OR_ABOVE
 #include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>
 #include <aidl/android/hardware/neuralnetworks/IExecution.h>
+#include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>
 #endif  // NN_AIDL_V4_OR_ABOVE
 
 namespace android::nn {
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
index e66507a..9375c1d 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/InvalidDevice.h
@@ -53,6 +53,9 @@
             const std::vector<ndk::ScopedFileDescriptor>& dataCache,
             const std::vector<uint8_t>& token,
             const std::shared_ptr<IPreparedModelCallback>& callback) override;
+    ndk::ScopedAStatus prepareModelWithConfig(
+            const Model& model, const PrepareModelConfig& config,
+            const std::shared_ptr<IPreparedModelCallback>& callback) override;
     ndk::ScopedAStatus prepareModelFromCache(
             int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
             const std::vector<ndk::ScopedFileDescriptor>& dataCache,
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 24cd681..cb6a85b 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -40,6 +40,7 @@
     struct PrivateConstructorTag {};
 
   public:
+    // featureLevel is exposed as a parameter so tests can target a specific HAL feature level.
     static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
             std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel);
 
@@ -49,18 +50,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
@@ -68,6 +74,8 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
             const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
             const hal::utils::RequestRelocation& relocation) const;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -75,6 +83,8 @@
                           const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
                           int64_t deadline, int64_t loopTimeoutDuration,
                           int64_t timeoutDurationAfterFence,
+                          const std::vector<nn::TokenValuePair>& hints,
+                          const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
                           const hal::utils::RequestRelocation& relocation) const;
 
   private:
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index fb00b26..6c7aa88 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -43,12 +43,16 @@
     static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
             std::shared_ptr<const Burst> burst, Request request,
             std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
             hal::utils::RequestRelocation relocation,
             std::vector<Burst::OptionalCacheHold> cacheHolds);
 
     BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
                    std::vector<int64_t> memoryIdentifierTokens, bool measure,
-                   int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
+                   int64_t loopTimeoutDuration, const std::vector<nn::TokenValuePair>& hints,
+                   const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
+                   hal::utils::RequestRelocation relocation,
                    std::vector<Burst::OptionalCacheHold> cacheHolds);
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
@@ -64,6 +68,8 @@
     const std::vector<int64_t> kMemoryIdentifierTokens;
     const bool kMeasure;
     const int64_t kLoopTimeoutDuration;
+    const std::vector<nn::TokenValuePair> kHints;
+    const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix;
     const hal::utils::RequestRelocation kRelocation;
     const std::vector<Burst::OptionalCacheHold> kCacheHolds;
 };
@@ -149,17 +155,20 @@
 }
 
 nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
-        std::shared_ptr<aidl_hal::IBurst> burst) {
+        std::shared_ptr<aidl_hal::IBurst> burst, nn::Version featureLevel) {
     if (burst == nullptr) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                << "aidl_hal::utils::Burst::create must have non-null burst";
     }
 
-    return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst));
+    return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst), featureLevel);
 }
 
-Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst)
-    : kBurst(std::move(burst)), kMemoryCache(std::make_shared<MemoryCache>(kBurst)) {
+Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst,
+             nn::Version featureLevel)
+    : kBurst(std::move(burst)),
+      kMemoryCache(std::make_shared<MemoryCache>(kBurst)),
+      kFeatureLevel(featureLevel) {
     CHECK(kBurst != nullptr);
 }
 
@@ -170,8 +179,9 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -200,14 +210,14 @@
         memoryIdentifierTokens.push_back(-1);
     }
     CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
-
     return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
-                           aidlLoopTimeoutDuration, relocation);
+                           aidlLoopTimeoutDuration, hints, extensionNameToPrefix, relocation);
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
         const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
-        int64_t deadline, int64_t loopTimeoutDuration,
+        int64_t deadline, int64_t loopTimeoutDuration, const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
         const hal::utils::RequestRelocation& relocation) const {
     // Ensure that at most one execution is in flight at any given time.
     const bool alreadyInFlight = mExecutionInFlight.test_and_set();
@@ -221,9 +231,21 @@
     }
 
     ExecutionResult executionResult;
-    const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
-                                                  deadline, loopTimeoutDuration, &executionResult);
-    HANDLE_ASTATUS(ret) << "execute failed";
+    if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+        auto aidlHints = NN_TRY(convert(hints));
+        auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+        const auto ret = kBurst->executeSynchronouslyWithConfig(
+                request, memoryIdentifierTokens,
+                {measure, loopTimeoutDuration, std::move(aidlHints),
+                 std::move(aidlExtensionPrefix)},
+                deadline, &executionResult);
+        HANDLE_ASTATUS(ret) << "executeSynchronouslyWithConfig failed";
+    } else {
+        const auto ret =
+                kBurst->executeSynchronously(request, memoryIdentifierTokens, measure, deadline,
+                                             loopTimeoutDuration, &executionResult);
+        HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+    }
     if (!executionResult.outputSufficientSize) {
         auto canonicalOutputShapes =
                 nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
@@ -241,7 +263,9 @@
 
 nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -272,12 +296,15 @@
 
     return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
                                   std::move(memoryIdentifierTokens), aidlMeasure,
-                                  aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
+                                  aidlLoopTimeoutDuration, hints, extensionNameToPrefix,
+                                  std::move(relocation), std::move(holds));
 }
 
 nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
         std::shared_ptr<const Burst> burst, Request request,
         std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
         hal::utils::RequestRelocation relocation,
         std::vector<Burst::OptionalCacheHold> cacheHolds) {
     if (burst == nullptr) {
@@ -286,13 +313,15 @@
 
     return std::make_shared<const BurstExecution>(
             PrivateConstructorTag{}, std::move(burst), std::move(request),
-            std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
-            std::move(cacheHolds));
+            std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, hints,
+            extensionNameToPrefix, std::move(relocation), std::move(cacheHolds));
 }
 
 BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
                                Request request, std::vector<int64_t> memoryIdentifierTokens,
                                bool measure, int64_t loopTimeoutDuration,
+                               const std::vector<nn::TokenValuePair>& hints,
+                               const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
                                hal::utils::RequestRelocation relocation,
                                std::vector<Burst::OptionalCacheHold> cacheHolds)
     : kBurst(std::move(burst)),
@@ -300,6 +329,8 @@
       kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
       kMeasure(measure),
       kLoopTimeoutDuration(loopTimeoutDuration),
+      kHints(hints),
+      kExtensionNameToPrefix(extensionNameToPrefix),
       kRelocation(std::move(relocation)),
       kCacheHolds(std::move(cacheHolds)) {}
 
@@ -307,7 +338,8 @@
         const nn::OptionalTimePoint& deadline) const {
     const auto aidlDeadline = NN_TRY(convert(deadline));
     return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
-                                   kLoopTimeoutDuration, kRelocation);
+                                   kLoopTimeoutDuration, kHints, kExtensionNameToPrefix,
+                                   kRelocation);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 113d2da..eb28db7 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -302,9 +302,9 @@
     };
 }
 
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
         const aidl_hal::ExtensionNameAndPrefix& extensionNameAndPrefix) {
-    return Model::ExtensionNameAndPrefix{
+    return ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
             .prefix = extensionNameAndPrefix.prefix,
     };
@@ -506,6 +506,12 @@
     return std::make_shared<const Handle>(std::move(duplicatedFd));
 }
 
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<TokenValuePair> unvalidatedConvert(const aidl_hal::TokenValuePair& tokenValuePair) {
+    return TokenValuePair{.token = tokenValuePair.token, .value = tokenValuePair.value};
+}
+#endif  // NN_AIDL_V4_OR_ABOVE
+
 GeneralResult<Capabilities> convert(const aidl_hal::Capabilities& capabilities) {
     return validatedConvert(capabilities);
 }
@@ -562,6 +568,17 @@
 GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories) {
     return validatedConvert(memories);
 }
+GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+        const std::vector<aidl_hal::ExtensionNameAndPrefix>& extensionNameAndPrefix) {
+    return unvalidatedConvert(extensionNameAndPrefix);
+}
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+GeneralResult<std::vector<TokenValuePair>> convert(
+        const std::vector<aidl_hal::TokenValuePair>& metaData) {
+    return validatedConvert(metaData);
+}
+#endif  // NN_AIDL_V4_OR_ABOVE
 
 GeneralResult<std::vector<OutputShape>> convert(
         const std::vector<aidl_hal::OutputShape>& outputShapes) {
@@ -942,7 +959,7 @@
 }
 
 nn::GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+        const nn::ExtensionNameAndPrefix& extensionNameToPrefix) {
     return ExtensionNameAndPrefix{
             .name = extensionNameToPrefix.name,
             .prefix = extensionNameToPrefix.prefix,
@@ -1055,6 +1072,11 @@
     return Extension{.name = extension.name,
                      .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
 }
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<TokenValuePair> unvalidatedConvert(const nn::TokenValuePair& tokenValuePair) {
+    return TokenValuePair{.token = tokenValuePair.token, .value = tokenValuePair.value};
+}
+#endif  // NN_AIDL_V4_OR_ABOVE
 
 nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
     return validatedConvert(cacheToken);
@@ -1134,6 +1156,17 @@
         const std::vector<nn::SyncFence>& syncFences) {
     return validatedConvert(syncFences);
 }
+nn::GeneralResult<std::vector<ExtensionNameAndPrefix>> convert(
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) {
+    return unvalidatedConvert(extensionNameToPrefix);
+}
+
+#ifdef NN_AIDL_V4_OR_ABOVE
+nn::GeneralResult<std::vector<TokenValuePair>> convert(
+        const std::vector<nn::TokenValuePair>& metaData) {
+    return validatedConvert(metaData);
+}
+#endif  // NN_AIDL_V4_OR_ABOVE
 
 nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
     return validatedConvert(extensions);
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index bad10ed..f3f4fdb 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -215,7 +215,9 @@
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
@@ -225,17 +227,28 @@
     const auto aidlPreference = NN_TRY(convert(preference));
     const auto aidlPriority = NN_TRY(convert(priority));
     const auto aidlDeadline = NN_TRY(convert(deadline));
-    const auto aidlModelCache = NN_TRY(convert(modelCache));
-    const auto aidlDataCache = NN_TRY(convert(dataCache));
+    auto aidlModelCache = NN_TRY(convert(modelCache));
+    auto aidlDataCache = NN_TRY(convert(dataCache));
     const auto aidlToken = NN_TRY(convert(token));
 
     const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
     const auto scoped = kDeathHandler.protectCallback(cb.get());
 
+    if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+        auto aidlHints = NN_TRY(convert(hints));
+        auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+        const auto ret = kDevice->prepareModelWithConfig(
+                aidlModel,
+                {aidlPreference, aidlPriority, aidlDeadline, std::move(aidlModelCache),
+                 std::move(aidlDataCache), aidlToken, std::move(aidlHints),
+                 std::move(aidlExtensionPrefix)},
+                cb);
+        HANDLE_ASTATUS(ret) << "prepareModel failed";
+        return cb->get();
+    }
     const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
                                            aidlModelCache, aidlDataCache, aidlToken, cb);
     HANDLE_ASTATUS(ret) << "prepareModel failed";
-
     return cb->get();
 }
 
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index c4add63..2fd88af 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -63,7 +63,7 @@
 ExecutionWithCachedRequest::compute(const nn::OptionalTimePoint& deadline) const {
     const auto aidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
-                                           kRelocation);
+                                           {}, {}, kRelocation);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -73,9 +73,9 @@
     const auto aidlWaitFor = NN_TRY(convert(waitFor));
     const auto aidlDeadline = NN_TRY(convert(deadline));
     const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
-    return kPreparedModel->executeFencedInternal(kRequest, aidlWaitFor, kMeasure, aidlDeadline,
-                                                 kLoopTimeoutDuration,
-                                                 aidlTimeoutDurationAfterFence, kRelocation);
+    return kPreparedModel->executeFencedInternal(
+            kRequest, aidlWaitFor, kMeasure, aidlDeadline, kLoopTimeoutDuration,
+            aidlTimeoutDurationAfterFence, {}, {}, kRelocation);
 }
 
 nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
diff --git a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
index c9d9955..33270ff 100644
--- a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
+++ b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
@@ -167,6 +167,31 @@
     return ndk::ScopedAStatus::ok();
 }
 
+ndk::ScopedAStatus InvalidDevice::prepareModelWithConfig(
+        const Model& model, const PrepareModelConfig& config,
+        const std::shared_ptr<IPreparedModelCallback>& callback) {
+    if (!utils::valid(config.extensionNameToPrefix)) {
+        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+        return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Invalid extensionNameToPrefix");
+    }
+    for (const auto& hint : config.compilationHints) {
+        auto result = std::find_if(config.extensionNameToPrefix.begin(),
+                                   config.extensionNameToPrefix.end(),
+                                   [&hint](const ExtensionNameAndPrefix& extension) {
+                                       uint16_t prefix = static_cast<uint32_t>(hint.token) >>
+                                                         IDevice::EXTENSION_TYPE_LOW_BITS_TYPE;
+                                       return prefix == extension.prefix;
+                                   });
+        if (result == config.extensionNameToPrefix.end()) {
+            callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return toAStatus(ErrorStatus::INVALID_ARGUMENT,
+                             "Invalid token for compilation hints: " + std::to_string(hint.token));
+        }
+    }
+    return prepareModel(model, config.preference, config.priority, config.deadlineNs,
+                        config.modelCache, config.dataCache, config.cacheToken, callback);
+}
+
 ndk::ScopedAStatus InvalidDevice::prepareModelFromCache(
         int64_t /*deadline*/, const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/,
         const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/,
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 6d1de56..7e3a31c 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -128,8 +128,9 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -141,30 +142,46 @@
     const auto aidlMeasure = NN_TRY(convert(measure));
     const auto aidlDeadline = NN_TRY(convert(deadline));
     const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
-    return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
-                           relocation);
+    return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, hints,
+                           extensionNameToPrefix, relocation);
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
                                int64_t loopTimeoutDuration,
+                               const std::vector<nn::TokenValuePair>& hints,
+                               const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
                                const hal::utils::RequestRelocation& relocation) const {
     if (relocation.input) {
         relocation.input->flush();
     }
 
     ExecutionResult executionResult;
-    const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
-                                                          loopTimeoutDuration, &executionResult);
-    HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+    if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+        auto aidlHints = NN_TRY(convert(hints));
+        auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+        const auto ret = kPreparedModel->executeSynchronouslyWithConfig(
+                request,
+                {measure, loopTimeoutDuration, std::move(aidlHints),
+                 std::move(aidlExtensionPrefix)},
+                deadline, &executionResult);
+        HANDLE_ASTATUS(ret) << "executeSynchronouslyWithConfig failed";
+    } else {
+        const auto ret = kPreparedModel->executeSynchronously(
+                request, measure, deadline, loopTimeoutDuration, &executionResult);
+        HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+    }
     return handleExecutionResult(executionResult, relocation);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
-                             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-                             const nn::OptionalDuration& loopTimeoutDuration,
-                             const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const nn::OptionalDuration& timeoutDurationAfterFence,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -179,31 +196,45 @@
     const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
     const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
     return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
-                                 aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
-                                 relocation);
+                                 aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence, hints,
+                                 extensionNameToPrefix, relocation);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFencedInternal(const Request& request,
-                                     const std::vector<ndk::ScopedFileDescriptor>& waitFor,
-                                     bool measure, int64_t deadline, int64_t loopTimeoutDuration,
-                                     int64_t timeoutDurationAfterFence,
-                                     const hal::utils::RequestRelocation& relocation) const {
+PreparedModel::executeFencedInternal(
+        const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
+        int64_t deadline, int64_t loopTimeoutDuration, int64_t timeoutDurationAfterFence,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
+        const hal::utils::RequestRelocation& relocation) const {
     if (relocation.input) {
         relocation.input->flush();
     }
 
     FencedExecutionResult result;
-    const auto ret =
-            kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
-                                          timeoutDurationAfterFence, &result);
-    HANDLE_ASTATUS(ret) << "executeFenced failed";
+    if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+        auto aidlHints = NN_TRY(convert(hints));
+        auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+        const auto ret = kPreparedModel->executeFencedWithConfig(
+                request, waitFor,
+                {measure, loopTimeoutDuration, std::move(aidlHints),
+                 std::move(aidlExtensionPrefix)},
+                deadline, timeoutDurationAfterFence, &result);
+        HANDLE_ASTATUS(ret) << "executeFencedWithConfig failed";
+    } else {
+        const auto ret = kPreparedModel->executeFenced(request, waitFor, measure, deadline,
+                                                       loopTimeoutDuration,
+                                                       timeoutDurationAfterFence, &result);
+        HANDLE_ASTATUS(ret) << "executeFenced failed";
+    }
     return handleFencedExecutionResult(result, relocation);
 }
 
 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -217,8 +248,14 @@
 
     if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
         std::shared_ptr<IExecution> execution;
+        auto aidlHints = NN_TRY(convert(hints));
+        auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+
         const auto ret = kPreparedModel->createReusableExecution(
-                aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+                aidlRequest,
+                {aidlMeasure, aidlLoopTimeoutDuration, std::move(aidlHints),
+                 std::move(aidlExtensionPrefix)},
+                &execution);
         HANDLE_ASTATUS(ret) << "createReusableExecution failed";
         return Execution::create(std::move(execution), std::move(relocation));
     }
@@ -232,7 +269,7 @@
     std::shared_ptr<IBurst> burst;
     const auto ret = kPreparedModel->configureExecutionBurst(&burst);
     HANDLE_ASTATUS(ret) << "configureExecutionBurst failed";
-    return Burst::create(std::move(burst));
+    return Burst::create(std::move(burst), kFeatureLevel);
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
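
For context (not part of this change): on the service side, a vendor implementation of the new
*WithConfig entry points can typically unpack the config and reuse its existing execution path,
applying the hints it recognizes and skipping the rest. A minimal sketch with local stand-in
types follows; the real AIDL-generated ExecutionConfig type and its field names may differ.

    // Driver-side delegation sketch with stand-in types (hypothetical names).
    #include <cstdint>
    #include <vector>

    struct HintStandIn {
        int32_t token;
        std::vector<uint8_t> value;
    };

    struct ExecutionConfigStandIn {
        bool measureTiming = false;
        int64_t loopTimeoutDurationNs = 0;
        std::vector<HintStandIn> executionHints;
    };

    class PreparedModelStandIn {
      public:
        // New-style entry point: unpack the config and reuse the legacy path.
        int executeSynchronouslyWithConfig(const ExecutionConfigStandIn& config,
                                           int64_t deadlineNs) {
            for (const auto& hint : config.executionHints) {
                maybeApplyHint(hint);  // apply recognized hints, skip the rest
            }
            return executeSynchronously(config.measureTiming, deadlineNs,
                                        config.loopTimeoutDurationNs);
        }

      private:
        void maybeApplyHint(const HintStandIn& /*hint*/) {}
        int executeSynchronously(bool /*measure*/, int64_t /*deadlineNs*/,
                                 int64_t /*loopTimeoutNs*/) {
            return 0;  // 0 == success placeholder
        }
    };
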
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index fb13af8..73727b3 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -61,7 +61,6 @@
                                                 .powerUsage = std::numeric_limits<float>::max()};
 constexpr NumberOfCacheFiles kNumberOfCacheFiles = {.numModelCache = nn::kMaxNumberOfCacheFiles - 1,
                                                     .numDataCache = nn::kMaxNumberOfCacheFiles};
-
 constexpr auto makeStatusOk = [] { return ndk::ScopedAStatus::ok(); };
 
 std::shared_ptr<MockDevice> createMockDevice() {
@@ -124,6 +123,18 @@
     };
 }
 
+const std::vector<nn::TokenValuePair> kHints = {nn::TokenValuePair{.token = 0, .value = {1}}};
+const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix = {
+        nn::ExtensionNameAndPrefix{.name = "com.android.nn_test", .prefix = 1}};
+auto makePreparedModelWithConfigReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
+                                       const std::shared_ptr<MockPreparedModel>& preparedModel) {
+    return [launchStatus, returnStatus, preparedModel](
+                   const Model& /*model*/, const PrepareModelConfig& /*config*/,
+                   const std::shared_ptr<IPreparedModelCallback>& cb) -> ndk::ScopedAStatus {
+        return makePreparedModelReturnImpl(launchStatus, returnStatus, preparedModel, cb);
+    };
+}
+
 auto makePreparedModelFromCacheReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
                                       const std::shared_ptr<MockPreparedModel>& preparedModel) {
     return [launchStatus, returnStatus, preparedModel](
@@ -560,6 +571,8 @@
 }
 
 TEST_P(DeviceTest, prepareModel) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -571,7 +584,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -580,6 +593,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelLaunchError) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -590,7 +605,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -598,6 +613,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelReturnError) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -608,7 +625,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -616,6 +633,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelNullptrError) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -626,7 +645,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -634,6 +653,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelTransportFailure) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -643,7 +664,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -651,6 +672,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelDeadObject) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup call
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -660,7 +683,7 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -668,6 +691,8 @@
 }
 
 TEST_P(DeviceTest, prepareModelAsyncCrash) {
+    if (kVersion.level > nn::Version::Level::FEATURE_LEVEL_7) return;
+
     // setup test
     const auto mockDevice = createMockDevice();
     const auto device = Device::create(kName, mockDevice, kVersion).value();
@@ -681,7 +706,157 @@
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfig) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    const auto mockPreparedModel = MockPreparedModel::create();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makePreparedModelWithConfigReturn(ErrorStatus::NONE, ErrorStatus::NONE,
+                                                               mockPreparedModel)));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_NE(result.value(), nullptr);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigLaunchError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makePreparedModelWithConfigReturn(
+                    ErrorStatus::GENERAL_FAILURE, ErrorStatus::GENERAL_FAILURE, nullptr)));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigReturnError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makePreparedModelWithConfigReturn(
+                    ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE, nullptr)));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigNullptrError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makePreparedModelWithConfigReturn(ErrorStatus::NONE, ErrorStatus::NONE,
+                                                               nullptr)));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigTransportFailure) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigDeadObject) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(DeviceTest, prepareModelWithConfigAsyncCrash) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockDevice = createMockDevice();
+    const auto device = Device::create(kName, mockDevice, kVersion).value();
+    const auto ret = [&device]() {
+        DeathMonitor::serviceDied(device->getDeathMonitor());
+        return ndk::ScopedAStatus::ok();
+    };
+    EXPECT_CALL(*mockDevice, prepareModelWithConfig(_, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(ret));
+
+    // run test
+    const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, kHints,
+                                             kExtensionNameToPrefix);
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/aidl/utils/test/MockBurst.h b/neuralnetworks/aidl/utils/test/MockBurst.h
index 5083bbd..4cf60b6 100644
--- a/neuralnetworks/aidl/utils/test/MockBurst.h
+++ b/neuralnetworks/aidl/utils/test/MockBurst.h
@@ -32,6 +32,10 @@
                  bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
                  ExecutionResult* executionResult),
                 (override));
+    MOCK_METHOD(ndk::ScopedAStatus, executeSynchronouslyWithConfig,
+                (const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+                 const ExecutionConfig& config, int64_t deadline, ExecutionResult* executionResult),
+                (override));
     MOCK_METHOD(ndk::ScopedAStatus, releaseMemoryResource, (int64_t memoryIdentifierToken),
                 (override));
 };
diff --git a/neuralnetworks/aidl/utils/test/MockDevice.h b/neuralnetworks/aidl/utils/test/MockDevice.h
index 3a28d55..47b8346 100644
--- a/neuralnetworks/aidl/utils/test/MockDevice.h
+++ b/neuralnetworks/aidl/utils/test/MockDevice.h
@@ -50,6 +50,10 @@
                  const std::vector<uint8_t>& token,
                  const std::shared_ptr<IPreparedModelCallback>& callback),
                 (override));
+    MOCK_METHOD(ndk::ScopedAStatus, prepareModelWithConfig,
+                (const Model& model, const PrepareModelConfig& config,
+                 const std::shared_ptr<IPreparedModelCallback>& callback),
+                (override));
     MOCK_METHOD(ndk::ScopedAStatus, prepareModelFromCache,
                 (int64_t deadline, const std::vector<ndk::ScopedFileDescriptor>& modelCache,
                  const std::vector<ndk::ScopedFileDescriptor>& dataCache,
diff --git a/neuralnetworks/aidl/utils/test/MockPreparedModel.h b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
index 0ed9af9..318acc2 100644
--- a/neuralnetworks/aidl/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
@@ -40,10 +40,19 @@
                  bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
                  int64_t duration, FencedExecutionResult* fencedExecutionResult),
                 (override));
+    MOCK_METHOD(ndk::ScopedAStatus, executeSynchronouslyWithConfig,
+                (const Request& request, const ExecutionConfig& config, int64_t deadline,
+                 ExecutionResult* executionResult),
+                (override));
+    MOCK_METHOD(ndk::ScopedAStatus, executeFencedWithConfig,
+                (const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+                 const ExecutionConfig& config, int64_t deadline, int64_t duration,
+                 FencedExecutionResult* fencedExecutionResult),
+                (override));
     MOCK_METHOD(ndk::ScopedAStatus, configureExecutionBurst, (std::shared_ptr<IBurst> * burst),
                 (override));
     MOCK_METHOD(ndk::ScopedAStatus, createReusableExecution,
-                (const Request& request, bool measureTiming, int64_t loopTimeoutDuration,
+                (const Request& request, const ExecutionConfig& config,
                  std::shared_ptr<IExecution>* execution),
                 (override));
 };
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index 8cfb7c1..bf6136d 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -70,6 +70,21 @@
 
 class PreparedModelTest : public VersionedAidlUtilsTestBase {};
 
+const std::vector<nn::TokenValuePair> kHints = {nn::TokenValuePair{.token = 0, .value = {1}}};
+const std::vector<nn::ExtensionNameAndPrefix> kExtensionNameToPrefix = {
+        nn::ExtensionNameAndPrefix{.name = "com.android.nn_test", .prefix = 1}};
+auto makeFencedExecutionWithConfigResult(
+        const std::shared_ptr<MockFencedExecutionCallback>& callback) {
+    return [callback](const Request& /*request*/,
+                      const std::vector<ndk::ScopedFileDescriptor>& /*waitFor*/,
+                      const ExecutionConfig& /*config*/, int64_t /*deadline*/, int64_t /*duration*/,
+                      FencedExecutionResult* fencedExecutionResult) {
+        *fencedExecutionResult = FencedExecutionResult{.callback = callback,
+                                                       .syncFence = ndk::ScopedFileDescriptor(-1)};
+        return ndk::ScopedAStatus::ok();
+    };
+}
+
 }  // namespace
 
 TEST_P(PreparedModelTest, invalidPreparedModel) {
@@ -82,6 +97,8 @@
 }
 
 TEST_P(PreparedModelTest, executeSync) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup call
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -96,7 +113,7 @@
                     DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -104,6 +121,8 @@
 }
 
 TEST_P(PreparedModelTest, executeSyncError) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -112,7 +131,7 @@
             .WillOnce(Invoke(makeGeneralFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -120,6 +139,8 @@
 }
 
 TEST_P(PreparedModelTest, executeSyncTransportFailure) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -128,7 +149,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -136,6 +157,8 @@
 }
 
 TEST_P(PreparedModelTest, executeSyncDeadObject) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -144,7 +167,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -152,6 +175,8 @@
 }
 
 TEST_P(PreparedModelTest, executeFenced) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup call
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -165,7 +190,7 @@
             .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -181,6 +206,8 @@
 }
 
 TEST_P(PreparedModelTest, executeFencedCallbackError) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup call
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -195,7 +222,7 @@
             .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -211,6 +238,8 @@
 }
 
 TEST_P(PreparedModelTest, executeFencedError) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -219,7 +248,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -227,6 +256,8 @@
 }
 
 TEST_P(PreparedModelTest, executeFencedTransportFailure) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -235,7 +266,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -243,6 +274,8 @@
 }
 
 TEST_P(PreparedModelTest, executeFencedDeadObject) {
+    if (kVersion.level >= nn::Version::Level::FEATURE_LEVEL_8) return;
+
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
@@ -251,7 +284,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -276,7 +309,7 @@
                     DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -300,7 +333,7 @@
             .WillOnce(Invoke(makeGeneralFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -322,7 +355,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -344,7 +377,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -372,7 +405,7 @@
             .WillRepeatedly(Invoke(makeFencedExecutionResult(mockCallback)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -410,7 +443,7 @@
             .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -440,7 +473,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -462,7 +495,7 @@
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -484,7 +517,7 @@
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -495,6 +528,206 @@
     EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
+TEST_P(PreparedModelTest, executeSyncWithConfig) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    const auto mockExecutionResult = ExecutionResult{
+            .outputSufficientSize = true,
+            .outputShapes = {},
+            .timing = kNoTiming,
+    };
+    EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+            .Times(1)
+            .WillOnce(
+                    DoAll(SetArgPointee<3>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    EXPECT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeGeneralFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigTransportFailure) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeSyncWithConfigDeadObject) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeSynchronouslyWithConfig(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = preparedModel->execute({}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfig) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    const auto mockCallback = MockFencedExecutionCallback::create();
+    EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+                            SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+    EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeFencedExecutionWithConfigResult(mockCallback)));
+
+    // run test
+    const auto result =
+            preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    const auto& [syncFence, callback] = result.value();
+    EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+    ASSERT_NE(callback, nullptr);
+
+    // get results from callback
+    const auto callbackResult = callback();
+    ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code << ": "
+                                            << callbackResult.error().message;
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigCallbackError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup call
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    const auto mockCallback = MockFencedExecutionCallback::create();
+    EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+            .Times(1)
+            .WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+                                   SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+                                   Invoke(makeStatusOk))));
+    EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(Invoke(makeFencedExecutionWithConfigResult(mockCallback)));
+
+    // run test
+    const auto result =
+            preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    const auto& [syncFence, callback] = result.value();
+    EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+    ASSERT_NE(callback, nullptr);
+
+    // verify callback failure
+    const auto callbackResult = callback();
+    ASSERT_FALSE(callbackResult.has_value());
+    EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigError) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+    // run test
+    const auto result =
+            preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigTransportFailure) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result =
+            preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(PreparedModelTest, executeFencedWithConfigDeadObject) {
+    if (kVersion.level < nn::Version::Level::FEATURE_LEVEL_8) return;
+
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
+    EXPECT_CALL(*mockPreparedModel, executeFencedWithConfig(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result =
+            preparedModel->executeFenced({}, {}, {}, {}, {}, {}, kHints, kExtensionNameToPrefix);
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
 TEST_P(PreparedModelTest, configureExecutionBurst) {
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
@@ -567,13 +800,13 @@
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
     const auto mockExecution = ndk::SharedRefBase::make<MockExecution>();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
             .Times(1)
-            .WillOnce(DoAll(SetArgPointee<3>(mockExecution), Invoke(makeStatusOk)));
+            .WillOnce(DoAll(SetArgPointee<2>(mockExecution), Invoke(makeStatusOk)));
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -586,13 +819,13 @@
 
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -604,13 +837,13 @@
 
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -622,13 +855,13 @@
 
     // setup test
     const auto mockPreparedModel = MockPreparedModel::create();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
             .Times(1)
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
     const auto preparedModel = PreparedModel::create(mockPreparedModel, kVersion).value();
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2460fba..8c8a87a 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -63,6 +63,8 @@
     // it is skipped. The field is set to true by default and is set to false in
     // quantization coupling tests to suppress skipping a test
     bool reportSkipping;
+    // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+    bool useConfig;
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable)
         : executor(executor),
@@ -70,7 +72,8 @@
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(true) {}
+          reportSkipping(true),
+          useConfig(false) {}
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable, bool reportSkipping)
         : executor(executor),
@@ -78,7 +81,17 @@
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(reportSkipping) {}
+          reportSkipping(reportSkipping),
+          useConfig(false) {}
+    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+               bool reusable, bool reportSkipping, bool useConfig)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          memoryType(memoryType),
+          reusable(reusable),
+          reportSkipping(reportSkipping),
+          useConfig(useConfig) {}
 };
 
 std::string toString(OutputType type) {
@@ -100,7 +113,8 @@
        << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
        << ", .outputType=" << toString(config.outputType)
        << ", .memoryType=" << toString(config.memoryType)
-       << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+       << ", .reusable=" << (config.reusable ? "true" : "false")
+       << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
     return ss.str();
 }
 
@@ -587,8 +601,8 @@
 
     std::shared_ptr<IExecution> execution;
     if (testConfig.reusable) {
-        const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
-                                                                loopTimeoutDurationNs, &execution);
+        const auto ret = preparedModel->createReusableExecution(
+                request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
         ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
         ASSERT_NE(nullptr, execution.get());
     }
@@ -607,6 +621,10 @@
                 ::ndk::ScopedAStatus ret;
                 if (testConfig.reusable) {
                     ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+                } else if (testConfig.useConfig) {
+                    ret = preparedModel->executeSynchronouslyWithConfig(
+                            request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                            kNoDeadline, &executionResult);
                 } else {
                     ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                               kNoDeadline, loopTimeoutDurationNs,
@@ -649,9 +667,16 @@
 
                 ExecutionResult executionResult;
                 // execute
-                ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
-                                                  kNoDeadline, loopTimeoutDurationNs,
-                                                  &executionResult);
+                if (testConfig.useConfig) {
+                    ret = burst->executeSynchronouslyWithConfig(
+                            request, slots,
+                            {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+                            &executionResult);
+                } else {
+                    ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+                                                      kNoDeadline, loopTimeoutDurationNs,
+                                                      &executionResult);
+                }
                 ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                         << ret.getDescription();
                 if (ret.isOk()) {
@@ -680,6 +705,10 @@
                 ::ndk::ScopedAStatus ret;
                 if (testConfig.reusable) {
                     ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+                } else if (testConfig.useConfig) {
+                    ret = preparedModel->executeFencedWithConfig(
+                            request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                            kNoDeadline, kNoDuration, &executionResult);
                 } else {
                     ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                        kNoDeadline, loopTimeoutDurationNs,
@@ -697,9 +726,19 @@
                     waitFor.emplace_back(dupFd);
                     // If a sync fence is returned, try start another run waiting for the sync
                     // fence.
-                    ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
-                                                       kNoDeadline, loopTimeoutDurationNs,
-                                                       kNoDuration, &executionResult);
+                    if (testConfig.reusable) {
+                        ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+                                                       &executionResult);
+                    } else if (testConfig.useConfig) {
+                        ret = preparedModel->executeFencedWithConfig(
+                                request, waitFor,
+                                {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                                kNoDeadline, kNoDuration, &executionResult);
+                    } else {
+                        ret = preparedModel->executeFenced(
+                                request, waitFor, testConfig.measureTiming, kNoDeadline,
+                                loopTimeoutDurationNs, kNoDuration, &executionResult);
+                    }
                     ASSERT_TRUE(ret.isOk());
                     waitForSyncFence(executionResult.syncFence.get());
                 }
@@ -830,11 +869,13 @@
     std::vector<Executor> executorList;
     std::vector<MemoryType> memoryTypeList;
     std::vector<bool> reusableList = {false};
+    std::vector<bool> useConfigList = {false};
 
     int deviceVersion;
     ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
     if (deviceVersion >= kMinAidlLevelForFL8) {
         reusableList.push_back(true);
+        useConfigList.push_back(true);
     }
 
     switch (testKind) {
@@ -879,11 +920,14 @@
             for (const Executor executor : executorList) {
                 for (const MemoryType memoryType : memoryTypeList) {
                     for (const bool reusable : reusableList) {
-                        if (executor == Executor::BURST && reusable) continue;
-                        const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
-                                                    reusable);
-                        SCOPED_TRACE(toString(testConfig));
-                        EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                        for (const bool useConfig : useConfigList) {
+                            if ((useConfig || executor == Executor::BURST) && reusable) continue;
+                            const TestConfig testConfig(executor, measureTiming, outputType,
+                                                        memoryType, reusable,
+                                                        /*reportSkipping=*/true, useConfig);
+                            SCOPED_TRACE(toString(testConfig));
+                            EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                        }
                     }
                 }
             }
@@ -942,6 +986,13 @@
             createPreparedModel(device, model, &preparedModel);
             if (preparedModel == nullptr) return;
             EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            int32_t deviceVersion;
+            ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+            if (deviceVersion >= kMinAidlLevelForFL8) {
+            createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/true,
+                                /*useConfig=*/true);
+                EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            }
         } break;
         case TestKind::QUANTIZATION_COUPLING: {
             ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
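
The positional initializer `{testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}` used
throughout this harness builds an ExecutionConfig whose last two fields are the execution hints
and extension prefixes; the adapter code later in this change reads them back as
config.executionHints and config.extensionNameToPrefix. A minimal sketch with the fields named
explicitly, using the locals of EvaluatePreparedModel above:

    ExecutionConfig config;
    config.measureTiming = testConfig.measureTiming;
    config.loopTimeoutDurationNs = loopTimeoutDurationNs;
    // executionHints / extensionNameToPrefix default to empty; drivers may ignore hints anyway.
    ExecutionResult executionResult;
    const auto ret = preparedModel->executeSynchronouslyWithConfig(request, config, kNoDeadline,
                                                                   &executionResult);
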
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index b3e9c63..97760ae 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -204,11 +204,23 @@
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
     }
+    ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request&, const ExecutionConfig&,
+                                                      int64_t, ExecutionResult*) override {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
+    ndk::ScopedAStatus executeFencedWithConfig(const Request&,
+                                               const std::vector<ndk::ScopedFileDescriptor>&,
+                                               const ExecutionConfig&, int64_t, int64_t,
+                                               FencedExecutionResult*) override {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
     ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
     }
-    ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, bool, int64_t,
+    ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, const ExecutionConfig&,
                                                std::shared_ptr<aidl_hal::IExecution>*) override {
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
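
Every override in this InvalidPreparedModel stub, including the two *WithConfig methods added
here, reports the same service-specific GENERAL_FAILURE. A hypothetical helper (not part of the
change, name is illustrative) could factor out that repetition if more methods get added:

    // Hypothetical helper: each override would return toAStatus(ErrorStatus::GENERAL_FAILURE).
    static ndk::ScopedAStatus toAStatus(ErrorStatus status) {
        return ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(status));
    }
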
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index fdc7eff..931ba25 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -77,6 +77,28 @@
     ASSERT_EQ(nullptr, preparedModel.get());
 }
 
+static void validatePrepareModelWithConfig(const std::shared_ptr<IDevice>& device,
+                                           const std::string& message, const Model& model,
+                                           ExecutionPreference preference, Priority priority) {
+    SCOPED_TRACE(message + " [prepareModelWithConfig]");
+
+    std::shared_ptr<PreparedModelCallback> preparedModelCallback =
+            ndk::SharedRefBase::make<PreparedModelCallback>();
+    const auto prepareLaunchStatus = device->prepareModelWithConfig(
+            model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheToken, {}, {}},
+            preparedModelCallback);
+    ASSERT_FALSE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+    ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
+              ErrorStatus::INVALID_ARGUMENT);
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
 static bool validExecutionPreference(ExecutionPreference preference) {
     return preference == ExecutionPreference::LOW_POWER ||
            preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
@@ -103,6 +125,13 @@
     }
 
     validatePrepareModel(device, message, model, preference, priority);
+
+    int32_t aidlVersion;
+    ASSERT_TRUE(device->getInterfaceVersion(&aidlVersion).isOk());
+    if (aidlVersion >= kMinAidlLevelForFL8) {
+        // prepareModelWithConfig must satisfy all requirements enforced by prepareModel.
+        validatePrepareModelWithConfig(device, message, model, preference, priority);
+    }
 }
 
 static uint32_t addOperand(Model* model) {
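
The brace-initialized PrepareModelConfig passed above follows the field order that Device.cpp
reads back later in this change: preference, priority, deadlineNs, modelCache, dataCache,
cacheToken, compilationHints, extensionNameToPrefix. A minimal sketch of the same call with
named fields (cache vectors and hints left empty):

    PrepareModelConfig config;
    config.preference = preference;
    config.priority = priority;
    config.deadlineNs = kNoDeadline;
    config.cacheToken = kEmptyCacheToken;
    // modelCache/dataCache stay empty; compilationHints may be ignored by the driver.
    const auto status = device->prepareModelWithConfig(model, config, preparedModelCallback);
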
diff --git a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
index e8debf7..d749841 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
@@ -45,7 +45,7 @@
     {
         SCOPED_TRACE(message + " [createReusableExecution]");
         const auto createStatus = preparedModel->createReusableExecution(
-                request, measure, kOmittedTimeoutDuration, &execution);
+                request, {measure, kOmittedTimeoutDuration, {}, {}}, &execution);
         if (!createStatus.isOk()) {
             ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
             ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
@@ -149,10 +149,59 @@
 
     int32_t aidlVersion;
     ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
+    if (aidlVersion < kMinAidlLevelForFL8) {
+        return;
+    }
 
     // validate reusable execution
-    if (aidlVersion >= kMinAidlLevelForFL8) {
-        validateReusableExecution(preparedModel, message, request, measure);
+    validateReusableExecution(preparedModel, message, request, measure);
+
+    // synchronous with empty hints
+    {
+        SCOPED_TRACE(message + " [executeSynchronouslyWithConfig]");
+        ExecutionResult executionResult;
+        const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(
+                request, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+
+    // fenced with empty hints
+    {
+        SCOPED_TRACE(message + " [executeFencedWithConfig]");
+        FencedExecutionResult executionResult;
+        const auto executeStatus = preparedModel->executeFencedWithConfig(
+                request, {}, {false, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, kNoDuration,
+                &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+
+    // burst with empty hints
+    {
+        SCOPED_TRACE(message + " [burst executeSynchronouslyWithConfig]");
+
+        // create burst
+        std::shared_ptr<IBurst> burst;
+        auto ret = preparedModel->configureExecutionBurst(&burst);
+        ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+        ASSERT_NE(nullptr, burst.get());
+
+        // use -1 for all memory identifier tokens
+        const std::vector<int64_t> slots(request.pools.size(), -1);
+
+        ExecutionResult executionResult;
+        const auto executeStatus = burst->executeSynchronouslyWithConfig(
+                request, slots, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline,
+                &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
     }
 }
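
Each of the new validation blocks above repeats the same assertions on the returned
ndk::ScopedAStatus. A small hypothetical helper (not part of the change) capturing the expected
outcome, wrapped with ASSERT_NO_FATAL_FAILURE at the call site:

    // Hypothetical helper: an invalid request must fail with a service-specific INVALID_ARGUMENT.
    void expectInvalidArgument(const ndk::ScopedAStatus& status) {
        ASSERT_FALSE(status.isOk());
        ASSERT_EQ(status.getExceptionCode(), EX_SERVICE_SPECIFIC);
        ASSERT_EQ(static_cast<ErrorStatus>(status.getServiceSpecificError()),
                  ErrorStatus::INVALID_ARGUMENT);
    }
    // e.g. ASSERT_NO_FATAL_FAILURE(expectInvalidArgument(executeStatus));
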
 
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
index c417356..ad93e6d 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
@@ -41,7 +41,8 @@
 
 // internal helper function
 void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
-                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping) {
+                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,
+                         bool useConfig) {
     ASSERT_NE(nullptr, preparedModel);
     *preparedModel = nullptr;
 
@@ -56,11 +57,25 @@
     // launch prepare model
     const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
             ndk::SharedRefBase::make<PreparedModelCallback>();
-    const auto prepareLaunchStatus =
-            device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
-                                 kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
-
+    if (useConfig) {
+        const auto prepareLaunchStatus =
+                device->prepareModelWithConfig(model,
+                                               {ExecutionPreference::FAST_SINGLE_ANSWER,
+                                                kDefaultPriority,
+                                                kNoDeadline,
+                                                {},
+                                                {},
+                                                kEmptyCacheToken,
+                                                {},
+                                                {}},
+                                               preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+    } else {
+        const auto prepareLaunchStatus = device->prepareModel(
+                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {},
+                {}, kEmptyCacheToken, preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+    }
     // retrieve prepared model
     preparedModelCallback->wait();
     const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
index a900590..00d705c 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
@@ -51,8 +51,8 @@
 // Create an IPreparedModel object. If the model cannot be prepared,
 // "preparedModel" will be nullptr instead.
 void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
-                         std::shared_ptr<IPreparedModel>* preparedModel,
-                         bool reportSkipping = true);
+                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping = true,
+                         bool useConfig = false);
 
 enum class Executor { SYNC, BURST, FENCED };
 
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
index f2687c4..8d42e2f 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Burst.h
@@ -46,6 +46,11 @@
                                             bool measureTiming, int64_t deadlineNs,
                                             int64_t loopTimeoutDurationNs,
                                             ExecutionResult* executionResult) override;
+    ndk::ScopedAStatus executeSynchronouslyWithConfig(
+            const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+            const ExecutionConfig& config, int64_t deadlineNs,
+            ExecutionResult* executionResult) override;
+
     ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override;
 
     class ThreadSafeMemoryCache {
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
index aa29d63..c94f270 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Device.h
@@ -31,6 +31,7 @@
 #include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h>
 #include <aidl/android/hardware/neuralnetworks/Model.h>
 #include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h>
+#include <aidl/android/hardware/neuralnetworks/PrepareModelConfig.h>
 #include <aidl/android/hardware/neuralnetworks/Priority.h>
 #include <android/binder_auto_utils.h>
 #include <nnapi/IDevice.h>
@@ -72,6 +73,9 @@
             const std::vector<ndk::ScopedFileDescriptor>& dataCache,
             const std::vector<uint8_t>& token,
             const std::shared_ptr<IPreparedModelCallback>& callback) override;
+    ndk::ScopedAStatus prepareModelWithConfig(
+            const Model& model, const PrepareModelConfig& config,
+            const std::shared_ptr<IPreparedModelCallback>& callback) override;
 
   protected:
     const ::android::nn::SharedDevice kDevice;
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
index f92b0bc..d1359d6 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/PreparedModel.h
@@ -51,9 +51,17 @@
                                      int64_t loopTimeoutDurationNs, int64_t durationNs,
                                      FencedExecutionResult* executionResult) override;
     ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override;
-    ndk::ScopedAStatus createReusableExecution(const Request& request, bool measureTiming,
-                                               int64_t loopTimeoutDurationNs,
+    ndk::ScopedAStatus createReusableExecution(const Request& request,
+                                               const ExecutionConfig& config,
                                                std::shared_ptr<IExecution>* execution) override;
+    ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request& request,
+                                                      const ExecutionConfig& config,
+                                                      int64_t deadlineNs,
+                                                      ExecutionResult* executionResult) override;
+    ndk::ScopedAStatus executeFencedWithConfig(
+            const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+            const ExecutionConfig& config, int64_t deadlineNs, int64_t durationNs,
+            FencedExecutionResult* executionResult) override;
 
     ::android::nn::SharedPreparedModel getUnderlyingPreparedModel() const;
 
diff --git a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
index 4fabb20..a4a80fa 100644
--- a/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/Burst.cpp
@@ -93,7 +93,8 @@
 nn::ExecutionResult<ExecutionResult> executeSynchronously(
         const nn::IBurst& burst, const Burst::ThreadSafeMemoryCache& cache, const Request& request,
         const std::vector<int64_t>& memoryIdentifierTokens, bool measureTiming, int64_t deadlineNs,
-        int64_t loopTimeoutDurationNs) {
+        int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
     if (request.pools.size() != memoryIdentifierTokens.size()) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
                << "request.pools.size() != memoryIdentifierTokens.size()";
@@ -107,11 +108,13 @@
     const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
     const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
     const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+    auto nnHints = NN_TRY(convertInput(hints));
+    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
 
     const auto hold = ensureAllMemoriesAreCached(&nnRequest, memoryIdentifierTokens, burst, cache);
 
-    const auto result =
-            burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+    const auto result = burst.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration,
+                                      nnHints, nnExtensionNameToPrefix);
 
     if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
         const auto& [message, code, outputShapes] = result.error();
@@ -155,7 +158,24 @@
                                                ExecutionResult* executionResult) {
     auto result =
             adapter::executeSynchronously(*kBurst, kMemoryCache, request, memoryIdentifierTokens,
-                                          measureTiming, deadlineNs, loopTimeoutDurationNs);
+                                          measureTiming, deadlineNs, loopTimeoutDurationNs, {}, {});
+    if (!result.has_value()) {
+        auto [message, code, _] = std::move(result).error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Burst::executeSynchronouslyWithConfig(
+        const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+        const ExecutionConfig& config, int64_t deadlineNs, ExecutionResult* executionResult) {
+    auto result = adapter::executeSynchronously(
+            *kBurst, kMemoryCache, request, memoryIdentifierTokens, config.measureTiming,
+            deadlineNs, config.loopTimeoutDurationNs, config.executionHints,
+            config.extensionNameToPrefix);
     if (!result.has_value()) {
         auto [message, code, _] = std::move(result).error();
         const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
diff --git a/neuralnetworks/utils/adapter/aidl/src/Device.cpp b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
index 763be7f..84aaddb 100644
--- a/neuralnetworks/utils/adapter/aidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/Device.cpp
@@ -148,13 +148,14 @@
     }
 }
 
-nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
-                                     const Model& model, ExecutionPreference preference,
-                                     Priority priority, int64_t deadlineNs,
-                                     const std::vector<ndk::ScopedFileDescriptor>& modelCache,
-                                     const std::vector<ndk::ScopedFileDescriptor>& dataCache,
-                                     const std::vector<uint8_t>& token,
-                                     const std::shared_ptr<IPreparedModelCallback>& callback) {
+nn::GeneralResult<void> prepareModel(
+        const nn::SharedDevice& device, const Executor& executor, const Model& model,
+        ExecutionPreference preference, Priority priority, int64_t deadlineNs,
+        const std::vector<ndk::ScopedFileDescriptor>& modelCache,
+        const std::vector<ndk::ScopedFileDescriptor>& dataCache, const std::vector<uint8_t>& token,
+        const std::vector<TokenValuePair>& hints,
+        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix,
+        const std::shared_ptr<IPreparedModelCallback>& callback) {
     if (callback.get() == nullptr) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
@@ -166,12 +167,16 @@
     auto nnModelCache = NN_TRY(convertInput(modelCache));
     auto nnDataCache = NN_TRY(convertInput(dataCache));
     const auto nnToken = NN_TRY(convertCacheToken(token));
+    auto nnHints = NN_TRY(convertInput(hints));
+    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, callback] {
-        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
-                                           nnModelCache, nnDataCache, nnToken);
+                 nnToken, nnHints = std::move(nnHints),
+                 nnExtensionNameToPrefix = std::move(nnExtensionNameToPrefix), callback] {
+        auto result =
+                device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline, nnModelCache,
+                                     nnDataCache, nnToken, nnHints, nnExtensionNameToPrefix);
         notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
@@ -273,8 +278,9 @@
                                         const std::vector<ndk::ScopedFileDescriptor>& dataCache,
                                         const std::vector<uint8_t>& token,
                                         const std::shared_ptr<IPreparedModelCallback>& callback) {
-    const auto result = adapter::prepareModel(kDevice, kExecutor, model, preference, priority,
-                                              deadlineNs, modelCache, dataCache, token, callback);
+    const auto result =
+            adapter::prepareModel(kDevice, kExecutor, model, preference, priority, deadlineNs,
+                                  modelCache, dataCache, token, {}, {}, callback);
     if (!result.has_value()) {
         const auto& [message, code] = result.error();
         const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -301,4 +307,21 @@
     return ndk::ScopedAStatus::ok();
 }
 
+ndk::ScopedAStatus Device::prepareModelWithConfig(
+        const Model& model, const PrepareModelConfig& config,
+        const std::shared_ptr<IPreparedModelCallback>& callback) {
+    const auto result = adapter::prepareModel(
+            kDevice, kExecutor, model, config.preference, config.priority, config.deadlineNs,
+            config.modelCache, config.dataCache, config.cacheToken, config.compilationHints,
+            config.extensionNameToPrefix, callback);
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        callback->notify(aidlCode, nullptr);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
 }  // namespace aidl::android::hardware::neuralnetworks::adapter
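
In the adapter above, both IDevice entry points funnel into the same adapter::prepareModel()
helper: the legacy method forwards empty hint vectors, while prepareModelWithConfig() unpacks
them from the parcelable. A minimal sketch of the two equivalent client calls when no caching
and no hints are used (constants as in the VTS code earlier in this change):

    device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                         kNoDeadline, /*modelCache=*/{}, /*dataCache=*/{}, kEmptyCacheToken,
                         callback);
    device->prepareModelWithConfig(
            model,
            {ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {}, {},
             kEmptyCacheToken, /*compilationHints=*/{}, /*extensionNameToPrefix=*/{}},
            callback);
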
diff --git a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
index 5cab62c..790558f 100644
--- a/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/aidl/src/PreparedModel.cpp
@@ -118,17 +118,20 @@
     return durationNs < 0 ? nn::OptionalTimePoint{} : nn::TimePoint(makeDuration(durationNs));
 }
 
-nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IPreparedModel& preparedModel,
-                                                          const Request& request,
-                                                          bool measureTiming, int64_t deadlineNs,
-                                                          int64_t loopTimeoutDurationNs) {
+nn::ExecutionResult<ExecutionResult> executeSynchronously(
+        const nn::IPreparedModel& preparedModel, const Request& request, bool measureTiming,
+        int64_t deadlineNs, int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
     const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
     const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
     const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
+    auto nnHints = NN_TRY(convertInput(hints));
+    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
 
     const auto result =
-            preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration);
+            preparedModel.execute(nnRequest, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration,
+                                  nnHints, nnExtensionNameToPrefix);
 
     if (!result.ok() && result.error().code == nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
         const auto& [message, code, outputShapes] = result.error();
@@ -147,16 +150,21 @@
 nn::GeneralResult<FencedExecutionResult> executeFenced(
         const nn::IPreparedModel& preparedModel, const Request& request,
         const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measureTiming,
-        int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs) {
+        int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
+        const std::vector<TokenValuePair>& hints,
+        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
     const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
     const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
     const auto nnDeadline = NN_TRY(makeOptionalTimePoint(deadlineNs));
     const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
     const auto nnDuration = NN_TRY(makeOptionalDuration(durationNs));
+    auto nnHints = NN_TRY(convertInput(hints));
+    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
 
     auto [syncFence, executeFencedInfoCallback] = NN_TRY(preparedModel.executeFenced(
-            nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+            nnRequest, nnWaitFor, nnMeasureTiming, nnDeadline, nnLoopTimeoutDuration, nnDuration,
+            nnHints, nnExtensionNameToPrefix));
 
     ndk::ScopedFileDescriptor fileDescriptor;
     if (syncFence.hasFd()) {
@@ -171,11 +179,16 @@
 
 nn::GeneralResult<nn::SharedExecution> createReusableExecution(
         const nn::IPreparedModel& preparedModel, const Request& request, bool measureTiming,
-        int64_t loopTimeoutDurationNs) {
+        int64_t loopTimeoutDurationNs, const std::vector<TokenValuePair>& hints,
+        const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) {
     const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasureTiming = measureTiming ? nn::MeasureTiming::YES : nn::MeasureTiming::NO;
     const auto nnLoopTimeoutDuration = NN_TRY(makeOptionalDuration(loopTimeoutDurationNs));
-    return preparedModel.createReusableExecution(nnRequest, nnMeasureTiming, nnLoopTimeoutDuration);
+    auto nnHints = NN_TRY(convertInput(hints));
+    auto nnExtensionNameToPrefix = NN_TRY(convertInput(extensionNameToPrefix));
+
+    return preparedModel.createReusableExecution(nnRequest, nnMeasureTiming, nnLoopTimeoutDuration,
+                                                 nnHints, nnExtensionNameToPrefix);
 }
 
 nn::ExecutionResult<ExecutionResult> executeSynchronously(const nn::IExecution& execution,
@@ -231,7 +244,7 @@
                                                        int64_t loopTimeoutDurationNs,
                                                        ExecutionResult* executionResult) {
     auto result = adapter::executeSynchronously(*kPreparedModel, request, measureTiming, deadlineNs,
-                                                loopTimeoutDurationNs);
+                                                loopTimeoutDurationNs, {}, {});
     if (!result.has_value()) {
         const auto& [message, code, _] = result.error();
         const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -247,7 +260,41 @@
         bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs,
         FencedExecutionResult* executionResult) {
     auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, measureTiming,
-                                         deadlineNs, loopTimeoutDurationNs, durationNs);
+                                         deadlineNs, loopTimeoutDurationNs, durationNs, {}, {});
+    if (!result.has_value()) {
+        const auto& [message, code] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeSynchronouslyWithConfig(const Request& request,
+                                                                 const ExecutionConfig& config,
+                                                                 int64_t deadlineNs,
+                                                                 ExecutionResult* executionResult) {
+    auto result = adapter::executeSynchronously(
+            *kPreparedModel, request, config.measureTiming, deadlineNs,
+            config.loopTimeoutDurationNs, config.executionHints, config.extensionNameToPrefix);
+    if (!result.has_value()) {
+        const auto& [message, code, _] = result.error();
+        const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
+        return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                static_cast<int32_t>(aidlCode), message.c_str());
+    }
+    *executionResult = std::move(result).value();
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus PreparedModel::executeFencedWithConfig(
+        const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+        const ExecutionConfig& config, int64_t deadlineNs, int64_t durationNs,
+        FencedExecutionResult* executionResult) {
+    auto result = adapter::executeFenced(*kPreparedModel, request, waitFor, config.measureTiming,
+                                         deadlineNs, config.loopTimeoutDurationNs, durationNs,
+                                         config.executionHints, config.extensionNameToPrefix);
     if (!result.has_value()) {
         const auto& [message, code] = result.error();
         const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
@@ -275,11 +322,11 @@
 }
 
 ndk::ScopedAStatus PreparedModel::createReusableExecution(const Request& request,
-                                                          bool measureTiming,
-                                                          int64_t loopTimeoutDurationNs,
+                                                          const ExecutionConfig& config,
                                                           std::shared_ptr<IExecution>* execution) {
-    auto result = adapter::createReusableExecution(*kPreparedModel, request, measureTiming,
-                                                   loopTimeoutDurationNs);
+    auto result = adapter::createReusableExecution(
+            *kPreparedModel, request, config.measureTiming, config.loopTimeoutDurationNs,
+            config.executionHints, config.extensionNameToPrefix);
     if (!result.has_value()) {
         const auto& [message, code] = result.error();
         const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);
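For reference, the executeSynchronouslyWithConfig / executeFencedWithConfig adapters above do nothing more than unpack an ExecutionConfig into the per-argument form used elsewhere in this file. A minimal sketch of how a caller might populate such a config is shown below; the ExecutionConfig field names are the ones unpacked above, while the TokenValuePair field names, the placeholder token value, and the include paths are assumptions based on the generated NDK backend rather than part of this change.

    #include <aidl/android/hardware/neuralnetworks/ExecutionConfig.h>
    #include <aidl/android/hardware/neuralnetworks/TokenValuePair.h>

    #include <cstdint>

    using aidl::android::hardware::neuralnetworks::ExecutionConfig;
    using aidl::android::hardware::neuralnetworks::TokenValuePair;

    // Build an ExecutionConfig that carries one opaque vendor hint alongside the
    // usual timing controls.
    ExecutionConfig makeExampleConfig(bool measureTiming, int64_t loopTimeoutDurationNs) {
        ExecutionConfig config;
        config.measureTiming = measureTiming;
        config.loopTimeoutDurationNs = loopTimeoutDurationNs;

        TokenValuePair hint;
        hint.token = 0;             // placeholder extension token (assumption)
        hint.value = {0x01, 0x02};  // opaque, extension-defined payload
        config.executionHints.push_back(hint);

        // config.extensionNameToPrefix maps extension names to the prefixes used
        // by the tokens above; it is left empty in this sketch.
        return config;
    }
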
diff --git a/neuralnetworks/utils/adapter/hidl/src/Burst.cpp b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
index 8b2e1dd..e3b165b 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
@@ -250,7 +250,7 @@
     nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
 
     const auto [outputShapes, timing] =
-            NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
+            NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}, {}, {}));
 
     return std::make_pair(NN_TRY(V1_2::utils::convert(outputShapes)),
                           NN_TRY(V1_2::utils::convert(timing)));
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 4993a80..0f44638 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -135,7 +135,7 @@
 
     Task task = [device, nnModel = std::move(nnModel), executor, callback] {
         auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
-                                           nn::Priority::DEFAULT, {}, {}, {}, {});
+                                           nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
         notify(callback.get(), std::move(result), executor);
     };
     executor(std::move(task), {});
@@ -155,8 +155,8 @@
     const auto nnPreference = NN_TRY(convertInput(preference));
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
-        auto result =
-                device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
+        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {},
+                                           {}, {}, {});
         notify(callback.get(), std::move(result), executor);
     };
     executor(std::move(task), {});
@@ -185,7 +185,7 @@
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                  nnToken, executor, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
-                                           nnModelCache, nnDataCache, nnToken);
+                                           nnModelCache, nnDataCache, nnToken, {}, {});
         notify(callback.get(), std::move(result), executor);
     };
     executor(std::move(task), {});
@@ -215,7 +215,7 @@
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                  nnToken, executor, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
-                                           nnModelCache, nnDataCache, nnToken);
+                                           nnModelCache, nnDataCache, nnToken, {}, {});
         notify(callback.get(), std::move(result), executor);
     };
     executor(std::move(task), nnDeadline);
diff --git a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
index 71060d5..c6055a6 100644
--- a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -159,7 +159,7 @@
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
-        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
+        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
         notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
@@ -185,7 +185,7 @@
     }
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
-        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
+        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
         notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
@@ -216,8 +216,8 @@
 
     Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
                  nnLoopTimeoutDuration, callback] {
-        auto result =
-                preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
+        auto result = preparedModel->execute(nnRequest, nnMeasure, nnDeadline,
+                                             nnLoopTimeoutDuration, {}, {});
         notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
@@ -232,7 +232,7 @@
     const auto nnMeasure = NN_TRY(convertInput(measure));
 
     const auto [outputShapes, timing] =
-            NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
+            NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {}));
 
     auto hidlOutputShapes = NN_TRY(V1_2::utils::convert(outputShapes));
     const auto hidlTiming = NN_TRY(V1_2::utils::convert(timing));
@@ -248,8 +248,8 @@
     const auto nnDeadline = NN_TRY(convertInput(deadline));
     const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
 
-    const auto [outputShapes, timing] =
-            NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
+    const auto [outputShapes, timing] = NN_TRY(preparedModel->execute(
+            nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration, {}, {}));
 
     auto hidlOutputShapes = NN_TRY(V1_3::utils::convert(outputShapes));
     const auto hidlTiming = NN_TRY(V1_3::utils::convert(timing));
@@ -293,8 +293,9 @@
     const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
     const auto nnDuration = NN_TRY(convertInput(duration));
 
-    auto [syncFence, executeFencedCallback] = NN_TRY(preparedModel->executeFenced(
-            nnRequest, nnWaitFor, nnMeasure, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+    auto [syncFence, executeFencedCallback] =
+            NN_TRY(preparedModel->executeFenced(nnRequest, nnWaitFor, nnMeasure, nnDeadline,
+                                                nnLoopTimeoutDuration, nnDuration, {}, {}));
 
     auto hidlSyncFence = NN_TRY(V1_3::utils::convert(syncFence.getSharedHandle()));
     auto hidlExecuteFencedCallback = sp<FencedExecutionCallback>::make(executeFencedCallback);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index e86edda..1f1245f 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -33,12 +33,15 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 };
 
 }  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
index 5e62b9a..9582873 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
@@ -52,8 +52,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index de30aae..3f1f290 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -31,18 +31,23 @@
   public:
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index fde2486..129431f 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -48,18 +48,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
   private:
     bool isValidInternal() const EXCLUDES(mMutex);
     nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
 
     const Factory kMakeBurst;
     mutable std::mutex mMutex;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 84ae799..267d634 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -65,8 +65,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
@@ -83,7 +84,9 @@
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
             const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index 86533ed..bbfc220 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -49,18 +49,23 @@
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
 
@@ -70,7 +75,9 @@
     bool isValidInternal() const EXCLUDES(mMutex);
     nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const;
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
 
     const Factory kMakePreparedModel;
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 0191533..3fdfb5c 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -34,13 +34,17 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidBurst";
 }
 
 nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidBurst";
 }
 
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
index 535ccb4..c8cc287 100644
--- a/neuralnetworks/utils/common/src/InvalidDevice.cpp
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -84,7 +84,9 @@
         const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
         nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
         const std::vector<nn::SharedHandle>& /*modelCache*/,
-        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidDevice";
 }
 
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 8195462..f6f978d 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -27,9 +27,12 @@
 namespace android::hardware::neuralnetworks::utils {
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-                              const nn::OptionalTimePoint& /*deadline*/,
-                              const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+InvalidPreparedModel::execute(
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+        const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
@@ -38,13 +41,17 @@
         const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
         nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
         const nn::OptionalDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
 nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
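On the driver side, the two new vectors that now reach nn::IPreparedModel::execute are plain data. A minimal, hypothetical sketch of a driver-side scan over them follows, using only the canonical types referenced throughout this diff; the token constant and the field names on nn::TokenValuePair are assumptions, not taken from this change.

    #include <nnapi/Types.h>

    #include <cstdint>
    #include <vector>

    // Hypothetical vendor token; real token values would be defined by a vendor
    // extension.
    constexpr int32_t kExampleVendorToken = 0;

    // Walk the hint list handed to nn::IPreparedModel::execute and act only on
    // the tokens this driver recognizes.
    void applyExecutionHints(const std::vector<android::nn::TokenValuePair>& hints) {
        for (const auto& hint : hints) {
            if (hint.token != kExampleVendorToken) {
                continue;  // skip tokens this driver does not recognize
            }
            // hint.value holds an opaque, extension-defined payload that a real
            // driver would decode here.
        }
    }
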
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 79cbe39..bf7a8ea 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -105,37 +105,49 @@
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
-    const auto fn = [&request, measure, deadline, loopTimeoutDuration](const nn::IBurst& burst) {
-        return burst.execute(request, measure, deadline, loopTimeoutDuration);
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    const auto fn = [&request, measure, deadline, loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IBurst& burst) {
+        return burst.execute(request, measure, deadline, loopTimeoutDuration, hints,
+                             extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
-    ResilientExecution::Factory makeExecution =
-            [burst = std::move(self), request, measure, loopTimeoutDuration] {
-        return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    ResilientExecution::Factory makeExecution = [burst = std::move(self), request, measure,
+                                                 loopTimeoutDuration, &hints,
+                                                 &extensionNameToPrefix] {
+        return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                                      extensionNameToPrefix);
     };
     return ResilientExecution::create(std::move(makeExecution));
 #else
-    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                           extensionNameToPrefix);
 #endif
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidExecution>();
     }
-    const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
-        return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+    const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IBurst& burst) {
+        return burst.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+                                             extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 2023c9a..a5c2640 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -179,19 +179,21 @@
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model,
                                                          preference, priority, deadline, modelCache,
-                                                         dataCache, token] {
+                                                         dataCache, token, hints, extensionNameToPrefix] {
         return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
-                                            dataCache, token);
+                                            dataCache, token, hints, extensionNameToPrefix);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 #else
-    return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache,
-                                token);
+    return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache, token,
+                                hints, extensionNameToPrefix);
 #endif
 }
 
@@ -234,14 +236,16 @@
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidPreparedModel>();
     }
-    const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache,
-                     &token](const nn::IDevice& device) {
+    const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache, &token,
+                     &hints, &extensionNameToPrefix](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
-                                   token);
+                                   token, hints, extensionNameToPrefix);
     };
     return protect(*this, fn, /*blocking=*/false);
 }
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 1ae19bc..b5843c0 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -104,43 +104,53 @@
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
-                                const nn::OptionalTimePoint& deadline,
-                                const nn::OptionalDuration& loopTimeoutDuration) const {
-    const auto fn = [&request, measure, &deadline,
-                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
-        return preparedModel.execute(request, measure, deadline, loopTimeoutDuration);
+ResilientPreparedModel::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    const auto fn = [&request, measure, &deadline, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.execute(request, measure, deadline, loopTimeoutDuration, hints,
+                                     extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-ResilientPreparedModel::executeFenced(const nn::Request& request,
-                                      const std::vector<nn::SyncFence>& waitFor,
-                                      nn::MeasureTiming measure,
-                                      const nn::OptionalTimePoint& deadline,
-                                      const nn::OptionalDuration& loopTimeoutDuration,
-                                      const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ResilientPreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const nn::OptionalDuration& timeoutDurationAfterFence,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     const auto fn = [&request, &waitFor, measure, &deadline, &loopTimeoutDuration,
-                     &timeoutDurationAfterFence](const nn::IPreparedModel& preparedModel) {
+                     &timeoutDurationAfterFence, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
         return preparedModel.executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
-                                           timeoutDurationAfterFence);
+                                           timeoutDurationAfterFence, hints, extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
-    ResilientExecution::Factory makeExecution =
-            [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
-        return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    ResilientExecution::Factory makeExecution = [preparedModel = std::move(self), request, measure,
+                                                 loopTimeoutDuration, hints,
+                                                 extensionNameToPrefix] {
+        return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration,
+                                                              hints, extensionNameToPrefix);
     };
     return ResilientExecution::create(std::move(makeExecution));
 #else
-    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                           extensionNameToPrefix);
 #endif
 }
 
@@ -159,13 +169,16 @@
 
 nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidExecution>();
     }
-    const auto fn = [&request, measure,
-                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
-        return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+    const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+                                                     extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
diff --git a/neuralnetworks/utils/common/test/MockDevice.h b/neuralnetworks/utils/common/test/MockDevice.h
index a9428bc..a0fc5c3 100644
--- a/neuralnetworks/utils/common/test/MockDevice.h
+++ b/neuralnetworks/utils/common/test/MockDevice.h
@@ -39,7 +39,9 @@
     MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModel,
                 (const Model& model, ExecutionPreference preference, Priority priority,
                  OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
-                 const std::vector<SharedHandle>& dataCache, const CacheToken& token),
+                 const std::vector<SharedHandle>& dataCache, const CacheToken& token,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModelFromCache,
                 (OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache,
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c8ce006..b8613b2 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -27,17 +27,23 @@
   public:
     MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), execute,
                 (const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline,
-                 const OptionalDuration& loopTimeoutDuration),
+                 const OptionalDuration& loopTimeoutDuration,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), executeFenced,
                 (const Request& request, const std::vector<SyncFence>& waitFor,
                  MeasureTiming measure, const OptionalTimePoint& deadline,
                  const OptionalDuration& loopTimeoutDuration,
-                 const OptionalDuration& timeoutDurationAfterFence),
+                 const OptionalDuration& timeoutDurationAfterFence,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
-                (const nn::Request& request, nn::MeasureTiming measure,
-                 const nn::OptionalDuration& loopTimeoutDuration),
+                (const Request& request, MeasureTiming measure,
+                 const OptionalDuration& loopTimeoutDuration,
+                 const std::vector<TokenValuePair>& hints,
+                 const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix),
                 (const, override));
     MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
     MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
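With the mock signatures widened as above, tests are not limited to wildcarding the two new arguments. As a hypothetical fragment reusing the fixture names from the tests that follow (IsEmpty is the standard gMock matcher), the body of such a test could pin down that a legacy call site forwards empty hint vectors:

    using ::testing::_;
    using ::testing::IsEmpty;
    using ::testing::Return;

    // Expect exactly one execute() call whose hint and extension-prefix vectors
    // are both empty, then trigger it through the resilient wrapper.
    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, IsEmpty(), IsEmpty()))
            .Times(1)
            .WillOnce(Return(kNoExecutionError));
    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
    ASSERT_TRUE(result.has_value());
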
diff --git a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
index 0488b63..d9b8505 100644
--- a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
@@ -309,12 +309,12 @@
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
     const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(mockPreparedModel));
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -324,12 +324,12 @@
 TEST(ResilientDeviceTest, prepareModelError) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -339,13 +339,13 @@
 TEST(ResilientDeviceTest, prepareModelDeadObjectFailedRecovery) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -355,18 +355,18 @@
 TEST(ResilientDeviceTest, prepareModelDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     const auto recoveredMockDevice = createConfiguredMockDevice();
     const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>();
-    EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _))
+    EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(mockPreparedModel));
     EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(Return(recoveredMockDevice));
 
     // run test
-    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -679,7 +679,7 @@
     device->recover(mockDevice.get(), /*blocking=*/false);
 
     // run test
-    auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {});
+    auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index d396ca8..276bfba 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -104,12 +104,12 @@
 TEST(ResilientPreparedModelTest, execute) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoExecutionError));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -119,10 +119,12 @@
 TEST(ResilientPreparedModelTest, executeError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _))
+            .Times(1)
+            .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -132,12 +134,12 @@
 TEST(ResilientPreparedModelTest, executeDeadObjectFailedRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
     constexpr auto ret = [] { return nn::error(nn::ErrorStatus::GENERAL_FAILURE); };
     EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(ret);
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -147,9 +149,9 @@
 TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
+    EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject);
     const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
-    EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _))
+    EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoExecutionError));
     EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -157,7 +159,7 @@
             .WillOnce(Return(recoveredMockPreparedModel));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -167,12 +169,12 @@
 TEST(ResilientPreparedModelTest, executeFenced) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoFencedExecutionError));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -182,12 +184,12 @@
 TEST(ResilientPreparedModelTest, executeFencedError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -197,13 +199,13 @@
 TEST(ResilientPreparedModelTest, executeFencedDeadObjectFailedRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -213,11 +215,11 @@
 TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnDeadObject);
     const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel();
-    EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _))
+    EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoFencedExecutionError));
     EXPECT_CALL(*mockPreparedModelFactory, Call())
@@ -225,7 +227,7 @@
             .WillOnce(Return(recoveredMockPreparedModel));
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -235,12 +237,12 @@
 TEST(ResilientPreparedModelTest, createReusableExecution) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
             .Times(1)
             .WillOnce(Return(kNoCreateReusableExecutionError));
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -250,12 +252,12 @@
 TEST(ResilientPreparedModelTest, createReusableExecutionError) {
     // setup call
     const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
-    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+    EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _))
             .Times(1)
             .WillOnce(kReturnGeneralFailure);
 
     // run test
-    const auto result = preparedModel->createReusableExecution({}, {}, {});
+    const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());