Merge "Adds new error code which is required by keystore to handle strongbox hal related functionality. Test: N/A Bug: b/184306647"
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index 8329303..7849ca7 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -44,7 +44,9 @@
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
-            const nn::Request& request, nn::MeasureTiming measure) const override;
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
   private:
     const nn::SharedPreparedModel kPreparedModel;
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
index b695f48..1baabdf 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
@@ -22,10 +22,15 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
+constexpr auto kVersion = nn::Version::ANDROID_OC_MR1;
+
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
     const auto maybeCanonical = nn::convert(halObject);
@@ -45,6 +50,15 @@
 }
 
 template <typename Type>
+nn::GeneralResult<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return {};
+}
+
+template <typename Type>
 auto convertFromNonCanonical(const Type& nonCanonicalObject)
         -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
     return convert(NN_TRY(nn::convert(nonCanonicalObject)));
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index 971ad08..e3a9757 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -20,6 +20,7 @@
 #include <nnapi/IBurst.h>
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 
 #include <memory>
@@ -48,8 +49,10 @@
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
-        const nn::Request& request, nn::MeasureTiming measure) const {
-    return kPreparedModel->execute(request, measure, {}, {});
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
 }
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index 700b050..c0498eb 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -35,6 +35,8 @@
 #include <utility>
 #include <variant>
 
+#include "Utils.h"
+
 namespace {
 
 template <typename Type>
@@ -42,8 +44,6 @@
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
-constexpr auto kVersion = android::nn::Version::ANDROID_OC_MR1;
-
 }  // namespace
 
 namespace android::nn {
@@ -53,13 +53,13 @@
 using hardware::hidl_vec;
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
@@ -68,16 +68,9 @@
 }
 
 template <typename Type>
-decltype(nn::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& halObject) {
+GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
     auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
-    const auto maybeVersion = validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(hal::V1_0::utils::compliantVersion(canonical));
     return canonical;
 }
 
@@ -248,13 +241,13 @@
 namespace {
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(utils::unvalidatedConvert(arguments[i]));
     }
@@ -262,15 +255,8 @@
 }
 
 template <typename Type>
-decltype(utils::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& canonical) {
-    const auto maybeVersion = nn::validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return nn::error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+    NN_TRY(compliantVersion(canonical));
     return utils::unvalidatedConvert(canonical);
 }
 
diff --git a/neuralnetworks/1.0/utils/test/MockDevice.h b/neuralnetworks/1.0/utils/test/MockDevice.h
index 0fb59e3..7c399ec 100644
--- a/neuralnetworks/1.0/utils/test/MockDevice.h
+++ b/neuralnetworks/1.0/utils/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE_H
 
 #include <android/hardware/neuralnetworks/1.0/IDevice.h>
 #include <gmock/gmock.h>
@@ -83,4 +83,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/1.0/utils/test/MockPreparedModel.h b/neuralnetworks/1.0/utils/test/MockPreparedModel.h
index 7a48a83..03f1a4b 100644
--- a/neuralnetworks/1.0/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/1.0/utils/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL_H
 
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
 #include <gmock/gmock.h>
@@ -82,4 +82,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_TEST_MOCK_PREPARED_MODEL_H
diff --git a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
index a5cbc72..f19ed77 100644
--- a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
@@ -224,7 +224,19 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-// TODO: test burst execution if/when it is added to nn::IPreparedModel.
+TEST(PreparedModelTest, configureExecutionBurst) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_NE(result.value(), nullptr);
+}
 
 TEST(PreparedModelTest, getUnderlyingResource) {
     // setup test
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
index 09597a3..a8cf8cf 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
@@ -22,12 +22,16 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.1/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_1::utils {
 
 constexpr auto kDefaultExecutionPreference = ExecutionPreference::FAST_SINGLE_ANSWER;
+constexpr auto kVersion = nn::Version::ANDROID_P;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -48,6 +52,15 @@
 }
 
 template <typename Type>
+nn::GeneralResult<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return {};
+}
+
+template <typename Type>
 auto convertFromNonCanonical(const Type& nonCanonicalObject)
         -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
     return convert(NN_TRY(nn::convert(nonCanonicalObject)));
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index d07f7d0..467ceb3 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -35,11 +35,7 @@
 #include <type_traits>
 #include <utility>
 
-namespace {
-
-constexpr auto kVersion = android::nn::Version::ANDROID_P;
-
-}  // namespace
+#include "Utils.h"
 
 namespace android::nn {
 namespace {
@@ -47,13 +43,13 @@
 using hardware::hidl_vec;
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
@@ -62,16 +58,9 @@
 }
 
 template <typename Type>
-decltype(nn::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& halObject) {
+GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
     auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
-    const auto maybeVersion = validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(hal::V1_1::utils::compliantVersion(canonical));
     return canonical;
 }
 
@@ -180,13 +169,13 @@
 }
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
     }
@@ -194,16 +183,9 @@
 }
 
 template <typename Type>
-decltype(utils::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& canonical) {
-    const auto maybeVersion = nn::validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return nn::error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
-    return utils::unvalidatedConvert(canonical);
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+    NN_TRY(compliantVersion(canonical));
+    return unvalidatedConvert(canonical);
 }
 
 }  // anonymous namespace
diff --git a/neuralnetworks/1.1/utils/test/MockDevice.h b/neuralnetworks/1.1/utils/test/MockDevice.h
index 3b92e58..db7392d 100644
--- a/neuralnetworks/1.1/utils/test/MockDevice.h
+++ b/neuralnetworks/1.1/utils/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE_H
 
 #include <android/hardware/neuralnetworks/1.1/IDevice.h>
 #include <gmock/gmock.h>
@@ -92,4 +92,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_1::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/1.1/utils/test/MockPreparedModel.h b/neuralnetworks/1.1/utils/test/MockPreparedModel.h
index aba731e..257397d 100644
--- a/neuralnetworks/1.1/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/1.1/utils/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL_H
 
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
 #include <gmock/gmock.h>
@@ -41,4 +41,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_0::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_1_UTILS_TEST_MOCK_PREPARED_MODEL_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
index 6b6fc71..9669d8c0 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
@@ -57,7 +57,8 @@
   public:
     using FallbackFunction =
             std::function<nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>(
-                    const nn::Request&, nn::MeasureTiming)>;
+                    const nn::Request&, nn::MeasureTiming, const nn::OptionalTimePoint&,
+                    const nn::OptionalDuration&)>;
 
     /**
      * NN runtime memory cache.
@@ -168,7 +169,9 @@
 
     // See IBurst::execute for information on this method.
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
-            const nn::Request& request, nn::MeasureTiming measure) const override;
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
   private:
     mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 3233114..09691b6 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -22,19 +22,25 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.1/Utils.h>
+#include <nnapi/hal/HandleError.h>
 
 #include <limits>
 
 namespace android::hardware::neuralnetworks::V1_2::utils {
 
 using CacheToken = hidl_array<uint8_t, static_cast<size_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_1::utils::kDefaultExecutionPreference;
 
 constexpr auto kDefaultMesaureTiming = MeasureTiming::NO;
 constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
+constexpr auto kVersion = nn::Version::ANDROID_Q;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -55,6 +61,15 @@
 }
 
 template <typename Type>
+nn::GeneralResult<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return {};
+}
+
+template <typename Type>
 auto convertFromNonCanonical(const Type& nonCanonicalObject)
         -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
     return convert(NN_TRY(nn::convert(nonCanonicalObject)));
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 2c45583..29945b7 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -37,6 +37,8 @@
 #include <type_traits>
 #include <utility>
 
+#include "Utils.h"
+
 namespace {
 
 template <typename Type>
@@ -45,50 +47,23 @@
 }
 
 using HalDuration = std::chrono::duration<uint64_t, std::micro>;
-constexpr auto kVersion = android::nn::Version::ANDROID_Q;
-constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
 
 }  // namespace
 
 namespace android::nn {
 namespace {
 
-constexpr bool validOperandType(OperandType operandType) {
-    switch (operandType) {
-        case OperandType::FLOAT32:
-        case OperandType::INT32:
-        case OperandType::UINT32:
-        case OperandType::TENSOR_FLOAT32:
-        case OperandType::TENSOR_INT32:
-        case OperandType::TENSOR_QUANT8_ASYMM:
-        case OperandType::BOOL:
-        case OperandType::TENSOR_QUANT16_SYMM:
-        case OperandType::TENSOR_FLOAT16:
-        case OperandType::TENSOR_BOOL8:
-        case OperandType::FLOAT16:
-        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
-        case OperandType::TENSOR_QUANT16_ASYMM:
-        case OperandType::TENSOR_QUANT8_SYMM:
-        case OperandType::OEM:
-        case OperandType::TENSOR_OEM_BYTE:
-            return true;
-        default:
-            break;
-    }
-    return isExtension(operandType);
-}
-
 using hardware::hidl_handle;
 using hardware::hidl_vec;
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
@@ -97,29 +72,16 @@
 }
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
-        const hidl_vec<Type>& arguments) {
-    return unvalidatedConvertVec(arguments);
-}
-
-template <typename Type>
-decltype(nn::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& halObject) {
+GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
     auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
-    const auto maybeVersion = validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(hal::V1_2::utils::compliantVersion(canonical));
     return canonical;
 }
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> validatedConvert(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(validatedConvert(argument)));
@@ -145,8 +107,7 @@
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const hal::V1_2::Capabilities::OperandPerformance& operandPerformance) {
-                const auto maybeType = unvalidatedConvert(operandPerformance.type);
-                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
+                return validatedConvert(operandPerformance.type).has_value();
             });
     if (!validOperandTypes) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
@@ -275,6 +236,7 @@
 GeneralResult<Timing> unvalidatedConvert(const hal::V1_2::Timing& timing) {
     constexpr uint64_t kMaxTiming = std::chrono::floor<HalDuration>(Duration::max()).count();
     constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration {
+        constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
         if (halTiming == kNoTiming) {
             return {};
         }
@@ -378,25 +340,19 @@
 }
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
     }
     return halObject;
 }
 
-template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
-        const std::vector<Type>& arguments) {
-    return unvalidatedConvertVec(arguments);
-}
-
 nn::GeneralResult<Operand::ExtraParams> makeExtraParams(nn::Operand::NoParams /*noParams*/) {
     return Operand::ExtraParams{};
 }
@@ -416,22 +372,15 @@
 }
 
 template <typename Type>
-decltype(utils::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& canonical) {
-    const auto maybeVersion = nn::validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return nn::error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
-    return utils::unvalidatedConvert(canonical);
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+    NN_TRY(compliantVersion(canonical));
+    return unvalidatedConvert(canonical);
 }
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> validatedConvert(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> validatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(validatedConvert(arguments[i]));
     }
@@ -469,7 +418,7 @@
                  capabilities.operandPerformance.asVector().end(),
                  std::back_inserter(operandPerformance),
                  [](const nn::Capabilities::OperandPerformance& operandPerformance) {
-                     return nn::validOperandType(operandPerformance.type);
+                     return compliantVersion(operandPerformance.type).has_value();
                  });
 
     return Capabilities{
@@ -570,6 +519,7 @@
 
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
     constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t {
+        constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
         if (!canonicalTiming.has_value()) {
             return kNoTiming;
         }
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index eedf591..7a17f25 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -276,7 +276,9 @@
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming measure) const {
+ExecutionBurstController::execute(const nn::Request& request, nn::MeasureTiming measure,
+                                  const nn::OptionalTimePoint& deadline,
+                                  const nn::OptionalDuration& loopTimeoutDuration) const {
     // This is the first point when we know an execution is occurring, so begin to collect
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
@@ -289,7 +291,7 @@
         version > nn::Version::ANDROID_Q) {
         // fallback to another execution path if the packet could not be sent
         if (kFallback) {
-            return kFallback(request, measure);
+            return kFallback(request, measure, deadline, loopTimeoutDuration);
         }
         return NN_ERROR() << "Request object has features not supported by IBurst::execute";
     }
@@ -323,7 +325,7 @@
     if (!sendStatus.ok()) {
         // fallback to another execution path if the packet could not be sent
         if (kFallback) {
-            return kFallback(request, measure);
+            return kFallback(request, measure, deadline, loopTimeoutDuration);
         }
         return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
     }
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
index 50af881..c67159e 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
@@ -259,7 +259,7 @@
     nn::MeasureTiming canonicalMeasure = NN_TRY(makeExecutionFailure(nn::convert(measure)));
 
     const auto [outputShapes, timing] =
-            NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure));
+            NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
 
     return std::make_pair(NN_TRY(makeExecutionFailure(convert(outputShapes))),
                           NN_TRY(makeExecutionFailure(convert(timing))));
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 71a4ea8..b209a44 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -122,10 +122,12 @@
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
     auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](const nn::Request& request,
-                                                      nn::MeasureTiming measure)
+    auto fallback = [preparedModel = std::move(self)](
+                            const nn::Request& request, nn::MeasureTiming measure,
+                            const nn::OptionalTimePoint& deadline,
+                            const nn::OptionalDuration& loopTimeoutDuration)
             -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, {}, {});
+        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     };
     const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
     return ExecutionBurstController::create(kPreparedModel, std::move(fallback), pollingTimeWindow);
diff --git a/neuralnetworks/1.2/utils/test/MockBurstContext.h b/neuralnetworks/1.2/utils/test/MockBurstContext.h
new file mode 100644
index 0000000..e364178
--- /dev/null
+++ b/neuralnetworks/1.2/utils/test/MockBurstContext.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_BURST_CONTEXT_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_BURST_CONTEXT_H
+
+#include <android/hardware/neuralnetworks/1.2/IBurstContext.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/Status.h>
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+class MockBurstContext final : public IBurstContext {
+  public:
+    // V1_2 methods below.
+    MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override));
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_BURST_CONTEXT_H
diff --git a/neuralnetworks/1.2/utils/test/MockDevice.h b/neuralnetworks/1.2/utils/test/MockDevice.h
index b459943..0d34c70 100644
--- a/neuralnetworks/1.2/utils/test/MockDevice.h
+++ b/neuralnetworks/1.2/utils/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE_H
 
 #include <android/hardware/neuralnetworks/1.2/IDevice.h>
 #include <gmock/gmock.h>
@@ -114,4 +114,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/1.2/utils/test/MockPreparedModel.h b/neuralnetworks/1.2/utils/test/MockPreparedModel.h
index f5fd1f3..bd81712 100644
--- a/neuralnetworks/1.2/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/1.2/utils/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL_H
 
 #include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
 #include <gmock/gmock.h>
@@ -98,4 +98,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_2::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_TEST_MOCK_PREPARED_MODEL_H
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index 5062ac9..d297b1a 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -16,6 +16,8 @@
 
 #include "MockPreparedModel.h"
 
+#include "MockBurstContext.h"
+
 #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -67,6 +69,17 @@
         return launchStatus;
     };
 }
+auto makeConfigureExecutionBurstReturn(V1_0::ErrorStatus status,
+                                       const sp<MockBurstContext>& burstContext) {
+    return [status, burstContext](
+                   const sp<V1_2::IBurstCallback>& /*callback*/,
+                   const MQDescriptorSync<V1_2::FmqRequestDatum>& /*requestChannel*/,
+                   const MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
+                   V1_2::IPreparedModel::configureExecutionBurst_cb cb) -> hardware::Return<void> {
+        cb(status, burstContext);
+        return hardware::Void();
+    };
+}
 
 std::function<hardware::Status()> makeTransportFailure(status_t status) {
     return [status] { return hardware::Status::fromStatusT(status); };
@@ -321,7 +334,76 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
 }
 
-// TODO: test burst execution if/when it is added to nn::IPreparedModel.
+TEST(PreparedModelTest, configureExecutionBurst) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto mockBurstContext = sp<MockBurstContext>::make();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(makeConfigureExecutionBurstReturn(V1_0::ErrorStatus::NONE, mockBurstContext));
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_NE(result.value(), nullptr);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstError) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(
+                    makeConfigureExecutionBurstReturn(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstTransportFailure) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstDeadObject) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
 
 TEST(PreparedModelTest, getUnderlyingResource) {
     // setup test
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
index 3ce412c..1d76caa 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
@@ -22,14 +22,25 @@
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.1/Utils.h>
 #include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/Utils.h>
+#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
+using V1_1::utils::kDefaultExecutionPreference;
+using V1_2::utils::CacheToken;
+using V1_2::utils::kDefaultMesaureTiming;
+using V1_2::utils::kNoTiming;
+
 constexpr auto kDefaultPriority = Priority::MEDIUM;
+constexpr auto kVersion = nn::Version::ANDROID_R;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -50,6 +61,15 @@
 }
 
 template <typename Type>
+nn::GeneralResult<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(canonical)));
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return {};
+}
+
+template <typename Type>
 auto convertFromNonCanonical(const Type& nonCanonicalObject)
         -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
     return convert(NN_TRY(nn::convert(nonCanonicalObject)));
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 9788fe1..8083ae4 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -38,6 +38,8 @@
 #include <type_traits>
 #include <utility>
 
+#include "Utils.h"
+
 namespace {
 
 template <typename Type>
@@ -45,48 +47,21 @@
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
-constexpr auto kVersion = android::nn::Version::ANDROID_R;
-
 }  // namespace
 
 namespace android::nn {
 namespace {
 
-constexpr auto validOperandType(nn::OperandType operandType) {
-    switch (operandType) {
-        case nn::OperandType::FLOAT32:
-        case nn::OperandType::INT32:
-        case nn::OperandType::UINT32:
-        case nn::OperandType::TENSOR_FLOAT32:
-        case nn::OperandType::TENSOR_INT32:
-        case nn::OperandType::TENSOR_QUANT8_ASYMM:
-        case nn::OperandType::BOOL:
-        case nn::OperandType::TENSOR_QUANT16_SYMM:
-        case nn::OperandType::TENSOR_FLOAT16:
-        case nn::OperandType::TENSOR_BOOL8:
-        case nn::OperandType::FLOAT16:
-        case nn::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
-        case nn::OperandType::TENSOR_QUANT16_ASYMM:
-        case nn::OperandType::TENSOR_QUANT8_SYMM:
-        case nn::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
-        case nn::OperandType::SUBGRAPH:
-        case nn::OperandType::OEM:
-        case nn::OperandType::TENSOR_OEM_BYTE:
-            return true;
-    }
-    return nn::isExtension(operandType);
-}
-
 using hardware::hidl_vec;
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
@@ -95,29 +70,16 @@
 }
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
-        const hidl_vec<Type>& arguments) {
-    return unvalidatedConvertVec(arguments);
-}
-
-template <typename Type>
-decltype(nn::unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& halObject) {
+GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
     auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
-    const auto maybeVersion = validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(hal::V1_3::utils::compliantVersion(canonical));
     return canonical;
 }
 
 template <typename Type>
-GeneralResult<std::vector<unvalidatedConvertOutput<Type>>> validatedConvert(
+GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
         const hidl_vec<Type>& arguments) {
-    std::vector<unvalidatedConvertOutput<Type>> canonical;
+    std::vector<UnvalidatedConvertOutput<Type>> canonical;
     canonical.reserve(arguments.size());
     for (const auto& argument : arguments) {
         canonical.push_back(NN_TRY(validatedConvert(argument)));
@@ -143,8 +105,7 @@
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
-                const auto maybeType = unvalidatedConvert(operandPerformance.type);
-                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
+                return validatedConvert(operandPerformance.type).has_value();
             });
     if (!validOperandTypes) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
@@ -244,7 +205,7 @@
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
-            .frequency = bufferRole.frequency,
+            .probability = bufferRole.frequency,
     };
 }
 
@@ -401,25 +362,19 @@
 }
 
 template <typename Input>
-using unvalidatedConvertOutput =
+using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
     }
     return halObject;
 }
 
-template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> unvalidatedConvert(
-        const std::vector<Type>& arguments) {
-    return unvalidatedConvertVec(arguments);
-}
-
 nn::GeneralResult<Request::MemoryPool> makeMemoryPool(const nn::SharedMemory& memory) {
     Request::MemoryPool ret;
     ret.hidlMemory(NN_TRY(unvalidatedConvert(memory)));
@@ -439,22 +394,15 @@
 using utils::unvalidatedConvert;
 
 template <typename Type>
-decltype(unvalidatedConvert(std::declval<Type>())) validatedConvert(const Type& canonical) {
-    const auto maybeVersion = nn::validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return nn::error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+    NN_TRY(compliantVersion(canonical));
     return unvalidatedConvert(canonical);
 }
 
 template <typename Type>
-nn::GeneralResult<hidl_vec<unvalidatedConvertOutput<Type>>> validatedConvert(
+nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> validatedConvert(
         const std::vector<Type>& arguments) {
-    hidl_vec<unvalidatedConvertOutput<Type>> halObject(arguments.size());
+    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
     for (size_t i = 0; i < arguments.size(); ++i) {
         halObject[i] = NN_TRY(validatedConvert(arguments[i]));
     }
@@ -482,7 +430,7 @@
                  capabilities.operandPerformance.asVector().end(),
                  std::back_inserter(operandPerformance),
                  [](const nn::Capabilities::OperandPerformance& operandPerformance) {
-                     return nn::validOperandType(operandPerformance.type);
+                     return compliantVersion(operandPerformance.type).has_value();
                  });
 
     return Capabilities{
@@ -577,7 +525,7 @@
     return BufferRole{
             .modelIndex = bufferRole.modelIndex,
             .ioIndex = bufferRole.ioIndex,
-            .frequency = bufferRole.frequency,
+            .frequency = bufferRole.probability,
     };
 }
 
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 64275a3..fd7f8f2 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -201,10 +201,12 @@
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
     auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](const nn::Request& request,
-                                                      nn::MeasureTiming measure)
+    auto fallback = [preparedModel = std::move(self)](
+                            const nn::Request& request, nn::MeasureTiming measure,
+                            const nn::OptionalTimePoint& deadline,
+                            const nn::OptionalDuration& loopTimeoutDuration)
             -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
-        return preparedModel->execute(request, measure, {}, {});
+        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     };
     const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
     return V1_2::utils::ExecutionBurstController::create(kPreparedModel, std::move(fallback),
diff --git a/neuralnetworks/1.3/utils/test/MockBuffer.h b/neuralnetworks/1.3/utils/test/MockBuffer.h
index fb31b51..a67c5f6 100644
--- a/neuralnetworks/1.3/utils/test/MockBuffer.h
+++ b/neuralnetworks/1.3/utils/test/MockBuffer.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER_H
 
 #include <android/hardware/neuralnetworks/1.3/IBuffer.h>
 #include <gmock/gmock.h>
@@ -40,4 +40,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BUFFER_H
diff --git a/neuralnetworks/1.3/utils/test/MockBurstContext.h b/neuralnetworks/1.3/utils/test/MockBurstContext.h
new file mode 100644
index 0000000..e102b46
--- /dev/null
+++ b/neuralnetworks/1.3/utils/test/MockBurstContext.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BURST_CONTEXT_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BURST_CONTEXT_H
+
+#include <android/hardware/neuralnetworks/1.2/IBurstContext.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/Status.h>
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class MockBurstContext final : public V1_2::IBurstContext {
+  public:
+    // V1_2 methods below.
+    MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override));
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_BURST_CONTEXT_H
diff --git a/neuralnetworks/1.3/utils/test/MockDevice.h b/neuralnetworks/1.3/utils/test/MockDevice.h
index 85d3750..b79037f 100644
--- a/neuralnetworks/1.3/utils/test/MockDevice.h
+++ b/neuralnetworks/1.3/utils/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE_H
 
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
 #include <gmock/gmock.h>
@@ -136,4 +136,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/1.3/utils/test/MockFencedExecutionCallback.h b/neuralnetworks/1.3/utils/test/MockFencedExecutionCallback.h
index fc08a7f..04c0a92 100644
--- a/neuralnetworks/1.3/utils/test/MockFencedExecutionCallback.h
+++ b/neuralnetworks/1.3/utils/test/MockFencedExecutionCallback.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
 
 #include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
 #include <gmock/gmock.h>
@@ -39,4 +39,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
diff --git a/neuralnetworks/1.3/utils/test/MockPreparedModel.h b/neuralnetworks/1.3/utils/test/MockPreparedModel.h
index e441524..ef64fa4 100644
--- a/neuralnetworks/1.3/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/1.3/utils/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL_H
 
 #include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
 #include <gmock/gmock.h>
@@ -118,4 +118,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_TEST_MOCK_PREPARED_MODEL_H
diff --git a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
index 11796dd..5303c2a 100644
--- a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "MockBurstContext.h"
 #include "MockFencedExecutionCallback.h"
 #include "MockPreparedModel.h"
 
@@ -96,6 +97,17 @@
         return hardware::Void();
     };
 }
+auto makeConfigureExecutionBurstReturn(V1_0::ErrorStatus status,
+                                       const sp<MockBurstContext>& burstContext) {
+    return [status, burstContext](
+                   const sp<V1_2::IBurstCallback>& /*callback*/,
+                   const MQDescriptorSync<V1_2::FmqRequestDatum>& /*requestChannel*/,
+                   const MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/,
+                   V1_2::IPreparedModel::configureExecutionBurst_cb cb) -> hardware::Return<void> {
+        cb(status, burstContext);
+        return hardware::Void();
+    };
+}
 
 std::function<hardware::Status()> makeTransportFailure(status_t status) {
     return [status] { return hardware::Status::fromStatusT(status); };
@@ -450,7 +462,76 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-// TODO: test burst execution if/when it is added to nn::IPreparedModel.
+TEST(PreparedModelTest, configureExecutionBurst) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto mockBurstContext = sp<MockBurstContext>::make();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(makeConfigureExecutionBurstReturn(V1_0::ErrorStatus::NONE, mockBurstContext));
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_NE(result.value(), nullptr);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstError) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(
+                    makeConfigureExecutionBurstReturn(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstTransportFailure) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstDeadObject) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto preparedModel =
+            PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_, _, _, _))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
 
 TEST(PreparedModelTest, getUnderlyingResource) {
     // setup test
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/BufferRole.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/BufferRole.aidl
index f18e92a..10a6b75 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/BufferRole.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/BufferRole.aidl
@@ -36,5 +36,5 @@
 parcelable BufferRole {
   int modelIndex;
   int ioIndex;
-  float frequency;
+  float probability;
 }
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/BufferRole.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/BufferRole.aidl
index 0d7f678..c444851 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/BufferRole.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/BufferRole.aidl
@@ -35,5 +35,5 @@
      * used in the specified role. This is provided as a hint to optimize the case when multiple
      * roles prefer different buffer locations or data layouts.
      */
-    float frequency;
+    float probability;
 }
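
The BufferRole change here and below is a pure rename: the float hint describing how likely the buffer is to be used in the specified role is now called probability rather than frequency. Caller-side construction after the rename, mirroring the updated VTS tests later in this change (BufferRole is the generated AIDL type):

    BufferRole role = {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f};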
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 476dac9..ad961cf 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -34,7 +34,6 @@
         "libarect",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
-        "neuralnetworks_utils_hal_1_0",
     ],
     shared_libs: [
         "android.hardware.neuralnetworks-V1-ndk_platform",
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
new file mode 100644
index 0000000..008e4e4
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/IBurst.h>
+#include <android-base/scopeguard.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <utility>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+// Class that adapts aidl_hal::IBurst to nn::IBurst.
+class Burst final : public nn::IBurst {
+    struct PrivateConstructorTag {};
+
+  public:
+    /**
+     * Thread-safe, self-cleaning cache that relates an nn::Memory object to a unique int64_t
+     * identifier.
+     */
+    class MemoryCache : public std::enable_shared_from_this<MemoryCache> {
+      public:
+        using Task = std::function<void()>;
+        using Cleanup = ::android::base::ScopeGuard<Task>;
+        using SharedCleanup = std::shared_ptr<const Cleanup>;
+        using WeakCleanup = std::weak_ptr<const Cleanup>;
+
+        explicit MemoryCache(std::shared_ptr<aidl_hal::IBurst> burst);
+
+        /**
+         * Get or cache a memory object in the MemoryCache object.
+         *
+         * @param memory Memory object to be cached while the returned `SharedCleanup` is alive.
+         * @return A pair of (1) a unique identifier for the cache entry and (2) a ref-counted
+         *     "hold" object which preserves the cache as long as the hold object is alive.
+         */
+        std::pair<int64_t, SharedCleanup> getOrCacheMemory(const nn::SharedMemory& memory);
+
+        /**
+         * Get the entry for a memory object if it is present in the MemoryCache,
+         * otherwise return std::nullopt.
+         *
+         * @param memory Memory object to look up in the cache.
+         * @return A pair of (1) a unique identifier for the cache entry and (2) a ref-counted
+         *     "hold" object which preserves the cache as long as the hold object is alive. If the
+         *     cache entry is not present, std::nullopt is returned instead.
+         */
+        std::optional<std::pair<int64_t, SharedCleanup>> getMemoryIfAvailable(
+                const nn::SharedMemory& memory);
+
+      private:
+        void tryFreeMemory(const nn::SharedMemory& memory, int64_t identifier);
+
+        const std::shared_ptr<aidl_hal::IBurst> kBurst;
+        std::mutex mMutex;
+        int64_t mUnusedIdentifier GUARDED_BY(mMutex) = 0;
+        std::unordered_map<nn::SharedMemory, std::pair<int64_t, WeakCleanup>> mCache
+                GUARDED_BY(mMutex);
+    };
+
+    static nn::GeneralResult<std::shared_ptr<const Burst>> create(
+            std::shared_ptr<aidl_hal::IBurst> burst);
+
+    Burst(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IBurst> burst);
+
+    // See IBurst::cacheMemory for information.
+    OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
+
+    // See IBurst::execute for information.
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+  private:
+    mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
+    const std::shared_ptr<aidl_hal::IBurst> kBurst;
+    const std::shared_ptr<MemoryCache> kMemoryCache;
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_BURST_H
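
The MemoryCache declared above is a weak_ptr-based, self-cleaning cache: getOrCacheMemory hands out a ref-counted "hold", the cache itself keeps only a weak reference, and once the last hold is dropped the entry removes itself. A minimal standalone sketch of that pattern follows, using only the standard library; the names are illustrative, and the real cache additionally tells the driver to release the entry via IBurst::releaseMemoryResource.

    // Sketch of the self-cleaning hold pattern (illustrative, not the HAL code).
    // A Cache instance must be owned by a std::shared_ptr for weak_from_this().
    #include <cstdint>
    #include <functional>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <unordered_map>
    #include <utility>

    class Cache : public std::enable_shared_from_this<Cache> {
      public:
        // Ref-counted "hold": the entry stays cached while any hold is alive.
        struct Cleanup {
            explicit Cleanup(std::function<void()> task) : mTask(std::move(task)) {}
            Cleanup(const Cleanup&) = delete;
            Cleanup& operator=(const Cleanup&) = delete;
            ~Cleanup() { mTask(); }
            std::function<void()> mTask;
        };
        using SharedCleanup = std::shared_ptr<const Cleanup>;
        using WeakCleanup = std::weak_ptr<const Cleanup>;

        std::pair<int64_t, SharedCleanup> getOrCache(const std::string& key) {
            std::lock_guard lock(mMutex);
            auto& [identifier, weakHold] = mCache[key];
            if (auto hold = weakHold.lock()) {
                return {identifier, std::move(hold)};  // live entry: reuse it
            }
            identifier = mNextIdentifier++;
            // When the last copy of this hold is dropped, try to evict the entry.
            auto hold = std::make_shared<const Cleanup>(
                    [key, id = identifier, weakSelf = weak_from_this()] {
                        if (auto self = weakSelf.lock()) self->tryEvict(key, id);
                    });
            weakHold = hold;
            return {identifier, std::move(hold)};
        }

      private:
        void tryEvict(const std::string& key, int64_t identifier) {
            std::lock_guard lock(mMutex);
            const auto it = mCache.find(key);
            // Another thread may have re-cached the key, so only erase the entry
            // if it is still ours and its hold has expired.
            if (it != mCache.end() && it->second.first == identifier &&
                it->second.second.expired()) {
                mCache.erase(it);
            }
        }

        std::mutex mMutex;
        int64_t mNextIdentifier = 0;
        std::unordered_map<std::string, std::pair<int64_t, WeakCleanup>> mCache;
    };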
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 4922a6e..5eab9ff 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -99,6 +99,9 @@
         const ::aidl::android::hardware::common::NativeHandle& handle);
 GeneralResult<SyncFence> unvalidatedConvert(const ndk::ScopedFileDescriptor& syncFence);
 
+GeneralResult<std::vector<Operation>> unvalidatedConvert(
+        const std::vector<aidl_hal::Operation>& operations);
+
 GeneralResult<Capabilities> convert(const aidl_hal::Capabilities& capabilities);
 GeneralResult<DeviceType> convert(const aidl_hal::DeviceType& deviceType);
 GeneralResult<ErrorStatus> convert(const aidl_hal::ErrorStatus& errorStatus);
@@ -106,16 +109,13 @@
         const aidl_hal::ExecutionPreference& executionPreference);
 GeneralResult<SharedMemory> convert(const aidl_hal::Memory& memory);
 GeneralResult<Model> convert(const aidl_hal::Model& model);
-GeneralResult<Operand> convert(const aidl_hal::Operand& operand);
 GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType);
 GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
-GeneralResult<Request::MemoryPool> convert(const aidl_hal::RequestMemoryPool& memoryPool);
 GeneralResult<Request> convert(const aidl_hal::Request& request);
 GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
 GeneralResult<SyncFence> convert(const ndk::ScopedFileDescriptor& syncFence);
 
 GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
-GeneralResult<std::vector<Operation>> convert(const std::vector<aidl_hal::Operation>& outputShapes);
 GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
 GeneralResult<std::vector<OutputShape>> convert(
         const std::vector<aidl_hal::OutputShape>& outputShapes);
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 9b28588..abce6cc 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -22,7 +22,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/aidl/ProtectCallback.h>
 
 #include <memory>
 #include <tuple>
@@ -35,8 +34,7 @@
 namespace aidl::android::hardware::neuralnetworks::utils {
 
 // Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel,
-                            public std::enable_shared_from_this<PreparedModel> {
+class PreparedModel final : public nn::IPreparedModel {
     struct PrivateConstructorTag {};
 
   public:
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index 58dcfe3..316d34f 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -21,6 +21,7 @@
 
 #include <android-base/logging.h>
 #include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/HandleError.h>
@@ -48,6 +49,22 @@
     return result.has_value();
 }
 
+template <typename Type>
+nn::GeneralResult<void> compliantVersion(const Type& canonical) {
+    const auto version = NN_TRY(::android::hardware::neuralnetworks::utils::makeGeneralFailure(
+            nn::validate(canonical)));
+    if (version > kVersion) {
+        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
+    }
+    return {};
+}
+
+template <typename Type>
+auto convertFromNonCanonical(const Type& nonCanonicalObject)
+        -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
+    return convert(NN_TRY(nn::convert(nonCanonicalObject)));
+}
+
 nn::GeneralResult<Memory> clone(const Memory& memory);
 nn::GeneralResult<Request> clone(const Request& request);
 nn::GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool);
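
The compliantVersion helper added above gives the AIDL utils the same version gate the HIDL utils use: nn::validate reports the minimum version an object requires, and conversion is refused when that exceeds the kVersion this adapter layer supports. A small self-contained sketch of the gate, with stand-in types (Model, validate, and the enum values below are placeholders, not the NNAPI ones):

    #include <iostream>
    #include <optional>

    enum class Version { V1_0, V1_1, V1_2, V1_3 };

    struct Model { bool usesControlFlow = false; };

    // Stand-in validator: the minimum version the object requires, or
    // std::nullopt if the object is malformed.
    std::optional<Version> validate(const Model& model) {
        return model.usesControlFlow ? Version::V1_3 : Version::V1_0;
    }

    constexpr Version kVersion = Version::V1_2;  // what this layer supports

    bool compliantVersion(const Model& model) {
        const auto version = validate(model);
        if (!version.has_value()) return false;  // invalid object
        if (*version > kVersion) {
            std::cerr << "Insufficient version\n";  // valid, but too new for us
            return false;
        }
        return true;
    }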
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
new file mode 100644
index 0000000..0b475bc
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Burst.h"
+
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android/binder_auto_utils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <utility>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+namespace {
+
+nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
+        const std::vector<OutputShape>& outputShapes, const Timing& timing) {
+    return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
+}
+
+}  // namespace
+
+Burst::MemoryCache::MemoryCache(std::shared_ptr<aidl_hal::IBurst> burst)
+    : kBurst(std::move(burst)) {}
+
+std::pair<int64_t, Burst::MemoryCache::SharedCleanup> Burst::MemoryCache::getOrCacheMemory(
+        const nn::SharedMemory& memory) {
+    std::lock_guard lock(mMutex);
+
+    // Get the cache payload or create it (with default values) if it does not exist.
+    auto& cachedPayload = mCache[memory];
+    {
+        const auto& [identifier, maybeCleaner] = cachedPayload;
+        // If the cache payload already exists, reuse it.
+        if (auto cleaner = maybeCleaner.lock()) {
+            return std::make_pair(identifier, std::move(cleaner));
+        }
+    }
+
+    // If the code reaches this point, the cached payload either did not exist or expired prior to
+    // this call.
+
+    // Allocate a new identifier.
+    CHECK_LT(mUnusedIdentifier, std::numeric_limits<int64_t>::max());
+    const int64_t identifier = mUnusedIdentifier++;
+
+    // Create reference-counted self-cleaning cache object.
+    auto self = weak_from_this();
+    Task cleanup = [memory, identifier, maybeMemoryCache = std::move(self)] {
+        if (const auto memoryCache = maybeMemoryCache.lock()) {
+            memoryCache->tryFreeMemory(memory, identifier);
+        }
+    };
+    auto cleaner = std::make_shared<const Cleanup>(std::move(cleanup));
+
+    // Store the result in the cache and return it.
+    auto result = std::make_pair(identifier, std::move(cleaner));
+    cachedPayload = result;
+    return result;
+}
+
+std::optional<std::pair<int64_t, Burst::MemoryCache::SharedCleanup>>
+Burst::MemoryCache::getMemoryIfAvailable(const nn::SharedMemory& memory) {
+    std::lock_guard lock(mMutex);
+
+    // Get the existing cached entry if it exists.
+    const auto iter = mCache.find(memory);
+    if (iter != mCache.end()) {
+        const auto& [identifier, maybeCleaner] = iter->second;
+        if (auto cleaner = maybeCleaner.lock()) {
+            return std::make_pair(identifier, std::move(cleaner));
+        }
+    }
+
+    // If the code reaches this point, the cached payload did not exist or was actively being
+    // deleted.
+    return std::nullopt;
+}
+
+void Burst::MemoryCache::tryFreeMemory(const nn::SharedMemory& memory, int64_t identifier) {
+    {
+        std::lock_guard guard(mMutex);
+        // Remove the cached memory and payload if it is present but expired. Note that it may not
+        // be present or may not be expired because another thread may have removed or cached the
+        // same memory object before the current thread locked mMutex in tryFreeMemory.
+        const auto iter = mCache.find(memory);
+        if (iter != mCache.end()) {
+            if (std::get<WeakCleanup>(iter->second).expired()) {
+                mCache.erase(iter);
+            }
+        }
+    }
+    kBurst->releaseMemoryResource(identifier);
+}
+
+nn::GeneralResult<std::shared_ptr<const Burst>> Burst::create(
+        std::shared_ptr<aidl_hal::IBurst> burst) {
+    if (burst == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "aidl_hal::utils::Burst::create must have non-null burst";
+    }
+
+    return std::make_shared<const Burst>(PrivateConstructorTag{}, std::move(burst));
+}
+
+Burst::Burst(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IBurst> burst)
+    : kBurst(std::move(burst)), kMemoryCache(std::make_shared<MemoryCache>(kBurst)) {
+    CHECK(kBurst != nullptr);
+}
+
+Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) const {
+    auto [identifier, hold] = kMemoryCache->getOrCacheMemory(memory);
+    return hold;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    // Ensure that at most one execution is in flight at any given time.
+    const bool alreadyInFlight = mExecutionInFlight.test_and_set();
+    if (alreadyInFlight) {
+        return NN_ERROR() << "IBurst already has an execution in flight";
+    }
+    const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
+
+    // Ensure that request is ready for IPC.
+    std::optional<nn::Request> maybeRequestInShared;
+    const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+
+    const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+    const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+    const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+    const auto aidlLoopTimeoutDuration =
+            NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+
+    std::vector<int64_t> memoryIdentifierTokens;
+    std::vector<OptionalCacheHold> holds;
+    memoryIdentifierTokens.reserve(request.pools.size());
+    holds.reserve(request.pools.size());
+    for (const auto& memoryPool : request.pools) {
+        if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
+            if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
+                auto& [identifier, hold] = *cached;
+                memoryIdentifierTokens.push_back(identifier);
+                holds.push_back(std::move(hold));
+                continue;
+            }
+        }
+        memoryIdentifierTokens.push_back(-1);
+    }
+    CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
+
+    ExecutionResult executionResult;
+    const auto ret =
+            kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
+                                         aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+    HANDLE_ASTATUS(ret) << "execute failed";
+    if (!executionResult.outputSufficientSize) {
+        auto canonicalOutputShapes =
+                nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+    }
+    auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
+            convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
+
+    NN_TRY(hal::utils::makeExecutionFailure(
+            hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
+
+    return std::make_pair(std::move(outputShapes), timing);
+}
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
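
Burst::execute above enforces at most one execution in flight with an atomic_flag whose clear is deferred to a scope guard, and it maps each request pool to a memory identifier token (the cached identifier when a hold exists, -1 otherwise) so the driver can reuse memory it has already seen. A minimal sketch of the in-flight guard, with a hand-rolled guard standing in for android::base::make_scope_guard:

    #include <atomic>
    #include <stdexcept>

    class SingleFlight {
      public:
        void run() {
            // test_and_set returns the previous value: true means an execution
            // is already in flight.
            if (mInFlight.test_and_set()) {
                throw std::runtime_error("already has an execution in flight");
            }
            // Clear the flag on every exit path, including exceptions.
            struct Guard {
                std::atomic_flag& flag;
                ~Guard() { flag.clear(); }
            } guard{mInFlight};
            // ... perform the execution here ...
        }

      private:
        std::atomic_flag mInFlight = ATOMIC_FLAG_INIT;
    };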
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index c47ba0e..c74c509 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -41,6 +41,8 @@
 #include <type_traits>
 #include <utility>
 
+#include "Utils.h"
+
 #define VERIFY_NON_NEGATIVE(value) \
     while (UNLIKELY(value < 0)) return NN_ERROR()
 
@@ -53,7 +55,6 @@
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
-constexpr auto kVersion = android::nn::Version::ANDROID_S;
 constexpr int64_t kNoTiming = -1;
 
 }  // namespace
@@ -63,32 +64,6 @@
 
 using ::aidl::android::hardware::common::NativeHandle;
 
-constexpr auto validOperandType(nn::OperandType operandType) {
-    switch (operandType) {
-        case nn::OperandType::FLOAT32:
-        case nn::OperandType::INT32:
-        case nn::OperandType::UINT32:
-        case nn::OperandType::TENSOR_FLOAT32:
-        case nn::OperandType::TENSOR_INT32:
-        case nn::OperandType::TENSOR_QUANT8_ASYMM:
-        case nn::OperandType::BOOL:
-        case nn::OperandType::TENSOR_QUANT16_SYMM:
-        case nn::OperandType::TENSOR_FLOAT16:
-        case nn::OperandType::TENSOR_BOOL8:
-        case nn::OperandType::FLOAT16:
-        case nn::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
-        case nn::OperandType::TENSOR_QUANT16_ASYMM:
-        case nn::OperandType::TENSOR_QUANT8_SYMM:
-        case nn::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
-        case nn::OperandType::SUBGRAPH:
-            return true;
-        case nn::OperandType::OEM:
-        case nn::OperandType::TENSOR_OEM_BYTE:
-            return false;
-    }
-    return nn::isExtension(operandType);
-}
-
 template <typename Input>
 using UnvalidatedConvertOutput =
         std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
@@ -113,14 +88,7 @@
 template <typename Type>
 GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
     auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
-    const auto maybeVersion = validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(aidl_hal::utils::compliantVersion(canonical));
     return canonical;
 }
 
@@ -185,13 +153,21 @@
 
 GeneralResult<OperandType> unvalidatedConvert(const aidl_hal::OperandType& operandType) {
     VERIFY_NON_NEGATIVE(underlyingType(operandType)) << "Negative operand types are not allowed.";
-    return static_cast<OperandType>(operandType);
+    const auto canonical = static_cast<OperandType>(operandType);
+    if (canonical == OperandType::OEM || canonical == OperandType::TENSOR_OEM_BYTE) {
+        return NN_ERROR() << "Unable to convert invalid OperandType " << canonical;
+    }
+    return canonical;
 }
 
 GeneralResult<OperationType> unvalidatedConvert(const aidl_hal::OperationType& operationType) {
     VERIFY_NON_NEGATIVE(underlyingType(operationType))
             << "Negative operation types are not allowed.";
-    return static_cast<OperationType>(operationType);
+    const auto canonical = static_cast<OperationType>(operationType);
+    if (canonical == OperationType::OEM_OPERATION) {
+        return NN_ERROR() << "Unable to convert invalid OperationType OEM_OPERATION";
+    }
+    return canonical;
 }
 
 GeneralResult<DeviceType> unvalidatedConvert(const aidl_hal::DeviceType& deviceType) {
@@ -206,8 +182,7 @@
     const bool validOperandTypes = std::all_of(
             capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
             [](const aidl_hal::OperandPerformance& operandPerformance) {
-                const auto maybeType = unvalidatedConvert(operandPerformance.type);
-                return !maybeType.has_value() ? false : validOperandType(maybeType.value());
+                return validatedConvert(operandPerformance.type).has_value();
             });
     if (!validOperandTypes) {
         return NN_ERROR() << "Invalid OperandType when unvalidatedConverting OperandPerformance in "
@@ -472,7 +447,7 @@
     return BufferRole{
             .modelIndex = static_cast<uint32_t>(bufferRole.modelIndex),
             .ioIndex = static_cast<uint32_t>(bufferRole.ioIndex),
-            .frequency = bufferRole.frequency,
+            .probability = bufferRole.probability,
     };
 }
 
@@ -534,6 +509,11 @@
     return std::make_shared<const Handle>(NN_TRY(unvalidatedConvertHelper(aidlNativeHandle)));
 }
 
+GeneralResult<std::vector<Operation>> unvalidatedConvert(
+        const std::vector<aidl_hal::Operation>& operations) {
+    return unvalidatedConvertVec(operations);
+}
+
 GeneralResult<SyncFence> unvalidatedConvert(const ndk::ScopedFileDescriptor& syncFence) {
     auto duplicatedFd = NN_TRY(dupFd(syncFence.get()));
     return SyncFence::create(std::move(duplicatedFd));
@@ -564,22 +544,14 @@
     return validatedConvert(model);
 }
 
-GeneralResult<Operand> convert(const aidl_hal::Operand& operand) {
-    return unvalidatedConvert(operand);
-}
-
 GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType) {
-    return unvalidatedConvert(operandType);
+    return validatedConvert(operandType);
 }
 
 GeneralResult<Priority> convert(const aidl_hal::Priority& priority) {
     return validatedConvert(priority);
 }
 
-GeneralResult<Request::MemoryPool> convert(const aidl_hal::RequestMemoryPool& memoryPool) {
-    return unvalidatedConvert(memoryPool);
-}
-
 GeneralResult<Request> convert(const aidl_hal::Request& request) {
     return validatedConvert(request);
 }
@@ -589,17 +561,13 @@
 }
 
 GeneralResult<SyncFence> convert(const ndk::ScopedFileDescriptor& syncFence) {
-    return unvalidatedConvert(syncFence);
+    return validatedConvert(syncFence);
 }
 
 GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension) {
     return validatedConvert(extension);
 }
 
-GeneralResult<std::vector<Operation>> convert(const std::vector<aidl_hal::Operation>& operations) {
-    return unvalidatedConvert(operations);
-}
-
 GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories) {
     return validatedConvert(memories);
 }
@@ -644,14 +612,7 @@
 
 template <typename Type>
 nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
-    const auto maybeVersion = nn::validate(canonical);
-    if (!maybeVersion.has_value()) {
-        return nn::error() << maybeVersion.error();
-    }
-    const auto version = maybeVersion.value();
-    if (version > kVersion) {
-        return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
-    }
+    NN_TRY(compliantVersion(canonical));
     return utils::unvalidatedConvert(canonical);
 }
 
@@ -718,7 +679,7 @@
     return BufferRole{
             .modelIndex = static_cast<int32_t>(bufferRole.modelIndex),
             .ioIndex = static_cast<int32_t>(bufferRole.ioIndex),
-            .frequency = bufferRole.frequency,
+            .probability = bufferRole.probability,
     };
 }
 
@@ -797,6 +758,9 @@
 }
 
 nn::GeneralResult<OperandType> unvalidatedConvert(const nn::OperandType& operandType) {
+    if (operandType == nn::OperandType::OEM || operandType == nn::OperandType::TENSOR_OEM_BYTE) {
+        return NN_ERROR() << "Unable to convert invalid OperandType " << operandType;
+    }
     return static_cast<OperandType>(operandType);
 }
 
@@ -864,6 +828,9 @@
 }
 
 nn::GeneralResult<OperationType> unvalidatedConvert(const nn::OperationType& operationType) {
+    if (operationType == nn::OperationType::OEM_OPERATION) {
+        return NN_ERROR() << "Unable to convert invalid OperationType OEM_OPERATION";
+    }
     return static_cast<OperationType>(operationType);
 }
 
@@ -1004,7 +971,7 @@
 }
 
 nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
-    return unvalidatedConvert(cacheToken);
+    return validatedConvert(cacheToken);
 }
 
 nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
@@ -1076,7 +1043,7 @@
 
 nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
         const std::vector<nn::SyncFence>& syncFences) {
-    return unvalidatedConvert(syncFences);
+    return validatedConvert(syncFences);
 }
 
 nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index aee4d90..003965b 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -16,9 +16,9 @@
 
 #include "PreparedModel.h"
 
+#include "Burst.h"
 #include "Callbacks.h"
 #include "Conversions.h"
-#include "ProtectCallback.h"
 #include "Utils.h"
 
 #include <android/binder_auto_utils.h>
@@ -26,7 +26,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
-#include <nnapi/hal/1.0/Burst.h>
 #include <nnapi/hal/CommonUtils.h>
 #include <nnapi/hal/HandleError.h>
 
@@ -161,7 +160,10 @@
 }
 
 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    return hal::V1_0::utils::Burst::create(shared_from_this());
+    std::shared_ptr<IBurst> burst;
+    const auto ret = kPreparedModel->configureExecutionBurst(&burst);
+    HANDLE_ASTATUS(ret) << "configureExecutionBurst failed";
+    return Burst::create(std::move(burst));
 }
 
 std::any PreparedModel::getUnderlyingResource() const {
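
With this change, configureExecutionBurst asks the driver for its own IBurst and wraps it in the new utils::Burst adapter instead of emulating a burst over the V1_0 utility class. A generic sketch of that call shape, a status plus out-parameter translated into a result type, with illustrative stand-ins (Status, IRemoteBurst, and BurstAdapter are placeholders):

    #include <functional>
    #include <memory>
    #include <string>
    #include <variant>

    struct Status { bool ok = true; std::string message; };
    struct IRemoteBurst {};  // stand-in for the driver's burst interface

    struct BurstAdapter {    // stand-in for utils::Burst
        explicit BurstAdapter(std::shared_ptr<IRemoteBurst> b) : remote(std::move(b)) {}
        std::shared_ptr<IRemoteBurst> remote;
    };

    // Either the adapter or an error message, in the spirit of nn::GeneralResult.
    using BurstResult = std::variant<std::shared_ptr<BurstAdapter>, std::string>;

    BurstResult configureBurst(
            const std::function<Status(std::shared_ptr<IRemoteBurst>*)>& call) {
        std::shared_ptr<IRemoteBurst> burst;
        const Status status = call(&burst);  // binder-style out parameter
        if (!status.ok) return "configureExecutionBurst failed: " + status.message;
        if (burst == nullptr) return std::string("driver returned a null burst");
        return std::make_shared<BurstAdapter>(std::move(burst));
    }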
diff --git a/neuralnetworks/aidl/utils/test/MockBuffer.h b/neuralnetworks/aidl/utils/test/MockBuffer.h
index 5746176..f77fa86 100644
--- a/neuralnetworks/aidl/utils/test/MockBuffer.h
+++ b/neuralnetworks/aidl/utils/test/MockBuffer.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER_H
 
 #include <aidl/android/hardware/neuralnetworks/BnBuffer.h>
 #include <android/binder_interface_utils.h>
@@ -40,4 +40,4 @@
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BUFFER_H
diff --git a/neuralnetworks/aidl/utils/test/MockBurst.h b/neuralnetworks/aidl/utils/test/MockBurst.h
new file mode 100644
index 0000000..5083bbd
--- /dev/null
+++ b/neuralnetworks/aidl/utils/test/MockBurst.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
+
+#include <aidl/android/hardware/neuralnetworks/BnBurst.h>
+#include <android/binder_interface_utils.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/Status.h>
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+class MockBurst final : public BnBurst {
+  public:
+    MOCK_METHOD(ndk::ScopedAStatus, executeSynchronously,
+                (const Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+                 bool measureTiming, int64_t deadline, int64_t loopTimeoutDuration,
+                 ExecutionResult* executionResult),
+                (override));
+    MOCK_METHOD(ndk::ScopedAStatus, releaseMemoryResource, (int64_t memoryIdentifierToken),
+                (override));
+};
+
+}  // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_BURST_H
diff --git a/neuralnetworks/aidl/utils/test/MockDevice.h b/neuralnetworks/aidl/utils/test/MockDevice.h
index 9b35bf8..3a28d55 100644
--- a/neuralnetworks/aidl/utils/test/MockDevice.h
+++ b/neuralnetworks/aidl/utils/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE_H
 
 #include <aidl/android/hardware/neuralnetworks/BnDevice.h>
 #include <android/binder_auto_utils.h>
@@ -64,4 +64,4 @@
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h b/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
index 463e1c9..06f9ea2 100644
--- a/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
+++ b/neuralnetworks/aidl/utils/test/MockFencedExecutionCallback.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
 
 #include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h>
 #include <android/binder_auto_utils.h>
@@ -42,4 +42,4 @@
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_FENCED_EXECUTION_CALLBACK_H
diff --git a/neuralnetworks/aidl/utils/test/MockPreparedModel.h b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
index 36e0ec3..a4ae2b7 100644
--- a/neuralnetworks/aidl/utils/test/MockPreparedModel.h
+++ b/neuralnetworks/aidl/utils/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL_H
 
 #include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h>
 #include <android/binder_interface_utils.h>
@@ -49,4 +49,4 @@
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_TEST_MOCK_PREPARED_MODEL_H
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index 7e28861..630a460 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "MockBurst.h"
 #include "MockFencedExecutionCallback.h"
 #include "MockPreparedModel.h"
 
@@ -252,7 +253,71 @@
     EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
 }
 
-// TODO: test burst execution if/when it is added to nn::IPreparedModel.
+TEST(PreparedModelTest, configureExecutionBurst) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    const auto mockBurst = ndk::SharedRefBase::make<MockBurst>();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+            .Times(1)
+            .WillOnce(DoAll(SetArgPointee<0>(mockBurst), Invoke(makeStatusOk)));
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_TRUE(result.has_value())
+            << "Failed with " << result.error().code << ": " << result.error().message;
+    EXPECT_NE(result.value(), nullptr);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstError) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstTransportFailure) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, configureExecutionBurstDeadObject) {
+    // setup test
+    const auto mockPreparedModel = MockPreparedModel::create();
+    EXPECT_CALL(*mockPreparedModel, configureExecutionBurst(_))
+            .Times(1)
+            .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+    const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+    // run test
+    const auto result = preparedModel->configureExecutionBurst();
+
+    // verify result
+    ASSERT_FALSE(result.has_value());
+    EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
 
 TEST(PreparedModelTest, getUnderlyingResource) {
     // setup test
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2dd02dd..1440429 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -102,7 +102,7 @@
         ASSERT_NE(result, nullptr);
 
         // Prepare arguments.
-        BufferRole role = {.modelIndex = 0, .ioIndex = index, .frequency = 1.0f};
+        BufferRole role = {.modelIndex = 0, .ioIndex = index, .probability = 1.0f};
         std::vector<BufferRole> inputRoles, outputRoles;
         if constexpr (ioType == IOType::INPUT) {
             inputRoles = {role};
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index 627c26a..596f8ae 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -337,18 +337,18 @@
                               const std::shared_ptr<IPreparedModel>& model2) {
         validateAllocate({
                 .preparedModels = {model1, model2},
-                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                               {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                               {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
         });
         validateAllocate({
                 .preparedModels = {model1, model2},
-                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
-                .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
+                .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
         });
         validateAllocate({
                 .preparedModels = {model1, model2},
-                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                                {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                                {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
         });
     }
 };
@@ -370,13 +370,13 @@
     // Test with nullptr prepared model as input role.
     validateAllocate({
             .preparedModels = {nullptr},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 
     // Test with nullptr prepared model as output role.
     validateAllocate({
             .preparedModels = {nullptr},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -387,13 +387,13 @@
     // Test with invalid prepared model as input role.
     validateAllocate({
             .preparedModels = {invalidPreparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 
     // Test with invalid prepared model as output role.
     validateAllocate({
             .preparedModels = {invalidPreparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -404,13 +404,13 @@
     // This should fail, because the model index is out of bound.
     validateAllocate({
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
     });
 
     // This should fail, because the model index is out of bound.
     validateAllocate({
             .preparedModels = {preparedModel},
-            .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -421,30 +421,30 @@
     // This should fail, because the model only has one input.
     validateAllocate({
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 1, .probability = 1.0f}},
     });
 
     // This should fail, because the model only has one output.
     validateAllocate({
             .preparedModels = {preparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 1, .probability = 1.0f}},
     });
 }
 
-TEST_P(MemoryDomainAllocateTest, InvalidFrequency) {
+TEST_P(MemoryDomainAllocateTest, InvalidProbability) {
     auto preparedModel = createConvPreparedModel(kTestOperand);
     if (preparedModel == nullptr) return;
 
     for (float invalidFreq : {10.0f, 0.0f, -0.5f}) {
-        // Test with invalid frequency for input roles.
+        // Test with invalid probability for input roles.
         validateAllocate({
                 .preparedModels = {preparedModel},
-                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = invalidFreq}},
         });
-        // Test with invalid frequency for output roles.
+        // Test with invalid probability for output roles.
         validateAllocate({
                 .preparedModels = {preparedModel},
-                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
+                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = invalidFreq}},
         });
     }
 }
@@ -456,25 +456,25 @@
     // Same role with same model index.
     validateAllocate({
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                           {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                           {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
     validateAllocate({
             .preparedModels = {preparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                            {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                            {.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 
     // Different model indexes, but logically referring to the same role.
     validateAllocate({
             .preparedModels = {preparedModel, preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                           {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                           {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
     });
     validateAllocate({
             .preparedModels = {preparedModel, preparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
-                            {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f},
+                            {.modelIndex = 1, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -553,12 +553,12 @@
     validateAllocate({
             .dimensions = badDimensions,
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
     validateAllocate({
             .dimensions = badDimensions,
             .preparedModels = {preparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -572,12 +572,12 @@
     validateAllocate({
             .dimensions = badDimensions,
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
     validateAllocate({
             .dimensions = badDimensions,
             .preparedModels = {preparedModel},
-            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .probability = 1.0f}},
     });
 }
 
@@ -590,7 +590,7 @@
     validateAllocate({
             .dimensions = {1},
             .preparedModels = {preparedModel},
-            .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .frequency = 1.0f}},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .probability = 1.0f}},
     });
 }
 
@@ -624,7 +624,7 @@
 
         std::vector<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
         auto trans = [](int32_t ind) -> BufferRole {
-            return {.modelIndex = 0, .ioIndex = ind, .frequency = 1.0f};
+            return {.modelIndex = 0, .ioIndex = ind, .probability = 1.0f};
         };
         std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
         std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index 6d84e1e..94d3daf 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -1312,7 +1312,7 @@
 void validateModel(const std::shared_ptr<IDevice>& device, const Model& model) {
     const auto numberOfConsumers =
             nn::countNumberOfConsumers(model.main.operands.size(),
-                                       nn::convert(model.main.operations).value())
+                                       nn::unvalidatedConvert(model.main.operations).value())
                     .value();
     mutateExecutionOrderTest(device, model, numberOfConsumers);
     mutateOperandTypeTest(device, model);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index 996858c..17b3fd9 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -32,7 +32,9 @@
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
-            const nn::Request& request, nn::MeasureTiming measure) const override;
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 };
 
 }  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index 3b87330..c92cc41 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -47,7 +47,9 @@
     OptionalCacheHold cacheMemory(const nn::SharedMemory& memory) const override;
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
-            const nn::Request& request, nn::MeasureTiming measure) const override;
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
   private:
     const Factory kMakeBurst;
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 81ca18d..0c34f05 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -32,7 +32,9 @@
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
-        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/) const {
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+        const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     return NN_ERROR() << "InvalidBurst";
 }
 
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 5ca868b..38ccc62 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -100,9 +100,11 @@
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
-        const nn::Request& request, nn::MeasureTiming measure) const {
-    const auto fn = [&request, measure](const nn::IBurst& burst) {
-        return burst.execute(request, measure);
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration) const {
+    const auto fn = [&request, measure, deadline, loopTimeoutDuration](const nn::IBurst& burst) {
+        return burst.execute(request, measure, deadline, loopTimeoutDuration);
     };
     return protect(*this, fn);
 }
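
ResilientBurst::execute above packs its arguments into a callable and hands it to protect, which (per the rest of this file, not shown here) reissues the call against a freshly recreated burst when the remote object has died. A generic sketch of that shape under that assumption; the real class also serializes access to the underlying object with a mutex:

    #include <functional>
    #include <memory>

    enum class ErrorStatus { NONE, DEAD_OBJECT, GENERAL_FAILURE };

    template <typename Object>
    class Resilient {
      public:
        using Factory = std::function<std::shared_ptr<Object>()>;
        explicit Resilient(Factory make) : mMake(std::move(make)), mObject(mMake()) {}

        // Run fn against the live object; if the remote has died, recreate the
        // object and retry the call once.
        template <typename Fn>
        ErrorStatus protect(const Fn& fn) {
            auto status = fn(*mObject);
            if (status == ErrorStatus::DEAD_OBJECT) {
                mObject = mMake();
                status = fn(*mObject);
            }
            return status;
        }

      private:
        Factory mMake;
        std::shared_ptr<Object> mObject;
    };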
diff --git a/neuralnetworks/utils/common/test/MockBuffer.h b/neuralnetworks/utils/common/test/MockBuffer.h
index 59d5700..3599d0c 100644
--- a/neuralnetworks/utils/common/test/MockBuffer.h
+++ b/neuralnetworks/utils/common/test/MockBuffer.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER_H
 
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -34,4 +34,4 @@
 
 }  // namespace android::nn
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_BUFFER_H
diff --git a/neuralnetworks/utils/common/test/MockDevice.h b/neuralnetworks/utils/common/test/MockDevice.h
index 5566968..b274716 100644
--- a/neuralnetworks/utils/common/test/MockDevice.h
+++ b/neuralnetworks/utils/common/test/MockDevice.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE_H
 
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -55,4 +55,4 @@
 
 }  // namespace android::nn
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_DEVICE_H
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index 418af61..c004861 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL_H
 
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -41,4 +41,4 @@
 
 }  // namespace android::nn
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_PREPARED_MODEL_H