Change NN canonical timings to nanoseconds -- hal

A sibling CL changes the definition of nn::TimePoint to match
std::chrono::steady_clock::time_point, except that the underlying
duration representation uses uint64_t. That sibling CL also renames
nn::OptionalTimeoutDuration to nn::OptionalDuration and changes its
definition to match std::chrono::nanoseconds, again with uint64_t as
the underlying duration representation.
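A minimal sketch of what the resulting canonical aliases are assumed to
look like (the exact definitions and their header locations live in the
sibling CL, not here):

    // Assumed shape of the canonical timing aliases after the sibling CL.
    #include <chrono>
    #include <cstdint>
    #include <optional>

    using Duration = std::chrono::duration<uint64_t, std::nano>;
    using OptionalDuration = std::optional<Duration>;
    using TimePoint =
            std::chrono::time_point<std::chrono::steady_clock, Duration>;
    using OptionalTimePoint = std::optional<TimePoint>;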

This CL updates the NN HAL utility code accordingly: it replaces
nn::OptionalTimeoutDuration with nn::OptionalDuration throughout the
prepared-model interfaces, converts the V1_2 Timing fields between the
HAL's microsecond representation and the canonical nanosecond
durations, and drops the negative-count checks that the unsigned
representations make unnecessary.
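A rough standalone sketch of the intended V1_2 timing round-trip
(simplified from the Conversions.cpp hunks below; the kNoTiming
sentinel, saturation to Duration::max(), and rounding up with
std::chrono::ceil mirror the diff):

    // Standalone sketch of the 1.2 timing conversion semantics; the real
    // code is in the unvalidatedConvert(Timing) overloads below.
    #include <chrono>
    #include <cstdint>
    #include <limits>
    #include <optional>

    using Duration = std::chrono::duration<uint64_t, std::nano>;      // canonical
    using HalDuration = std::chrono::duration<uint64_t, std::micro>;  // V1_2 Timing unit
    constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();

    // HAL -> canonical: kNoTiming becomes an empty optional, and values too
    // large to represent in nanoseconds saturate to Duration::max().
    std::optional<Duration> toCanonical(uint64_t halMicros) {
        if (halMicros == kNoTiming) return std::nullopt;
        constexpr uint64_t kMax =
                std::chrono::floor<HalDuration>(Duration::max()).count();
        if (halMicros > kMax) return Duration::max();
        return HalDuration{halMicros};
    }

    // canonical -> HAL: an empty optional becomes kNoTiming; otherwise round
    // up to whole microseconds, matching the std::chrono::ceil in the diff.
    uint64_t toHal(const std::optional<Duration>& canonical) {
        if (!canonical.has_value()) return kNoTiming;
        return std::chrono::ceil<HalDuration>(*canonical).count();
    }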

Bug: 174297663
Test: mma
Test: NeuralNetworksTest_static
Change-Id: If44d9aefadb2c78b632ff289b5ff5a49f766525c
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 31f366d..198cbc8 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -44,13 +44,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 46dd3f8..add8275 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -55,7 +55,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
@@ -81,11 +81,12 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
 }
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 65e1e8a..53bd4d1 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -45,13 +45,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index f11474f..3790d1f 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -43,7 +43,9 @@
     return static_cast<std::underlying_type_t<Type>>(value);
 }
 
+using HalDuration = std::chrono::duration<uint64_t, std::micro>;
 constexpr auto kVersion = android::nn::Version::ANDROID_Q;
+constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max();
 
 }  // namespace
 
@@ -270,7 +272,18 @@
 }
 
 GeneralResult<Timing> unvalidatedConvert(const hal::V1_2::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr uint64_t kMaxTiming = std::chrono::floor<HalDuration>(Duration::max()).count();
+    constexpr auto convertTiming = [](uint64_t halTiming) -> OptionalDuration {
+        if (halTiming == kNoTiming) {
+            return {};
+        }
+        if (halTiming > kMaxTiming) {
+            return Duration::max();
+        }
+        return HalDuration{halTiming};
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension) {
@@ -547,7 +560,14 @@
 }
 
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
-    return Timing{.timeOnDevice = timing.timeOnDevice, .timeInDriver = timing.timeInDriver};
+    constexpr auto convertTiming = [](nn::OptionalDuration canonicalTiming) -> uint64_t {
+        if (!canonicalTiming.has_value()) {
+            return kNoTiming;
+        }
+        return std::chrono::ceil<HalDuration>(*canonicalTiming).count();
+    };
+    return Timing{.timeOnDevice = convertTiming(timing.timeOnDevice),
+                  .timeInDriver = convertTiming(timing.timeInDriver)};
 }
 
 nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index dad9a7e..32c2651 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -106,7 +106,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
@@ -140,11 +140,12 @@
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(
-        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
-        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(const nn::Request& /*request*/,
+                             const std::vector<nn::SyncFence>& /*waitFor*/,
+                             nn::MeasureTiming /*measure*/,
+                             const nn::OptionalTimePoint& /*deadline*/,
+                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
+                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
 }
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index 9653a05..477bb7b 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -44,7 +44,7 @@
         const hal::V1_3::Request::MemoryPool& memoryPool);
 GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const hal::V1_3::OptionalTimePoint& optionalTimePoint);
-GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
+GeneralResult<OptionalDuration> unvalidatedConvert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
 GeneralResult<ErrorStatus> unvalidatedConvert(const hal::V1_3::ErrorStatus& errorStatus);
 
@@ -54,7 +54,7 @@
 GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
 GeneralResult<Request> convert(const hal::V1_3::Request& request);
 GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
-GeneralResult<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration);
 GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
 
@@ -86,7 +86,7 @@
 nn::GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const nn::OptionalTimePoint& optionalTimePoint);
 nn::GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
+        const nn::OptionalDuration& optionalTimeoutDuration);
 nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus);
 
 nn::GeneralResult<Priority> convert(const nn::Priority& priority);
@@ -96,7 +96,7 @@
 nn::GeneralResult<Request> convert(const nn::Request& request);
 nn::GeneralResult<OptionalTimePoint> convert(const nn::OptionalTimePoint& optionalTimePoint);
 nn::GeneralResult<OptionalTimeoutDuration> convert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration);
+        const nn::OptionalDuration& optionalTimeoutDuration);
 nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
 
 nn::GeneralResult<hidl_handle> convert(const nn::SharedHandle& handle);
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index e0d69dd..09360ec 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -44,13 +44,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index 949dd0d..c89a69f 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -272,47 +272,26 @@
 
 GeneralResult<OptionalTimePoint> unvalidatedConvert(
         const hal::V1_3::OptionalTimePoint& optionalTimePoint) {
-    constexpr auto kTimePointMaxCount = TimePoint::max().time_since_epoch().count();
-    const auto makeTimePoint = [](uint64_t count) -> GeneralResult<OptionalTimePoint> {
-        if (count > kTimePointMaxCount) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimePoint because the count exceeds "
-                      "the max";
-        }
-        const auto nanoseconds = std::chrono::nanoseconds{count};
-        return TimePoint{nanoseconds};
-    };
-
     using Discriminator = hal::V1_3::OptionalTimePoint::hidl_discriminator;
     switch (optionalTimePoint.getDiscriminator()) {
         case Discriminator::none:
-            return std::nullopt;
+            return {};
         case Discriminator::nanosecondsSinceEpoch:
-            return makeTimePoint(optionalTimePoint.nanosecondsSinceEpoch());
+            return TimePoint{Duration{optionalTimePoint.nanosecondsSinceEpoch()}};
     }
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "Invalid OptionalTimePoint discriminator "
            << underlyingType(optionalTimePoint.getDiscriminator());
 }
 
-GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
+GeneralResult<OptionalDuration> unvalidatedConvert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
-    constexpr auto kTimeoutDurationMaxCount = TimeoutDuration::max().count();
-    const auto makeTimeoutDuration = [](uint64_t count) -> GeneralResult<OptionalTimeoutDuration> {
-        if (count > kTimeoutDurationMaxCount) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimeoutDuration because the count "
-                      "exceeds the max";
-        }
-        return TimeoutDuration{count};
-    };
-
     using Discriminator = hal::V1_3::OptionalTimeoutDuration::hidl_discriminator;
     switch (optionalTimeoutDuration.getDiscriminator()) {
         case Discriminator::none:
-            return std::nullopt;
+            return {};
         case Discriminator::nanoseconds:
-            return makeTimeoutDuration(optionalTimeoutDuration.nanoseconds());
+            return Duration(optionalTimeoutDuration.nanoseconds());
     }
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "Invalid OptionalTimeoutDuration discriminator "
@@ -360,7 +339,7 @@
     return validatedConvert(optionalTimePoint);
 }
 
-GeneralResult<OptionalTimeoutDuration> convert(
+GeneralResult<OptionalDuration> convert(
         const hal::V1_3::OptionalTimeoutDuration& optionalTimeoutDuration) {
     return validatedConvert(optionalTimeoutDuration);
 }
@@ -629,27 +608,16 @@
     OptionalTimePoint ret;
     if (optionalTimePoint.has_value()) {
         const auto count = optionalTimePoint.value().time_since_epoch().count();
-        if (count < 0) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimePoint because time since epoch "
-                      "count is "
-                      "negative";
-        }
         ret.nanosecondsSinceEpoch(count);
     }
     return ret;
 }
 
 nn::GeneralResult<OptionalTimeoutDuration> unvalidatedConvert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
+        const nn::OptionalDuration& optionalTimeoutDuration) {
     OptionalTimeoutDuration ret;
     if (optionalTimeoutDuration.has_value()) {
         const auto count = optionalTimeoutDuration.value().count();
-        if (count < 0) {
-            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-                   << "Unable to unvalidatedConvert OptionalTimeoutDuration because count is "
-                      "negative";
-        }
         ret.nanoseconds(count);
     }
     return ret;
@@ -697,7 +665,7 @@
 }
 
 nn::GeneralResult<OptionalTimeoutDuration> convert(
-        const nn::OptionalTimeoutDuration& optionalTimeoutDuration) {
+        const nn::OptionalDuration& optionalTimeoutDuration) {
     return validatedConvert(optionalTimeoutDuration);
 }
 
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 49b9b0b..124a8db 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -159,7 +159,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& deadline,
-        const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
@@ -200,8 +200,8 @@
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
 PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
                              nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-                             const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-                             const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+                             const nn::OptionalDuration& loopTimeoutDuration,
+                             const nn::OptionalDuration& timeoutDurationAfterFence) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     const nn::Request& requestInShared =
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index 4b32b4e..985cddb 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -32,13 +32,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 };
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index c2940d1..d86c88b 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -49,13 +49,13 @@
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
             const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
 
     std::any getUnderlyingResource() const override;
 
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9ae7a63..a46f4ac 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -29,7 +29,7 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
                               const nn::OptionalTimePoint& /*deadline*/,
-                              const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+                              const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
@@ -37,8 +37,8 @@
 InvalidPreparedModel::executeFenced(
         const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
         nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 1c9ecba..012a1de 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -64,16 +64,17 @@
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
 ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
                                 const nn::OptionalTimePoint& deadline,
-                                const nn::OptionalTimeoutDuration& loopTimeoutDuration) const {
+                                const nn::OptionalDuration& loopTimeoutDuration) const {
     return getPreparedModel()->execute(request, measure, deadline, loopTimeoutDuration);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-ResilientPreparedModel::executeFenced(
-        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
-        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
-        const nn::OptionalTimeoutDuration& loopTimeoutDuration,
-        const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const {
+ResilientPreparedModel::executeFenced(const nn::Request& request,
+                                      const std::vector<nn::SyncFence>& waitFor,
+                                      nn::MeasureTiming measure,
+                                      const nn::OptionalTimePoint& deadline,
+                                      const nn::OptionalDuration& loopTimeoutDuration,
+                                      const nn::OptionalDuration& timeoutDurationAfterFence) const {
     return getPreparedModel()->executeFenced(request, waitFor, measure, deadline,
                                              loopTimeoutDuration, timeoutDurationAfterFence);
 }