Merge "Make NN canonical->HIDL adapter execute* methods synchronous" am: f955569c8a

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/2005130

Change-Id: Icff3acb7b47cc84518222e9f37b7515d6783cb73
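In the HIDL adapter, IPreparedModel::execute, execute_1_2, and execute_1_3
previously validated the request against the underlying nn::Model and then
posted the actual execution to a type-erased Executor, returning before the
work ran. After this change they call the canonical prepared model inline.
The new shape of adapter::execute (simplified from the PreparedModel.cpp
hunks below; request conversion is omitted):

    nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
                                    const nn::Request& nnRequest,
                                    const sp<V1_0::IExecutionCallback>& callback) {
        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
        // INVALID_ARGUMENT goes back to the HIDL caller as the returned status;
        // every other outcome reaches the callback before execute() returns.
        if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
            const auto& [message, code, outputShapes] = result.error();
            return nn::error(code) << message;
        }
        notify(callback.get(), std::move(result));
        return {};
    }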
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
index 4c0b328..80ed41d 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
@@ -46,9 +46,6 @@
 /**
 * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * @param device NNAPI canonical IDevice interface object to be adapted.
  * @param executor Type-erased executor to handle executing tasks asynchronously.
  * @return AIDL NN HAL IDevice interface object.
@@ -58,9 +55,6 @@
 /**
 * Adapt an NNAPI canonical interface object to an AIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * This function uses a default executor, which will execute tasks from a detached thread.
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
diff --git a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
index 6fba4ab..3bd93e0 100644
--- a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
@@ -46,9 +46,6 @@
 /**
  * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * @param device NNAPI canonical IDevice interface object to be adapted.
  * @param executor Type-erased executor to handle executing tasks asynchronously.
  * @return HIDL NN HAL IDevice interface object.
@@ -58,9 +55,6 @@
 /**
  * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * This function uses a default executor, which will execute tasks from a detached thread.
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
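The requirement removed from both Adapter.h headers existed only so the
adapter could fish the model back out of the prepared model and pre-validate
requests with nn::validateRequestForModel. A canonical implementation
therefore had to do roughly the following (a sketch; kModel is an
illustrative member name, not part of the interface):

    std::any MyPreparedModel::getUnderlyingResource() const {
        // Needed before this change so the adapter could validate incoming
        // requests against the model.
        return static_cast<const nn::Model*>(&kModel);
    }

With validation now delegated to IPreparedModel::execute itself (see
PreparedModel.cpp below), any canonical prepared model can be adapted,
whatever getUnderlyingResource() returns.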
diff --git a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
index 9482b0d..01cd4bc 100644
--- a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
@@ -39,7 +39,7 @@
 // Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
 class PreparedModel final : public V1_3::IPreparedModel {
   public:
-    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor);
+    explicit PreparedModel(nn::SharedPreparedModel preparedModel);
 
     Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
                                       const sp<V1_0::IExecutionCallback>& callback) override;
@@ -70,7 +70,6 @@
 
   private:
     const nn::SharedPreparedModel kPreparedModel;
-    const Executor kExecutor;
 };
 
 }  // namespace android::hardware::neuralnetworks::adapter
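With the Executor member gone, constructing the adapter takes the prepared
model alone, and marking the now single-argument constructor explicit avoids
implicit conversions from nn::SharedPreparedModel. Usage mirrors
adaptPreparedModel in Device.cpp below (nnPreparedModel is a placeholder for
the canonical object being wrapped):

    const sp<PreparedModel> adapter = sp<PreparedModel>::make(std::move(nnPreparedModel));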
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 0f44638..305a1b4 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -62,11 +62,11 @@
 
 using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
 
-sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
     if (preparedModel == nullptr) {
         return nullptr;
     }
-    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
+    return sp<PreparedModel>::make(std::move(preparedModel));
 }
 
 void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -105,14 +105,14 @@
 }
 
 template <typename CallbackType>
-void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
+void notify(CallbackType* callback, PrepareModelResult result) {
     if (!result.has_value()) {
         const auto [message, status] = std::move(result).error();
         LOG(ERROR) << message;
         notify(callback, status, nullptr);
     } else {
         auto preparedModel = std::move(result).value();
-        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
+        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
         notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
     }
 }
@@ -133,10 +133,10 @@
 
     auto nnModel = NN_TRY(convertInput(model));
 
-    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), callback] {
         auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                            nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -154,10 +154,10 @@
     auto nnModel = NN_TRY(convertInput(model));
     const auto nnPreference = NN_TRY(convertInput(preference));
 
-    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {},
                                            {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -183,10 +183,10 @@
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -213,10 +213,10 @@
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
 
@@ -238,9 +238,9 @@
     const auto nnToken = nn::CacheToken(token);
 
     Task task = [device, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -262,9 +262,9 @@
     const auto nnToken = nn::CacheToken(token);
 
     auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
 
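Device.cpp keeps its Executor: the prepareModel* and prepareModelFromCache*
paths still post Tasks to it and only stop forwarding it into the
PreparedModel adapter. For reference, a minimal inline executor (assuming the
aliases Task = std::function<void()> and
Executor = std::function<void(Task, nn::OptionalTimePoint)> from these
headers) would be:

    const Executor inlineExecutor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
        std::move(task)();  // run the prepare* work on the caller's thread
    };

The default executor mentioned in the Adapter.h comments above instead runs
each task from a detached thread.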
diff --git a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
index c6055a6..3570a74 100644
--- a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -55,15 +55,6 @@
     return result;
 }
 
-nn::GeneralResult<nn::Version> validateRequestForModel(const nn::Request& request,
-                                                       const nn::Model& model) {
-    nn::GeneralResult<nn::Version> version = nn::validateRequestForModel(request, model);
-    if (!version.ok()) {
-        version.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
-    }
-    return version;
-}
-
 class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
   public:
     explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
@@ -144,58 +135,48 @@
 }
 
 nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
-                                const Executor& executor, const V1_0::Request& request,
+                                const V1_0::Request& request,
                                 const sp<V1_0::IExecutionCallback>& callback) {
     if (callback.get() == nullptr) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+    auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
+
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
     }
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
-        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), {});
-
+    notify(callback.get(), std::move(result));
     return {};
 }
 
 nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
-                                    const Executor& executor, const V1_0::Request& request,
-                                    V1_2::MeasureTiming measure,
+                                    const V1_0::Request& request, V1_2::MeasureTiming measure,
                                     const sp<V1_2::IExecutionCallback>& callback) {
     if (callback.get() == nullptr) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasure = NN_TRY(convertInput(measure));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+    auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
+
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
     }
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
-        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), {});
-
+    notify(callback.get(), std::move(result));
     return {};
 }
 
 nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
-                                    const Executor& executor, const V1_3::Request& request,
-                                    V1_2::MeasureTiming measure,
+                                    const V1_3::Request& request, V1_2::MeasureTiming measure,
                                     const V1_3::OptionalTimePoint& deadline,
                                     const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
                                     const sp<V1_3::IExecutionCallback>& callback) {
@@ -203,25 +184,20 @@
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasure = NN_TRY(convertInput(measure));
     const auto nnDeadline = NN_TRY(convertInput(deadline));
     const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+    auto result =
+            preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration, {}, {});
+
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
     }
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
-                 nnLoopTimeoutDuration, callback] {
-        auto result = preparedModel->execute(nnRequest, nnMeasure, nnDeadline,
-                                             nnLoopTimeoutDuration, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), nnDeadline);
-
+    notify(callback.get(), std::move(result));
     return {};
 }
 
@@ -304,10 +280,9 @@
 
 }  // namespace
 
-PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
-    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
+    : kPreparedModel(std::move(preparedModel)) {
     CHECK(kPreparedModel != nullptr);
-    CHECK(kExecutor != nullptr);
 }
 
 nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
@@ -316,7 +291,7 @@
 
 Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
                                                  const sp<V1_0::IExecutionCallback>& callback) {
-    auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
+    auto result = adapter::execute(kPreparedModel, request, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
@@ -329,7 +304,7 @@
 Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
                                                      V1_2::MeasureTiming measure,
                                                      const sp<V1_2::IExecutionCallback>& callback) {
-    auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
+    auto result = adapter::execute_1_2(kPreparedModel, request, measure, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
@@ -344,7 +319,7 @@
         const V1_3::OptionalTimePoint& deadline,
         const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
         const sp<V1_3::IExecutionCallback>& callback) {
-    auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
+    auto result = adapter::execute_1_3(kPreparedModel, request, measure, deadline,
                                        loopTimeoutDuration, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
@@ -405,8 +380,8 @@
         cb(V1_2::utils::convert(code).value(), nullptr);
         return Void();
     }
-    auto burstContext = std::move(result).value();
-    cb(V1_0::ErrorStatus::NONE, std::move(burstContext));
+    const auto burstContext = std::move(result).value();
+    cb(V1_0::ErrorStatus::NONE, burstContext);
     return Void();
 }
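Net effect for HIDL clients: execute, execute_1_2, and execute_1_3 now run
the execution on the calling thread, so notify() is invoked before they
return, and a request that fails validation is reported as INVALID_ARGUMENT
in the returned status. A client-side sketch of the V1_0 path:

    const Return<V1_0::ErrorStatus> ret = preparedModel->execute(request, callback);
    if (!ret.isOk()) {
        // Transport (hwbinder) failure.
    } else {
        const V1_0::ErrorStatus status = ret;  // safe after the isOk() check
        if (status != V1_0::ErrorStatus::NONE) {
            // Synchronous failure, e.g. INVALID_ARGUMENT from request validation.
        }
    }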