Add 1.2 NN HAL: IPreparedModel & callbacks.

Create the 1.2 versions of IPreparedModel, IPreparedModelCallback, and
IExecutionCallback.

For now the new interfaces provide the same functionality as their 1.0
counterparts; more methods will be introduced in later CLs.
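
As an illustration only (not part of this change), the pattern the updated
VTS harness uses to drive the new interfaces looks roughly like this,
assuming preparedModelCallback has already been passed to the driver's
prepareModel_1_2 call and request has already been built:

    // Sketch: retrieve the prepared model from the (1.0-typed) callback,
    // cast it to the 1.2 interface, and launch an execution via execute_1_2.
    preparedModelCallback->wait();
    ASSERT_EQ(ErrorStatus::NONE, preparedModelCallback->getStatus());
    sp<V1_0::IPreparedModel> modelV1_0 = preparedModelCallback->getPreparedModel();
    sp<V1_2::IPreparedModel> preparedModel =
            V1_2::IPreparedModel::castFrom(modelV1_0).withDefault(nullptr);
    ASSERT_NE(nullptr, preparedModel.get());

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    Return<ErrorStatus> launchStatus =
            preparedModel->execute_1_2(request, executionCallback);
    ASSERT_TRUE(launchStatus.isOk());
    executionCallback->wait();
    EXPECT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
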
Bug: 73506513
Test: VtsHalNeuralnetworksV1_xTargetTest with 1.2 sample driver
Change-Id: Icf4d04c22f88e825d87562f1489377fdf6bf585d
Merged-In: Icf4d04c22f88e825d87562f1489377fdf6bf585d
(cherry picked from commit b5cb8f76321f2217c47e9183506c94dfa5627dbf)
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.cpp b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
index 46bf243..a1c5a1a 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.cpp
@@ -1,10 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "Callbacks.h"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
namespace implementation {
CallbackBase::CallbackBase() : mNotified(false) {}
@@ -88,7 +104,15 @@
PreparedModelCallback::~PreparedModelCallback() {}
Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
- const sp<IPreparedModel>& preparedModel) {
+ const sp<V1_0::IPreparedModel>& preparedModel) {
+ mErrorStatus = errorStatus;
+ mPreparedModel = preparedModel;
+ CallbackBase::notify();
+ return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+ const sp<V1_2::IPreparedModel>& preparedModel) {
mErrorStatus = errorStatus;
mPreparedModel = preparedModel;
CallbackBase::notify();
@@ -100,7 +124,7 @@
return mErrorStatus;
}
-sp<IPreparedModel> PreparedModelCallback::getPreparedModel() {
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
wait();
return mPreparedModel;
}
@@ -115,13 +139,19 @@
return Void();
}
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus) {
+ mErrorStatus = errorStatus;
+ CallbackBase::notify();
+ return Void();
+}
+
ErrorStatus ExecutionCallback::getStatus() {
wait();
return mErrorStatus;
}
} // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/Callbacks.h b/neuralnetworks/1.0/vts/functional/Callbacks.h
index 570a4fb..e89980d 100644
--- a/neuralnetworks/1.0/vts/functional/Callbacks.h
+++ b/neuralnetworks/1.0/vts/functional/Callbacks.h
@@ -1,22 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_CALLBACKS_H
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
#include <chrono>
#include <condition_variable>
#include <functional>
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
#include <mutex>
#include <thread>
namespace android {
namespace hardware {
namespace neuralnetworks {
-namespace V1_0 {
+namespace V1_2 {
namespace implementation {
+using V1_0::ErrorStatus;
+
/**
* The CallbackBase class is used internally by the NeuralNetworks runtime to
* synchronize between different threads. An asynchronous task is launched
@@ -156,11 +176,11 @@
* asynchronously with respect to the runtime. If a calling thread calls wait*
* or get* on a PreparedModelCallback object and the corresponding asynchronous
* task has not finished preparing the model, the calling thread will block
- * until the asynchronous task has called notify. For more information on the
- * synchronization behavior, refer to the CallbackBase class.
+ * until the asynchronous task has either called notify or notify_1_2. For more
+ * information on the synchronization behavior, refer to the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
* IPreparedModelCallback. This callback object is passed as an argument to
* IDevice::prepareModel.
*/
@@ -170,15 +190,15 @@
~PreparedModelCallback() override;
/**
- * IPreparedModelCallback::notify marks the callback object with the return
- * status of the asynchronous model preparation along with the prepared
- * model, and calls CallbackBase::notify, enabling all prior and future
- * wait* calls on the PreparedModelCallback object to proceed. For more
- * information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+ * mark the callback object with the return status of the asynchronous
+ * model preparation along with the prepared model, and call
+ * CallbackBase::notify, enabling all prior and future wait* calls on the
+ * PreparedModelCallback object to proceed. For more information on the
+ * synchronization behavior, refer to the CallbackBase class.
*
- * IPreparedModelCallback::notify must be called exactly once on a given
- * PreparedModelCallback object.
+ * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+ * must be called exactly once on a given PreparedModelCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
@@ -189,7 +209,9 @@
* @param preparedModel Returned model that has been prepared for execution,
* nullptr if the model was unable to be prepared.
*/
- Return<void> notify(ErrorStatus status, const sp<IPreparedModel>& preparedModel) override;
+ Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+ Return<void> notify_1_2(ErrorStatus status,
+ const sp<V1_2::IPreparedModel>& preparedModel) override;
/**
* Retrieves the error status returned from the asynchronous task launched
@@ -217,11 +239,11 @@
* execution, nullptr if the model was unable to be
* prepared.
*/
- sp<IPreparedModel> getPreparedModel();
+ sp<V1_0::IPreparedModel> getPreparedModel();
- private:
+ private:
ErrorStatus mErrorStatus;
- sp<IPreparedModel> mPreparedModel;
+ sp<V1_0::IPreparedModel> mPreparedModel;
};
/**
@@ -229,12 +251,12 @@
* execution from a task executing asynchronously with respect to the runtime.
* If a calling thread calls wait* or get* on a PreparedModelCallback object and
* the corresponding asynchronous task has not finished the execution, the
- * calling thread will block until the asynchronous task has called notify. For
- * more information on the synchronization behavior, refer to the CallbackBase
- * class.
+ * calling thread will block until the asynchronous task has either called notify
+ * or notify_1_2. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
*
* This class inherits the basic blocking and signaling calls from
- * CallbackBase, and implements the HIDL notify call from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
* IExecutionCallback. This callback object is passed as an argument to
* IPreparedModel::execute.
*/
@@ -244,14 +266,14 @@
~ExecutionCallback() override;
/**
- * IExecutionCallback::notify marks the callback object with the return
- * status of the asynchronous execution that held this callback and enables
- * all prior and future wait* calls on the ExecutionCallback object to
- * proceed. For more information on the synchronization behavior, refer to
- * the CallbackBase class.
+ * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+ * callback object with the return status of the asynchronous execution that
+ * held this callback and enable all prior and future wait* calls on the
+ * ExecutionCallback object to proceed. For more information on the
+ * synchronization behavior, refer to the CallbackBase class.
*
- * IExecutionCallback::notify must be called exactly once on a given
- * ExecutionCallback object.
+ * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+ * be called exactly once on a given ExecutionCallback object.
*
* @param status Error status returned from asynchronously preparing the
* model; will be:
@@ -263,6 +285,7 @@
* - INVALID_ARGUMENT if the input request is invalid
*/
Return<void> notify(ErrorStatus status) override;
+ Return<void> notify_1_2(ErrorStatus status) override;
/**
* Retrieves the error status returned from the asynchronous task launched
@@ -299,7 +322,7 @@
}
} // namespace implementation
-} // namespace V1_0
+} // namespace V1_2
} // namespace neuralnetworks
} // namespace hardware
} // namespace android
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index d2703cb..ab524c2 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -24,6 +24,11 @@
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
@@ -34,8 +39,8 @@
namespace neuralnetworks {
namespace generated_tests {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::test_helper::bool8;
using ::test_helper::compare;
using ::test_helper::expectMultinomialDistributionWithinTolerance;
@@ -73,7 +78,18 @@
// Top level driver for models and examples generated by test_generator.py
// Test driver for those generated from ml/nn/runtime/test/spec
-void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_0::IPreparedModel>& preparedModel,
+ const Request& request,
+ sp<ExecutionCallback>& callback) {
+ return preparedModel->execute(request, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+ const Request& request,
+ sp<ExecutionCallback>& callback) {
+ return preparedModel->execute_1_2(request, callback);
+}
+template <typename T_IPreparedModel>
+void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
const std::vector<MixedTypedExample>& examples,
bool hasRelaxedFloat32Model = false, float fpAtol = 1e-5f,
float fpRtol = 1e-5f) {
@@ -172,8 +188,9 @@
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
- Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
- {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
+ Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+ preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
+ executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@@ -199,6 +216,16 @@
}
}
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+ sp<V1_0::IPreparedModel>* preparedModel) {
+ *preparedModel = callback->getPreparedModel();
+}
+static void getPreparedModel(sp<PreparedModelCallback> callback,
+ sp<V1_2::IPreparedModel>* preparedModel) {
+ sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+ *preparedModel = V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples) {
V1_0::Model model = create_model();
@@ -224,7 +251,8 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ sp<V1_0::IPreparedModel> preparedModel;
+ getPreparedModel(preparedModelCallback, &preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -270,7 +298,8 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ sp<V1_0::IPreparedModel> preparedModel;
+ getPreparedModel(preparedModelCallback, &preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
@@ -316,7 +345,8 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ sp<V1_2::IPreparedModel> preparedModel;
+ getPreparedModel(preparedModelCallback, &preparedModel);
// early termination if vendor service cannot fully prepare model
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
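
The getPreparedModel and ExecutePreparedModel overloads above let the single
templated EvaluatePreparedModel drive both interface generations. A minimal
sketch of the resulting call sites (illustrative only, assuming a
preparedModelCallback that has already been notified):

    // V1_0 path: use the prepared model from the callback as-is and execute
    // through IPreparedModel::execute.
    sp<V1_0::IPreparedModel> modelV1_0;
    getPreparedModel(preparedModelCallback, &modelV1_0);
    EvaluatePreparedModel(modelV1_0, is_ignored, examples);

    // V1_2 path: the callback's 1.0 handle is cast to V1_2::IPreparedModel and
    // executed through execute_1_2.
    sp<V1_2::IPreparedModel> modelV1_2;
    getPreparedModel(preparedModelCallback, &modelV1_2);
    EvaluatePreparedModel(modelV1_2, is_ignored, examples);
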
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
index 26b4d8b..55e5861 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
index b813c39..5d24fb5 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp
@@ -27,8 +27,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 1d3dee3..72a5007 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
index 290a9d3..d98ea04 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
index a36b24c..1df3218 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestsV1_0.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
index d6c6533..b35a901 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp
@@ -33,8 +33,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index e7d96c7..5225bf7 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index a64268f..970e8b5 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -36,6 +36,7 @@
using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
+using V1_0::IPreparedModel;
using V1_0::Operand;
using V1_0::OperandType;
using V1_0::Request;
diff --git a/neuralnetworks/1.2/Android.bp b/neuralnetworks/1.2/Android.bp
index e155bbd..528a2c7 100644
--- a/neuralnetworks/1.2/Android.bp
+++ b/neuralnetworks/1.2/Android.bp
@@ -9,6 +9,9 @@
srcs: [
"types.hal",
"IDevice.hal",
+ "IExecutionCallback.hal",
+ "IPreparedModel.hal",
+ "IPreparedModelCallback.hal",
],
interfaces: [
"android.hardware.neuralnetworks@1.0",
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index aff4cf3..6a77961 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -17,9 +17,9 @@
package android.hardware.neuralnetworks@1.2;
import @1.0::ErrorStatus;
-import @1.0::IPreparedModelCallback;
import @1.1::ExecutionPreference;
import @1.1::IDevice;
+import IPreparedModelCallback;
/**
* This interface represents a device driver.
diff --git a/neuralnetworks/1.2/IExecutionCallback.hal b/neuralnetworks/1.2/IExecutionCallback.hal
new file mode 100644
index 0000000..667e0d6
--- /dev/null
+++ b/neuralnetworks/1.2/IExecutionCallback.hal
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IExecutionCallback;
+
+/**
+ * IExecutionCallback must be used to return the error status result from an
+ * execution asynchronously launched from IPreparedModel::execute.
+ */
+interface IExecutionCallback extends @1.0::IExecutionCallback {
+
+ /**
+ * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+ * task has finished performing the execution. Either notify_1_2 or notify must be
+ * provided with the ErrorStatus from the execution. If the asynchronous task is
+ * not launched, either notify_1_2 or notify must be invoked with the appropriate
+ * error.
+ *
+ * @param status Error status returned from launching the asynchronous task
+ * (if the launch fails) or from the asynchronous task itself
+ * (if the launch succeeds). Must be:
+ * - NONE if the asynchronous execution was successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if the asynchronous task resulted in an
+ * unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+ * not large enough to store the resultant values
+ * - INVALID_ARGUMENT if one of the input arguments to
+ *                      execute is invalid
+ */
+ oneway notify_1_2(ErrorStatus status);
+};
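
A minimal driver-side sketch of the contract above, assuming a hypothetical
asynchronous execution task (asyncExecute and the surrounding driver state
are illustrative, not part of this change):

    #include <android-base/logging.h>
    #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>

    using ::android::sp;
    using ::android::hardware::Return;
    using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
    using ::android::hardware::neuralnetworks::V1_2::IExecutionCallback;

    // Runs on the driver's worker thread once the execution has finished.
    void asyncExecute(ErrorStatus result, const sp<IExecutionCallback>& callback) {
        // A 1.2-aware task reports through notify_1_2; notify remains available
        // for callers that only understand the 1.0 method.
        Return<void> ret = callback->notify_1_2(result);
        if (!ret.isOk()) {
            LOG(ERROR) << "Failed to deliver execution status: " << ret.description();
        }
    }
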
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
new file mode 100644
index 0000000..5590487
--- /dev/null
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModel;
+import @1.0::Request;
+import IExecutionCallback;
+
+/**
+ * IPreparedModel describes a model that has been prepared for execution and
+ * is used to launch executions.
+ */
+interface IPreparedModel extends @1.0::IPreparedModel {
+ /**
+ * Launches an asynchronous execution on a prepared model.
+ *
+ * The execution is performed asynchronously with respect to the caller.
+ * execute_1_2 must verify the inputs to the function are correct. If there is
+ * an error, execute_1_2 must immediately invoke the callback with the
+ * appropriate ErrorStatus value, then return with the same ErrorStatus. If
+ * the inputs to the function are valid and there is no error, execute_1_2 must
+ * launch an asynchronous task to perform the execution in the background,
+ * and immediately return with ErrorStatus::NONE. If the asynchronous task
+ * fails to launch, execute_1_2 must immediately invoke the callback with
+ * ErrorStatus::GENERAL_FAILURE, then return with
+ * ErrorStatus::GENERAL_FAILURE.
+ *
+ * When the asynchronous task has finished its execution, it must
+ * immediately invoke the callback object provided as an input to the
+ * execute_1_2 function. This callback must be provided with the ErrorStatus of
+ * the execution.
+ *
+ * If the prepared model was prepared from a model wherein all
+ * tensor operands have fully specified dimensions, and the inputs
+ * to the function are valid, then the execution should launch
+ * and complete successfully (ErrorStatus::NONE). There must be
+ * no failure unless the device itself is in a bad state.
+ *
+ * Multiple threads can call the execute_1_2 function on the same IPreparedModel
+ * object concurrently with different requests.
+ *
+ * @param request The input and output information on which the prepared
+ * model is to be executed.
+ * @param callback A callback object used to return the error status of
+ * the execution. The callback object's notify function must
+ * be called exactly once, even if the execution was
+ * unsuccessful.
+ * @return status Error status of the call, must be:
+ * - NONE if task is successfully launched
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+ * not large enough to store the resultant values
+ * - INVALID_ARGUMENT if one of the input arguments is
+ * invalid
+ */
+ execute_1_2(Request request, IExecutionCallback callback)
+ generates (ErrorStatus status);
+};
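
A rough driver-side sketch of the behavior documented above; SamplePreparedModel,
validateRequest, and runModel are hypothetical names standing in for driver
internals, and the usual generated 1.2 headers plus <thread> are assumed:

    Return<ErrorStatus> SamplePreparedModel::execute_1_2(
            const Request& request, const sp<V1_2::IExecutionCallback>& callback) {
        if (callback == nullptr) {
            return ErrorStatus::INVALID_ARGUMENT;
        }
        if (!validateRequest(request, mModel)) {
            // Invalid inputs: notify the callback, then return the same status.
            callback->notify_1_2(ErrorStatus::INVALID_ARGUMENT);
            return ErrorStatus::INVALID_ARGUMENT;
        }
        // Valid inputs: run the execution in the background and return NONE now.
        std::thread([this, request, callback] {
            ErrorStatus status = runModel(mModel, request);
            callback->notify_1_2(status);  // must be invoked exactly once
        }).detach();
        return ErrorStatus::NONE;
    }
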
diff --git a/neuralnetworks/1.2/IPreparedModelCallback.hal b/neuralnetworks/1.2/IPreparedModelCallback.hal
new file mode 100644
index 0000000..d3830c6
--- /dev/null
+++ b/neuralnetworks/1.2/IPreparedModelCallback.hal
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::ErrorStatus;
+import @1.0::IPreparedModelCallback;
+import IPreparedModel;
+
+/**
+ * IPreparedModelCallback must be used to return a prepared model produced by an
+ * asynchronous task launched from IDevice::prepareModel.
+ */
+interface IPreparedModelCallback extends @1.0::IPreparedModelCallback {
+
+ /**
+ * Either notify_1_2 or notify must be invoked immediately after the asynchronous
+ * task holding this callback has finished preparing the model. If the model was
+ * successfully prepared, either notify_1_2 or notify must be invoked with
+ * ErrorStatus::NONE and the prepared model. If the model was not able to be
+ * successfully prepared, either notify_1_2 or notify must be invoked with the
+ * appropriate ErrorStatus and nullptr as the IPreparedModel. If the asynchronous
+ * task holding this callback fails to launch or if the model provided to
+ * IDevice::prepareModel is invalid, either notify_1_2 or notify must be invoked
+ * with the appropriate error as well as nullptr for the IPreparedModel.
+ *
+ * @param status Error status returned from the asynchronous model
+ * preparation task; must be:
+ * - NONE if the asynchronous task successfully prepared the
+ * model
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if the asynchronous task resulted in an
+ * unspecified error
+ * - INVALID_ARGUMENT if one of the input arguments to
+ * prepareModel is invalid
+ * @param preparedModel A model that has been asynchronously prepared for
+ * execution. If the model was unable to be prepared
+ * due to an error, nullptr must be passed in place of
+ * the IPreparedModel object.
+ */
+ oneway notify_1_2(ErrorStatus status, IPreparedModel preparedModel);
+};
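
A rough sketch of how a driver's asynchronous preparation task would use this
callback; asyncPrepare and compileModel are hypothetical driver internals:

    // Runs on the driver's worker thread after IDevice::prepareModel_1_2 has
    // launched the preparation.
    void asyncPrepare(const Model& model,
                      const sp<V1_2::IPreparedModelCallback>& callback) {
        sp<V1_2::IPreparedModel> preparedModel = compileModel(model);
        if (preparedModel == nullptr) {
            // Preparation failed: report the error together with a null model.
            callback->notify_1_2(ErrorStatus::GENERAL_FAILURE, nullptr);
            return;
        }
        callback->notify_1_2(ErrorStatus::NONE, preparedModel);
    }
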
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
index 79d5a60..9bff09c 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 42e22b0..56a61d4 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index aab5cb6..1c781ec 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -40,8 +40,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c4f1b5e..8024992 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -25,15 +25,14 @@
namespace neuralnetworks {
namespace V1_2 {
-using V1_0::IPreparedModel;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -62,7 +61,7 @@
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
- sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+ sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
ASSERT_EQ(nullptr, preparedModel.get());
}
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index b663535..e2722aa 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -33,8 +33,8 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -68,7 +68,7 @@
// retrieve prepared model
preparedModelCallback->wait();
ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- *preparedModel = preparedModelCallback->getPreparedModel();
+ *preparedModel = getPreparedModel_1_2(preparedModelCallback);
// The getSupportedOperations_1_2 call returns a list of operations that are
// guaranteed not to fail if prepareModel_1_2 is called, and
@@ -101,7 +101,8 @@
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
ASSERT_NE(nullptr, executionCallback.get());
- Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback);
+ Return<ErrorStatus> executeLaunchStatus =
+ preparedModel->execute_1_2(request, executionCallback);
ASSERT_TRUE(executeLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 90a910c..4eced82 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -58,6 +58,12 @@
::testing::VtsHalHidlTargetTestBase::TearDown();
}
+sp<IPreparedModel> getPreparedModel_1_2(
+ const sp<V1_2::implementation::PreparedModelCallback>& callback) {
+ sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+ return V1_2::IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
} // namespace functional
} // namespace vts
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index a87d788..dedab8d 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -17,6 +17,8 @@
#ifndef VTS_HAL_NEURALNETWORKS_V1_2_H
#define VTS_HAL_NEURALNETWORKS_V1_2_H
+#include "Callbacks.h"
+
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
@@ -77,6 +79,10 @@
// Tag for the generated tests
class GeneratedTest : public NeuralnetworksHidlTest {};
+// Utility function to get PreparedModel from callback and downcast to V1_2.
+sp<IPreparedModel> getPreparedModel_1_2(
+ const sp<V1_2::implementation::PreparedModelCallback>& callback);
+
} // namespace functional
} // namespace vts