NN HAL: Upgrade IPreparedModelCallback::notify to 1.3.
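
A minimal sketch of how the new V1_3::implementation::PreparedModelCallback
is driven by the tests (kDevice, modelCache, dataCache, and cacheToken stand
in for the fixtures used in CompilationCachingTests.cpp; error handling is
trimmed):

    using android::hardware::neuralnetworks::V1_3::implementation::PreparedModelCallback;

    // Launch asynchronous model preparation; the driver reports completion
    // through notify, notify_1_2, or notify_1_3 on the callback object.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    Return<V1_0::ErrorStatus> launchStatus = kDevice->prepareModelFromCache_1_3(
            modelCache, dataCache, cacheToken, callback);
    ASSERT_TRUE(launchStatus.isOk());

    // wait() blocks until the driver has notified the callback; getStatus()
    // and getPreparedModel() wait implicitly as well.
    callback->wait();
    V1_0::ErrorStatus status = callback->getStatus();
    sp<V1_0::IPreparedModel> preparedModel = callback->getPreparedModel();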

Bug: 143242728
Test: 1.3 VTS with sample driver
Change-Id: I56bc7a2fb179a9576036ad0c2aae0e1f41ec4e2c
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index 0f2720e..e2795de 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -14,6 +14,24 @@
 // limitations under the License.
 //
 
+cc_library_static {
+    name: "VtsHalNeuralNetworksV1_3Callbacks",
+    defaults: ["VtsHalTargetTestDefaults"],
+    export_include_dirs: ["include"],
+    srcs: [
+        "Callbacks.cpp",
+    ],
+    static_libs: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.2",
+        "android.hardware.neuralnetworks@1.3",
+    ],
+    header_libs: [
+        "libbase_headers",
+    ],
+}
+
 cc_test {
     name: "VtsHalNeuralnetworksV1_3TargetTest",
     defaults: ["VtsHalTargetTestDefaults"],
@@ -44,6 +62,7 @@
         "libneuralnetworks_utils",
         "VtsHalNeuralNetworksV1_0_utils",
         "VtsHalNeuralNetworksV1_2Callbacks",
+        "VtsHalNeuralNetworksV1_3Callbacks",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
diff --git a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
new file mode 100644
index 0000000..435db46
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Callbacks"
+
+#include "1.3/Callbacks.h"
+
+#include <android-base/logging.h>
+
+#include <limits>
+
+namespace android::hardware::neuralnetworks::V1_3::implementation {
+
+using V1_0::ErrorStatus;
+
+// PreparedModelCallback methods begin here
+
+Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    {
+        std::lock_guard<std::mutex> hold(mMutex);
+
+        // quick-return if object has already been notified
+        if (mNotified) {
+            return Void();
+        }
+
+        // store results and mark as notified
+        mErrorStatus = errorStatus;
+        mPreparedModel = preparedModel;
+        mNotified = true;
+    }
+
+    mCondition.notify_all();
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    return notify(errorStatus, preparedModel);
+}
+
+Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    return notify(errorStatus, preparedModel);
+}
+
+void PreparedModelCallback::wait() const {
+    std::unique_lock<std::mutex> lock(mMutex);
+    mCondition.wait(lock, [this] { return mNotified; });
+}
+
+ErrorStatus PreparedModelCallback::getStatus() const {
+    wait();
+    return mErrorStatus;
+}
+
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const {
+    wait();
+    return mPreparedModel;
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3::implementation
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index 0ac4738..ea2398b 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -28,7 +28,7 @@
 #include <random>
 #include <thread>
 
-#include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 #include "GeneratedTestHarness.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
@@ -48,12 +48,12 @@
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using namespace test_helper;
+using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
 using V1_2::IPreparedModel;
 using V1_2::OperationType;
-using V1_2::implementation::PreparedModelCallback;
 
 namespace float32_model {
 
@@ -231,7 +231,7 @@
         ASSERT_NE(kDevice.get(), nullptr);
 
         // Create cache directory. The cache directory and a temporary cache file is always created
-        // to test the behavior of prepareModelFromCache, even when caching is not supported.
+        // to test the behavior of prepareModelFromCache_1_3, even when caching is not supported.
         char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
         char* cacheDir = mkdtemp(cacheDirTemp);
         ASSERT_NE(cacheDir, nullptr);
@@ -370,7 +370,7 @@
         // Launch prepare model from cache.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
-        Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache(
+        Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache_1_3(
                 modelCache, dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 8a7ed24..4f9b6f9 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -29,6 +29,7 @@
 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <android/hidl/memory/1.0/IMemory.h>
@@ -42,6 +43,7 @@
 
 #include "1.0/Utils.h"
 #include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
@@ -52,6 +54,7 @@
 
 using namespace test_helper;
 using hidl::memory::V1_0::IMemory;
+using implementation::PreparedModelCallback;
 using V1_0::DataLocation;
 using V1_0::ErrorStatus;
 using V1_0::OperandLifeTime;
@@ -65,7 +68,6 @@
 using V1_2::SymmPerChannelQuantParams;
 using V1_2::Timing;
 using V1_2::implementation::ExecutionCallback;
-using V1_2::implementation::PreparedModelCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 44b32a9..bdda790 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -17,12 +17,13 @@
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
+using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_0::OperandLifeTime;
 using V1_1::ExecutionPreference;
@@ -30,7 +31,6 @@
 using V1_2::OperationType;
 using V1_2::OperationTypeRange;
 using V1_2::SymmPerChannelQuantParams;
-using V1_2::implementation::PreparedModelCallback;
 using HidlToken =
         hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 4f0e150..da5d95b 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -21,8 +21,8 @@
 #include <hidl/ServiceManagement.h>
 #include <string>
 #include <utility>
-#include "1.0/Callbacks.h"
 #include "1.0/Utils.h"
+#include "1.3/Callbacks.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 
@@ -30,11 +30,11 @@
 
 using HidlToken =
         hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using implementation::PreparedModelCallback;
 using V1_0::ErrorStatus;
 using V1_0::Request;
 using V1_1::ExecutionPreference;
 using V1_2::IPreparedModel;
-using V1_2::implementation::PreparedModelCallback;
 
 // internal helper function
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index fc654ce..9ccc911 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -22,7 +22,7 @@
 #include <android/hardware/neuralnetworks/1.3/types.h>
 #include <gtest/gtest.h>
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
@@ -51,7 +51,7 @@
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<V1_2::IPreparedModel> getPreparedModel_1_2(
-        const sp<V1_2::implementation::PreparedModelCallback>& callback);
+        const sp<implementation::PreparedModelCallback>& callback);
 
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
 
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
new file mode 100644
index 0000000..9376a92
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
+
+#include <android-base/thread_annotations.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
+#include <hidl/Status.h>
+#include <condition_variable>
+#include <mutex>
+
+/*
+ * The Callback classes are used internally by the NeuralNetworks runtime to
+ * synchronize between different threads. An asynchronous task is launched
+ * paired with a callback object. When a client thread requires the output of
+ * the asynchronous task, it can wait for the result and will block until the
+ * task has completed. Any wait may safely be called
+ * concurrently, even on the same callback object. When the asynchronous task
+ * has finished its workload, it must immediately call "notify*". If the
+ * asynchronous task has failed to launch, the function that tried to launch the
+ * asynchronous task must immediately call "notify*". This "notify*" call
+ * awakens any client threads waiting on the callback object.
+ *
+ * These classes exist to enable synchronization across HIDL. When
+ * synchronization is only required in the same process, consider using
+ * std::future, std::mutex, std::condition_variable, or std::experimental::latch
+ * instead.
+ */
+
+namespace android::hardware::neuralnetworks::V1_3::implementation {
+
+/**
+ * The PreparedModelCallback class is used to receive the error status of
+ * preparing a model as well as the prepared model from a task executing
+ * asynchronously with respect to the runtime. If a calling thread calls wait
+ * or get* on a PreparedModelCallback object and the corresponding asynchronous
+ * task has not finished preparing the model, the calling thread will block
+ * until the asynchronous task has called notify*.
+ *
+ * If the callback object is notified more than once, only the results of the
+ * first call to notify* are used, and the results from subsequent calls are
+ * discarded.
+ *
+ * This callback object is passed as an argument to IDevice::prepareModel*.
+ */
+class PreparedModelCallback : public IPreparedModelCallback {
+  public:
+    /**
+     * IPreparedModelCallback::notify marks the callback object with the return
+     * status of the asynchronous model preparation along with the prepared
+     * model, and allows all prior and future wait calls on the
+     * PreparedModelCallback object to proceed.
+     *
+     * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2,
+     * or IPreparedModelCallback::notify_1_3 must be called on a given
+     * PreparedModelCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from asynchronously preparing the
+     *     model; will be:
+     *     - NONE if the asynchronous preparation was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if the input model is invalid
+     * @param preparedModel Returned model that has been prepared for execution,
+     *     nullptr if the model could not be prepared.
+     */
+    Return<void> notify(V1_0::ErrorStatus status,
+                        const sp<V1_0::IPreparedModel>& preparedModel) override;
+
+    /**
+     * IPreparedModelCallback::notify_1_2 marks the callback object with the
+     * return status of the asynchronous model preparation along with the
+     * prepared model, and allows all prior and future wait calls on the
+     * PreparedModelCallback object to proceed.
+     *
+     * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2,
+     * or IPreparedModelCallback::notify_1_3 must be called on a given
+     * PreparedModelCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from asynchronously preparing the
+     *     model; will be:
+     *     - NONE if the asynchronous preparation was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if the input model is invalid
+     * @param preparedModel Returned model that has been prepared for execution,
+     *     nullptr if the model could not be prepared.
+     */
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
+
+    /**
+     * IPreparedModelCallback::notify_1_3 marks the callback object with the
+     * return status of the asynchronous model preparation along with the
+     * prepared model, and allows all prior and future wait calls on the
+     * PreparedModelCallback object to proceed.
+     *
+     * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2,
+     * or IPreparedModelCallback::notify_1_3 must be called on a given
+     * PreparedModelCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from asynchronously preparing the
+     *     model; will be:
+     *     - NONE if the asynchronous preparation was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if the input model is invalid
+     * @param preparedModel Returned model that has been prepared for execution,
+     *     nullptr if the model could not be prepared.
+     */
+    Return<void> notify_1_3(V1_0::ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
+
+    /**
+     * PreparedModelCallback::wait blocks until notify* has been called on the
+     * callback object.
+     */
+    void wait() const;
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished
+     * asynchronously preparing the model, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * @return status Error status returned from asynchronously preparing the
+     *     model; will be:
+     *     - NONE if the asynchronous preparation was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - INVALID_ARGUMENT if the input model is invalid
+     */
+    V1_0::ErrorStatus getStatus() const;
+
+    /**
+     * Retrieves the model that has been prepared for execution from the
+     * asynchronous task launched by IDevice::prepareModel*. If
+     * IDevice::prepareModel* has not finished asynchronously preparing the
+     * model, this call will block until the asynchronous task notifies the
+     * object.
+     *
+     * @return preparedModel Returned model that has been prepared for
+     *     execution, nullptr if the model could not be prepared.
+     */
+    sp<V1_0::IPreparedModel> getPreparedModel() const;
+
+  private:
+    mutable std::mutex mMutex;
+    mutable std::condition_variable mCondition;
+    bool mNotified GUARDED_BY(mMutex) = false;
+    V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
+    sp<V1_0::IPreparedModel> mPreparedModel;
+};
+
+}  // namespace android::hardware::neuralnetworks::V1_3::implementation
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H