Merge "Fix VtsHalGraphicsComposerV2_1TargetTest" into oc-mr1-dev
diff --git a/media/omx/1.0/vts/functional/audio/VtsHalMediaOmxV1_0TargetAudioDecTest.cpp b/media/omx/1.0/vts/functional/audio/VtsHalMediaOmxV1_0TargetAudioDecTest.cpp
index 6790ebf..8520757 100644
--- a/media/omx/1.0/vts/functional/audio/VtsHalMediaOmxV1_0TargetAudioDecTest.cpp
+++ b/media/omx/1.0/vts/functional/audio/VtsHalMediaOmxV1_0TargetAudioDecTest.cpp
@@ -765,13 +765,13 @@
     int bytesCount = 0;
     uint32_t flags = 0;
     uint32_t timestamp = 0;
-    timestampDevTest = true;
+    timestampDevTest = false;
     while (1) {
         if (!(eleInfo >> bytesCount)) break;
         eleInfo >> flags;
         eleInfo >> timestamp;
         Info.push_back({bytesCount, flags, timestamp});
-        if (flags != OMX_BUFFERFLAG_CODECCONFIG)
+        if (timestampDevTest && (flags != OMX_BUFFERFLAG_CODECCONFIG))
             timestampUslist.push_back(timestamp);
     }
     eleInfo.close();
@@ -813,7 +813,7 @@
     packedArgs audioArgs = {eEncoding, compName};
     testEOS(omxNode, observer, &iBuffer, &oBuffer, false, eosFlag, nullptr,
             portReconfiguration, kPortIndexInput, kPortIndexOutput, &audioArgs);
-    EXPECT_EQ(timestampUslist.empty(), true);
+    if (timestampDevTest) EXPECT_EQ(timestampUslist.empty(), true);
     // set state to idle
     changeStateExecutetoIdle(omxNode, observer, &iBuffer, &oBuffer);
     // set state to executing
diff --git a/media/omx/1.0/vts/functional/component/VtsHalMediaOmxV1_0TargetComponentTest.cpp b/media/omx/1.0/vts/functional/component/VtsHalMediaOmxV1_0TargetComponentTest.cpp
index 0f29d91..235cfef 100644
--- a/media/omx/1.0/vts/functional/component/VtsHalMediaOmxV1_0TargetComponentTest.cpp
+++ b/media/omx/1.0/vts/functional/component/VtsHalMediaOmxV1_0TargetComponentTest.cpp
@@ -371,7 +371,7 @@
         kPortIndexOutput = kPortIndexInput + 1;
     }
 
-    for (size_t i = kPortIndexInput; i < kPortIndexOutput; i++) {
+    for (size_t i = kPortIndexInput; i <= kPortIndexOutput; i++) {
         OMX_PARAM_PORTDEFINITIONTYPE portDef;
         status =
             getPortParam(omxNode, OMX_IndexParamPortDefinition, i, &portDef);
@@ -406,10 +406,32 @@
             setPortParam(omxNode, OMX_IndexParamPortDefinition, i, &mirror);
 
             portDef = mirror;
-            portDef.nBufferSize >>= 1;
+            OMX_U32 nBufferSize = portDef.nBufferSize >> 1;
+            if (nBufferSize != 0) {
+                if (!strncmp(gEnv->getComponent().c_str(), "OMX.google.", 11)) {
+                    portDef.nBufferSize = nBufferSize;
+                } else {
+                    // Probable alignment requirements of vendor component
+                    portDef.nBufferSize = ALIGN_POWER_OF_TWO(nBufferSize, 12);
+                    nBufferSize = portDef.nBufferSize;
+                }
+            } else {
+                ASSERT_TRUE(false) << "Unexpected buffer size";
+            }
             setPortParam(omxNode, OMX_IndexParamPortDefinition, i, &portDef);
             getPortParam(omxNode, OMX_IndexParamPortDefinition, i, &portDef);
-            EXPECT_EQ(portDef.nBufferSize, mirror.nBufferSize);
+            // SPECIAL CASE: For video decoder, allow configuration of input
+            // buffer size even if it is less than minimum requirement and
+            // similarly for encoder allow configuration of output port buffer
+            // size.
+            if ((compClass == video_encoder && i == kPortIndexOutput) ||
+                (compClass == video_decoder && i == kPortIndexInput)) {
+                double dev = (portDef.nBufferSize / (double)nBufferSize);
+                dev -= 1;
+                if (dev < 0 || dev > 0.1) EXPECT_TRUE(false);
+            } else {
+                EXPECT_EQ(portDef.nBufferSize, mirror.nBufferSize);
+            }
             setPortParam(omxNode, OMX_IndexParamPortDefinition, i, &mirror);
 
             portDef = mirror;
@@ -467,6 +489,11 @@
         portBase = params.nStartPortNumber;
     }
 
+    // set state to idle
+    status = omxNode->sendCommand(toRawCommandType(OMX_CommandStateSet),
+                                  OMX_StateIdle);
+    ASSERT_EQ(status, android::hardware::media::omx::V1_0::Status::OK);
+
     OMX_PARAM_PORTDEFINITIONTYPE portDef;
     status =
         getPortParam(omxNode, OMX_IndexParamPortDefinition, portBase, &portDef);
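
The buffer-size hunk above relies on ALIGN_POWER_OF_TWO(nBufferSize, 12), whose definition is not part of this change. The sketch below assumes the conventional round-up-to-2^n form (here 2^12 = 4096 bytes), which is why a vendor component may legitimately report a larger, page-aligned buffer size than the halved value requested.

    // Assumed form of the helper used above (presumably defined in the OMX VTS
    // common headers, not in this patch): round "value" up to a multiple of 2^n.
    #define ALIGN_POWER_OF_TWO(value, n) \
        (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

    // Example: halving a 12000-byte buffer gives 6000, which aligns up to 8192.
    OMX_U32 aligned = ALIGN_POWER_OF_TWO(6000u, 12);  // == 8192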
diff --git a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
index c75bd7c..1663ae7 100644
--- a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
+++ b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoDecTest.cpp
@@ -906,7 +906,7 @@
         eleInfo >> flags;
         eleInfo >> timestamp;
         Info.push_back({bytesCount, flags, timestamp});
-        if (flags != OMX_BUFFERFLAG_CODECCONFIG)
+        if (timestampDevTest && (flags != OMX_BUFFERFLAG_CODECCONFIG))
             timestampUslist.push_back(timestamp);
         if (maxBytesCount < bytesCount) maxBytesCount = bytesCount;
     }
@@ -980,7 +980,7 @@
                            kPortIndexInput, kPortIndexOutput, portMode[1]);
     testEOS(omxNode, observer, &iBuffer, &oBuffer, false, eosFlag, portMode,
             portReconfiguration, kPortIndexInput, kPortIndexOutput, nullptr);
-    EXPECT_EQ(timestampUslist.empty(), true);
+    if (timestampDevTest) EXPECT_EQ(timestampUslist.empty(), true);
     // set state to idle
     changeStateExecutetoIdle(omxNode, observer, &iBuffer, &oBuffer);
     // set state to executing
diff --git a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoEncTest.cpp b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoEncTest.cpp
index f4a4e9b..23f051d 100644
--- a/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoEncTest.cpp
+++ b/media/omx/1.0/vts/functional/video/VtsHalMediaOmxV1_0TargetVideoEncTest.cpp
@@ -1300,7 +1300,7 @@
     eleStream.close();
     waitOnInputConsumption(omxNode, observer, &iBuffer, &oBuffer);
     testEOS(omxNode, observer, &iBuffer, &oBuffer, false, eosFlag);
-    EXPECT_EQ(timestampUslist.empty(), true);
+    if (timestampDevTest) EXPECT_EQ(timestampUslist.empty(), true);
 
     // set state to idle
     changeStateExecutetoIdle(omxNode, observer, &iBuffer, &oBuffer);
diff --git a/neuralnetworks/1.0/Android.bp b/neuralnetworks/1.0/Android.bp
index b5603a2..d7c3bbb 100644
--- a/neuralnetworks/1.0/Android.bp
+++ b/neuralnetworks/1.0/Android.bp
@@ -5,6 +5,7 @@
     srcs: [
         "types.hal",
         "IDevice.hal",
+        "IEvent.hal",
         "IPreparedModel.hal",
     ],
 }
@@ -19,6 +20,7 @@
     out: [
         "android/hardware/neuralnetworks/1.0/types.cpp",
         "android/hardware/neuralnetworks/1.0/DeviceAll.cpp",
+        "android/hardware/neuralnetworks/1.0/EventAll.cpp",
         "android/hardware/neuralnetworks/1.0/PreparedModelAll.cpp",
     ],
 }
@@ -38,6 +40,11 @@
         "android/hardware/neuralnetworks/1.0/BnHwDevice.h",
         "android/hardware/neuralnetworks/1.0/BpHwDevice.h",
         "android/hardware/neuralnetworks/1.0/BsDevice.h",
+        "android/hardware/neuralnetworks/1.0/IEvent.h",
+        "android/hardware/neuralnetworks/1.0/IHwEvent.h",
+        "android/hardware/neuralnetworks/1.0/BnHwEvent.h",
+        "android/hardware/neuralnetworks/1.0/BpHwEvent.h",
+        "android/hardware/neuralnetworks/1.0/BsEvent.h",
         "android/hardware/neuralnetworks/1.0/IPreparedModel.h",
         "android/hardware/neuralnetworks/1.0/IHwPreparedModel.h",
         "android/hardware/neuralnetworks/1.0/BnHwPreparedModel.h",
diff --git a/neuralnetworks/1.0/IEvent.hal b/neuralnetworks/1.0/IEvent.hal
new file mode 100644
index 0000000..63afeaf
--- /dev/null
+++ b/neuralnetworks/1.0/IEvent.hal
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This HAL is a work in progress */
+
+package android.hardware.neuralnetworks@1.0;
+
+/**
+ * The IEvent interface is a callback object passed by the
+ * Neuralnetworks runtime to the vendor service. It is used as a
+ * synchronization primitive between one or more runtime threads and a
+ * single asynchronous vendor thread.  An event object is passed as an
+ * argument to a HIDL call that is expected to take a non-trivial
+ * amount of time. When the asynchronous execution thread has
+ * completed its computation, it must call "notify" on the event to
+ * indicate to the Neuralnetworks runtime whether the computation was
+ * successful or not, and that the corresponding output is ready to be
+ * consumed if the execution was successful.
+ *
+ * TODO: Mention that "notify" is also called by a runtime thread
+ * during CPU fallback execution? Depends on whether the HIDL comments
+ * are strictly for vendors or not.
+ */
+interface IEvent {
+
+    /**
+     * IEvent::notify is called by the server thread (i.e. the thread doing the
+     * work) to mark the event as completed so that any threads requiring the
+     * corresponding resources can continue executing.
+     *
+     * @param status Status of the execution associated with the Event.
+     *               Should be SUCCESS or ERROR.
+     */
+    oneway notify(Status status);
+
+};
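
To make the callback contract concrete: a driver's IPreparedModel::execute is expected to return immediately, run the computation on its own thread, and signal the event once the outputs are ready. A minimal driver-side sketch, with hypothetical names (SamplePreparedModel, doComputation) and error handling elided:

    // Sketch only; assumes the generated V1_0 C++ headers and <thread> are available.
    Return<bool> SamplePreparedModel::execute(const Request& request,
                                              const sp<IEvent>& event) {
        std::thread([request, event] {
            bool ok = doComputation(request);  // vendor-specific work (placeholder)
            event->notify(ok ? Status::SUCCESS : Status::ERROR);
        }).detach();
        return true;  // the HIDL call itself returns without waiting
    }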
diff --git a/neuralnetworks/1.0/IPreparedModel.hal b/neuralnetworks/1.0/IPreparedModel.hal
index 566d6ac..428ddc7 100644
--- a/neuralnetworks/1.0/IPreparedModel.hal
+++ b/neuralnetworks/1.0/IPreparedModel.hal
@@ -18,8 +18,9 @@
 
 package android.hardware.neuralnetworks@1.0;
 
+import IEvent;
+
 interface IPreparedModel {
-    // TODO: The execution is synchronous.  Change that to have a callback on completion.
     // Multiple threads can call this execute function concurrently.
-    execute(Request request) generates(bool success);
+    execute(Request request, IEvent event) generates(bool success);
 };
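
From the caller's side the new signature turns execution into a two-step handshake: execute() only launches the work, and the event reports completion. Condensed from the updated VTS test further below:

    sp<Event> event = new Event();  // test-side IEvent implementation, added below
    bool launched = preparedModel->execute(request, event);
    if (launched && event->wait() == Event::Status::SUCCESS) {
        // the outputs referenced by the Request are now safe to read
    }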
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 4bd6a08..61af70b 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -32,7 +32,8 @@
     UINT32                    = 7,
     TENSOR_FLOAT16            = 8,
     TENSOR_FLOAT32            = 9,
-    TENSOR_QUANT8_ASYMM       = 10,
+    TENSOR_INT32              = 10,
+    TENSOR_QUANT8_ASYMM       = 11,
 };
 
 // The type of operations.  Unlike the operation types found in
@@ -41,40 +42,40 @@
 // TODO: Currently they are the same.  Add a conversion when finalizing the model.
 // When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalInterfaces.h.
 enum OperationType : uint32_t {
-    AVERAGE_POOL                 = 0,
-    CONCATENATION                = 1,
-    CONV                         = 2,
-    DEPTHWISE_CONV               = 3,
-    MAX_POOL                     = 4,
-    L2_POOL                      = 5,
-    DEPTH_TO_SPACE               = 6,
-    SPACE_TO_DEPTH               = 7,
-    LOCAL_RESPONSE_NORMALIZATION = 8,
-    SOFTMAX                      = 9,
-    RESHAPE                      = 10,
-    SPLIT                        = 11,
-    FAKE_QUANT                   = 12,
-    ADD                          = 13,
-    FULLY_CONNECTED              = 14,
-    CAST                         = 15,
-    MUL                          = 16,
-    L2_NORMALIZATION             = 17,
+    OEM_OPERATION                = 0,
+    ADD                          = 1,
+    AVERAGE_POOL                 = 2,
+    CAST                         = 3,
+    CONCATENATION                = 4,
+    CONV                         = 5,
+    DEPTHWISE_CONV               = 6,
+    DEPTH_TO_SPACE               = 7,
+    DEQUANTIZE                   = 8,
+    EMBEDDING_LOOKUP             = 9,
+    FAKE_QUANT                   = 10,
+    FLOOR                        = 11,
+    FULLY_CONNECTED              = 12,
+    GATHER                       = 13,
+    HASHTABLE_LOOKUP             = 14,
+    L2_NORMALIZATION             = 15,
+    L2_POOL                      = 16,
+    LOCAL_RESPONSE_NORMALIZATION = 17,
     LOGISTIC                     = 18,
-    RELU                         = 19,
-    RELU6                        = 20,
-    RELU1                        = 21,
-    TANH                         = 22,
-    DEQUANTIZE                   = 23,
-    FLOOR                        = 24,
-    GATHER                       = 25,
-    RESIZE_BILINEAR              = 26,
-    LSH_PROJECTION               = 27,
-    LSTM                         = 28,
-    SVDF                         = 29,
-    RNN                          = 30,
-    N_GRAM                       = 31,
-    EMBEDDING_LOOKUP             = 32,
-    HASHTABLE_LOOKUP             = 33,
+    LSH_PROJECTION               = 19,
+    LSTM                         = 20,
+    MAX_POOL                     = 21,
+    MUL                          = 22,
+    RELU                         = 23,
+    RELU1                        = 24,
+    RELU6                        = 25,
+    RESHAPE                      = 26,
+    RESIZE_BILINEAR              = 27,
+    RNN                          = 28,
+    SOFTMAX                      = 29,
+    SPACE_TO_DEPTH               = 30,
+    SPLIT                        = 31,
+    SVDF                         = 32,
+    TANH                         = 33,
 };
 
 // Two special values that can be used instead of a regular poolIndex.
@@ -180,3 +181,8 @@
     vec<InputOutputInfo> outputs;
     vec<memory> pools;
 };
+
+enum Status : uint32_t {
+    SUCCESS,
+    ERROR,
+};
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index d76b2c3..1efff0e 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -16,7 +16,10 @@
 
 cc_test {
     name: "VtsHalNeuralnetworksV1_0TargetTest",
-    srcs: ["VtsHalNeuralnetworksV1_0TargetTest.cpp"],
+    srcs: [
+        "Event.cpp",
+        "VtsHalNeuralnetworksV1_0TargetTest.cpp",
+    ],
     defaults: ["VtsHalTargetTestDefaults"],
     static_libs: [
         "android.hardware.neuralnetworks@1.0",
diff --git a/neuralnetworks/1.0/vts/functional/Event.cpp b/neuralnetworks/1.0/vts/functional/Event.cpp
new file mode 100644
index 0000000..0fab86b
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Event.cpp
@@ -0,0 +1,76 @@
+#include "Event.h"
+#include <android-base/logging.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace implementation {
+
+Event::Event() : mStatus(Status::WAITING) {}
+
+Event::~Event() {
+    if (mThread.joinable()) {
+        mThread.join();
+    }
+}
+
+Return<void> Event::notify(ReturnedStatus status) {
+    {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mStatus = status == ReturnedStatus::SUCCESS ? Status::SUCCESS : Status::ERROR;
+        if (mStatus == Status::SUCCESS && mCallback != nullptr) {
+            bool success = mCallback();
+            if (!success) {
+                LOG(ERROR) << "Event::notify -- callback failed";
+            }
+        }
+    }
+    mCondition.notify_all();
+    return Void();
+}
+
+Event::Status Event::poll() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    return mStatus;
+}
+
+Event::Status Event::wait() {
+    std::unique_lock<std::mutex> lock(mMutex);
+    mCondition.wait(lock, [this]{return mStatus != Status::WAITING;});
+    return mStatus;
+}
+
+bool Event::on_finish(std::function<bool(void)> callback) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mCallback != nullptr) {
+        LOG(ERROR) << "Event::on_finish -- a callback has already been bound to this event";
+        return false;
+    }
+    if (callback == nullptr) {
+        LOG(ERROR) << "Event::on_finish -- the new callback is invalid";
+        return false;
+    }
+    mCallback = std::move(callback);
+    return true;
+}
+
+bool Event::bind_thread(std::thread&& asyncThread) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mThread.joinable()) {
+        LOG(ERROR) << "Event::bind_thread -- a thread has already been bound to this event";
+        return false;
+    }
+    if (!asyncThread.joinable()) {
+        LOG(ERROR) << "Event::bind_thread -- the new thread is not joinable";
+        return false;
+    }
+    mThread = std::move(asyncThread);
+    return true;
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
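
on_finish and bind_thread are not exercised by the VTS test, but together they show the intended ownership model: the worker thread's lifetime is tied to the event (it is joined in the destructor) and an optional completion hook runs inside notify. A hypothetical usage sketch:

    sp<Event> event = new Event();
    event->on_finish([] {
        // e.g. copy results out of shared memory; return false on failure
        return true;
    });
    std::thread worker([event] {
        // ... asynchronous computation ...
        event->notify(ReturnedStatus::SUCCESS);  // also runs the on_finish hook
    });
    event->bind_thread(std::move(worker));       // thread is joined in ~Event()
    Event::Status status = event->wait();        // SUCCESS or ERROR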
diff --git a/neuralnetworks/1.0/vts/functional/Event.h b/neuralnetworks/1.0/vts/functional/Event.h
new file mode 100644
index 0000000..2e19585
--- /dev/null
+++ b/neuralnetworks/1.0/vts/functional/Event.h
@@ -0,0 +1,192 @@
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
+
+#include <android/hardware/neuralnetworks/1.0/IEvent.h>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <mutex>
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ReturnedStatus = ::android::hardware::neuralnetworks::V1_0::Status;
+
+/**
+ * The Event class is used internally by the Neuralnetworks runtime to
+ * synchronize between different threads. An asynchronous task is launched
+ * paired with an event object. When a client thread requires the output being
+ * processed by the asynchronous task, the client thread can wait for the result
+ * and be blocked until it has completed or a timeout condition has been
+ * reached, or poll the result periodically. Both poll and wait* may safely be
+ * called concurrently, even on the same event. When the server thread has
+ * completed, it should immediately call "notify" to indicate the corresponding
+ * output has been produced and awaken any client threads waiting on the event.
+ *
+ * This class exists to enable synchronization across HIDL. When synchronization
+ * is only required in the same process, consider using std::future, std::mutex,
+ * std::condition_variable, or std::experimental::latch instead.
+ */
+struct Event : public IEvent {
+    Event();
+    ~Event() override;
+
+    /**
+     * Event::Status::WAITING -- The corresponding asynchronous execution has
+     *                           not yet finished.
+     * Event::Status::SUCCESS -- The corresponding asynchronous execution has
+     *                           succeeded and the output is ready to be
+     *                           consumed.
+     * Event::Status::TIMEOUT -- The calling thread has waited longer than the
+     *                           user has specified. This only applies to the
+     *                           methods Event::wait_for and Event::wait_until.
+     * Event::Status::ERROR   -- The corresponding asynchronous execution has
+     *                           failed to properly execute.
+     */
+    enum class Status : uint32_t {
+        WAITING,
+        SUCCESS,
+        TIMEOUT,
+        ERROR,
+    };
+
+    /**
+     * IEvent::notify marks the event with the return status of the
+     * asynchronous call the event is paired with and enables all
+     * prior and future wait calls on the Event object to proceed. The
+     * call to IEvent::notify happens before any wait* calls on
+     * this event return (except in the case of TIMEOUT) and before
+     * any poll calls that see the resulting status. The asynchronous
+     * call the event is paired with must ensure that any update to
+     * state that should be visible to the caller of wait* or poll
+     * happens before the call to IEvent::notify.
+     *
+     * IEvent::notify can be called at most once on a given event.
+     *
+     * @param status Status of the execution: SUCCESS or ERROR.
+     */
+    Return<void> notify(ReturnedStatus status) override;
+
+    /**
+     * Event::poll returns the current status of the event.
+     *
+     * @return Status SUCCESS, ERROR, or WAITING
+     */
+    Event::Status poll();
+
+    /**
+     * Event::wait blocks until the event has been signaled.
+     *
+     * @return Status SUCCESS or ERROR
+     */
+    Event::Status wait();
+
+    /**
+     * Event::wait_for blocks until the event has been signaled or the time
+     * duration from the time the wait_for function was called has expired,
+     * whichever comes first.
+     *
+     * @return Status SUCCESS, ERROR, or TIMEOUT
+     */
+    template<class Rep, class Period>
+    Event::Status wait_for(const std::chrono::duration<Rep,Period>& timeout_duration);
+
+    /**
+     * Event::wait_until blocks until the event has been signaled or a certain
+     * time has been reached, whichever comes first.
+     *
+     * @return Status SUCCESS, ERROR, or TIMEOUT
+     */
+    template<class Clock, class Duration>
+    Event::Status wait_until(const std::chrono::time_point<Clock,Duration>& timeout_time);
+
+    /**
+     * Event::on_finish binds a callback function to the event. The
+     * callback will be executed when IEvent::notify is called, before
+     * any calls to wait* return. (Note that wait_for or wait_until
+     * can return TIMEOUT before IEvent::notify is called for the
+     * first time, and hence before the callback is executed.)
+     *
+     * The callback function must not synchronize with or otherwise
+     * access the event object it is bound to.
+     *
+     * Event::on_finish can be called at most once on a given event.
+     *
+     * @param callback Function to be invoked the first time IEvent::notify is
+     *                 called. Must have a target -- i.e., must not compare equal
+     *                 to nullptr. Callback returns true if it successfully
+     *                 completes, false if it fails.
+     * @return bool True if the callback was successfully bound, false if
+     *              unsuccessful.
+     *
+     * TODO: What if notify has already been called before on_finish?
+     * TODO: Why does the return value of the callback matter?
+     */
+    bool on_finish(std::function<bool(void)> callback);
+
+    /**
+     * Event::bind_thread binds a thread to the event ensuring that the thread
+     * has fully finished and cleaned its resources before the event is
+     * destroyed. The thread should be bound using std::move.
+     *
+     * The bound thread shall not call any Event method with the exception of
+     * IEvent::notify, which it will call when the thread has finished its
+     * computation.
+     *
+     * Event::bind_thread can be called at most once on a given event.
+     *
+     * @param asyncThread Thread to be bound to the event. The thread object
+     *                    must represent a thread of execution -- i.e.,
+     *                    asyncThread.joinable() must be true.
+     * @return bool True if successful, false if thread was not properly bound.
+     */
+    bool bind_thread(std::thread&& asyncThread);
+
+ private:
+    Status                    mStatus;
+    std::mutex                mMutex;
+    std::condition_variable   mCondition;
+    std::function<bool(void)> mCallback;
+    std::thread               mThread;
+};
+
+
+// template function implementations
+
+template<class Rep, class Period>
+Event::Status Event::wait_for(const std::chrono::duration<Rep,Period>& timeout_duration) {
+    std::unique_lock<std::mutex> lock(mMutex);
+    bool notified = mCondition.wait_for(lock, timeout_duration,
+                                        [this]{return mStatus != Status::WAITING;});
+    return notified ? mStatus : Status::TIMEOUT;
+}
+
+template<class Clock, class Duration>
+Event::Status Event::wait_until(const std::chrono::time_point<Clock,Duration>& timeout_time) {
+    std::unique_lock<std::mutex> lock(mMutex);
+    bool notified = mCondition.wait_until(lock, timeout_time,
+                                          [this]{return mStatus != Status::WAITING;});
+    return notified ? mStatus : Status::TIMEOUT;
+}
+
+}  // namespace implementation
+}  // namespace V1_0
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_0_EVENT_H
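
The timed waits give callers a bounded alternative to wait(), which is what the timeout TODO in the test file below refers to. A minimal sketch, assuming an event set up as in the test:

    using namespace std::chrono_literals;
    switch (event->wait_for(500ms)) {            // block for at most 500 ms
        case Event::Status::SUCCESS: /* results ready */              break;
        case Event::Status::ERROR:   /* execution failed */           break;
        case Event::Status::TIMEOUT: /* still running, retry later */ break;
        default:                     /* WAITING is never returned */  break;
    }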
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 84e67cb..6655ad3 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
@@ -16,6 +16,7 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
+#include "Event.h"
 #include "VtsHalNeuralnetworksV1_0TargetTest.h"
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
@@ -29,9 +30,13 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
+NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
+
 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
     // This has to return a "new" object because it is freed inside
     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
@@ -44,6 +49,8 @@
 }
 
 // The main test class for NEURALNETWORK HIDL HAL.
+NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
+
 void NeuralnetworksHidlTest::SetUp() {
     device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
         NeuralnetworksHidlEnvironment::getInstance());
@@ -226,21 +233,32 @@
     float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
     ASSERT_NE(nullptr, inputPtr);
     ASSERT_NE(nullptr, outputPtr);
+    inputMemory->update();
+    outputMemory->update();
     std::copy(inputData.begin(), inputData.end(), inputPtr);
     std::copy(outputData.begin(), outputData.end(), outputPtr);
     inputMemory->commit();
     outputMemory->commit();
 
     // execute request
-    bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools});
+    sp<Event> event = sp<Event>(new Event());
+    ASSERT_NE(nullptr, event.get());
+    bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools},
+                                          event);
     EXPECT_TRUE(success);
+    Event::Status status = event->wait();
+    EXPECT_EQ(Event::Status::SUCCESS, status);
 
     // validate results { 1+5, 2+6, 3+7, 4+8 }
-    outputMemory->update();
+    outputMemory->read();
     std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
+    outputMemory->commit();
     EXPECT_EQ(expectedData, outputData);
 }
 
+// TODO: Add tests for execution failure, or wait_for/wait_until timeout.
+//       Discussion: https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_0
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
index bb0cdaa..fdd3b0b 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.h
@@ -59,15 +59,17 @@
     NeuralnetworksHidlEnvironment& operator=(NeuralnetworksHidlEnvironment&&) = delete;
 
    public:
+    ~NeuralnetworksHidlEnvironment() override;
     static NeuralnetworksHidlEnvironment* getInstance();
-    virtual void registerTestServices() override;
+    void registerTestServices() override;
 };
 
 // The main test class for NEURALNETWORKS HIDL HAL.
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
    public:
-    virtual void SetUp() override;
-    virtual void TearDown() override;
+    ~NeuralnetworksHidlTest() override;
+    void SetUp() override;
+    void TearDown() override;
 
     sp<IDevice> device;
 };