Refactor NNAPI VTS to remove unreasonable dependencies between versions

To make it easier to create the next version of NNAPI, this change
removes the following unreasonable dependencies:
- NNAPI 1.0 VTS depends on NNAPI 1.1 and 1.2
- NNAPI 1.1 VTS depends on NNAPI 1.2

In particular, I made the following changes:
- split GeneratedTestHarness.cpp into three separate implementations,
- created a restricted version of Callbacks.h for 1.0 and 1.1,
- removed the dependency on frameworks/ml/nn/HalInterfaces.h,
- refactored Android.bp files for more autonomy between 1.0, 1.1, and 1.2,
- consolidated some common code into Utils.h,
- created structure for sharing code between VTS versions (VtsHalNeuralNetworksV1_0_utils).
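
As a result, a V1_2 test pulls in only the headers for the HAL versions it
exercises. A minimal sketch of the new include layout (the surrounding test
fixture is assumed; this is illustrative, not part of the change):

    #include "1.0/Utils.h"      // shared helpers from VtsHalNeuralNetworksV1_0_utils
    #include "1.2/Callbacks.h"  // V1_2 PreparedModelCallback / ExecutionCallback
    #include "GeneratedTestHarness.h"
    #include "VtsHalNeuralnetworks.h"

    using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;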

Bug: 74827824
Bug: 124462414
Test: VtsHalNeuralnetworksV1_0TargetTest
Test: VtsHalNeuralnetworksV1_1TargetTest
Test: VtsHalNeuralnetworksV1_1CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_0TargetTest
Test: VtsHalNeuralnetworksV1_2CompatV1_1TargetTest
Change-Id: I4243d0b5e574255cef1070850f4d0a284f65f54e
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 6c26820..b48646f 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -14,10 +14,44 @@
 // limitations under the License.
 //
 
+cc_defaults {
+    name: "VtsHalNeuralNetworksV1_2TargetTestDefaults",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: [
+        "ValidateModel.cpp",
+        "ValidateRequest.cpp",
+        "VtsHalNeuralnetworks.cpp",
+        "Callbacks.cpp",
+        "GeneratedTestHarness.cpp",
+    ],
+    local_include_dirs: ["include"],
+    shared_libs: [
+        "libfmq",
+        "libnativewindow",
+    ],
+    static_libs: [
+        "android.hardware.neuralnetworks@1.0",
+        "android.hardware.neuralnetworks@1.1",
+        "android.hardware.neuralnetworks@1.2",
+        "android.hidl.allocator@1.0",
+        "android.hidl.memory@1.0",
+        "libgmock",
+        "libhidlmemory",
+        "libneuralnetworks_utils",
+        "VtsHalNeuralNetworksV1_0_utils",
+    ],
+    header_libs: [
+        "libneuralnetworks_headers",
+        "libneuralnetworks_generated_test_harness_headers",
+        "libneuralnetworks_generated_tests",
+    ],
+    test_suites: ["general-tests"],
+}
+
 // Tests for V1_0 models using the V1_2 HAL.
 cc_test {
     name: "VtsHalNeuralnetworksV1_2CompatV1_0TargetTest",
-    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
     srcs: [
         "GeneratedTestsV1_0.cpp",
         "ValidateBurst.cpp",
@@ -30,7 +64,7 @@
 // Tests for V1_1 models using the V1_2 HAL.
 cc_test {
     name: "VtsHalNeuralnetworksV1_2CompatV1_1TargetTest",
-    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
     srcs: [
         "GeneratedTestsV1_1.cpp",
         "ValidateBurst.cpp",
@@ -43,11 +77,11 @@
 // Tests for V1_2 models.
 cc_test {
     name: "VtsHalNeuralnetworksV1_2TargetTest",
-    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
     srcs: [
         "BasicTests.cpp",
         "CompilationCachingTests.cpp",
-        "GeneratedTests.cpp",
+        "GeneratedTestsV1_2.cpp",
         "ValidateBurst.cpp",
     ],
     cflags: [
@@ -57,10 +91,10 @@
 
 cc_test {
     name: "PresubmitHalNeuralnetworksV1_2TargetTest",
-    defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
+    defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
     srcs: [
         "BasicTests.cpp",
-        "GeneratedTests.cpp",
+        "GeneratedTestsV1_2.cpp",
         "ValidateBurst.cpp",
     ],
     cflags: [
diff --git a/neuralnetworks/1.2/vts/functional/Callbacks.cpp b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
new file mode 100644
index 0000000..cfaf91d
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/Callbacks.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "1.2/Callbacks.h"
+#include <android-base/logging.h>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace implementation {
+
+CallbackBase::CallbackBase() : mNotified(false) {}
+
+CallbackBase::~CallbackBase() {
+    // Note that we cannot call CallbackBase::join_thread from here:
+    // CallbackBase is intended to be reference counted, and it is possible that
+    // the reference count drops to zero in the bound thread, causing the
+    // bound thread to call this destructor. If a thread tries to join
+    // itself, it throws an exception, producing a message like the
+    // following:
+    //
+    //     terminating with uncaught exception of type std::__1::system_error:
+    //     thread::join failed: Resource deadlock would occur
+}
+
+void CallbackBase::wait() {
+    std::unique_lock<std::mutex> lock(mMutex);
+    mCondition.wait(lock, [this] { return mNotified; });
+    join_thread_locked();
+}
+
+bool CallbackBase::on_finish(std::function<bool(void)> post_work) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mPostWork != nullptr) {
+        LOG(ERROR) << "CallbackBase::on_finish -- a post-work function has already been bound to "
+                      "this callback object";
+        return false;
+    }
+    if (post_work == nullptr) {
+        LOG(ERROR) << "CallbackBase::on_finish -- the new post-work function is invalid";
+        return false;
+    }
+    mPostWork = std::move(post_work);
+    return true;
+}
+
+bool CallbackBase::bind_thread(std::thread&& asyncThread) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mThread.joinable()) {
+        LOG(ERROR) << "CallbackBase::bind_thread -- a thread has already been bound to this "
+                      "callback object";
+        return false;
+    }
+    if (!asyncThread.joinable()) {
+        LOG(ERROR) << "CallbackBase::bind_thread -- the new thread is not joinable";
+        return false;
+    }
+    mThread = std::move(asyncThread);
+    return true;
+}
+
+void CallbackBase::join_thread() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    join_thread_locked();
+}
+
+void CallbackBase::notify() {
+    {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mNotified = true;
+        if (mPostWork != nullptr) {
+            bool success = mPostWork();
+            if (!success) {
+                LOG(ERROR) << "CallbackBase::notify -- post work failed";
+            }
+        }
+    }
+    mCondition.notify_all();
+}
+
+void CallbackBase::join_thread_locked() {
+    if (mThread.joinable()) {
+        mThread.join();
+    }
+}
+
+PreparedModelCallback::PreparedModelCallback()
+    : mErrorStatus(ErrorStatus::GENERAL_FAILURE), mPreparedModel(nullptr) {}
+
+PreparedModelCallback::~PreparedModelCallback() {}
+
+Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    mErrorStatus = errorStatus;
+    mPreparedModel = preparedModel;
+    CallbackBase::notify();
+    return Void();
+}
+
+Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    mErrorStatus = errorStatus;
+    mPreparedModel = preparedModel;
+    CallbackBase::notify();
+    return Void();
+}
+
+ErrorStatus PreparedModelCallback::getStatus() {
+    wait();
+    return mErrorStatus;
+}
+
+sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() {
+    wait();
+    return mPreparedModel;
+}
+
+ExecutionCallback::ExecutionCallback() : mErrorStatus(ErrorStatus::GENERAL_FAILURE) {}
+
+ExecutionCallback::~ExecutionCallback() {}
+
+Return<void> ExecutionCallback::notify(ErrorStatus errorStatus) {
+    mErrorStatus = errorStatus;
+    mOutputShapes = {};
+    mTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
+    CallbackBase::notify();
+    return Void();
+}
+
+Return<void> ExecutionCallback::notify_1_2(ErrorStatus errorStatus,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    mErrorStatus = errorStatus;
+    mOutputShapes = outputShapes;
+    mTiming = timing;
+    CallbackBase::notify();
+    return Void();
+}
+
+ErrorStatus ExecutionCallback::getStatus() {
+    wait();
+    return mErrorStatus;
+}
+
+const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() {
+    wait();
+    return mOutputShapes;
+}
+
+Timing ExecutionCallback::getTiming() {
+    wait();
+    return mTiming;
+}
+
+}  // namespace implementation
+}  // namespace V1_2
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
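
The new callbacks follow the usual HIDL wait/notify pattern: the driver invokes
notify or notify_1_2, and the test blocks on wait() before reading the results.
A hedged usage sketch, assuming a prepared model and request supplied by the
test fixture:

    using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;

    sp<ExecutionCallback> executionCallback = new ExecutionCallback();
    // Launch the asynchronous execution; the driver calls notify_1_2 when it finishes.
    Return<ErrorStatus> launchStatus =
            preparedModel->execute_1_2(request, MeasureTiming::NO, executionCallback);
    ASSERT_TRUE(launchStatus.isOk());
    // wait() blocks until the driver has notified the callback object.
    executionCallback->wait();
    EXPECT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
    const std::vector<OutputShape>& outputShapes = executionCallback->getOutputShapes();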
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 4411b90..9cabb7b 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -27,8 +27,9 @@
 #include <cstdlib>
 #include <random>
 
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
 #include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..c3578cd
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+#include <iostream>
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
+using ::android::hardware::neuralnetworks::V1_0::Request;
+using ::android::hardware::neuralnetworks::V1_0::RequestArgument;
+using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
+using ::android::hardware::neuralnetworks::V1_2::IDevice;
+using ::android::hardware::neuralnetworks::V1_2::IPreparedModel;
+using ::android::hardware::neuralnetworks::V1_2::Model;
+using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using ::android::hidl::memory::V1_0::IMemory;
+using ::test_helper::compare;
+using ::test_helper::expectMultinomialDistributionWithinTolerance;
+using ::test_helper::filter;
+using ::test_helper::for_all;
+using ::test_helper::for_each;
+using ::test_helper::MixedTyped;
+using ::test_helper::MixedTypedExample;
+using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+static bool isZeroSized(const MixedTyped& example, uint32_t index) {
+    for (auto i : example.operandDimensions.at(index)) {
+        if (i == 0) return true;
+    }
+    return false;
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+                                                const Request& request, MeasureTiming measure,
+                                                sp<ExecutionCallback>& callback) {
+    return preparedModel->execute_1_2(request, measure, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(sp<IPreparedModel>& preparedModel,
+                                                const Request& request, MeasureTiming measure,
+                                                hidl_vec<OutputShape>* outputShapes,
+                                                Timing* timing) {
+    ErrorStatus result;
+    Return<void> ret = preparedModel->executeSynchronously(
+            request, measure,
+            [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+                                            const Timing& time) {
+                result = error;
+                *outputShapes = shapes;
+                *timing = time;
+            });
+    if (!ret.isOk()) {
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    return result;
+}
+static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<IPreparedModel>& preparedModel) {
+    return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+}
+enum class Executor { ASYNC, SYNC, BURST };
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+const float kDefaultAtol = 1e-5f;
+const float kDefaultRtol = 1e-5f;
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
+                           Executor executor, MeasureTiming measure, OutputType outputType) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    int example_no = 1;
+    for (auto& example : examples) {
+        SCOPED_TRACE(example_no++);
+        const MixedTyped& inputs = example.operands.first;
+        const MixedTyped& golden = example.operands.second;
+
+        const bool hasFloat16Inputs = !inputs.float16Operands.empty();
+        if (hasRelaxedFloat32Model || hasFloat16Inputs) {
+            // TODO: Adjust the error limit based on testing.
+            // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
+            fpAtol = 5.0f * 0.0009765625f;
+            // Set the relative tolerance to be 5ULP of the corresponding FP precision.
+            fpRtol = 5.0f * 0.0009765625f;
+        }
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+        // This function only partially specifies the metadata (vector of RequestArguments).
+        // The contents are copied over below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                    .location = {.poolIndex = INPUT,
+                                 .offset = 0,
+                                 .length = static_cast<uint32_t>(s)},
+                    .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                    .hasNoValue = true,
+            };
+            inputs_info[index] = s ? arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute offset for inputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        MixedTyped test;  // holding test results
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        resize_accordingly(golden, test);
+        bool sizeLargerThanOne = true;
+        for_all(golden, [&golden, &outputs_info, &outputSize, &outputType, &sizeLargerThanOne](
+                                int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            if (index == 0) {
+                // On OutputType::INSUFFICIENT, set the output operand with index 0 with
+                // buffer size one byte less than needed.
+                if (outputType == OutputType::INSUFFICIENT) {
+                    if (s > 1 && !isZeroSized(golden, index)) {
+                        s -= 1;
+                    } else {
+                        sizeLargerThanOne = false;
+                    }
+                }
+            }
+            RequestArgument arg = {
+                    .location = {.poolIndex = OUTPUT,
+                                 .offset = 0,
+                                 .length = static_cast<uint32_t>(s)},
+                    .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // If output0 does not have size larger than one byte,
+        // we cannot provide an insufficient buffer
+        if (!sizeLargerThanOne && outputType == OutputType::INSUFFICIENT) return;
+        // Compute offset for outputs 1 and so on
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        ASSERT_NE(0ull, pools[INPUT].size());
+        ASSERT_NE(0ull, pools[OUTPUT].size());
+
+        // load data
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
+        ASSERT_NE(nullptr, inputMemory.get());
+        ASSERT_NE(nullptr, outputMemory.get());
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
+        ASSERT_NE(nullptr, inputPtr);
+        ASSERT_NE(nullptr, outputPtr);
+        inputMemory->update();
+        outputMemory->update();
+
+        // Go through all inputs, copy the values
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+
+        inputMemory->commit();
+        outputMemory->commit();
+
+        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
+
+        ErrorStatus executionStatus;
+        hidl_vec<OutputShape> outputShapes;
+        Timing timing;
+        switch (executor) {
+            case Executor::ASYNC: {
+                SCOPED_TRACE("asynchronous");
+
+                // launch execution
+                sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+                ASSERT_NE(nullptr, executionCallback.get());
+                Return<ErrorStatus> executionLaunchStatus =
+                        ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+                ASSERT_TRUE(executionLaunchStatus.isOk());
+                EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+                // retrieve execution status
+                executionCallback->wait();
+                executionStatus = executionCallback->getStatus();
+                outputShapes = executionCallback->getOutputShapes();
+                timing = executionCallback->getTiming();
+
+                break;
+            }
+            case Executor::SYNC: {
+                SCOPED_TRACE("synchronous");
+
+                // execute
+                Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+                        preparedModel, request, measure, &outputShapes, &timing);
+                ASSERT_TRUE(executionReturnStatus.isOk());
+                executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+                break;
+            }
+            case Executor::BURST: {
+                SCOPED_TRACE("burst");
+
+                // create burst
+                const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
+                        CreateBurst(preparedModel);
+                ASSERT_NE(nullptr, controller.get());
+
+                // create memory keys
+                std::vector<intptr_t> keys(request.pools.size());
+                for (size_t i = 0; i < keys.size(); ++i) {
+                    keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+                }
+
+                // execute burst
+                std::tie(executionStatus, outputShapes, timing) =
+                        controller->compute(request, measure, keys);
+
+                break;
+            }
+        }
+
+        if (outputType != OutputType::FULLY_SPECIFIED &&
+            executionStatus == ErrorStatus::GENERAL_FAILURE) {
+            LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                         "execute model that it does not support.";
+            std::cout << "[          ]   Early termination of test because vendor service cannot "
+                         "execute model that it does not support."
+                      << std::endl;
+            GTEST_SKIP();
+        }
+        if (measure == MeasureTiming::NO) {
+            EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+            EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+        } else {
+            if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+                EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+            }
+        }
+
+        switch (outputType) {
+            case OutputType::FULLY_SPECIFIED:
+                // If the model output operands are fully specified, outputShapes must be
+                // either empty, or have the same number of elements as the number of outputs.
+                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+                ASSERT_TRUE(outputShapes.size() == 0 ||
+                            outputShapes.size() == test.operandDimensions.size());
+                break;
+            case OutputType::UNSPECIFIED:
+                // If the model output operands are not fully specified, outputShapes must have
+                // the same number of elements as the number of outputs.
+                ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+                ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+                break;
+            case OutputType::INSUFFICIENT:
+                ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+                ASSERT_EQ(outputShapes.size(), test.operandDimensions.size());
+                ASSERT_FALSE(outputShapes[0].isSufficient);
+                return;
+        }
+        // Go through all outputs, overwrite output dimensions with returned output shapes
+        if (outputShapes.size() > 0) {
+            for_each<uint32_t>(test.operandDimensions,
+                               [&outputShapes](int idx, std::vector<uint32_t>& dim) {
+                                   dim = outputShapes[idx].dimensions;
+                               });
+        }
+
+        // validate results
+        outputMemory->read();
+        copy_back(&test, outputs_info, outputPtr);
+        outputMemory->commit();
+        // Filter out don't cares
+        MixedTyped filtered_golden = filter(golden, is_ignored);
+        MixedTyped filtered_test = filter(test, is_ignored);
+
+        // We want "close-enough" results for float
+        compare(filtered_golden, filtered_test, fpAtol, fpRtol);
+
+        if (example.expectedMultinomialDistributionTolerance > 0) {
+            expectMultinomialDistributionWithinTolerance(test, example);
+        }
+    }
+}
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
+                           OutputType outputType) {
+    EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
+                          kDefaultRtol, executor, measure, outputType);
+}
+
+void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model, bool testDynamicOutputShape) {
+    if (testDynamicOutputShape) {
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::NO, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::NO, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::YES, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::YES, OutputType::UNSPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::NO, OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::NO, OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::YES, OutputType::INSUFFICIENT);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::YES, OutputType::INSUFFICIENT);
+    } else {
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::NO, OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::ASYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::SYNC, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+        EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model,
+                              Executor::BURST, MeasureTiming::YES, OutputType::FULLY_SPECIFIED);
+    }
+}
+
+void PrepareModel(const sp<IDevice>& device, const Model& model,
+                  sp<IPreparedModel>* preparedModel) {
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedCall = device->getSupportedOperations_1_2(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedCall.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    sp<V1_0::IPreparedModel> preparedModelV1_0 = preparedModelCallback->getPreparedModel();
+    *preparedModel = IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+
+    // early termination if vendor service cannot fully prepare model
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Early termination of test because vendor service cannot "
+                     "prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
+void Execute(const sp<IDevice>& device, std::function<Model(void)> create_model,
+             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+             bool testDynamicOutputShape) {
+    Model model = create_model();
+    sp<IPreparedModel> preparedModel = nullptr;
+    PrepareModel(device, model, &preparedModel);
+    if (preparedModel == nullptr) {
+        GTEST_SKIP();
+    }
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, testDynamicOutputShape);
+}
+
+}  // namespace generated_tests
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000..30e5578
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
+
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <functional>
+#include <vector>
+#include "TestHarness.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace generated_tests {
+
+using ::test_helper::MixedTypedExample;
+
+void PrepareModel(const sp<V1_2::IDevice>& device, const V1_2::Model& model,
+                  sp<V1_2::IPreparedModel>* preparedModel);
+
+void EvaluatePreparedModel(sp<V1_2::IPreparedModel>& preparedModel,
+                           std::function<bool(int)> is_ignored,
+                           const std::vector<MixedTypedExample>& examples,
+                           bool hasRelaxedFloat32Model, bool testDynamicOutputShape);
+
+void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model,
+             std::function<bool(int)> is_ignored, const std::vector<MixedTypedExample>& examples,
+             bool testDynamicOutputShape = false);
+
+}  // namespace generated_tests
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_GENERATED_TEST_HARNESS_H
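
The generated test sources are expected to drive this harness through Execute;
a hedged sketch of a generated test body (create_model, is_ignored, and
get_examples are hypothetical names standing in for the generated helpers):

    TEST_F(NeuralnetworksHidlTest, hypothetical_generated_model) {
        generated_tests::Execute(device,          // sp<V1_2::IDevice> from the fixture
                                 create_model,    // builds the V1_2::Model under test
                                 is_ignored,      // marks outputs excluded from comparison
                                 get_examples()); // golden MixedTypedExample data
    }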
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
index 990cab9..d48c73e 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_0.cpp
@@ -16,17 +16,17 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
index fa6d54d..1adb371 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_1.cpp
@@ -16,17 +16,17 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
similarity index 97%
rename from neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
rename to neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
index 5af3255..f9cecf8 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestsV1_2.cpp
@@ -16,17 +16,17 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "GeneratedTestHarness.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 8c6391e..4d6bdbb 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -18,7 +18,7 @@
 
 #include "VtsHalNeuralnetworks.h"
 
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "ExecutionBurstServer.h"
 #include "TestHarness.h"
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index a0b6d9a..78bb194 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -16,10 +16,10 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
 #include "VtsHalNeuralnetworks.h"
 
-#include "Callbacks.h"
-
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -41,10 +41,10 @@
                                            const Model& model) {
     SCOPED_TRACE(message + " [getSupportedOperations_1_2]");
 
-    Return<void> ret =
-        device->getSupportedOperations_1_2(model, [&](ErrorStatus status, const hidl_vec<bool>&) {
-            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
-        });
+    Return<void> ret = device->getSupportedOperations_1_2(
+            model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+                EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            });
     EXPECT_TRUE(ret.isOk());
 }
 
@@ -87,36 +87,16 @@
     validatePrepareModel(device, message, model, preference);
 }
 
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
-    if (vec) {
-        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
-        vec->resize(vec->size() - 1);
-    }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
-    // assume vec is valid
-    const uint32_t index = vec->size();
-    vec->resize(index + 1);
-    (*vec)[index] = value;
-    return index;
-}
-
 static uint32_t addOperand(Model* model) {
     return hidl_vec_push_back(&model->operands,
                               {
-                                  .type = OperandType::INT32,
-                                  .dimensions = {},
-                                  .numberOfConsumers = 0,
-                                  .scale = 0.0f,
-                                  .zeroPoint = 0,
-                                  .lifetime = OperandLifeTime::MODEL_INPUT,
-                                  .location = {.poolIndex = 0, .offset = 0, .length = 0},
+                                      .type = OperandType::INT32,
+                                      .dimensions = {},
+                                      .numberOfConsumers = 0,
+                                      .scale = 0.0f,
+                                      .zeroPoint = 0,
+                                      .lifetime = OperandLifeTime::MODEL_INPUT,
+                                      .location = {.poolIndex = 0, .offset = 0, .length = 0},
                               });
 }
 
@@ -243,7 +223,7 @@
         case OperandType::TENSOR_QUANT8_ASYMM:
             return {-1, 256};
         case OperandType::TENSOR_QUANT8_SYMM:
-          return {-129, -1, 1, 128};
+            return {-129, -1, 1, 128};
         case OperandType::TENSOR_QUANT16_ASYMM:
             return {-1, 65536};
         case OperandType::TENSOR_QUANT16_SYMM:
@@ -256,7 +236,7 @@
 static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operand = 0; operand < model.operands.size(); ++operand) {
         const std::vector<int32_t> invalidZeroPoints =
-            getInvalidZeroPoints(model.operands[operand].type);
+                getInvalidZeroPoints(model.operands[operand].type);
         for (int32_t invalidZeroPoint : invalidZeroPoints) {
             const std::string message = "mutateOperandZeroPointTest: operand " +
                                         std::to_string(operand) + " has zero point of " +
@@ -292,13 +272,13 @@
         case OperandType::TENSOR_FLOAT16:
         case OperandType::TENSOR_FLOAT32:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = 0.0f;
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_INT32:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.zeroPoint = 0;
             break;
         case OperandType::TENSOR_QUANT8_ASYMM:
@@ -306,19 +286,20 @@
         case OperandType::TENSOR_QUANT16_ASYMM:
         case OperandType::TENSOR_QUANT16_SYMM:
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
             break;
         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
             newOperand.dimensions =
-                operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+                    operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
             newOperand.scale = 0.0f;
             newOperand.zeroPoint = 0;
 
             SymmPerChannelQuantParams channelQuant;
             channelQuant.channelDim = 0;
             channelQuant.scales = hidl_vec<float>(
-                operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0]) : 0);
+                    operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
+                                                   : 0);
             for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
                 channelQuant.scales[i] = 1.0f;
             }
@@ -435,7 +416,7 @@
                                         std::to_string(invalidOperationType);
             validate(device, message, model, [operation, invalidOperationType](Model* model) {
                 model->operations[operation].type =
-                    static_cast<OperationType>(invalidOperationType);
+                        static_cast<OperationType>(invalidOperationType);
             });
         }
     }
@@ -690,7 +671,7 @@
 static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
     for (size_t operation = 0; operation < model.operations.size(); ++operation) {
         const std::string message =
-            "addOperationOutputTest: operation " + std::to_string(operation);
+                "addOperationOutputTest: operation " + std::to_string(operation);
         validate(device, message, model, [operation](Model* model) {
             uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
             hidl_vec_push_back(&model->operations[operation].outputs, index);
@@ -702,14 +683,14 @@
 ///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
 
 static const int32_t invalidExecutionPreferences[] = {
-    static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
-    static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
+        static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1,        // lower bound
+        static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1,  // upper bound
 };
 
 static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
     for (int32_t preference : invalidExecutionPreferences) {
         const std::string message =
-            "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+                "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
         validate(device, message, model, [](Model*) {},
                  static_cast<ExecutionPreference>(preference));
     }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index e935aaa..a7e8328 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -16,17 +16,18 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworks.h"
-
-#include "Callbacks.h"
-#include "ExecutionBurstController.h"
-#include "TestHarness.h"
-#include "Utils.h"
-
 #include <android-base/logging.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
 
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -137,26 +138,6 @@
     }
 }
 
-// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
-// so this is efficiently accomplished by moving the element to the end and
-// resizing the hidl_vec to one less.
-template <typename Type>
-static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
-    if (vec) {
-        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
-        vec->resize(vec->size() - 1);
-    }
-}
-
-template <typename Type>
-static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
-    // assume vec is valid
-    const uint32_t index = vec->size();
-    vec->resize(index + 1);
-    (*vec)[index] = value;
-    return index;
-}
-
 ///////////////////////// REMOVE INPUT ////////////////////////////////////
 
 static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
@@ -197,11 +178,13 @@
         for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
             if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
             RequestArgument arg = {
-                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
-                .dimensions = {},
+                    .location = {.poolIndex = INPUT,
+                                 .offset = 0,
+                                 .length = static_cast<uint32_t>(s)},
+                    .dimensions = {},
             };
             RequestArgument arg_empty = {
-                .hasNoValue = true,
+                    .hasNoValue = true,
             };
             inputs_info[index] = s ? arg : arg_empty;
             inputSize += s;
@@ -219,8 +202,10 @@
         for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
             if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
             RequestArgument arg = {
-                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
-                .dimensions = {},
+                    .location = {.poolIndex = OUTPUT,
+                                 .offset = 0,
+                                 .length = static_cast<uint32_t>(s)},
+                    .dimensions = {},
             };
             outputs_info[index] = arg;
             outputSize += s;
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 666f9b5..bd24edc 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -20,7 +20,7 @@
 
 #include <android-base/logging.h>
 
-#include "Callbacks.h"
+#include "1.2/Callbacks.h"
 
 namespace android {
 namespace hardware {
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 80e810a..90dfe25 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -14,24 +14,23 @@
  * limitations under the License.
  */
 
-#ifndef VTS_HAL_NEURALNETWORKS_V1_2_H
-#define VTS_HAL_NEURALNETWORKS_V1_2_H
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
 
-#include "Callbacks.h"
-
+#include <VtsHalHidlTargetTestBase.h>
+#include <VtsHalHidlTargetTestEnvBase.h>
+#include <android-base/macros.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
 #include <android/hardware/neuralnetworks/1.1/types.h>
 #include <android/hardware/neuralnetworks/1.2/IDevice.h>
 #include <android/hardware/neuralnetworks/1.2/types.h>
-
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <android-base/macros.h>
 #include <gtest/gtest.h>
+
 #include <iostream>
 #include <vector>
 
+#include "1.2/Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -50,7 +49,7 @@
     NeuralnetworksHidlEnvironment();
     ~NeuralnetworksHidlEnvironment() override;
 
-   public:
+  public:
     static NeuralnetworksHidlEnvironment* getInstance();
     void registerTestServices() override;
 };
@@ -59,30 +58,30 @@
 class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
     DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
 
-   public:
+  public:
     NeuralnetworksHidlTest();
     ~NeuralnetworksHidlTest() override;
     void SetUp() override;
     void TearDown() override;
 
-   protected:
+  protected:
     sp<IDevice> device;
 };
 
 // Tag for the validation tests
 class ValidationTest : public NeuralnetworksHidlTest {
-   protected:
-     void validateEverything(const Model& model, const std::vector<Request>& requests);
-     void validateFailure(const Model& model, const std::vector<Request>& requests);
+  protected:
+    void validateEverything(const Model& model, const std::vector<Request>& requests);
+    void validateFailure(const Model& model, const std::vector<Request>& requests);
 
-   private:
-     void validateModel(const Model& model);
-     void validateRequests(const sp<IPreparedModel>& preparedModel,
-                           const std::vector<Request>& requests);
-     void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
-                                 const std::vector<Request>& requests);
-     void validateBurst(const sp<IPreparedModel>& preparedModel,
-                        const std::vector<Request>& requests);
+  private:
+    void validateModel(const Model& model);
+    void validateRequests(const sp<IPreparedModel>& preparedModel,
+                          const std::vector<Request>& requests);
+    void validateRequestFailure(const sp<IPreparedModel>& preparedModel,
+                                const std::vector<Request>& requests);
+    void validateBurst(const sp<IPreparedModel>& preparedModel,
+                       const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
@@ -93,7 +92,7 @@
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<IPreparedModel> getPreparedModel_1_2(
-    const sp<V1_2::implementation::PreparedModelCallback>& callback);
+        const sp<V1_2::implementation::PreparedModelCallback>& callback);
 
 }  // namespace functional
 }  // namespace vts
@@ -110,4 +109,4 @@
 
 }  // namespace android::hardware::neuralnetworks::V1_0
 
-#endif  // VTS_HAL_NEURALNETWORKS_V1_2_H
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
new file mode 100644
index 0000000..212a887
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/include/1.2/Callbacks.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H
+
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <hidl/Status.h>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace implementation {
+
+using V1_0::ErrorStatus;
+
+/**
+ * The CallbackBase class is used internally by the NeuralNetworks runtime to
+ * synchronize between different threads. An asynchronous task is launched
+ * paired with a callback object. When a client thread requires the output being
+ * generated by the asynchronous task, the client thread can wait for the result
+ * and be blocked until it has completed or a timeout condition has been
+ * reached. Any wait* may safely be called concurrently, even on the same
+ * callback object. When the asynchronous task has finished its workload, it
+ * must immediately call "notify". If the asynchronous task has failed to launch,
+ * the function that tried to launch the asynchronous task must immediately call
+ * "notify". This "notify" call awakens any client threads waiting on the
+ * callback object.
+ *
+ * The CallbackBase class implements some of the base synchronization common to
+ * both PrepareModelCallback and ExecutionCallback. For consistency, any HIDL
+ * callback class must inherit from CallbackBase as well as the HIDL callback
+ * interface it implements.
+ *
+ * This class exists to enable synchronization across HIDL. When synchronization
+ * is only required in the same process, consider using std::future, std::mutex,
+ * std::condition_variable, or std::experimental::latch instead.
+ */
+class CallbackBase {
+  public:
+    CallbackBase();
+    ~CallbackBase();
+
+    /**
+     * CallbackBase::wait blocks until notify has been called on the callback
+     * object.
+     */
+    void wait();
+
+    /**
+     * CallbackBase::wait_for blocks until notify has been called on the
+     * callback object or the time duration from the time the wait_for function
+     * was called has expired, whichever comes first.
+     *
+     * @return Status std::cv_status::no_timeout if the callback was notified
+     *                before the time duration expired, std::cv_status::timeout
+     *                otherwise.
+     */
+    template <class Rep, class Period>
+    std::cv_status wait_for(const std::chrono::duration<Rep, Period>& timeout_duration);
+
+    /**
+     * CallbackBase::on_finish binds a function to the callback object. This
+     * bound function will be executed when CallbackBase::notify is called,
+     * before any calls to wait* return. (Note that CallbackBase::wait_for can
+     * return std::cv_status::timeout before CallbackBase::notify is called for
+     * the first time, and hence before the bound function is executed.)
+     *
+     * The bound function must not synchronize with or otherwise access the
+     * callback object it is bound to, as this could cause a deadlock.
+     *
+     * CallbackBase::on_finish can be called at most once on a given callback
+     * object, and the call to CallbackBase::on_finish must finish before
+     * CallbackBase::notify is called.
+     *
+     * @param post_work Function to be invoked the first time
+     *                  CallbackBase::notify is called. Must have a target --
+     *                  i.e., must not compare equal to nullptr. post_work
+     *                  returns true if it successfully completes, false if it
+     *                  fails.
+     * @return bool True if the function was successfully bound, false if
+     *              unsuccessful.
+     *
+     * TODO: Why does the return value of the callback matter?
+     */
+    bool on_finish(std::function<bool(void)> post_work);
+
+    /**
+     * CallbackBase::bind_thread binds a thread to the event for later use by
+     * CallbackBase::join_thread.
+     *
+     * The thread must be passed using std::move.
+     *
+     * Once a thread is bound with CallbackBase::bind_thread, the client code
+     * should ensure that one of the following occurs before the event is
+     * destroyed:
+     * - CallbackBase::join_thread has been called.
+     * - CallbackBase::wait has been called.
+     * - CallbackBase::wait_for has been called and returned other than
+     *   std::cv_status::no_timeout.
+     *
+     * The bound thread shall not call any CallbackBase method with the
+     * exception of CallbackBase::notify, which it must call when the thread has
+     * finished its computation.
+     *
+     * CallbackBase::bind_thread can be called at most once on a given callback
+     * object.
+     *
+     * @param asyncThread Thread to be bound to the callback object. The thread
+     *                    object must represent a thread of execution -- i.e.,
+     *                    asyncThread.joinable() must be true.
+     * @return bool True if successful, false if thread was not properly bound.
+     */
+    bool bind_thread(std::thread&& asyncThread);
+
+    /**
+     * CallbackBase::join_thread ensures that the thread (if any) bound to this
+     * event with CallbackBase::bind_thread has fully finished and cleaned its
+     * resources. It is legal to call this function multiple times, concurrently
+     * or sequentially.
+     */
+    void join_thread();
+
+  protected:
+    /**
+     * CallbackBase::notify enables all prior and future wait* calls on the
+     * callback object to proceed. The call to CallbackBase::notify happens
+     * before any wait* calls on this callback object return (except in the case
+     * of wait_for timing out). The asynchronous call the callback object is
+     * paired with must ensure that any update to state that should be visible
+     * to the caller of wait* happens before the call to CallbackBase::notify.
+     *
+     * CallbackBase::notify must be called exactly once on a given callback
+     * object.
+     */
+    void notify();
+
+  private:
+    // Same as CallbackBase::join_thread but assumes we already hold a lock on
+    // mMutex.
+    void join_thread_locked();
+
+    bool mNotified;
+    std::mutex mMutex;
+    std::condition_variable mCondition;
+    std::function<bool(void)> mPostWork;
+    std::thread mThread;
+};
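+
+// Illustrative sketch of the intended launch/wait pattern (a minimal example only;
+// `callback` and `worker` are placeholder names, and ExecutionCallback is the
+// subclass defined later in this file):
+//
+//     sp<ExecutionCallback> callback = new ExecutionCallback();
+//     std::thread worker([callback] {
+//         // ... perform the asynchronous work (elided) ...
+//         callback->notify(ErrorStatus::NONE);  // calls CallbackBase::notify internally
+//     });
+//     callback->bind_thread(std::move(worker));
+//     callback->wait();  // returns once notify has been called; also joins the bound thread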
+
+/**
+ * The PreparedModelCallback class is used to receive the error status of
+ * preparing a model as well as the prepared model from a task executing
+ * asynchronously with respect to the runtime. If a calling thread calls wait*
+ * or get* on a PreparedModelCallback object and the corresponding asynchronous
+ * task has not finished preparing the model, the calling thread will block
+ * until the asynchronous task has either called notify or notify_1_2. For more
+ * information on the synchronization behavior, refer to the CallbackBase class.
+ *
+ * This class inherits the basic blocking and signaling calls from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
+ * IPreparedModelCallback. This callback object is passed as an argument to
+ * IDevice::prepareModel.
+ */
+class PreparedModelCallback : public CallbackBase, public IPreparedModelCallback {
+  public:
+    PreparedModelCallback();
+    ~PreparedModelCallback() override;
+
+    /**
+     * IPreparedModelCallback::notify and IPreparedModelCallback::notify_1_2
+     * mark the callback object with the return status of the asynchronous
+     * model preparation along with the prepared model, and call
+     * CallbackBase::notify, enabling all prior and future wait* calls on the
+     * PreparedModelCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
+     *
+     * Either IPreparedModelCallback::notify or IPreparedModelCallback::notify_1_2
+     * must be called exactly once on a given PreparedModelCallback object.
+     *
+     * @param status Error status returned from asynchronously preparing the
+     *               model; will be:
+     *               - NONE if the asynchronous preparation was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if there is an unspecified error
+     *               - INVALID_ARGUMENT if the input model is invalid
+     * @param preparedModel Returned model that has been prepared for execution,
+     *                      nullptr if the model was unable to be prepared.
+     */
+    Return<void> notify(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(ErrorStatus status,
+                            const sp<V1_2::IPreparedModel>& preparedModel) override;
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by IDevice::prepareModel. If IDevice::prepareModel has not finished
+     * asynchronously preparing the model, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * @return status Error status returned from asynchronously preparing the
+     *                model; will be:
+     *                - NONE if the asynchronous preparation was successful
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if there is an unspecified error
+     *                - INVALID_ARGUMENT if the input model is invalid
+     */
+    ErrorStatus getStatus();
+
+    /**
+     * Retrieves the model that has been prepared for execution from the
+     * asynchronous task launched by IDevice::prepareModel. If
+     * IDevice::prepareModel has not finished asynchronously preparing the
+     * model, this call will block until the asynchronous task notifies the
+     * object.
+     *
+     * @return preparedModel Returned model that has been prepared for
+     *                       execution, nullptr if the model was unable to be
+     *                       prepared.
+     */
+    sp<V1_0::IPreparedModel> getPreparedModel();
+
+  private:
+    ErrorStatus mErrorStatus;
+    sp<V1_0::IPreparedModel> mPreparedModel;
+};
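+
+// Illustrative usage sketch (a minimal example only; the call that hands `callback`
+// to the driver is elided, and `callback` is a placeholder name):
+//
+//     sp<PreparedModelCallback> callback = new PreparedModelCallback();
+//     // ... pass `callback` to IDevice::prepareModel or IDevice::prepareModel_1_2 ...
+//     callback->wait();
+//     EXPECT_EQ(ErrorStatus::NONE, callback->getStatus());
+//     sp<V1_0::IPreparedModel> preparedModel = callback->getPreparedModel();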
+
+/**
+ * The ExecutionCallback class is used to receive the error status of the
+ * execution from a task executing asynchronously with respect to the runtime.
+ * If a calling thread calls wait* or get* on an ExecutionCallback object and
+ * the corresponding asynchronous task has not finished the execution, the
+ * calling thread will block until the asynchronous task has either called notify
+ * or notify_1_2. For more information on the synchronization behavior, refer to
+ * the CallbackBase class.
+ *
+ * This class inherits the basic blocking and signaling calls from
+ * CallbackBase, and implements the HIDL notify and notify_1_2 calls from
+ * IExecutionCallback. This callback object is passed as an argument to
+ * IPreparedModel::execute.
+ */
+class ExecutionCallback : public CallbackBase, public IExecutionCallback {
+  public:
+    ExecutionCallback();
+    ~ExecutionCallback() override;
+
+    /**
+     * IExecutionCallback::notify and IExecutionCallback::notify_1_2 mark the
+     * callback object with the return status of the asynchronous execution that
+     * held this callback and enable all prior and future wait* calls on the
+     * ExecutionCallback object to proceed. For more information on the
+     * synchronization behavior, refer to the CallbackBase class.
+     *
+     * Either IExecutionCallback::notify or IExecutionCallback::notify_1_2 must
+     * be called exactly once on a given ExecutionCallback object.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *               (if the launch fails) or from the asynchronous task itself
+     *               (if the launch succeeds). Must be:
+     *               - NONE if the asynchronous execution was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if there is an unspecified error
+     *               - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is
+     *                 not large enough to store the resultant values
+     *               - INVALID_ARGUMENT if the input request is invalid
+     */
+    Return<void> notify(ErrorStatus status) override;
+
+    /**
+     * Similar to IExecutionCallback::notify, but for V1_2::IPreparedModel to
+     * also notify output shapes along with error status.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *               (if the launch fails) or from the asynchronous task itself
+     *               (if the launch succeeds). Must be:
+     *               - NONE if the asynchronous execution was successful
+     *               - DEVICE_UNAVAILABLE if driver is offline or busy
+     *               - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                 unspecified error
+     *               - OUTPUT_INSUFFICIENT_SIZE if at least one output
+     *                 operand buffer is not large enough to store the
+     *                 corresponding output
+     *               - INVALID_ARGUMENT if one of the input arguments to
+     *                 prepareModel is invalid
+     * @param outputShapes A list of shape information of model output operands.
+     *                     The index into "outputShapes" corresponds to the index
+     *                     of the output operand in the Request outputs vector.
+     *                     outputShapes must be empty unless the status is either
+     *                     NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed when
+     *                launching the execution and status is NONE, all times must
+     *                be reported as UINT64_MAX. A driver may choose to report
+     *                any time as UINT64_MAX, indicating that particular measurement is
+     *                not available.
+     */
+    Return<void> notify_1_2(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                            const Timing& timing) override;
+
+    // An overload of the latest notify interface to hide the version from the caller.
+    Return<void> notify(ErrorStatus status, const hidl_vec<OutputShape>& outputShapes,
+                        const Timing& timing) {
+        return notify_1_2(status, outputShapes, timing);
+    }
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by either IPreparedModel::execute or IPreparedModel::execute_1_2. If
+     * IPreparedModel::execute or IPreparedModel::execute_1_2 has not finished
+     * asynchronously executing, this call will block until the asynchronous task
+     * notifies the object.
+     *
+     * @return status Error status returned from launching the asynchronous task
+     *                (if the launch fails) or from the asynchronous task itself
+     *                (if the launch succeeds). Must be:
+     *                - NONE if the asynchronous execution was successful
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if the asynchronous task resulted in an
+     *                  unspecified error
+     *                - OUTPUT_INSUFFICIENT_SIZE if at least one output
+     *                  operand buffer is not large enough to store the
+     *                  corresponding output
+     *                - INVALID_ARGUMENT if one of the input arguments to
+     *                  prepareModel is invalid
+     */
+    ErrorStatus getStatus();
+
+    /**
+     * Retrieves the output shapes returned from the asynchronous task launched
+     * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
+     * asynchronously executing, this call will block until the asynchronous task
+     * notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, an empty vector
+     * will be returned.
+     *
+     * @return outputShapes A list of shape information of model output operands.
+     *                      The index into "outputShapes" corresponds to the index
+     *                      of the output operand in the Request outputs vector.
+     *                      outputShapes must be empty unless the status is either
+     *                      NONE or OUTPUT_INSUFFICIENT_SIZE.
+     */
+    const std::vector<OutputShape>& getOutputShapes();
+
+    /**
+     * Retrieves the duration of execution of the asynchronous task launched
+     * by IPreparedModel::execute_1_2. If IPreparedModel::execute_1_2 has not finished
+     * asynchronously executing, this call will block until the asynchronous task
+     * notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, every time
+     * must be UINT64_MAX.
+     *
+     * @return timing Duration of the execution. Every time must be UINT64_MAX unless
+     *                the status is NONE.
+     */
+    Timing getTiming();
+
+  private:
+    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
+    std::vector<OutputShape> mOutputShapes = {};
+    Timing mTiming = {};
+};
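+
+// Illustrative usage sketch (a minimal example only; the call that hands `callback`
+// to the driver is elided):
+//
+//     sp<ExecutionCallback> callback = new ExecutionCallback();
+//     // ... pass `callback` to IPreparedModel::execute or IPreparedModel::execute_1_2 ...
+//     callback->wait();
+//     if (callback->getStatus() == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+//         const std::vector<OutputShape>& shapes = callback->getOutputShapes();
+//         // ... resize the output buffers according to `shapes` and retry (elided) ...
+//     }
+//     Timing timing = callback->getTiming();  // times are UINT64_MAX unless measured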
+
+// template function implementation(s) below this point
+
+template <class Rep, class Period>
+std::cv_status CallbackBase::wait_for(const std::chrono::duration<Rep, Period>& timeout_duration) {
+    std::unique_lock<std::mutex> lock(mMutex);
+    // condition_variable::wait_for with a predicate returns a bool: true if the
+    // predicate was satisfied (i.e., notify was called), false if the timeout
+    // expired first. Translate that back into std::cv_status for the caller.
+    const bool notified =
+            mCondition.wait_for(lock, timeout_duration, [this] { return mNotified; });
+    if (notified) {
+        join_thread_locked();
+    }
+    return notified ? std::cv_status::no_timeout : std::cv_status::timeout;
+}
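+
+// Illustrative timeout usage of wait_for (a minimal example only; the ten second
+// deadline is arbitrary):
+//
+//     if (callback->wait_for(std::chrono::seconds(10)) == std::cv_status::timeout) {
+//         // the driver never called notify within the deadline
+//     }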
+
+}  // namespace implementation
+}  // namespace V1_2
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_CALLBACKS_H