Update NNAPI 1.3 VTS tests with new types

Bug: 136739795
Bug: 142902514
Bug: 145300530
Test: mma
Test: atest VtsHalNeuralnetworksV1_3TargetTest
Change-Id: Ie76da9dc9d6993a56bf644cfe20c5f5b421672c9
Merged-In: Ie76da9dc9d6993a56bf644cfe20c5f5b421672c9
(cherry picked from commit 9449a28b2f905279550dd3fbe3602cb5207f3313)
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 4909214..599fd1d 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -272,7 +272,7 @@
             int n;
             std::tie(n, outputShapes, timing, std::ignore) =
                     controller->compute(request, testConfig.measureTiming, keys);
-            executionStatus = nn::convertResultCodeToErrorStatus(n);
+            executionStatus = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
 
             break;
         }
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 416744f..ec9629b 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -296,7 +296,8 @@
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    const ErrorStatus statusRegular =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
@@ -312,7 +313,7 @@
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
+    const ErrorStatus statusSmall = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
     EXPECT_NE(ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 2d83b81..7b5ff9b 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -107,7 +107,7 @@
 
         // execute and verify
         const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
-        const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+        const ErrorStatus status = nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
         EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
         EXPECT_EQ(outputShapes.size(), 0);
         EXPECT_TRUE(badTiming(timing));
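Note for reviewers (not part of the patch): the three 1.2 changes above share one pattern. nn::convertResultCodeToErrorStatus() appears to now return the V1_3 error enum, so the 1.2 tests downcast it with nn::convertToV1_0() before comparing against V1_0::ErrorStatus values. A minimal sketch under that assumption, with n being the int result code from ExecutionBurstController::compute():

    // Two-step conversion: int result code -> V1_3::ErrorStatus -> V1_0::ErrorStatus.
    const V1_0::ErrorStatus status =
            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(n));
    EXPECT_EQ(V1_0::ErrorStatus::INVALID_ARGUMENT, status);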
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index e2795de..e7a9fd3 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -15,11 +15,12 @@
 //
 
 cc_library_static {
-    name: "VtsHalNeuralNetworksV1_3Callbacks",
+    name: "VtsHalNeuralNetworksV1_3_utils",
     defaults: ["VtsHalTargetTestDefaults"],
     export_include_dirs: ["include"],
     srcs: [
         "Callbacks.cpp",
+        "Utils.cpp",
     ],
     static_libs: [
         "android.hardware.neuralnetworks@1.0",
@@ -29,7 +30,7 @@
     ],
     header_libs: [
         "libbase_headers",
-    ]
+    ],
 }
 
 cc_test {
@@ -50,6 +51,9 @@
         "libnativewindow",
     ],
     static_libs: [
+        "VtsHalNeuralNetworksV1_0_utils",
+        "VtsHalNeuralNetworksV1_2Callbacks",
+        "VtsHalNeuralNetworksV1_3_utils",
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
@@ -60,9 +64,6 @@
         "libhidlmemory",
         "libneuralnetworks_generated_test_harness",
         "libneuralnetworks_utils",
-        "VtsHalNeuralNetworksV1_0_utils",
-        "VtsHalNeuralNetworksV1_2Callbacks",
-        "VtsHalNeuralNetworksV1_3Callbacks",
     ],
     whole_static_libs: [
         "neuralnetworks_generated_V1_0_example",
diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
index b64dc2f..891850c 100644
--- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
@@ -21,7 +21,6 @@
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using V1_0::DeviceStatus;
-using V1_0::ErrorStatus;
 using V1_0::PerformanceInfo;
 using V1_2::Constant;
 using V1_2::DeviceType;
diff --git a/neuralnetworks/1.3/vts/functional/Callbacks.cpp b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
index 4f08e72..5768e37 100644
--- a/neuralnetworks/1.3/vts/functional/Callbacks.cpp
+++ b/neuralnetworks/1.3/vts/functional/Callbacks.cpp
@@ -24,12 +24,16 @@
 
 namespace android::hardware::neuralnetworks::V1_3::implementation {
 
-using V1_0::ErrorStatus;
+using V1_2::OutputShape;
+using V1_2::Timing;
+
+constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
+                              .timeInDriver = std::numeric_limits<uint64_t>::max()};
 
 // PreparedModelCallback methods begin here
 
-Return<void> PreparedModelCallback::notify(ErrorStatus errorStatus,
-                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+Return<void> PreparedModelCallback::notifyInternal(ErrorStatus errorStatus,
+                                                   const sp<V1_0::IPreparedModel>& preparedModel) {
     {
         std::lock_guard<std::mutex> hold(mMutex);
 
@@ -48,14 +52,19 @@
     return Void();
 }
 
-Return<void> PreparedModelCallback::notify_1_2(ErrorStatus errorStatus,
-                                               const sp<V1_2::IPreparedModel>& preparedModel) {
-    return notify(errorStatus, preparedModel);
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus,
+                                           const sp<V1_0::IPreparedModel>& preparedModel) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
 }
 
-Return<void> PreparedModelCallback::notify_1_3(ErrorStatus errorStatus,
+Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
+                                               const sp<V1_2::IPreparedModel>& preparedModel) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), preparedModel);
+}
+
+Return<void> PreparedModelCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
                                                const sp<V1_3::IPreparedModel>& preparedModel) {
-    return notify(errorStatus, preparedModel);
+    return notifyInternal(errorStatus, preparedModel);
 }
 
 void PreparedModelCallback::wait() const {
@@ -73,4 +82,82 @@
     return mPreparedModel;
 }
 
+// ExecutionCallback methods begin here
+
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), {}, kNoTiming);
+}
+
+Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    return notifyInternal(static_cast<ErrorStatus>(errorStatus), outputShapes, timing);
+}
+
+Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus,
+                                           const hidl_vec<OutputShape>& outputShapes,
+                                           const Timing& timing) {
+    return notifyInternal(errorStatus, outputShapes, timing);
+}
+
+void ExecutionCallback::wait() const {
+    std::unique_lock<std::mutex> lock(mMutex);
+    mCondition.wait(lock, [this] { return mNotified; });
+}
+
+ErrorStatus ExecutionCallback::getStatus() const {
+    wait();
+    return mErrorStatus;
+}
+
+const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const {
+    wait();
+    return mOutputShapes;
+}
+
+Timing ExecutionCallback::getTiming() const {
+    wait();
+    return mTiming;
+}
+
+Return<void> ExecutionCallback::notifyInternal(ErrorStatus errorStatus,
+                                               hidl_vec<OutputShape> outputShapes, Timing timing) {
+    // check results
+    if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
+        // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() == 0) {
+            LOG(ERROR) << "Notifid with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    } else if (errorStatus != ErrorStatus::NONE) {
+        // outputShapes must be empty if errorStatus is neither NONE nor OUTPUT_INSUFFICIENT_SIZE.
+        if (outputShapes.size() != 0) {
+            LOG(ERROR) << "Notified with non-empty output shape vector when error status is "
+                          "neither NONE nor OUTPUT_INSUFFICIENT_SIZE";
+            errorStatus = ErrorStatus::GENERAL_FAILURE;
+            outputShapes = {};
+            timing = kNoTiming;
+        }
+    }
+
+    // store results
+    {
+        std::lock_guard<std::mutex> hold(mMutex);
+
+        // quick-return if object has already been notified
+        if (mNotified) {
+            return Void();
+        }
+
+        mErrorStatus = errorStatus;
+        mOutputShapes = std::move(outputShapes);
+        mTiming = timing;
+        mNotified = true;
+    }
+    mCondition.notify_all();
+    return Void();
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::implementation
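A hedged sketch of how the relocated ExecutionCallback is meant to be driven, mirroring the updated call sites in GeneratedTestHarness.cpp; preparedModel and request are assumed to already exist, and the empty braces are the new deadline argument:

    // Illustrative only, not part of the patch.
    sp<ExecutionCallback> callback = new ExecutionCallback();
    Return<ErrorStatus> launch =
            preparedModel->execute_1_3(request, V1_2::MeasureTiming::NO, /*deadline=*/{}, callback);
    ASSERT_TRUE(launch.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(launch));
    callback->wait();
    EXPECT_EQ(ErrorStatus::NONE, callback->getStatus());
    const std::vector<V1_2::OutputShape>& shapes = callback->getOutputShapes();
    const V1_2::Timing timing = callback->getTiming();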
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index 5cb466f..576e524 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -29,6 +29,7 @@
 #include <thread>
 
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "MemoryUtils.h"
 #include "TestHarness.h"
@@ -49,7 +50,6 @@
 
 using namespace test_helper;
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
 using V1_2::OperationType;
@@ -238,8 +238,8 @@
         mCacheDir.push_back('/');
 
         Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
-                [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
-                    EXPECT_EQ(ErrorStatus::NONE, status);
+                [this](V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+                    EXPECT_EQ(V1_0::ErrorStatus::NONE, status);
                     mNumModelCache = numModelCache;
                     mNumDataCache = numDataCache;
                 });
@@ -324,9 +324,9 @@
         // Launch prepare model.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
-        Return<ErrorStatus> prepareLaunchStatus =
-                kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
-                                          modelCache, dataCache, cacheToken, preparedModelCallback);
+        Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModel_1_3(
+                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, modelCache,
+                dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
 
@@ -370,7 +370,7 @@
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
         hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
         Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache_1_3(
-                modelCache, dataCache, cacheToken, preparedModelCallback);
+                kDefaultPriority, {}, modelCache, dataCache, cacheToken, preparedModelCallback);
         ASSERT_TRUE(prepareLaunchStatus.isOk());
         if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
             *preparedModel = nullptr;
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 805d5b5..82e63ac 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -44,7 +44,6 @@
 #include <vector>
 
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
 #include "1.3/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "MemoryUtils.h"
@@ -56,9 +55,9 @@
 
 using namespace test_helper;
 using hidl::memory::V1_0::IMemory;
+using implementation::ExecutionCallback;
 using implementation::PreparedModelCallback;
 using V1_0::DataLocation;
-using V1_0::ErrorStatus;
 using V1_0::RequestArgument;
 using V1_1::ExecutionPreference;
 using V1_2::Constant;
@@ -66,7 +65,6 @@
 using V1_2::OutputShape;
 using V1_2::SymmPerChannelQuantParams;
 using V1_2::Timing;
-using V1_2::implementation::ExecutionCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 
 namespace {
@@ -453,7 +451,7 @@
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
                                                 sp<ExecutionCallback>& callback) {
-    return preparedModel->execute_1_3(request, measure, callback);
+    return preparedModel->execute_1_3(request, measure, {}, callback);
 }
 static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
                                                 const Request& request, MeasureTiming measure,
@@ -461,7 +459,7 @@
                                                 Timing* timing) {
     ErrorStatus result;
     Return<void> ret = preparedModel->executeSynchronously_1_3(
-            request, measure,
+            request, measure, {},
             [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
                                             const Timing& time) {
                 result = error;
diff --git a/neuralnetworks/1.3/vts/functional/Utils.cpp b/neuralnetworks/1.3/vts/functional/Utils.cpp
new file mode 100644
index 0000000..23e2af8
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/Utils.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "1.3/Utils.h"
+
+#include <iostream>
+
+namespace android::hardware::neuralnetworks::V1_3 {
+
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) {
+    return os << toString(errorStatus);
+}
+
+}  // namespace android::hardware::neuralnetworks::V1_3
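Why the stream operator helps: gtest and LOG pick it up via argument-dependent lookup, so a V1_3::ErrorStatus prints as its enum name instead of a raw byte dump when an assertion fails. A minimal sketch, assuming <sstream> is available:

    std::ostringstream oss;
    oss << ErrorStatus::GENERAL_FAILURE;   // toString() yields "GENERAL_FAILURE"
    EXPECT_EQ("GENERAL_FAILURE", oss.str());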
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
index 7df8046..6ff9dfd 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -34,7 +34,6 @@
 using nn::ExecutionBurstController;
 using nn::RequestChannelSender;
 using nn::ResultChannelReceiver;
-using V1_0::ErrorStatus;
 using V1_0::Request;
 using V1_2::FmqRequestDatum;
 using V1_2::FmqResultDatum;
@@ -80,16 +79,17 @@
     ASSERT_NE(nullptr, fmqResultDescriptor);
 
     // configure burst
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     sp<IBurstContext> burstContext;
     const Return<void> ret = preparedModel->configureExecutionBurst(
             callback, *fmqRequestDescriptor, *fmqResultDescriptor,
-            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+            [&errorStatus, &burstContext](V1_0::ErrorStatus status,
+                                          const sp<IBurstContext>& context) {
                 errorStatus = status;
                 burstContext = context;
             });
     ASSERT_TRUE(ret.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
+    ASSERT_EQ(V1_0::ErrorStatus::NONE, errorStatus);
     ASSERT_NE(nullptr, burstContext.get());
 
     // return values
@@ -144,7 +144,7 @@
     auto results = receiver->getBlocking();
     ASSERT_TRUE(results.has_value());
     const auto [status, outputShapes, timing] = std::move(*results);
-    EXPECT_NE(ErrorStatus::NONE, status);
+    EXPECT_NE(V1_0::ErrorStatus::NONE, status);
     EXPECT_EQ(0u, outputShapes.size());
     EXPECT_TRUE(badTiming(timing));
 }
@@ -302,14 +302,15 @@
     // collect serialized result by running regular burst
     const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
             controllerRegular->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+    const V1_0::ErrorStatus statusRegular =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nRegular));
     EXPECT_FALSE(fallbackRegular);
 
     // skip test if regular burst output isn't useful for testing a failure
     // caused by having too small of a length for the result FMQ
     const std::vector<FmqResultDatum> serialized =
             android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
-    if (statusRegular != ErrorStatus::NONE ||
+    if (statusRegular != V1_0::ErrorStatus::NONE ||
         serialized.size() <= kExecutionBurstChannelSmallLength) {
         return;
     }
@@ -318,8 +319,9 @@
     // large enough to return the serialized result
     const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
             controllerSmall->compute(request, MeasureTiming::NO, keys);
-    const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
-    EXPECT_NE(ErrorStatus::NONE, statusSmall);
+    const V1_0::ErrorStatus statusSmall =
+            nn::convertToV1_0(nn::convertResultCodeToErrorStatus(nSmall));
+    EXPECT_NE(V1_0::ErrorStatus::NONE, statusSmall);
     EXPECT_EQ(0u, outputShapesSmall.size());
     EXPECT_TRUE(badTiming(timingSmall));
     EXPECT_FALSE(fallbackSmall);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index cc86264..43e53ef 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -18,13 +18,13 @@
 
 #include "1.0/Utils.h"
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "VtsHalNeuralnetworks.h"
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 using V1_2::SymmPerChannelQuantParams;
 using HidlToken =
@@ -48,9 +48,9 @@
     SCOPED_TRACE(message + " [prepareModel_1_3]");
 
     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    Return<ErrorStatus> prepareLaunchStatus =
-            device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
-                                     hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
+            model, preference, kDefaultPriority, {}, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
 
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
index 96dc589..9fb4c6e 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -18,7 +18,7 @@
 
 #include <chrono>
 #include "1.0/Utils.h"
-#include "1.2/Callbacks.h"
+#include "1.3/Callbacks.h"
 #include "ExecutionBurstController.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
@@ -27,11 +27,10 @@
 
 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
 
-using V1_0::ErrorStatus;
+using implementation::ExecutionCallback;
 using V1_2::MeasureTiming;
 using V1_2::OutputShape;
 using V1_2::Timing;
-using V1_2::implementation::ExecutionCallback;
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
@@ -63,7 +62,7 @@
 
         sp<ExecutionCallback> executionCallback = new ExecutionCallback();
         Return<ErrorStatus> executeLaunchStatus =
-                preparedModel->execute_1_3(request, measure, executionCallback);
+                preparedModel->execute_1_3(request, measure, {}, executionCallback);
         ASSERT_TRUE(executeLaunchStatus.isOk());
         ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
 
@@ -81,7 +80,7 @@
         SCOPED_TRACE(message + " [executeSynchronously_1_3]");
 
         Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-                request, measure,
+                request, measure, {},
                 [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
                    const Timing& timing) {
                     ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
@@ -163,7 +162,7 @@
 void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
     SCOPED_TRACE("Expecting request to fail [executeSynchronously_1_3]");
     Return<void> executeStatus = preparedModel->executeSynchronously_1_3(
-            request, MeasureTiming::NO,
+            request, MeasureTiming::NO, {},
             [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
                 ASSERT_NE(ErrorStatus::NONE, error);
                 EXPECT_EQ(outputShapes.size(), 0);
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 1140b68..7a32b04 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -23,6 +23,7 @@
 #include <utility>
 #include "1.0/Utils.h"
 #include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"
@@ -32,7 +33,6 @@
 using HidlToken =
         hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using implementation::PreparedModelCallback;
-using V1_0::ErrorStatus;
 using V1_1::ExecutionPreference;
 
 // internal helper function
@@ -55,8 +55,8 @@
     // launch prepare model
     const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
     const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
-            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {},
+            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
     ASSERT_TRUE(prepareLaunchStatus.isOk());
     ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
 
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
index fb19a84..e9dec2d 100644
--- a/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Callbacks.h
@@ -18,8 +18,11 @@
 #define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
 
 #include <android-base/thread_annotations.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
 #include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
 #include <hidl/Status.h>
 #include <condition_variable>
@@ -136,7 +139,7 @@
      * @param preparedModel Returned model that has been prepared for execution,
      *     nullptr if the model was unable to be prepared.
      */
-    Return<void> notify_1_3(V1_0::ErrorStatus status,
+    Return<void> notify_1_3(V1_3::ErrorStatus status,
                             const sp<V1_3::IPreparedModel>& preparedModel) override;
 
     /**
@@ -158,7 +161,7 @@
      *     - GENERAL_FAILURE if there is an unspecified error
      *     - INVALID_ARGUMENT if the input model is invalid
      */
-    V1_0::ErrorStatus getStatus() const;
+    ErrorStatus getStatus() const;
 
     /**
      * Retrieves the model that has been prepared for execution from the
@@ -173,13 +176,216 @@
     sp<V1_0::IPreparedModel> getPreparedModel() const;
 
   private:
+    Return<void> notifyInternal(ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel);
+
     mutable std::mutex mMutex;
     mutable std::condition_variable mCondition;
     bool mNotified GUARDED_BY(mMutex) = false;
-    V1_0::ErrorStatus mErrorStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
+    ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE;
     sp<V1_0::IPreparedModel> mPreparedModel;
 };
 
+/**
+ * The ExecutionCallback class is used to receive the results of the execution
+ * from a task executing asynchronously with respect to the runtime. If a
+ * calling thread calls wait or get* on an ExecutionCallback object and the
+ * corresponding asynchronous task has not finished the execution, the calling
+ * thread will block until the asynchronous task has called one of the notify*
+ * methods.
+ *
+ * If the callback object is notified more than once, only the results of the
+ * first call to notify* are used, and the results from subsequent calls are
+ * discarded.
+ *
+ * This callback object is passed as an argument to IPreparedModel::execute*.
+ */
+class ExecutionCallback : public IExecutionCallback {
+  public:
+    /**
+     * IExecutionCallback::notify marks the callback object with the return
+     * status of the asynchronous execution that held this callback and enables
+     * all prior and future wait calls on the ExecutionCallback object to
+     * proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     *     - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large
+     *         enough to store the resultant values
+     *     - INVALID_ARGUMENT if the input request is invalid
+     */
+    Return<void> notify(V1_0::ErrorStatus status) override;
+
+    /**
+     * IExecutionCallback::notify_1_2 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *         error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *         not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *         invalid
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    Return<void> notify_1_2(V1_0::ErrorStatus status,
+                            const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+
+    /**
+     * IExecutionCallback::notify_1_3 marks the callback object with the results
+     * (error status, dynamic output shapes, and timing information) of the
+     * asynchronous execution that held this callback and enables all prior and
+     * future wait calls on the ExecutionCallback object to proceed.
+     *
+     * One of the IExecutionCallback::notify* methods must be called on a given
+     * ExecutionCallback object.
+     *
+     * If the callback object is notified more than once, only the results of
+     * the first call to notify* are used, and the results from subsequent calls
+     * are discarded.
+     *
+     * @param status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *         error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *         not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *         invalid
+     *     - MISSED_DEADLINE_* if the deadline was not met
+     * @param outputShapes A list of shape information of model output operands.
+     *     The index into "outputShapes" corresponds to the index of the output
+     *     operand in the Request outputs vector. outputShapes must be empty
+     *     unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE.
+     * @param timing Duration of execution. Unless MeasureTiming::YES was passed
+     *     when launching the execution and status is NONE, all times must be
+     *     reported as UINT64_MAX. A driver may choose to report any time as
+     *     UINT64_MAX, indicating that particular measurement is not available.
+     */
+    Return<void> notify_1_3(V1_3::ErrorStatus status,
+                            const hidl_vec<V1_2::OutputShape>& outputShapes,
+                            const V1_2::Timing& timing) override;
+
+    /**
+     * ExecutionCallback::wait blocks until notify* has been called on the
+     * callback object.
+     */
+    void wait() const;
+
+    /**
+     * Retrieves the error status returned from the asynchronous task launched
+     * by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * @return status Error status returned from launching the asynchronous task
+     *     (if the launch fails) or from the asynchronous task itself (if the
+     *     launch succeeds). Must be:
+     *     - NONE if the asynchronous execution was successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if the asynchronous task resulted in an unspecified
+     *         error
+     *     - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is
+     *         not large enough to store the corresponding output
+     *     - INVALID_ARGUMENT if one of the input arguments to prepareModel is
+     *         invalid
+     *     - MISSED_DEADLINE_* if the deadline could not be met
+     */
+    V1_3::ErrorStatus getStatus() const;
+
+    /**
+     * Retrieves the output shapes returned from the asynchronous task launched
+     * by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, an
+     * empty vector will be returned.
+     *
+     * @return outputShapes A list of shape information of model output
+     *     operands. The index into "outputShapes" corresponds to the index of
+     *     the output operand in the Request outputs vector. outputShapes must
+     *     be empty unless the status is either NONE or
+     *     OUTPUT_INSUFFICIENT_SIZE. outputShapes may be empty if the status is
+     *     NONE and all model output operands are fully-specified at execution
+     *     time. outputShapes must have the same number of elements as the
+     *     number of model output operands if the status is
+     *     OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has
+     *     at least one output operand that is not fully-specified.
+     */
+    const std::vector<V1_2::OutputShape>& getOutputShapes() const;
+
+    /**
+     * Retrieves the timing information returned from the asynchronous task launched
+     * by one of the IPreparedModel::execute* methods. If
+     * IPreparedModel::execute* (but not IPreparedModel::executeSynchronously*)
+     * has not finished asynchronously executing, this call will block until the
+     * asynchronous task notifies the object.
+     *
+     * If the asynchronous task was launched by IPreparedModel::execute, every
+     * time must be UINT64_MAX.
+     *
+     * @return timing Duration of the execution. Every time must be UINT64_MAX
+     *     unless the status is NONE.
+     */
+    V1_2::Timing getTiming() const;
+
+  private:
+    /*
+     * ExecutionCallback::notifyInternal stores the results of the execution
+     * (status, output shapes, and timing information) in the ExecutionCallback
+     * object before any call to wait or get* returns. It then enables all prior
+     * and future wait calls on the ExecutionCallback object to proceed.
+     */
+    Return<void> notifyInternal(V1_3::ErrorStatus errorStatus,
+                                hidl_vec<V1_2::OutputShape> outputShapes, V1_2::Timing timing);
+
+    // members
+    mutable std::mutex mMutex;
+    mutable std::condition_variable mCondition;
+    bool mNotified GUARDED_BY(mMutex) = false;
+    V1_3::ErrorStatus mErrorStatus = V1_3::ErrorStatus::GENERAL_FAILURE;
+    std::vector<V1_2::OutputShape> mOutputShapes = {};
+    V1_2::Timing mTiming = {};
+};
+
 }  // namespace android::hardware::neuralnetworks::V1_3::implementation
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_CALLBACKS_H
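The class comments above promise first-notification-wins semantics; a hedged sketch of what that means in practice for ExecutionCallback, assuming the V1_3::implementation and V1_2 using-declarations of the tests:

    // Illustrative only: notifyInternal() quick-returns once mNotified is set,
    // so the second notification below is discarded.
    const V1_2::Timing noTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
    sp<ExecutionCallback> callback = new ExecutionCallback();
    callback->notify_1_3(ErrorStatus::NONE, {}, noTiming);
    callback->notify_1_3(ErrorStatus::GENERAL_FAILURE, {}, noTiming);
    EXPECT_EQ(ErrorStatus::NONE, callback->getStatus());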
diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
new file mode 100644
index 0000000..3661b66
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
+
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <iosfwd>
+
+namespace android::hardware::neuralnetworks {
+
+inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM;
+
+}  // namespace android::hardware::neuralnetworks
+
+namespace android::hardware::neuralnetworks::V1_3 {
+
+// pretty-print values for error messages
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+
+}  // namespace android::hardware::neuralnetworks::V1_3
+
+#endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_UTILS_H
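For completeness, a hedged sketch of the launch-and-wait pattern the prepareModel_1_3 call sites above now follow, with the new priority and deadline parameters inserted before the cache arguments; kDevice and model are assumed to be in scope as in the tests:

    // Illustrative only; mirrors the updated createPreparedModel path.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    const Return<ErrorStatus> launch = kDevice->prepareModel_1_3(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
            /*deadline=*/{}, hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(),
            callback);
    ASSERT_TRUE(launch.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(launch));
    callback->wait();
    ASSERT_EQ(ErrorStatus::NONE, callback->getStatus());
    const sp<V1_0::IPreparedModel> preparedModel = callback->getPreparedModel();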