NNAPI Burst -- HAL VTS tests
am: 29471a8935

Extend the NNAPI HAL VTS tests to cover burst executions:
 - GeneratedTestHarness: replace the Synchronously::{NO, YES} flag with an
   Executor::{ASYNC, SYNC, BURST} enum, and run the V1_2 generated tests
   through the burst path (ExecutionBurstController) alongside the
   asynchronous and synchronous paths, with and without timing measurement.
 - ValidateRequest: add a burst variant of the invalid-request test,
   including freeMemory calls for a valid key, an unknown key, and a
   double free.
 - Android.bp: link libfmq, which the burst fast message queues require.

Change-Id: I3de6e9959b4037c1cdaf065b61fb429e1aafe5b9
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 2920cec..52d6328 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -23,6 +23,7 @@
     defaults: ["VtsHalTargetTestDefaults"],
     export_include_dirs: ["."],
     shared_libs: [
+        "libfmq",
         "libnativewindow",
     ],
     static_libs: [
@@ -51,6 +52,7 @@
         "VtsHalNeuralnetworks.cpp",
     ],
     shared_libs: [
+        "libfmq",
         "libnativewindow",
     ],
     static_libs: [
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 65c425e..1c5a6e8 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "Callbacks.h"
+#include "ExecutionBurstController.h"
 #include "TestHarness.h"
 #include "Utils.h"
 
@@ -109,14 +110,22 @@
     }
     return result;
 }
-enum class Synchronously { NO, YES };
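+// Burst execution is only available from NN HAL 1.2; the V1_0 overload below
+// exists so the templated test code compiles, and marks any call as a failure.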
+static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<V1_0::IPreparedModel>&) {
+    ADD_FAILURE() << "asking for burst execution at V1_0";
+    return nullptr;
+}
+static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<V1_2::IPreparedModel>& preparedModel) {
+    return ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
+}
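+// Execution paths exercised by the generated tests.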
+enum class Executor { ASYNC, SYNC, BURST };
 const float kDefaultAtol = 1e-5f;
 const float kDefaultRtol = 1e-5f;
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
-                           Synchronously sync, MeasureTiming measure, bool testDynamicOutputShape) {
+        const std::vector<MixedTypedExample>& examples, bool hasRelaxedFloat32Model, float fpAtol,
+        float fpRtol, Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
 
@@ -209,35 +218,62 @@
         inputMemory->commit();
         outputMemory->commit();
 
+        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
+
         ErrorStatus executionStatus;
         hidl_vec<OutputShape> outputShapes;
         Timing timing;
-        if (sync == Synchronously::NO) {
-            SCOPED_TRACE("asynchronous");
+        switch (executor) {
+            case Executor::ASYNC: {
+                SCOPED_TRACE("asynchronous");
 
-            // launch execution
-            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-            ASSERT_NE(nullptr, executionCallback.get());
-            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
-                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                    measure, executionCallback);
-            ASSERT_TRUE(executionLaunchStatus.isOk());
-            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+                // launch execution
+                sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+                ASSERT_NE(nullptr, executionCallback.get());
+                Return<ErrorStatus> executionLaunchStatus =
+                        ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+                ASSERT_TRUE(executionLaunchStatus.isOk());
+                EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
-            // retrieve execution status
-            executionCallback->wait();
-            executionStatus = executionCallback->getStatus();
-            outputShapes = executionCallback->getOutputShapes();
-            timing = executionCallback->getTiming();
-        } else {
-            SCOPED_TRACE("synchronous");
+                // retrieve execution status
+                executionCallback->wait();
+                executionStatus = executionCallback->getStatus();
+                outputShapes = executionCallback->getOutputShapes();
+                timing = executionCallback->getTiming();
 
-            // execute
-            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
-                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                    measure, &outputShapes, &timing);
-            ASSERT_TRUE(executionReturnStatus.isOk());
-            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+                break;
+            }
+            case Executor::SYNC: {
+                SCOPED_TRACE("synchronous");
+
+                // execute
+                Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+                        preparedModel, request, measure, &outputShapes, &timing);
+                ASSERT_TRUE(executionReturnStatus.isOk());
+                executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+                break;
+            }
+            case Executor::BURST: {
+                SCOPED_TRACE("burst");
+
+                // create burst (opens the FMQ-based execution channels to the driver)
+                const std::unique_ptr<::android::nn::ExecutionBurstController> controller =
+                        CreateBurst(preparedModel);
+                ASSERT_NE(nullptr, controller.get());
+
+                // create memory keys -- the burst controller caches each
+                // memory pool by its key across executions
+                std::vector<intptr_t> keys(request.pools.size());
+                for (size_t i = 0; i < keys.size(); ++i) {
+                    keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+                }
+
+                // execute burst
+                std::tie(executionStatus, outputShapes, timing) =
+                        controller->compute(request, measure, keys);
+
+                break;
+            }
         }
 
         if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
@@ -285,11 +321,10 @@
 }
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
-                           const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, Synchronously sync, MeasureTiming measure,
-                           bool testDynamicOutputShape) {
+        const std::vector<MixedTypedExample>& examples, bool hasRelaxedFloat32Model,
+        Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
     EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
-                          kDefaultRtol, sync, measure, testDynamicOutputShape);
+            kDefaultRtol, executor, measure, testDynamicOutputShape);
 }
 
 static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -345,8 +380,8 @@
 
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
-                          MeasureTiming::NO, /*testDynamicOutputShape=*/false);
+            /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC, MeasureTiming::NO,
+            /*testDynamicOutputShape=*/false);
 }
 
 void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
@@ -392,8 +427,8 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
-                          MeasureTiming::NO, /*testDynamicOutputShape=*/false);
+            model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
+            MeasureTiming::NO, /*testDynamicOutputShape=*/false);
 }
 
 // TODO: Reduce code duplication.
@@ -441,17 +476,23 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
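+    // exercise the asynchronous, synchronous, and burst execution paths, each
+    // with and without timing measurement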
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
-                          MeasureTiming::NO, testDynamicOutputShape);
+            model.relaxComputationFloat32toFloat16, Executor::ASYNC, MeasureTiming::NO,
+            testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
-                          MeasureTiming::NO, testDynamicOutputShape);
+            model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::NO,
+            testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
-                          MeasureTiming::YES, testDynamicOutputShape);
+            model.relaxComputationFloat32toFloat16, Executor::BURST, MeasureTiming::NO,
+            testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
-                          MeasureTiming::YES, testDynamicOutputShape);
+            model.relaxComputationFloat32toFloat16, Executor::ASYNC, MeasureTiming::YES,
+            testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+            model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::YES,
+            testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+            model.relaxComputationFloat32toFloat16, Executor::BURST, MeasureTiming::YES,
+            testDynamicOutputShape);
 }
 
 }  // namespace generated_tests
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 00a7c3e..d411da4 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -19,6 +19,7 @@
 #include "VtsHalNeuralnetworks.h"
 
 #include "Callbacks.h"
+#include "ExecutionBurstController.h"
 #include "TestHarness.h"
 #include "Utils.h"
 
@@ -112,6 +113,7 @@
     };
     MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
 
+    // asynchronous
     {
         SCOPED_TRACE(message + " [execute_1_2]");
 
@@ -131,6 +133,7 @@
         ASSERT_TRUE(badTiming(timing));
     }
 
+    // synchronous
     {
         SCOPED_TRACE(message + " [executeSynchronously]");
 
@@ -144,6 +147,43 @@
                 });
         ASSERT_TRUE(executeStatus.isOk());
     }
+
+    // burst
+    {
+        SCOPED_TRACE(message + " [burst]");
+
+        // create burst
+        std::unique_ptr<::android::nn::ExecutionBurstController> burst =
+                ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
+        ASSERT_NE(nullptr, burst.get());
+
+        // create memory keys
+        std::vector<intptr_t> keys(request.pools.size());
+        for (size_t i = 0; i < keys.size(); ++i) {
+            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+        }
+
+        // execute and verify
+        ErrorStatus error;
+        std::vector<OutputShape> outputShapes;
+        Timing timing;
+        std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
+        EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+        EXPECT_EQ(outputShapes.size(), 0);
+        EXPECT_TRUE(badTiming(timing));
+
+        // additional burst testing -- freeMemory signals that the memory pool
+        // identified by the given key will no longer be used
+        if (request.pools.size() > 0) {
+            // valid free
+            burst->freeMemory(keys.front());
+
+            // negative test: invalid free of unknown (blank) memory
+            burst->freeMemory(intptr_t{});
+
+            // negative test: double free of memory
+            burst->freeMemory(keys.front());
+        }
+    }
 }
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,