Merge "graphics.common: add HSV format definition to 1.2"
diff --git a/biometrics/face/1.0/types.hal b/biometrics/face/1.0/types.hal
index a488d67..2bcd3d5 100644
--- a/biometrics/face/1.0/types.hal
+++ b/biometrics/face/1.0/types.hal
@@ -281,9 +281,52 @@
     TOO_SIMILAR = 15,
 
     /**
+     * The magnitude of the pan angle of the user’s face with respect to the sensor’s
+     * capture plane is too high.
+     *
+     * The pan angle is defined as the angle swept out by the user’s face turning
+     * their neck left and right. The pan angle would be zero if the user faced the
+     * camera directly.
+     *
+     * The user should be informed to look more directly at the camera.
+     */
+    PAN_TOO_EXTREME = 16,
+
+    /**
+     * The magnitude of the tilt angle of the user’s face with respect to the sensor’s
+     * capture plane is too high.
+     *
+     * The tilt angle is defined as the angle swept out by the user’s face looking up
+     * and down. The tilt angle would be zero if the user faced the camera directly.
+     *
+     * The user should be informed to look more directly at the camera.
+     */
+    TILT_TOO_EXTREME = 17,
+
+    /**
+     * The magnitude of the roll angle of the user’s face with respect to the sensor’s
+     * capture plane is too high.
+     *
+     * The roll angle is defined as the angle swept out by the user’s face tilting their head
+     * towards their shoulders to the left and right. The roll angle would be zero if the user
+     * faced the camera directly.
+     *
+     * The user should be informed to look more directly at the camera.
+     */
+    ROLL_TOO_EXTREME = 18,
+
+    /**
+     * The user’s face has been obscured by some object.
+     *
+     * The user should be informed to remove any objects in the line of sight between
+     * the sensor and the user’s face.
+     */
+    FACE_OBSCURED = 19,
+
+    /**
      * Used to enable a vendor-specific acquisition message.
      */
-    VENDOR = 16
+    VENDOR = 20
 };
 
 /**
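All four new codes end in user guidance, so a client will typically fold them into a feedback string. A minimal sketch of such a mapping, assuming the enclosing enum is FaceAcquiredInfo (the guidance strings are illustrative, not part of the HAL):

```cpp
// Hypothetical client-side mapping of the new codes to user guidance;
// the strings below are illustrative, only the enum values come from the HAL.
#include <android/hardware/biometrics/face/1.0/types.h>
#include <string>

using ::android::hardware::biometrics::face::V1_0::FaceAcquiredInfo;

std::string guidanceFor(FaceAcquiredInfo info) {
    switch (info) {
        case FaceAcquiredInfo::PAN_TOO_EXTREME:   // neck turned too far left/right
        case FaceAcquiredInfo::TILT_TOO_EXTREME:  // face pitched too far up/down
        case FaceAcquiredInfo::ROLL_TOO_EXTREME:  // head tilted toward a shoulder
            return "Look directly at the camera.";
        case FaceAcquiredInfo::FACE_OBSCURED:
            return "Remove anything blocking your face.";
        default:
            return "";  // other codes handled elsewhere
    }
}
```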
diff --git a/camera/provider/2.4/default/CameraProvider.cpp b/camera/provider/2.4/default/CameraProvider.cpp
index 488b9af..f143dd7 100644
--- a/camera/provider/2.4/default/CameraProvider.cpp
+++ b/camera/provider/2.4/default/CameraProvider.cpp
@@ -143,7 +143,6 @@
         int new_status) {
     CameraProvider* cp = const_cast<CameraProvider*>(
             static_cast<const CameraProvider*>(callbacks));
-    bool found = false;
 
     if (cp == nullptr) {
         ALOGE("%s: callback ops is null", __FUNCTION__);
@@ -155,17 +154,23 @@
     snprintf(cameraId, sizeof(cameraId), "%d", camera_id);
     std::string cameraIdStr(cameraId);
     cp->mCameraStatusMap[cameraIdStr] = (camera_device_status_t) new_status;
-    if (cp->mCallbacks != nullptr) {
-        CameraDeviceStatus status = (CameraDeviceStatus) new_status;
-        for (auto const& deviceNamePair : cp->mCameraDeviceNames) {
-            if (cameraIdStr.compare(deviceNamePair.first) == 0) {
-                cp->mCallbacks->cameraDeviceStatusChange(
-                        deviceNamePair.second, status);
-                found = true;
-            }
-        }
 
-        switch (status) {
+    if (cp->mCallbacks == nullptr) {
+        // For cameras connected before mCallbacks is set, the corresponding
+        // addDeviceNames() will be called later in setCallback().
+        return;
+    }
+
+    bool found = false;
+    CameraDeviceStatus status = (CameraDeviceStatus)new_status;
+    for (auto const& deviceNamePair : cp->mCameraDeviceNames) {
+        if (cameraIdStr.compare(deviceNamePair.first) == 0) {
+            cp->mCallbacks->cameraDeviceStatusChange(deviceNamePair.second, status);
+            found = true;
+        }
+    }
+
+    switch (status) {
         case CameraDeviceStatus::PRESENT:
         case CameraDeviceStatus::ENUMERATING:
             if (!found) {
@@ -176,7 +181,6 @@
             if (found) {
                 cp->removeDeviceNames(camera_id);
             }
-        }
     }
 }
 
@@ -439,8 +443,22 @@
 
 // Methods from ::android::hardware::camera::provider::V2_4::ICameraProvider follow.
 Return<Status> CameraProvider::setCallback(const sp<ICameraProviderCallback>& callback)  {
+    if (callback == nullptr) {
+        return Status::ILLEGAL_ARGUMENT;
+    }
+
     Mutex::Autolock _l(mCbLock);
     mCallbacks = callback;
+
+    // Add and report all present external cameras.
+    for (auto const& statusPair : mCameraStatusMap) {
+        int id = std::stoi(statusPair.first);
+        auto status = static_cast<CameraDeviceStatus>(statusPair.second);
+        if (id >= mNumberOfLegacyCameras && status != CameraDeviceStatus::NOT_PRESENT) {
+            addDeviceNames(id, status, true);
+        }
+    }
+
     return Status::OK;
 }
 
@@ -452,6 +470,11 @@
 Return<void> CameraProvider::getCameraIdList(getCameraIdList_cb _hidl_cb)  {
     std::vector<hidl_string> deviceNameList;
     for (auto const& deviceNamePair : mCameraDeviceNames) {
+        if (std::stoi(deviceNamePair.first) >= mNumberOfLegacyCameras) {
+            // External camera devices must be reported through the device status change callback,
+            // not in this list.
+            continue;
+        }
         if (mCameraStatusMap[deviceNamePair.first] == CAMERA_DEVICE_STATUS_PRESENT) {
             deviceNameList.push_back(deviceNamePair.second);
         }
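Net effect of the CameraProvider changes: external cameras are delivered exclusively through the status callback, never through the initial ID list. A sketch of the resulting client contract, where MyProviderCallback is a hypothetical ICameraProviderCallback implementation and "legacy/0" is the usual instance name:

```cpp
// Resulting client contract (sketch); MyProviderCallback is hypothetical.
sp<ICameraProvider> provider = ICameraProvider::getService("legacy/0");

// getCameraIdList() now lists only present internal (legacy) cameras.
provider->getCameraIdList([](Status status, const hidl_vec<hidl_string>& ids) {
    // `ids` never includes external camera device names.
});

// setCallback(nullptr) now returns ILLEGAL_ARGUMENT. Registering a valid
// callback also replays the status of any external camera that connected
// earlier, via addDeviceNames()/cameraDeviceStatusChange().
sp<ICameraProviderCallback> cb = new MyProviderCallback();
provider->setCallback(cb);
```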
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 2920cec..52d6328 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -23,6 +23,7 @@
     defaults: ["VtsHalTargetTestDefaults"],
     export_include_dirs: ["."],
     shared_libs: [
+        "libfmq",
         "libnativewindow",
     ],
     static_libs: [
@@ -51,6 +52,7 @@
         "VtsHalNeuralnetworks.cpp",
     ],
     shared_libs: [
+        "libfmq",
         "libnativewindow",
     ],
     static_libs: [
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 65c425e..8d427b1 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "Callbacks.h"
+#include "ExecutionBurstController.h"
 #include "TestHarness.h"
 #include "Utils.h"
 
@@ -109,14 +110,23 @@
     }
     return result;
 }
-enum class Synchronously { NO, YES };
+static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<V1_0::IPreparedModel>&) {
+    ADD_FAILURE() << "asking for burst execution at V1_0";
+    return nullptr;
+}
+static std::unique_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+        const sp<V1_2::IPreparedModel>& preparedModel) {
+    return ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
+}
+enum class Executor { ASYNC, SYNC, BURST };
 const float kDefaultAtol = 1e-5f;
 const float kDefaultRtol = 1e-5f;
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
                            bool hasRelaxedFloat32Model, float fpAtol, float fpRtol,
-                           Synchronously sync, MeasureTiming measure, bool testDynamicOutputShape) {
+                           Executor executor, MeasureTiming measure, bool testDynamicOutputShape) {
     const uint32_t INPUT = 0;
     const uint32_t OUTPUT = 1;
 
@@ -209,35 +219,62 @@
         inputMemory->commit();
         outputMemory->commit();
 
+        const Request request = {.inputs = inputs_info, .outputs = outputs_info, .pools = pools};
+
         ErrorStatus executionStatus;
         hidl_vec<OutputShape> outputShapes;
         Timing timing;
-        if (sync == Synchronously::NO) {
-            SCOPED_TRACE("asynchronous");
+        switch (executor) {
+            case Executor::ASYNC: {
+                SCOPED_TRACE("asynchronous");
 
-            // launch execution
-            sp<ExecutionCallback> executionCallback = new ExecutionCallback();
-            ASSERT_NE(nullptr, executionCallback.get());
-            Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
-                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                    measure, executionCallback);
-            ASSERT_TRUE(executionLaunchStatus.isOk());
-            EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+                // launch execution
+                sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+                ASSERT_NE(nullptr, executionCallback.get());
+                Return<ErrorStatus> executionLaunchStatus =
+                        ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+                ASSERT_TRUE(executionLaunchStatus.isOk());
+                EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
 
-            // retrieve execution status
-            executionCallback->wait();
-            executionStatus = executionCallback->getStatus();
-            outputShapes = executionCallback->getOutputShapes();
-            timing = executionCallback->getTiming();
-        } else {
-            SCOPED_TRACE("synchronous");
+                // retrieve execution status
+                executionCallback->wait();
+                executionStatus = executionCallback->getStatus();
+                outputShapes = executionCallback->getOutputShapes();
+                timing = executionCallback->getTiming();
 
-            // execute
-            Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
-                    preparedModel, {.inputs = inputs_info, .outputs = outputs_info, .pools = pools},
-                    measure, &outputShapes, &timing);
-            ASSERT_TRUE(executionReturnStatus.isOk());
-            executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+                break;
+            }
+            case Executor::SYNC: {
+                SCOPED_TRACE("synchronous");
+
+                // execute
+                Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+                        preparedModel, request, measure, &outputShapes, &timing);
+                ASSERT_TRUE(executionReturnStatus.isOk());
+                executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+                break;
+            }
+            case Executor::BURST: {
+                SCOPED_TRACE("burst");
+
+                // create burst
+                const std::unique_ptr<::android::nn::ExecutionBurstController> controller =
+                        CreateBurst(preparedModel);
+                ASSERT_NE(nullptr, controller.get());
+
+                // create memory keys
+                std::vector<intptr_t> keys(request.pools.size());
+                for (size_t i = 0; i < keys.size(); ++i) {
+                    keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+                }
+
+                // execute burst
+                std::tie(executionStatus, outputShapes, timing) =
+                        controller->compute(request, measure, keys);
+
+                break;
+            }
         }
 
         if (testDynamicOutputShape && executionStatus != ErrorStatus::NONE) {
@@ -286,10 +323,10 @@
 template <typename T_IPreparedModel>
 void EvaluatePreparedModel(sp<T_IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                            const std::vector<MixedTypedExample>& examples,
-                           bool hasRelaxedFloat32Model, Synchronously sync, MeasureTiming measure,
+                           bool hasRelaxedFloat32Model, Executor executor, MeasureTiming measure,
                            bool testDynamicOutputShape) {
     EvaluatePreparedModel(preparedModel, is_ignored, examples, hasRelaxedFloat32Model, kDefaultAtol,
-                          kDefaultRtol, sync, measure, testDynamicOutputShape);
+                          kDefaultRtol, executor, measure, testDynamicOutputShape);
 }
 
 static void getPreparedModel(sp<PreparedModelCallback> callback,
@@ -345,7 +382,7 @@
 
     float fpAtol = 1e-5f, fpRtol = 5.0f * 1.1920928955078125e-7f;
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Synchronously::NO,
+                          /*hasRelaxedFloat32Model=*/false, fpAtol, fpRtol, Executor::ASYNC,
                           MeasureTiming::NO, /*testDynamicOutputShape=*/false);
 }
 
@@ -392,7 +429,7 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Synchronously::NO,
+                          model.relaxComputationFloat32toFloat16, 1e-5f, 1e-5f, Executor::ASYNC,
                           MeasureTiming::NO, /*testDynamicOutputShape=*/false);
 }
 
@@ -441,16 +478,22 @@
     ASSERT_NE(nullptr, preparedModel.get());
 
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
+                          model.relaxComputationFloat32toFloat16, Executor::ASYNC,
                           MeasureTiming::NO, testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
+                          model.relaxComputationFloat32toFloat16, Executor::SYNC, MeasureTiming::NO,
+                          testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, Executor::BURST,
                           MeasureTiming::NO, testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::NO,
+                          model.relaxComputationFloat32toFloat16, Executor::ASYNC,
                           MeasureTiming::YES, testDynamicOutputShape);
     EvaluatePreparedModel(preparedModel, is_ignored, examples,
-                          model.relaxComputationFloat32toFloat16, Synchronously::YES,
+                          model.relaxComputationFloat32toFloat16, Executor::SYNC,
+                          MeasureTiming::YES, testDynamicOutputShape);
+    EvaluatePreparedModel(preparedModel, is_ignored, examples,
+                          model.relaxComputationFloat32toFloat16, Executor::BURST,
                           MeasureTiming::YES, testDynamicOutputShape);
 }
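For reference, the burst path added above reduces to three controller calls. A distilled sketch using only the API exercised in this diff, assuming `preparedModel`, `request`, and `measure` are in scope:

```cpp
// Distilled burst-execution flow, mirroring the harness code above.
auto controller =
        ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);

// One opaque key per memory pool; the controller caches pools by key so
// repeated executions can refer to already-sent memory.
std::vector<intptr_t> keys(request.pools.size());
for (size_t i = 0; i < keys.size(); ++i) {
    keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
}

ErrorStatus status;
std::vector<OutputShape> outputShapes;
Timing timing;
std::tie(status, outputShapes, timing) = controller->compute(request, measure, keys);

// Tell the controller when a pool goes away on the client side.
controller->freeMemory(keys.front());
```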
 
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index 6c3b483..de249b0 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -98,6 +98,25 @@
             generates (ErrorStatus status, vec<bool> supportedOperations);
 
     /**
+     * Gets whether the driver supports compilation caching.
+     *
+     * isCachingSupported indicates whether the driver supports compilation caching.
+     * Even if so, the driver may still choose not to cache certain compiled models.
+     *
+     * If the device reports that caching is not supported, the client may avoid calling
+     * IDevice::prepareModelFromCache and IPreparedModel::saveToCache.
+     *
+     * @return status Error status of the call, must be:
+     *     - NONE if successful
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if there is an unspecified error
+     * @return supported A boolean indicating whether the driver supports compilation
+     *                   caching. Even when true is returned, the driver may still
+     *                   choose not to cache certain compiled models.
+     */
+    isCachingSupported() generates (ErrorStatus status, bool supported);
+
+    /**
      * Creates a prepared model for execution.
      *
      * prepareModel is used to make any necessary transformations or alternative
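A minimal client-side sketch of the intended gating on isCachingSupported; the service instance name is an assumption:

```cpp
// Sketch: gate the cache path on the reported capability.
// "sample-driver" is an illustrative service instance name.
sp<IDevice> device = IDevice::getService("sample-driver");

bool cachingSupported = false;
device->isCachingSupported([&](ErrorStatus status, bool supported) {
    cachingSupported = (status == ErrorStatus::NONE) && supported;
});

if (!cachingSupported) {
    // Skip prepareModelFromCache()/saveToCache() and compile normally
    // with prepareModel_1_2().
}
```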
@@ -153,4 +172,84 @@
     prepareModel_1_2(Model model, ExecutionPreference preference,
                      IPreparedModelCallback callback)
           generates (ErrorStatus status);
+
+    /**
+     * Creates a prepared model from cache files for execution.
+     *
+     * prepareModelFromCache is used to retrieve a prepared model directly from
+     * cache files to avoid slow model compilation time. There are exactly two
+     * cache file descriptors provided to the driver: modelCache and dataCache.
+     *
+     * The dataCache is for caching constant data, possibly including preprocessed
+     * and transformed tensor buffers. Any modification to the dataCache should
+     * have no worse effect than generating bad output values at execution time.
+     *
+     * The modelCache is for caching security-sensitive data such as compiled
+     * executable machine code in the device's native binary format. A modification
+     * to the modelCache may affect the driver's execution behavior, and a malicious
+     * client could make use of this to execute beyond the granted permission. Thus,
+     * the driver must always check whether the modelCache is corrupted before preparing
+     * the model from cache.
+     *
+     * The two file descriptors may be closed by the client once the asynchronous
+     * preparation has finished. The driver must copy all of the data it needs.
+     *
+     * The model is prepared asynchronously with respect to the caller. The
+     * prepareModelFromCache function must verify the inputs to the
+     * prepareModelFromCache function are correct, and that the security-sensitive
+     * cache has not been modified since it was last written by the driver.
+     * If there is an error, or if compilation caching is not supported, or if the
+     * security-sensitive cache has been modified, prepareModelFromCache must
+     * immediately invoke the callback with the appropriate ErrorStatus value and
+     * nullptr for the IPreparedModel, then return with the same ErrorStatus. If
+     * the inputs to the prepareModelFromCache function are valid, the security-sensitive
+     * cache is not modified, and there is no error, prepareModelFromCache must launch an
+     * asynchronous task to prepare the model in the background, and immediately return
+     * from prepareModelFromCache with ErrorStatus::NONE. If the asynchronous task
+     * fails to launch, prepareModelFromCache must immediately invoke the callback
+     * with ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then
+     * return with ErrorStatus::GENERAL_FAILURE.
+     *
+     * When the asynchronous task has finished preparing the model, it must
+     * immediately invoke the callback function provided as an input to
+     * prepareModelFromCache. If the model was prepared successfully, the
+     * callback object must be invoked with an error status of ErrorStatus::NONE
+     * and the produced IPreparedModel object. If an error occurred preparing
+     * the model, the callback object must be invoked with the appropriate
+     * ErrorStatus value and nullptr for the IPreparedModel.
+     *
+     * The only information that may be unknown to the model at this stage is
+     * the shape of the tensors, which may only be known at execution time. As
+     * such, some driver services may return partially prepared models, where
+     * the prepared model may only be finished when it is paired with a set of
+     * inputs to the model. Note that the same prepared model object may be
+     * used with different shapes of inputs on different (possibly concurrent)
+     * executions.
+     *
+     * @param modelCache A handle holding exactly one cache file descriptor for the
+     *     security-sensitive cache.
+     * @param dataCache A handle holding exactly one cache file descriptor for the
+     *     constants' cache.
+     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+     *     identifying the prepared model. It is the same token provided when saving
+     *     the cache files with IPreparedModel::saveToCache. Tokens should be chosen
+     *     to have a low rate of collision for a particular application. The driver
+     *     cannot detect a collision; a collision will result in a failed execution
+     *     or in a successful execution that produces incorrect output values.
+     * @param callback A callback object used to return the error status of
+     *     preparing the model for execution and the prepared model if
+     *     successful, nullptr otherwise. The callback object's notify function
+     *     must be called exactly once, even if the model could not be prepared.
+     * @return status Error status of launching a task which prepares the model
+     *     in the background; must be:
+     *     - NONE if preparation task is successfully launched
+     *     - DEVICE_UNAVAILABLE if driver is offline or busy
+     *     - GENERAL_FAILURE if caching is not supported or if there is an
+     *       unspecified error
+     *     - INVALID_ARGUMENT if one of the input arguments is invalid
+     */
+    prepareModelFromCache(handle modelCache, handle dataCache,
+                          uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
+                          IPreparedModelCallback callback)
+            generates (ErrorStatus status);
 };
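Putting the contract together, a hedged client-side sketch of preparing from cache; the file paths, the saved token, and the PreparedModelCallback helper (from the VTS Callbacks.h seen earlier) are assumptions:

```cpp
// Client-side sketch; paths and `savedToken` are assumptions carried over
// from an earlier saveToCache() call that used the same token.
#include <fcntl.h>

native_handle_t* modelNh = native_handle_create(/*numFds=*/1, /*numInts=*/0);
modelNh->data[0] = open("/data/nn-cache/model.bin", O_RDONLY);
hidl_handle modelCache;
modelCache.setTo(modelNh, /*shouldOwn=*/true);
hidl_handle dataCache;  // wrapped the same way around the data-cache fd

// 32 bytes, per Constant::BYTE_SIZE_OF_CACHE_TOKEN in types.hal below.
hidl_array<uint8_t, 32> token = savedToken;

sp<PreparedModelCallback> callback = new PreparedModelCallback();
Return<ErrorStatus> launch =
        device->prepareModelFromCache(modelCache, dataCache, token, callback);
if (launch.isOk() && static_cast<ErrorStatus>(launch) == ErrorStatus::NONE) {
    callback->wait();  // the prepared model (or error) arrives asynchronously
    sp<IPreparedModel> preparedModel = callback->getPreparedModel();
}
```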
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index 5d2d80f..757d5f1 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -157,4 +157,62 @@
                             fmq_sync<FmqRequestDatum> requestChannel,
                             fmq_sync<FmqResultDatum> resultChannel)
                  generates (ErrorStatus status, IBurstContext context);
+
+    /**
+     * Saves the prepared model to cache files.
+     *
+     * saveToCache is used to save a prepared model to cache files for faster
+     * model compilation time when the same model preparation is requested in
+     * the future. There are exactly two cache file descriptors provided to the
+     * driver: modelCache and dataCache.
+     *
+     * The dataCache is for caching constant data, possibly including preprocessed
+     * and transformed tensor buffers. Any modification to the dataCache should
+     * have no worse effect than generating bad output values at execution time.
+     *
+     * The modelCache is for caching security-sensitive data such as compiled
+     * executable machine code in the device's native binary format. A modification
+     * to the modelCache may affect the driver's execution behavior, and a malicious
+     * client could make use of this to execute beyond the granted permission. Thus,
+     * the driver must always check whether the modelCache is corrupted before preparing
+     * the model from cache.
+     *
+     * The two file descriptors must point to two zero-length files with the offset
+     * positioned at the beginning of each file. The file descriptors may be closed
+     * by the client once the method has returned.
+     *
+     * If the driver decides not to save the prepared model without looking at the
+     * input arguments to the saveToCache function, saveToCache must return with
+     * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify
+     * the input arguments to the saveToCache function are valid, and return with
+     * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver
+     * could not save the prepared model, saveToCache must return with the appropriate
+     * ErrorStatus. Otherwise, it must write the cache files and return
+     * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents
+     * of the cache files are undefined.
+     *
+     * @param modelCache A handle holding exactly one cache file descriptor for the
+     *                   security-sensitive cache.
+     * @param dataCache A handle holding exactly one cache file descriptor for the
+     *                  constants' cache.
+     * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+     *              identifying the prepared model. The same token will be provided
+     *              when retrieving the prepared model from cache files with
+     *              IDevice::prepareModelFromCache. Tokens should be chosen to have
+     *              a low rate of collision for a particular application. The driver
+     *              cannot detect a collision; a collision will result in a failed
+     *              execution or in a successful execution that produces incorrect
+     *              output values.
+     * @return status Error status of saveToCache, must be:
+     *                - NONE if saveToCache is performed successfully
+     *                - DEVICE_UNAVAILABLE if driver is offline or busy
+     *                - GENERAL_FAILURE if the driver could not save the
+     *                  prepared model or if there is an unspecified error
+     *                - INVALID_ARGUMENT if one of the input arguments is invalid,
+     *                  unless the driver decides not to save the prepared model
+     *                  without looking at the input arguments
+     */
+    saveToCache(handle modelCache, handle dataCache,
+                uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token)
+        generates (ErrorStatus status);
 };
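And the corresponding save side, honoring the zero-length-file requirement above; the paths and token helper are illustrative:

```cpp
// Save-side sketch; paths and makeAppToken() are illustrative assumptions.
#include <fcntl.h>
#include <unistd.h>

int modelFd = open("/data/nn-cache/model.bin", O_CREAT | O_TRUNC | O_RDWR, 0600);
int dataFd  = open("/data/nn-cache/data.bin",  O_CREAT | O_TRUNC | O_RDWR, 0600);
// Wrap each fd in a one-fd hidl_handle as in the prepareModelFromCache
// sketch; the descriptors may be closed once saveToCache() returns.

hidl_array<uint8_t, 32> token = makeAppToken();  // hypothetical low-collision token

Return<ErrorStatus> status = preparedModel->saveToCache(modelCache, dataCache, token);
if (!status.isOk() || static_cast<ErrorStatus>(status) != ErrorStatus::NONE) {
    // Per the spec above, the cache file contents are now undefined.
    unlink("/data/nn-cache/model.bin");
    unlink("/data/nn-cache/data.bin");
}
```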
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index bd8354f..0abe56d 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -25,6 +25,13 @@
 
 import android.hidl.safe_union@1.0::Monostate;
 
+enum Constant : uint32_t {
+    /**
+     * The byte size of the cache token.
+     */
+    BYTE_SIZE_OF_CACHE_TOKEN = 32,
+};
+
 enum OperandType : @1.0::OperandType {
     /**
      * An 8 bit boolean scalar value.
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 00a7c3e..d411da4 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -19,6 +19,7 @@
 #include "VtsHalNeuralnetworks.h"
 
 #include "Callbacks.h"
+#include "ExecutionBurstController.h"
 #include "TestHarness.h"
 #include "Utils.h"
 
@@ -112,6 +113,7 @@
     };
     MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
 
+    // asynchronous
     {
         SCOPED_TRACE(message + " [execute_1_2]");
 
@@ -131,6 +133,7 @@
         ASSERT_TRUE(badTiming(timing));
     }
 
+    // synchronous
     {
         SCOPED_TRACE(message + " [executeSynchronously]");
 
@@ -144,6 +147,43 @@
                 });
         ASSERT_TRUE(executeStatus.isOk());
     }
+
+    // burst
+    {
+        SCOPED_TRACE(message + " [burst]");
+
+        // create burst
+        std::unique_ptr<::android::nn::ExecutionBurstController> burst =
+                ::android::nn::createExecutionBurstController(preparedModel, /*blocking=*/true);
+        ASSERT_NE(nullptr, burst.get());
+
+        // create memory keys
+        std::vector<intptr_t> keys(request.pools.size());
+        for (size_t i = 0; i < keys.size(); ++i) {
+            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+        }
+
+        // execute and verify
+        ErrorStatus error;
+        std::vector<OutputShape> outputShapes;
+        Timing timing;
+        std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
+        EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+        EXPECT_EQ(outputShapes.size(), 0u);
+        EXPECT_TRUE(badTiming(timing));
+
+        // additional burst testing
+        if (request.pools.size() > 0) {
+            // valid free
+            burst->freeMemory(keys.front());
+
+            // negative test: invalid free of unknown (blank) memory
+            burst->freeMemory(intptr_t{});
+
+            // negative test: double free of memory
+            burst->freeMemory(keys.front());
+        }
+    }
 }
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,