Merge "Update @2.0::IGnss.hal setCallback() method documentation" into qt-dev
diff --git a/current.txt b/current.txt
index 01053a0..64fd4ae 100644
--- a/current.txt
+++ b/current.txt
@@ -393,15 +393,17 @@
 23780340c686ee86986aa5a9755c2d8566224fed177bbb22a5ebf06be574b60c android.hardware.camera.metadata@3.3::types
 05d1ee760d81cdd2dc7a70ce0241af9fa830edae33b4be83d9bf5fffe05ddc6f android.hardware.camera.provider@2.4::ICameraProvider
 da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
+ede69710c3a95c2cbe818e6c8bb72c7816823face5fc21c17731b26f41d94d65 android.hardware.gnss@1.0::IGnss
 21165b8e30c4b2d52980e4728f661420adc16e38bbe73476c06b2085be908f4c android.hardware.gnss@1.0::IGnssCallback
 d702fb01dc2a0733aa820b7eb65435ee3334f75632ef880bafd2fb8803a20a58 android.hardware.gnss@1.0::IGnssMeasurementCallback
+b5f1f4c1bd6de71a8e71d70f57cdab904ac024a12f3dee3e2173770a4583bcc2 android.hardware.gnss@1.1::IGnss
 7c7721c0f773fcf422b71a4f558545e9e36acc973e58ca51e5bd53905cf46bc0 android.hardware.graphics.bufferqueue@1.0::IGraphicBufferProducer
 d4fea995378bb4f421b4e24ccf68cad2734ab07fe4f874a126ba558b99df5766 android.hardware.graphics.composer@2.1::IComposerClient
 f7d7cb747dc01a9fdb2d39a80003b4d8df9be733d65f5842198802eb6209db69 android.hardware.graphics.mapper@2.0::IMapper
 65a021fa89085b62fc96b2b6d3bef2f9103cf4d63379c68bc154fd9eef672852 android.hardware.health@1.0::types
 b7ecf29927055ec422ec44bf776223f07d79ad9f92ccf9becf167e62c2607e7a android.hardware.keymaster@4.0::IKeymasterDevice
 574e8f1499436fb4075894dcae0b36682427956ecb114f17f1fe22d116a83c6b android.hardware.neuralnetworks@1.0::IPreparedModel
-e75759b40a1c5f97b463b30aab91954012c9ea9e454dde308db853a56796e5a6 android.hardware.neuralnetworks@1.0::types
+1e3576c07006d82ba5bc6ddbf87c101414d361c41afe7a82713750844c488725 android.hardware.neuralnetworks@1.0::types
 eb754b58c93e5591613208b4c972811288b0fa16a82430d602f107c91a908b22 android.hardware.neuralnetworks@1.1::types
 1d4a5776614c08b5d794a5ec5ab04697260cbd4b3441d5935cd53ee71d19da02 android.hardware.radio@1.0::IRadioResponse
 ed9da80ec0c96991fd03f0a46107815d0e50f764656e49dba4980fa5c31d5bc3 android.hardware.radio@1.0::types
@@ -459,9 +461,9 @@
 5b1f4a4fb88c239e07d76026467a1f2ee0d08f4d52c1805bd93bd7c05e3fe69c android.hardware.drm@1.2::ICryptoFactory
 4895f98e9ef210e9acb01982f5d07b654538377e1404b8db5e19e7858835e9d8 android.hardware.drm@1.2::ICryptoPlugin
 976116b9033b2c222b940109fdf0ffcc29b77cbe631ef6b4fcc2ad5ce8e605f7 android.hardware.drm@1.2::IDrmFactory
-b2efccc6425085f84795a2ca15a09d9a81ffd02f9dc3d30ba21d1a59bdfa253f android.hardware.drm@1.2::IDrmPlugin
-39ca9e88404b6c090f7650455a7ed3fdee9cce4e3a356c9d547f8ff02f2e7fc8 android.hardware.drm@1.2::IDrmPluginListener
-f27baaa587bc3dd9b740cb6928ab812b9b7d105b5187663938aee578105f3c39 android.hardware.drm@1.2::types
+8ef1caf921c3e83a00180f770e3b8e8ff65d8a5c806482e51aa45e6d55f1aec1 android.hardware.drm@1.2::IDrmPlugin
+b778fcce93eb6294446a940e1bae0200da7bd97b91b91977be2dcd31ca58374f android.hardware.drm@1.2::IDrmPluginListener
+564732cbfe5c0895cfbd2bdf84c3f2b0f760ea20f2237c0d388aaeeaef2dd0a9 android.hardware.drm@1.2::types
 44480c912e4ab90b9ed17e56569cd5ca98413a8a2372efb028f4181204b6b73e android.hardware.fastboot@1.0::IFastboot
 7b2989744e3c555292d4b5b829acd09a7b40f96ead62ce54174cd959503b64bb android.hardware.fastboot@1.0::types
 7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss
@@ -515,7 +517,7 @@
 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
 36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
 e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-e3b6176e3bf235c4e0e4e451b0166e396c7ee176cfe167c9147c3d46d7b34f0c android.hardware.neuralnetworks@1.2::types
+d18bba0b6c8d2d1da3cfb52b14f556d2e04eb91551d97ee60a3524cf993a3e0e android.hardware.neuralnetworks@1.2::types
 cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
 abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/drm/1.2/IDrmPlugin.hal b/drm/1.2/IDrmPlugin.hal
index 7d266f4..df09ccf 100644
--- a/drm/1.2/IDrmPlugin.hal
+++ b/drm/1.2/IDrmPlugin.hal
@@ -226,4 +226,22 @@
      * @param sessionId identifies the session the event originated from
      */
     sendSessionLostState(SessionId sessionId);
+
+    /**
+     * Send a keys change event to the listener. The keys change event
+     * indicates the status of each key in the session. Keys can be
+     * indicated as being USABLE, EXPIRED, OUTPUTNOTALLOWED, or STATUSPENDING.
+     *
+     * This method differs from the @1.0 version only by the addition of
+     * new KeyStatusType values in keyStatusList.
+     *
+     * @param sessionId identifies the session the event originated from
+     * @param keyStatusList indicates the status for each key ID in the
+     * session.
+     * @param hasNewUsableKey indicates if the event includes at least one
+     * key that has become usable.
+     */
+    sendKeysChange_1_2(SessionId sessionId, vec<KeyStatus> keyStatusList,
+            bool hasNewUsableKey);
+
 };
diff --git a/drm/1.2/IDrmPluginListener.hal b/drm/1.2/IDrmPluginListener.hal
index a6bd6c9..e8cb91a 100644
--- a/drm/1.2/IDrmPluginListener.hal
+++ b/drm/1.2/IDrmPluginListener.hal
@@ -36,4 +36,22 @@
      * @param sessionId identifies the session that has been invalidated
      */
      oneway sendSessionLostState(SessionId sessionId);
+
+    /**
+     * Send a keys change event to the listener. The keys change event
+     * indicates the status of each key in the session. Keys can be
+     * indicated as being USABLE, EXPIRED, OUTPUTNOTALLOWED, or STATUSPENDING.
+     *
+     * This method differs from the @1.0 version only by the addition of
+     * new KeyStatusType values in keyStatusList.
+     *
+     * @param sessionId identifies the session the event originated from
+     * @param keyStatusList indicates the status for each key ID in the
+     * session.
+     * @param hasNewUsableKey indicates if the event includes at least one
+     * key that has become usable.
+     */
+    oneway sendKeysChange_1_2(SessionId sessionId, vec<KeyStatus> keyStatusList,
+            bool hasNewUsableKey);
+
 };
diff --git a/drm/1.2/types.hal b/drm/1.2/types.hal
index 28c8e67..87218a4 100644
--- a/drm/1.2/types.hal
+++ b/drm/1.2/types.hal
@@ -16,6 +16,7 @@
 
 package android.hardware.drm@1.2;
 
+import @1.0::KeyStatusType;
 import @1.0::Status;
 import @1.1::HdcpLevel;
 
@@ -93,3 +94,25 @@
  * set in methods that take a KeySetId as an input parameter.
  */
 typedef vec<uint8_t> KeySetId;
+
+enum KeyStatusType : @1.0::KeyStatusType {
+    /**
+     * The key is not yet usable to decrypt media because the start
+     * time is in the future. The key must become usable when
+     * its start time is reached.
+     */
+    USABLEINFUTURE
+};
+
+/**
+ * Used by sendKeysChange_1_2 to report the usability status of each key
+ * to the app.
+ *
+ * This struct differs from the @1.0 version only by the addition of new
+ * KeyStatusType values.
+ */
+struct KeyStatus {
+    KeySetId keyId;
+    KeyStatusType type;
+};
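
A minimal sketch, not part of the patch itself, of how a vendor IDrmPlugin implementation built against these @1.2 types might report the new USABLEINFUTURE status through the listener. The notifyKeysChange helper, the listenerV1_2 handle, and the key IDs are assumptions for illustration only.

```cpp
#include <android/hardware/drm/1.2/IDrmPluginListener.h>
#include <utils/StrongPointer.h>

using ::android::sp;
using ::android::hardware::hidl_vec;
using ::android::hardware::drm::V1_2::IDrmPluginListener;
using ::android::hardware::drm::V1_2::KeyStatus;
using ::android::hardware::drm::V1_2::KeyStatusType;

// Hypothetical helper: forward a keys-change event via the @1.2 listener.
void notifyKeysChange(const sp<IDrmPluginListener>& listenerV1_2,
                      const hidl_vec<uint8_t>& sessionId) {
    // One KeyStatus entry per key ID; USABLEINFUTURE is new in @1.2.
    const hidl_vec<KeyStatus> keyStatusList = {
            {{0x01}, KeyStatusType::USABLE},
            {{0x02}, KeyStatusType::USABLEINFUTURE},
    };
    // hasNewUsableKey is true because key 0x01 has become usable.
    listenerV1_2->sendKeysChange_1_2(sessionId, keyStatusList, true);
}
```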
diff --git a/drm/1.2/vts/functional/drm_hal_common.cpp b/drm/1.2/vts/functional/drm_hal_common.cpp
index b9a8425..bfffbe8 100644
--- a/drm/1.2/vts/functional/drm_hal_common.cpp
+++ b/drm/1.2/vts/functional/drm_hal_common.cpp
@@ -56,6 +56,7 @@
 namespace vts {
 
 const char *kCallbackLostState = "LostState";
+const char *kCallbackKeysChange = "KeysChange";
 
 drm_vts::VendorModules *DrmHalTest::gVendorModules = nullptr;
 
@@ -64,7 +65,19 @@
  */
 
 Return<void> DrmHalPluginListener::sendSessionLostState(const hidl_vec<uint8_t>& sessionId) {
-    NotifyFromCallback(kCallbackLostState, sessionId);
+    ListenerEventArgs args;
+    args.sessionId = sessionId;
+    NotifyFromCallback(kCallbackLostState, args);
+    return Void();
+}
+
+Return<void> DrmHalPluginListener::sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
+        const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey) {
+    ListenerEventArgs args;
+    args.sessionId = sessionId;
+    args.keyStatusList = keyStatusList;
+    args.hasNewUsableKey = hasNewUsableKey;
+    NotifyFromCallback(kCallbackKeysChange, args);
     return Void();
 }
 
diff --git a/drm/1.2/vts/functional/drm_hal_common.h b/drm/1.2/vts/functional/drm_hal_common.h
index 1b95dde..e348664 100644
--- a/drm/1.2/vts/functional/drm_hal_common.h
+++ b/drm/1.2/vts/functional/drm_hal_common.h
@@ -37,7 +37,7 @@
 
 using ::android::hardware::drm::V1_0::EventType;
 using ::android::hardware::drm::V1_0::KeyedVector;
-using ::android::hardware::drm::V1_0::KeyStatus;
+using KeyStatusV1_0 = ::android::hardware::drm::V1_0::KeyStatus;
 using ::android::hardware::drm::V1_0::KeyType;
 using ::android::hardware::drm::V1_0::Mode;
 using ::android::hardware::drm::V1_0::Pattern;
@@ -46,10 +46,6 @@
 
 using ::android::hardware::drm::V1_1::ICryptoFactory;
 
-using ::android::hardware::drm::V1_2::ICryptoPlugin;
-using ::android::hardware::drm::V1_2::IDrmFactory;
-using ::android::hardware::drm::V1_2::IDrmPlugin;
-using ::android::hardware::drm::V1_2::IDrmPluginListener;
 using StatusV1_2 = ::android::hardware::drm::V1_2::Status;
 
 using ::android::hardware::hidl_array;
@@ -166,9 +162,16 @@
  *  Event Handling tests
  */
 extern const char *kCallbackLostState;
+extern const char *kCallbackKeysChange;
+
+struct ListenerEventArgs {
+    SessionId sessionId;
+    hidl_vec<KeyStatus> keyStatusList;
+    bool hasNewUsableKey;
+};
 
 class DrmHalPluginListener
-    : public ::testing::VtsHalHidlTargetCallbackBase<SessionId>,
+    : public ::testing::VtsHalHidlTargetCallbackBase<ListenerEventArgs>,
       public IDrmPluginListener {
 public:
     DrmHalPluginListener() {
@@ -183,10 +186,13 @@
             int64_t) override { return Void(); }
 
     virtual Return<void> sendKeysChange(const hidl_vec<uint8_t>&,
-            const hidl_vec<KeyStatus>&, bool) override { return Void(); }
+            const hidl_vec<KeyStatusV1_0>&, bool) override { return Void(); }
 
     virtual Return<void> sendSessionLostState(const hidl_vec<uint8_t>& sessionId) override;
 
+    virtual Return<void> sendKeysChange_1_2(const hidl_vec<uint8_t>&,
+            const hidl_vec<KeyStatus>&, bool) override;
+
 };
 
 }  // namespace vts
diff --git a/drm/1.2/vts/functional/drm_hal_test.cpp b/drm/1.2/vts/functional/drm_hal_test.cpp
index 252ebb9..37ecc25 100644
--- a/drm/1.2/vts/functional/drm_hal_test.cpp
+++ b/drm/1.2/vts/functional/drm_hal_test.cpp
@@ -28,6 +28,8 @@
 using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::drm::V1_2::HdcpLevel;
 using ::android::hardware::drm::V1_2::KeySetId;
+using ::android::hardware::drm::V1_2::KeyStatus;
+using ::android::hardware::drm::V1_2::KeyStatusType;
 using ::android::hardware::drm::V1_2::OfflineLicenseState;
 
 using ::android::hardware::drm::V1_2::vts::DrmHalClearkeyTest;
@@ -35,6 +37,7 @@
 using ::android::hardware::drm::V1_2::vts::DrmHalTest;
 using ::android::hardware::drm::V1_2::vts::DrmHidlEnvironment;
 using ::android::hardware::drm::V1_2::vts::kCallbackLostState;
+using ::android::hardware::drm::V1_2::vts::kCallbackKeysChange;
 
 using ::android::hardware::hidl_string;
 
@@ -275,6 +278,35 @@
 }
 
 /**
+ * Simulate the plugin sending keys change and make sure
+ * the listener gets them.
+ */
+TEST_P(DrmHalTest, ListenerKeysChange) {
+    RETURN_IF_SKIPPED;
+    sp<DrmHalPluginListener> listener = new DrmHalPluginListener();
+    auto res = drmPlugin->setListener(listener);
+    EXPECT_OK(res);
+
+    auto sessionId = openSession();
+    const hidl_vec<KeyStatus> keyStatusList = {
+        {{1}, KeyStatusType::USABLE},
+        {{2}, KeyStatusType::EXPIRED},
+        {{3}, KeyStatusType::OUTPUTNOTALLOWED},
+        {{4}, KeyStatusType::STATUSPENDING},
+        {{5}, KeyStatusType::INTERNALERROR},
+        {{6}, KeyStatusType::USABLEINFUTURE},
+    };
+
+    drmPlugin->sendKeysChange_1_2(sessionId, keyStatusList, true);
+    auto result = listener->WaitForCallback(kCallbackKeysChange);
+    EXPECT_TRUE(result.no_timeout);
+    EXPECT_TRUE(result.args);
+    EXPECT_EQ(sessionId, result.args->sessionId);
+    EXPECT_EQ(keyStatusList, result.args->keyStatusList);
+    closeSession(sessionId);
+}
+
+/**
  *  CryptoPlugin Decrypt tests
  */
 
@@ -452,7 +484,7 @@
     auto result = listener->WaitForCallback(kCallbackLostState);
     EXPECT_TRUE(result.no_timeout);
     EXPECT_TRUE(result.args);
-    EXPECT_EQ(sessionId, *(result.args));
+    EXPECT_EQ(sessionId, result.args->sessionId);
 }
 
 /**
diff --git a/gnss/1.0/IGnss.hal b/gnss/1.0/IGnss.hal
index 602c615..d32bc63 100644
--- a/gnss/1.0/IGnss.hal
+++ b/gnss/1.0/IGnss.hal
@@ -75,8 +75,13 @@
     };
 
     /**
-     * Opens the interface and provides the callback routines
-     * to the implementation of this interface.
+     * Opens the interface and provides the callback routines to the implementation of this
+     * interface.
+     *
+     * The framework calls this method to instruct the GNSS engine to prepare for serving
+     * requests from the framework. The GNSS HAL implementation must respond to all GNSS
+     * requests from the framework upon successful return from this method until the
+     * cleanup() method is called to close this interface.
      *
      * @param callback Callback interface for IGnss.
      *
@@ -105,6 +110,18 @@
 
     /**
      * Closes the interface.
+     *
+     * The framework calls the cleanup() method to tell the GNSS HAL implementation not to
+     * expect any GNSS requests in the immediate future, e.g. because location has been
+     * disabled by a user setting or due to low battery conditions. The GNSS HAL
+     * implementation must immediately stop responding to any existing requests until the
+     * setCallback() method is called again and the requests are re-initiated by the framework.
+     *
+     * After this method is called, the GNSS HAL implementation may choose to modify GNSS
+     * hardware states to save power. When the setCallback() method is called again to reopen
+     * this interface and serve requests, some minor delays in GNSS responses are expected
+     * while hardware readiness states are restored, but they must not exceed those that
+     * occur on a normal device boot.
      */
     cleanup();
 
@@ -153,7 +170,7 @@
      * @param mode  Parameter must be one of MS_BASED or STANDALONE.
      * It is allowed by the platform (and it is recommended) to fallback to
      * MS_BASED if MS_ASSISTED is passed in, and MS_BASED is supported.
-     * @recurrence GNSS postion recurrence value, either periodic or single.
+     * @recurrence GNSS position recurrence value, either periodic or single.
      * @param minIntervalMs Represents the time between fixes in milliseconds.
      * @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
      * @param preferredTimeMs Represents the requested time to first fix in milliseconds.
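
A minimal sketch of the setCallback()/cleanup() lifecycle the new documentation describes, from the framework side. The gnssLifecycle function and the start/stop call pattern around it are assumptions for illustration; only setCallback(), start(), stop(), and cleanup() come from the IGnss interface itself.

```cpp
#include <android/hardware/gnss/1.0/IGnss.h>

using ::android::sp;
using ::android::hardware::gnss::V1_0::IGnss;
using ::android::hardware::gnss::V1_0::IGnssCallback;

void gnssLifecycle(const sp<IGnssCallback>& callback) {
    sp<IGnss> gnssHal = IGnss::getService();
    if (gnssHal == nullptr) return;

    // Open the interface; the HAL must now serve all GNSS requests.
    gnssHal->setCallback(callback);
    gnssHal->start();  // e.g. begin periodic position reports

    // Location disabled by the user: the HAL stops serving requests and
    // may move GNSS hardware to a low-power state.
    gnssHal->stop();
    gnssHal->cleanup();

    // Location re-enabled: reopen the interface. Minor response delays are
    // allowed while hardware readiness is restored, but no worse than on a
    // normal device boot.
    gnssHal->setCallback(callback);
    gnssHal->start();
}
```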
diff --git a/gnss/1.1/IGnss.hal b/gnss/1.1/IGnss.hal
index 672f742..3400807 100644
--- a/gnss/1.1/IGnss.hal
+++ b/gnss/1.1/IGnss.hal
@@ -29,6 +29,11 @@
      * Opens the interface and provides the callback routines to the implementation of this
      * interface.
      *
+     * The framework calls this method to instruct the GNSS engine to prepare for serving
+     * requests from the framework. The GNSS HAL implementation must respond to all GNSS
+     * requests from the framework upon successful return from this method until the
+     * cleanup() method is called to close this interface.
+     *
      * @param callback Callback interface for IGnss.
      *
      * @return success Returns true on success.
@@ -42,7 +47,7 @@
      * @param mode Parameter must be one of MS_BASED or STANDALONE. It is allowed by the platform
      *     (and it is recommended) to fallback to MS_BASED if MS_ASSISTED is passed in, and MS_BASED
      *     is supported.
-     * @param recurrence GNSS postion recurrence value, either periodic or single.
+     * @param recurrence GNSS position recurrence value, either periodic or single.
      * @param minIntervalMs Represents the time between fixes in milliseconds.
      * @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
      * @param preferredTimeMs Represents the requested time to first fix in milliseconds.
diff --git a/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp b/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
index 4018aea..3c408b7 100644
--- a/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
+++ b/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
@@ -850,10 +850,37 @@
     ASSERT_NO_FATAL_FAILURE(layer =
                                 mComposerClient->createLayer(mPrimaryDisplay, kBufferSlotCount));
 
+    auto handle = allocate();
+    ASSERT_NE(nullptr, handle);
+    IComposerClient::Rect displayFrame{0, 0, mDisplayWidth, mDisplayHeight};
+
     mWriter->selectDisplay(mPrimaryDisplay);
     mWriter->selectLayer(layer);
+    mWriter->setLayerBuffer(0, handle, -1);
+    mWriter->setLayerCompositionType(IComposerClient::Composition::CURSOR);
+    mWriter->setLayerDisplayFrame(displayFrame);
+    mWriter->setLayerPlaneAlpha(1);
+    mWriter->setLayerSourceCrop({0, 0, (float)mDisplayWidth, (float)mDisplayHeight});
+    mWriter->setLayerTransform(static_cast<Transform>(0));
+    mWriter->setLayerVisibleRegion(std::vector<IComposerClient::Rect>(1, displayFrame));
+    mWriter->setLayerZOrder(10);
+    mWriter->setLayerBlendMode(IComposerClient::BlendMode::NONE);
+    mWriter->setLayerSurfaceDamage(std::vector<IComposerClient::Rect>(1, displayFrame));
+    mWriter->setLayerDataspace(Dataspace::UNKNOWN);
+    mWriter->validateDisplay();
+
+    execute();
+    if (mReader->mCompositionChanges.size() != 0) {
+        GTEST_SUCCEED() << "Composition change requested, skipping test";
+        return;
+    }
+    mWriter->presentDisplay();
+    ASSERT_EQ(0, mReader->mErrors.size());
+
     mWriter->setLayerCursorPosition(1, 1);
     mWriter->setLayerCursorPosition(0, 0);
+    mWriter->validateDisplay();
+    mWriter->presentDisplay();
     execute();
 }
 
diff --git a/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp b/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
index 7834b94..9c80f4d 100644
--- a/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
+++ b/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
@@ -246,7 +246,19 @@
  * Test IComposerClient::getPerFrameMetadataKeys.
  */
 TEST_F(GraphicsComposerHidlTest, GetPerFrameMetadataKeys) {
-    mComposerClient->getPerFrameMetadataKeys(mPrimaryDisplay);
+    std::vector<IComposerClient::PerFrameMetadataKey> keys;
+    Error error = Error::NONE;
+    mComposerClient->getRaw()->getPerFrameMetadataKeys(
+            mPrimaryDisplay, [&](const auto& tmpError, const auto& tmpKeys) {
+                error = tmpError;
+                keys = tmpKeys;
+            });
+    if (error == Error::UNSUPPORTED) {
+        GTEST_SUCCEED() << "getPerFrameMetadataKeys is not supported";
+        return;
+    }
+    ASSERT_EQ(Error::NONE, error);
+    ASSERT_TRUE(keys.size() >= 0);
 }
 
 /**
diff --git a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
index 3792c2e..b289b6a 100644
--- a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
+++ b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
@@ -95,7 +95,7 @@
 
     Return<void> getDisplayCapabilities(
         Display display, IComposerClient::getDisplayCapabilities_cb hidl_cb) override {
-        hidl_vec<IComposerClient::DisplayCapability> capabilities;
+        std::vector<IComposerClient::DisplayCapability> capabilities;
         Error error = mHal->getDisplayCapabilities(display, &capabilities);
         hidl_cb(error, capabilities);
         return Void();
diff --git a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
index 186b004..c3c4887 100644
--- a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
+++ b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
@@ -115,7 +115,7 @@
                                             hidl_vec<uint64_t>& sampleComponent2,
                                             hidl_vec<uint64_t>& sampleComponent3) = 0;
     virtual Error getDisplayCapabilities(
-        Display display, hidl_vec<IComposerClient::DisplayCapability>* outCapabilities) = 0;
+            Display display, std::vector<IComposerClient::DisplayCapability>* outCapabilities) = 0;
     virtual Error setLayerPerFrameMetadataBlobs(
         Display display, Layer layer,
         std::vector<IComposerClient::PerFrameMetadataBlob>& blobs) = 0;
diff --git a/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h b/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
index 4829e24..d3b29bb 100644
--- a/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
+++ b/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
@@ -220,7 +220,8 @@
     }
 
     Error getDisplayCapabilities(
-        Display display, hidl_vec<IComposerClient::DisplayCapability>* outCapabilities) override {
+            Display display,
+            std::vector<IComposerClient::DisplayCapability>* outCapabilities) override {
         uint32_t count = 0;
         int32_t error = mDispatch.getDisplayCapabilities(mDevice, display, &count, nullptr);
         if (error != HWC2_ERROR_NONE) {
@@ -232,7 +233,7 @@
             reinterpret_cast<std::underlying_type<IComposerClient::DisplayCapability>::type*>(
                 outCapabilities->data()));
         if (error != HWC2_ERROR_NONE) {
-            *outCapabilities = hidl_vec<IComposerClient::DisplayCapability>();
+            *outCapabilities = std::vector<IComposerClient::DisplayCapability>();
             return static_cast<Error>(error);
         }
         return Error::NONE;
@@ -267,6 +268,19 @@
 
     Error getDisplayBrightnessSupport(Display display, bool* outSupport) {
         if (!mDispatch.getDisplayBrightnessSupport) {
+            // Preemptively set to false.
+            *outSupport = false;
+            // Try to query from getDisplayCapabilities.
+            std::vector<IComposerClient::DisplayCapability> capabilities;
+            Error error = getDisplayCapabilities(display, &capabilities);
+            if (error != Error::NONE) {
+                // The fallback capability query failed; report UNSUPPORTED.
+                return Error::UNSUPPORTED;
+            }
+            *outSupport =
+                    std::find(capabilities.begin(), capabilities.end(),
+                              IComposerClient::DisplayCapability::BRIGHTNESS) != capabilities.end();
+            // getDisplayBrightnessSupport itself is not registered, so still return
+            // UNSUPPORTED even though *outSupport reflects the capability query.
             return Error::UNSUPPORTED;
         }
         bool support = false;
diff --git a/graphics/composer/2.3/utils/vts/ComposerVts.cpp b/graphics/composer/2.3/utils/vts/ComposerVts.cpp
index b763209..d4f5b3a 100644
--- a/graphics/composer/2.3/utils/vts/ComposerVts.cpp
+++ b/graphics/composer/2.3/utils/vts/ComposerVts.cpp
@@ -192,10 +192,8 @@
 
 bool ComposerClient::getDisplayBrightnessSupport(Display display) {
     bool support = false;
-    mClient->getDisplayBrightnessSupport(display, [&](const auto& error, const auto& tmpSupport) {
-        ASSERT_EQ(Error::NONE, error) << "failed to get brightness support";
-        support = tmpSupport;
-    });
+    mClient->getDisplayBrightnessSupport(
+            display, [&](const auto& /*error*/, const auto& tmpSupport) { support = tmpSupport; });
     return support;
 }
 
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index b0a1c1a..02db063 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -858,20 +858,21 @@
      *   elements of the input matrices.
      *
      * The operation has the following independently optional inputs:
+     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+     *   (\f$W_{cf}\f$), and cell-to-output weights (\f$W_{co}\f$) either all
+     *   have values or none of them have values (i.e., all set to null). If
+     *   they have values, the peephole optimization is used.
      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
-     *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
-     *   bias (\f$b_i\f$) either all have values, or none of them have values
-     *   (i.e., all set to null). If they have no values, coupling of input and
-     *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
-     *   is calculated using the following equation instead.
+     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+     *   or none of them have values. If they have no values, coupling of input
+     *   and forget gates (CIFG) is used, in which case the input gate
+     *   (\f$i_t\f$) is calculated using the following equation instead.
      *   \f{eqnarray*}{
      *   i_t = 1 - f_t
      *   \f}
-     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
-     *   (\f$W_{co}\f$) either both have values or neither of them have values.
-     *   If they have values, the peephole optimization is used. Additionally,
-     *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
-     *   required to have values for peephole optimization.
+     *   If the peephole optimization is used and CIFG is not used, the
+     *   cell-to-input weights (\f$W_{ci}\f$) must be present. Otherwise, the
+     *   cell-to-input weights must have no value.
      * * The projection weights (\f$W_{proj}\f$) is required only for the
      *   recurrent projection layer, and should otherwise have no value.
      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
@@ -984,8 +985,8 @@
      * Outputs:
      * * 0: The scratch buffer.
      *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
-     *      [batch_size, num_units * 4] with CIFG, or
-     *      [batch_size, num_units * 3] without CIFG.
+     *      [batch_size, num_units * 3] with CIFG, or
+     *      [batch_size, num_units * 4] without CIFG.
      * * 1: The output state (out) (\f$h_t\f$).
      *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
      *      [batch_size, output_size].
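
A minimal sketch of the all-or-none rules the revised documentation states, under two assumptions not in the patch: a hasValue(index) helper that reports whether an optional LSTM input is present, and the standard NNAPI LSTM input order (1: W_xi, 5: W_hi, 9: W_ci, 10: W_cf, 11: W_co, 12: b_i).

```cpp
#include <cstdint>
#include <functional>

// Returns true when the optional LSTM inputs are consistent with the spec.
bool validateLstmOptionalInputs(const std::function<bool(uint32_t)>& hasValue) {
    const bool wXi = hasValue(1), wHi = hasValue(5), bI = hasValue(12);
    const bool wCi = hasValue(9), wCf = hasValue(10), wCo = hasValue(11);

    // CIFG inputs (W_xi, W_hi, b_i): all present or all absent.
    if (wXi != wHi || wHi != bI) return false;
    const bool useCifg = !wXi;

    // Peephole weights W_cf and W_co: both present or both absent.
    if (wCf != wCo) return false;
    const bool usePeephole = wCf;

    // W_ci is present exactly when peephole is used without CIFG.
    return wCi == (usePeephole && !useCifg);
}

// Scratch buffer width per the corrected output 0 documentation:
// num_units * 3 with CIFG, num_units * 4 without.
uint32_t scratchWidth(uint32_t numUnits, bool useCifg) {
    return numUnits * (useCifg ? 3u : 4u);
}
```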
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 72a5007..f0c93b7 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -42,53 +41,6 @@
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
-                                sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations call returns a list of operations that are
-    // guaranteed not to fail if prepareModel is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request. Note that the request here is passed by value, and any
@@ -237,15 +189,8 @@
     return requests;
 }
 
-void ValidationTest::validateRequests(const V1_0::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                       const std::vector<Request>& requests) {
-    // create IPreparedModel
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-
     // validate each request
     for (const Request& request : requests) {
         removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 31638c4..aee2f85 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -25,6 +29,55 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -68,9 +121,17 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
     validateModel(model);
-    validateRequests(model, request);
+
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequests(preparedModel, requests);
 }
 
 }  // namespace functional
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 559d678..22285be 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -67,7 +67,8 @@
 
    private:
      void validateModel(const Model& model);
-     void validateRequests(const Model& model, const std::vector<Request>& request);
+     void validateRequests(const sp<IPreparedModel>& preparedModel,
+                           const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index 5225bf7..f4adbab 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -42,54 +41,6 @@
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
-                                sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
-        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations_1_1 call returns a list of operations that are
-    // guaranteed not to fail if prepareModel_1_1 is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request. Note that the request here is passed by value, and any
@@ -238,15 +189,8 @@
     return requests;
 }
 
-void ValidationTest::validateRequests(const V1_1::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                       const std::vector<Request>& requests) {
-    // create IPreparedModel
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-
     // validate each request
     for (const Request& request : requests) {
         removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 11fa693..08069f2 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -25,6 +29,56 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations_1_1 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_1 is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -68,9 +122,17 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
     validateModel(model);
-    validateRequests(model, request);
+
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequests(preparedModel, requests);
 }
 
 }  // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index cea2b54..f3f587b 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -76,7 +76,8 @@
 
    private:
      void validateModel(const Model& model);
-     void validateRequests(const Model& model, const std::vector<Request>& request);
+     void validateRequests(const sp<IPreparedModel>& preparedModel,
+                           const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index c2e8f22..f368ce2 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -1177,20 +1177,21 @@
      * https://arxiv.org/pdf/1607.06450.pdf
      *
      * The operation has the following independently optional inputs:
+     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+     *   (\f$W_{cf}\f$), and cell-to-output weights (\f$W_{co}\f$) either all
+     *   have values or none of them have values (i.e., all set to null). If
+     *   they have values, the peephole optimization is used.
      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
-     *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
-     *   bias (\f$b_i\f$) either all have values, or none of them have values
-     *   (i.e., all set to null). If they have no values, coupling of input and
-     *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
-     *   is calculated using the following equation instead.
+     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+     *   or none of them have values. If they have no values, coupling of input
+     *   and forget gates (CIFG) is used, in which case the input gate
+     *   (\f$i_t\f$) is calculated using the following equation instead.
      *   \f{eqnarray*}{
      *   i_t = 1 - f_t
      *   \f}
-     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
-     *   (\f$W_{co}\f$) either both have values or neither of them have values.
-     *   If they have values, the peephole optimization is used. Additionally,
-     *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
-     *   required to have values for peephole optimization.
+     *   If the peephole optimization is used and CIFG is not used, the
+     *   cell-to-input weights (\f$W_{ci}\f$) must be present. Otherwise, the
+     *   cell-to-input weights must have no value.
      * * The projection weights (\f$W_{proj}\f$) is required only for the
      *   recurrent projection layer, and should otherwise have no value.
      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index bf91560..4411b90 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -45,9 +45,9 @@
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
-namespace {
+namespace float32_model {
 
-// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of mobilenet.
+// Includes generated sources from frameworks/ml/nn/runtime/test/generated/ that
+// create a HIDL model of float32 mobilenet.
 #include "examples/mobilenet_224_gender_basic_fixed.example.cpp"
 #include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp"
 
@@ -55,6 +55,44 @@
 [[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
 [[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
 
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+    float outputValue = 1.0f + static_cast<float>(len);
+    return {{.operands = {
+                     // Input
+                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
+                     // Output
+                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
+}
+
+}  // namespace float32_model
+
+namespace quant8_model {
+
+// Includes generated sources from frameworks/ml/nn/runtime/test/generated/ that
+// create a HIDL model of quant8 mobilenet.
+#include "examples/mobilenet_quantized.example.cpp"
+#include "vts_models/mobilenet_quantized.model.cpp"
+
+// Prevent the compiler from complaining about an otherwise unused function.
+[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
+[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
+
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+    uint8_t outputValue = 1 + static_cast<uint8_t>(len);
+    return {{.operands = {// Input
+                          {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
+                          // Output
+                          {.operandDimensions = {{0, {1}}},
+                           .quant8AsymmOperands = {{0, {outputValue}}}}}}};
+}
+
+}  // namespace quant8_model
+
+namespace {
+
 enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
 
 // Creates cache handles based on provided file groups.
@@ -101,14 +139,18 @@
 //                ↑      ↑      ↑             ↑
 //               [1]    [1]    [1]           [1]
 //
-Model createLargeTestModel(OperationType op, uint32_t len) {
+// This function assumes the operation is either ADD or MUL.
+template <typename CppType, OperandType operandType>
+Model createLargeTestModelImpl(OperationType op, uint32_t len) {
+    EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
+
     // Model operations and operands.
     std::vector<Operation> operations(len);
     std::vector<Operand> operands(len * 2 + 2);
 
     // The constant buffer pool. This contains the activation scalar, followed by the
     // per-operation constant operands.
-    std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(float));
+    std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
 
     // The activation scalar, value = 0.
     operands[0] = {
@@ -122,7 +164,26 @@
     };
     memset(operandValues.data(), 0, sizeof(int32_t));
 
-    const float floatBufferValue = 1.0f;
+    // The buffer value of the constant second operand. The logical value is always 1.0f.
+    CppType bufferValue;
+    // The scale of the first and second operand.
+    float scale1, scale2;
+    if (operandType == OperandType::TENSOR_FLOAT32) {
+        bufferValue = 1.0f;
+        scale1 = 0.0f;
+        scale2 = 0.0f;
+    } else if (op == OperationType::ADD) {
+        bufferValue = 1;
+        scale1 = 1.0f;
+        scale2 = 1.0f;
+    } else {
+        // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale,
+        // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point.
+        bufferValue = 2;
+        scale1 = 1.0f;
+        scale2 = 0.5f;
+    }
+
     for (uint32_t i = 0; i < len; i++) {
         const uint32_t firstInputIndex = i * 2 + 1;
         const uint32_t secondInputIndex = firstInputIndex + 1;
@@ -130,10 +191,10 @@
 
         // The first operation input.
         operands[firstInputIndex] = {
-                .type = OperandType::TENSOR_FLOAT32,
+                .type = operandType,
                 .dimensions = {1},
                 .numberOfConsumers = 1,
-                .scale = 0.0f,
+                .scale = scale1,
                 .zeroPoint = 0,
                 .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
                                     : OperandLifeTime::TEMPORARY_VARIABLE),
@@ -142,18 +203,18 @@
 
         // The second operation input, value = 1.
         operands[secondInputIndex] = {
-                .type = OperandType::TENSOR_FLOAT32,
+                .type = operandType,
                 .dimensions = {1},
                 .numberOfConsumers = 1,
-                .scale = 0.0f,
+                .scale = scale2,
                 .zeroPoint = 0,
                 .lifetime = OperandLifeTime::CONSTANT_COPY,
                 .location = {.poolIndex = 0,
-                             .offset = static_cast<uint32_t>(i * sizeof(float) + sizeof(int32_t)),
-                             .length = sizeof(float)},
+                             .offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
+                             .length = sizeof(CppType)},
         };
-        memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(float), &floatBufferValue,
-               sizeof(float));
+        memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
+               sizeof(CppType));
 
         // The operation. All operations share the same activation scalar.
         // The output operand is created as an input in the next iteration of the loop, in the case
@@ -168,10 +229,10 @@
 
     // The model output.
     operands.back() = {
-            .type = OperandType::TENSOR_FLOAT32,
+            .type = operandType,
             .dimensions = {1},
             .numberOfConsumers = 0,
-            .scale = 0.0f,
+            .scale = scale1,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {},
@@ -191,22 +252,13 @@
     };
 }
 
-// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
-// This function assumes the operation is always ADD.
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
-    float outputValue = 1.0f + static_cast<float>(len);
-    return {{.operands = {
-                     // Input
-                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
-                     // Output
-                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
-};
-
 }  // namespace
 
 // Tag for the compilation caching tests.
-class CompilationCachingTest : public NeuralnetworksHidlTest {
+class CompilationCachingTestBase : public NeuralnetworksHidlTest {
   protected:
+    CompilationCachingTestBase(OperandType type) : kOperandType(type) {}
+
     void SetUp() override {
         NeuralnetworksHidlTest::SetUp();
         ASSERT_NE(device.get(), nullptr);
@@ -263,6 +315,40 @@
         NeuralnetworksHidlTest::TearDown();
     }
 
+    // Model and examples creators. According to kOperandType, the following methods will return
+    // either float32 model/examples or the quant8 variant.
+    Model createTestModel() {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::createTestModel();
+        } else {
+            return quant8_model::createTestModel();
+        }
+    }
+
+    std::vector<MixedTypedExample> get_examples() {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::get_examples();
+        } else {
+            return quant8_model::get_examples();
+        }
+    }
+
+    Model createLargeTestModel(OperationType op, uint32_t len) {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
+        } else {
+            return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
+        }
+    }
+
+    std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::getLargeModelExamples(len);
+        } else {
+            return quant8_model::getLargeModelExamples(len);
+        }
+    }
+
     // See if the service can handle the model.
     bool isModelFullySupported(const V1_2::Model& model) {
         bool fullySupportsModel = false;
@@ -366,9 +452,20 @@
     uint32_t mNumModelCache;
     uint32_t mNumDataCache;
     uint32_t mIsCachingSupported;
+
+    // The primary data type of the testModel.
+    const OperandType kOperandType;
 };
 
-TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
+// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
+// pass running with float32 models and the second pass running with quant8 models.
+class CompilationCachingTest : public CompilationCachingTestBase,
+                               public ::testing::WithParamInterface<OperandType> {
+  protected:
+    CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
+};
+
+TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -409,7 +506,7 @@
                                            /*testDynamicOutputShape=*/false);
 }
 
-TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
+TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -472,7 +569,7 @@
                                            /*testDynamicOutputShape=*/false);
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -584,7 +681,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -664,7 +761,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -776,7 +873,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -856,7 +953,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -914,7 +1011,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
     // Create test HIDL model and compile.
     const Model testModel = createTestModel();
     if (checkEarlyTermination(testModel)) return;
@@ -990,7 +1087,7 @@
 constexpr uint32_t kLargeModelSize = 100;
 constexpr uint32_t kNumIterationsTOCTOU = 100;
 
-TEST_F(CompilationCachingTest, SaveToCache_TOCTOU) {
+TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
     if (!mIsCachingSupported) return;
 
     // Create test models and check if fully supported by the service.
@@ -1053,7 +1150,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareFromCache_TOCTOU) {
+TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
     if (!mIsCachingSupported) return;
 
     // Create test models and check if fully supported by the service.
@@ -1116,7 +1213,7 @@
     }
 }
 
-TEST_F(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
+TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
     if (!mIsCachingSupported) return;
 
     // Create test models and check if fully supported by the service.
@@ -1164,11 +1261,19 @@
     }
 }
 
-class CompilationCachingSecurityTest : public CompilationCachingTest,
-                                       public ::testing::WithParamInterface<uint32_t> {
+static const auto kOperandTypeChoices =
+        ::testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
+
+class CompilationCachingSecurityTest
+    : public CompilationCachingTestBase,
+      public ::testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
   protected:
+    CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
+
     void SetUp() {
-        CompilationCachingTest::SetUp();
+        CompilationCachingTestBase::SetUp();
         generator.seed(kSeed);
     }
 
@@ -1254,7 +1359,7 @@
         }
     }
 
-    const uint32_t kSeed = GetParam();
+    const uint32_t kSeed = std::get<1>(GetParam());
     std::mt19937 generator;
 };
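
The fixture above seeds a std::mt19937 from the second tuple element so that each run's pseudo-random choices are reproducible. A hypothetical, self-contained sketch of that pattern (flipRandomBit is illustrative, not one of this file's helpers):

    #include <cstddef>
    #include <cstdint>
    #include <random>

    // Flip one pseudo-randomly chosen bit of a buffer; a fixed seed makes the
    // corruption deterministic, so a failing run can be replayed exactly.
    void flipRandomBit(uint8_t* data, size_t size, uint32_t seed) {
        std::mt19937 generator(seed);
        std::uniform_int_distribution<size_t> dist(0, size * 8 - 1);
        const size_t bit = dist(generator);
        data[bit / 8] ^= static_cast<uint8_t>(1u << (bit % 8));
    }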
 
@@ -1302,7 +1407,7 @@
 }
 
 INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
-                        ::testing::Range(0U, 10U));
+                        ::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
 
 }  // namespace functional
 }  // namespace vts
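
CompilationCachingSecurityTest now draws its parameter from a two-axis cross product. A minimal, self-contained sketch (example names only, not part of this patch) of how ::testing::Combine pairs with a std::tuple parameter and std::get:

    #include <gtest/gtest.h>

    #include <cstdint>
    #include <tuple>

    // Combine() yields the cross product of the supplied generators; each test
    // receives one std::tuple and unpacks it with std::get<N>.
    class ExampleCombineTest : public ::testing::TestWithParam<std::tuple<bool, uint32_t>> {
      protected:
        const bool kFlag = std::get<0>(GetParam());
        const uint32_t kSeed = std::get<1>(GetParam());
    };

    TEST_P(ExampleCombineTest, SeedIsInRange) {
        EXPECT_LT(kSeed, 10U);
    }

    INSTANTIATE_TEST_CASE_P(ExampleInstance, ExampleCombineTest,
                            ::testing::Combine(::testing::Bool(), ::testing::Range(0U, 10U)));
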
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 386c141..8c6391e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@
 #include "Utils.h"
 
 #include <android-base/logging.h>
+#include <cstring>
 
 namespace android {
 namespace hardware {
@@ -38,7 +39,13 @@
 using ::android::nn::ResultChannelReceiver;
 using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;
 
+// This constant value represents the length of an FMQ that is large enough to
+// return a result from a burst execution for all of the generated test cases.
 constexpr size_t kExecutionBurstChannelLength = 1024;
+
+// This constant value represents an FMQ length that is not large enough to
+// return a result from a burst execution for some of the generated test
+// cases.
 constexpr size_t kExecutionBurstChannelSmallLength = 8;
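
Why these two particular sizes matter: a synchronized FMQ of capacity N can transfer at most N elements in one packet, so any serialized result longer than the small channel must fail while fitting comfortably in the regular one. A trivial, self-contained illustration of the capacity argument (plain C++, not this file's FMQ wrappers):

    #include <cstddef>

    // A packet can only be returned through a channel at least as long as the
    // packet itself.
    constexpr bool fitsInChannel(std::size_t packetElements, std::size_t channelLength) {
        return packetElements <= channelLength;
    }

    static_assert(fitsInChannel(9, 1024), "a 9-element result fits the regular channel");
    static_assert(!fitsInChannel(9, 8), "a 9-element result overflows the small channel");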
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -50,7 +57,8 @@
 static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurstCallback>& callback,
                         std::unique_ptr<RequestChannelSender>* sender,
                         std::unique_ptr<ResultChannelReceiver>* receiver,
-                        sp<IBurstContext>* context) {
+                        sp<IBurstContext>* context,
+                        size_t resultChannelLength = kExecutionBurstChannelLength) {
     ASSERT_NE(nullptr, preparedModel.get());
     ASSERT_NE(nullptr, sender);
     ASSERT_NE(nullptr, receiver);
@@ -60,7 +68,7 @@
     auto [fmqRequestChannel, fmqRequestDescriptor] =
             RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
     auto [fmqResultChannel, fmqResultDescriptor] =
-            ResultChannelReceiver::create(kExecutionBurstChannelLength, /*blocking=*/true);
+            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
     ASSERT_NE(nullptr, fmqRequestChannel.get());
     ASSERT_NE(nullptr, fmqResultChannel.get());
     ASSERT_NE(nullptr, fmqRequestDescriptor);
@@ -86,38 +94,25 @@
 }
 
 static void createBurstWithResultChannelLength(
-        const sp<IPreparedModel>& preparedModel,
-        std::shared_ptr<ExecutionBurstController>* controller, size_t resultChannelLength) {
+        const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+        std::shared_ptr<ExecutionBurstController>* controller) {
     ASSERT_NE(nullptr, preparedModel.get());
     ASSERT_NE(nullptr, controller);
 
     // create FMQ objects
-    auto [fmqRequestChannel, fmqRequestDescriptor] =
-            RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
-    auto [fmqResultChannel, fmqResultDescriptor] =
-            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
-    ASSERT_NE(nullptr, fmqRequestChannel.get());
-    ASSERT_NE(nullptr, fmqResultChannel.get());
-    ASSERT_NE(nullptr, fmqRequestDescriptor);
-    ASSERT_NE(nullptr, fmqResultDescriptor);
-
-    // configure burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
     sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
-    ErrorStatus errorStatus;
-    sp<IBurstContext> burstContext;
-    const Return<void> ret = preparedModel->configureExecutionBurst(
-            callback, *fmqRequestDescriptor, *fmqResultDescriptor,
-            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
-                errorStatus = status;
-                burstContext = context;
-            });
-    ASSERT_TRUE(ret.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
-    ASSERT_NE(nullptr, burstContext.get());
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
+                                        resultChannelLength));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
 
     // return values
-    *controller = std::make_shared<ExecutionBurstController>(
-            std::move(fmqRequestChannel), std::move(fmqResultChannel), burstContext, callback);
+    *controller = std::make_shared<ExecutionBurstController>(std::move(sender), std::move(receiver),
+                                                             context, callback);
 }
 
 // Primary validation function. This function will take a valid serialized
@@ -138,7 +133,7 @@
     SCOPED_TRACE(message);
 
     // send invalid packet
-    sender->sendPacket(serialized);
+    ASSERT_TRUE(sender->sendPacket(serialized));
 
     // receive error
     auto results = receiver->getBlocking();
@@ -149,27 +144,34 @@
     EXPECT_TRUE(badTiming(timing));
 }
 
-static std::vector<FmqRequestDatum> createUniqueDatum() {
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// creates pre-set invalid packet entries for convenience.
+static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
     const FmqRequestDatum::PacketInformation packetInformation = {
             /*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
             /*.numberOfPools=*/10};
     const FmqRequestDatum::OperandInformation operandInformation = {
             /*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
     const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
-    std::vector<FmqRequestDatum> unique(7);
-    unique[0].packetInformation(packetInformation);
-    unique[1].inputOperandInformation(operandInformation);
-    unique[2].inputOperandDimensionValue(0);
-    unique[3].outputOperandInformation(operandInformation);
-    unique[4].outputOperandDimensionValue(0);
-    unique[5].poolIdentifier(invalidPoolIdentifier);
-    unique[6].measureTiming(MeasureTiming::YES);
-    return unique;
+    std::vector<FmqRequestDatum> bad(7);
+    bad[0].packetInformation(packetInformation);
+    bad[1].inputOperandInformation(operandInformation);
+    bad[2].inputOperandDimensionValue(0);
+    bad[3].outputOperandInformation(operandInformation);
+    bad[4].outputOperandDimensionValue(0);
+    bad[5].poolIdentifier(invalidPoolIdentifier);
+    bad[6].measureTiming(MeasureTiming::YES);
+    return bad;
 }
 
-static const std::vector<FmqRequestDatum>& getUniqueDatum() {
-    static const std::vector<FmqRequestDatum> unique = createUniqueDatum();
-    return unique;
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// retrieves pre-set invalid packet entries for convenience, caching them so
+// they can be reused across subsequent validation checks.
+static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
+    static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
+    return bad;
 }
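
The entries above are consumed two ways by the tests that follow: inserted one at a time at every position of a valid packet (addDatumTest), and written over each existing position (mutateDatumTest). A minimal, generic sketch of the insertion pattern (template and names are illustrative):

    #include <cstddef>
    #include <vector>

    // For every insertion point and every bad datum, build a copy of the valid
    // packet with exactly one extra, invalid entry, then expect the failure path.
    template <typename Datum, typename ExpectFailure>
    void forEachSingleInsertion(const std::vector<Datum>& valid, const std::vector<Datum>& bad,
                                ExpectFailure&& expectFailure) {
        for (std::size_t index = 0; index <= valid.size(); ++index) {
            for (const Datum& datum : bad) {
                std::vector<Datum> mutated = valid;
                mutated.insert(mutated.begin() + index, datum);
                expectFailure(mutated);
            }
        }
    }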
 
 ///////////////////////// REMOVE DATUM ////////////////////////////////////
@@ -189,7 +191,7 @@
 
 static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
                          const std::vector<FmqRequestDatum>& serialized) {
-    const std::vector<FmqRequestDatum>& extra = getUniqueDatum();
+    const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
     for (size_t index = 0; index <= serialized.size(); ++index) {
         for (size_t type = 0; type < extra.size(); ++type) {
             const std::string message = "addDatum: added datum type " + std::to_string(type) +
@@ -208,17 +210,17 @@
     using Discriminator = FmqRequestDatum::hidl_discriminator;
 
     const bool differentValues = (lhs != rhs);
-    const bool sameSumType = (lhs.getDiscriminator() == rhs.getDiscriminator());
+    const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
     const auto discriminator = rhs.getDiscriminator();
     const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
                                    discriminator == Discriminator::outputOperandDimensionValue);
 
-    return differentValues && !(sameSumType && isDimensionValue);
+    return differentValues && !(sameDiscriminator && isDimensionValue);
 }
 
 static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
                             const std::vector<FmqRequestDatum>& serialized) {
-    const std::vector<FmqRequestDatum>& change = getUniqueDatum();
+    const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
     for (size_t index = 0; index < serialized.size(); ++index) {
         for (size_t type = 0; type < change.size(); ++type) {
             if (interestingCase(serialized[index], change[type])) {
@@ -251,17 +253,17 @@
     // validate each request
     for (const Request& request : requests) {
         // load memory into callback slots
-        std::vector<intptr_t> keys(request.pools.size());
-        for (size_t i = 0; i < keys.size(); ++i) {
-            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
-        }
+        std::vector<intptr_t> keys;
+        keys.reserve(request.pools.size());
+        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
         const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
 
         // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
         // subsequent slot validation testing)
-        const auto maxElement = std::max_element(slots.begin(), slots.end());
-        ASSERT_NE(slots.end(), maxElement);
-        ASSERT_NE(std::numeric_limits<int32_t>::max(), *maxElement);
+        ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+            return slot != std::numeric_limits<int32_t>::max();
+        }));
 
         // serialize the request
         const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
@@ -273,18 +275,20 @@
     }
 }
 
+// This test validates that when the Result message size exceeds the length of
+// the result FMQ, the service instance gracefully fails and returns an error.
 static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
                                    const std::vector<Request>& requests) {
     // create regular burst
     std::shared_ptr<ExecutionBurstController> controllerRegular;
-    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerRegular,
-                                                               kExecutionBurstChannelLength));
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelLength, &controllerRegular));
     ASSERT_NE(nullptr, controllerRegular.get());
 
     // create burst with small output channel
     std::shared_ptr<ExecutionBurstController> controllerSmall;
-    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerSmall,
-                                                               kExecutionBurstChannelSmallLength));
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
     ASSERT_NE(nullptr, controllerSmall.get());
 
     // validate each request
@@ -296,24 +300,103 @@
         }
 
         // collect serialized result by running regular burst
-        const auto [status1, outputShapes1, timing1] =
+        const auto [statusRegular, outputShapesRegular, timingRegular] =
                 controllerRegular->compute(request, MeasureTiming::NO, keys);
 
-        // skip test if synchronous output isn't useful
+        // skip test if regular burst output isn't useful for testing a failure
+        // caused by a result FMQ that is too short
         const std::vector<FmqResultDatum> serialized =
-                ::android::nn::serialize(status1, outputShapes1, timing1);
-        if (status1 != ErrorStatus::NONE ||
+                ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+        if (statusRegular != ErrorStatus::NONE ||
             serialized.size() <= kExecutionBurstChannelSmallLength) {
             continue;
         }
 
         // by this point, execution should fail because the result channel isn't
         // large enough to return the serialized result
-        const auto [status2, outputShapes2, timing2] =
+        const auto [statusSmall, outputShapesSmall, timingSmall] =
                 controllerSmall->compute(request, MeasureTiming::NO, keys);
-        EXPECT_NE(ErrorStatus::NONE, status2);
-        EXPECT_EQ(0u, outputShapes2.size());
-        EXPECT_TRUE(badTiming(timing2));
+        EXPECT_NE(ErrorStatus::NONE, statusSmall);
+        EXPECT_EQ(0u, outputShapesSmall.size());
+        EXPECT_TRUE(badTiming(timingSmall));
+    }
+}
+
+static bool isSanitized(const FmqResultDatum& datum) {
+    using Discriminator = FmqResultDatum::hidl_discriminator;
+
+    // check to ensure the padding values in the returned
+    // FmqResultDatum::OperandInformation are initialized to 0
+    if (datum.getDiscriminator() == Discriminator::operandInformation) {
+        static_assert(
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) == 0,
+                "unexpected value for offset of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(
+                sizeof(FmqResultDatum::OperandInformation::isSufficient) == 1,
+                "unexpected value for size of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) == 4,
+                      "unexpected value for offset of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation::numberOfDimensions) == 4,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation) == 8,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation");
+
+        constexpr size_t paddingOffset =
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) +
+                sizeof(FmqResultDatum::OperandInformation::isSufficient);
+        constexpr size_t paddingSize =
+                offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) - paddingOffset;
+
+        FmqResultDatum::OperandInformation initialized{};
+        std::memset(&initialized, 0, sizeof(initialized));
+
+        const char* initializedPaddingStart =
+                reinterpret_cast<const char*>(&initialized) + paddingOffset;
+        const char* datumPaddingStart =
+                reinterpret_cast<const char*>(&datum.operandInformation()) + paddingOffset;
+
+        return std::memcmp(datumPaddingStart, initializedPaddingStart, paddingSize) == 0;
+    }
+
+    // there are no other padding initialization checks required, so return true
+    // for any sum-type that isn't FmqResultDatum::OperandInformation
+    return true;
+}
+
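The check above relies only on standard-layout guarantees: offsetof to locate the padding span, memset to build a fully zeroed reference, and memcmp over just the padding bytes. The same technique on a hypothetical struct (Example is illustrative, not the HIDL type):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Example {
        bool flag;       // 1 byte; typically followed by 3 bytes of padding
        uint32_t count;  // 4-byte aligned
    };

    // Compare only the padding bytes between the two fields against a fully
    // zeroed reference; memset is used because value-initialization does not
    // guarantee zeroed padding.
    bool examplePaddingIsZeroed(const Example& value) {
        constexpr std::size_t paddingOffset = offsetof(Example, flag) + sizeof(Example::flag);
        constexpr std::size_t paddingSize = offsetof(Example, count) - paddingOffset;

        Example zeroed;
        std::memset(&zeroed, 0, sizeof(zeroed));

        const char* zeroedPadding = reinterpret_cast<const char*>(&zeroed) + paddingOffset;
        const char* valuePadding = reinterpret_cast<const char*>(&value) + paddingOffset;
        return std::memcmp(valuePadding, zeroedPadding, paddingSize) == 0;
    }
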
+static void validateBurstSanitized(const sp<IPreparedModel>& preparedModel,
+                                   const std::vector<Request>& requests) {
+    // create burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // validate each request
+    for (const Request& request : requests) {
+        // load memory into callback slots
+        std::vector<intptr_t> keys;
+        keys.reserve(request.pools.size());
+        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+        const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+        // send valid request
+        ASSERT_TRUE(sender->send(request, MeasureTiming::YES, slots));
+
+        // receive valid result
+        auto serialized = receiver->getPacketBlocking();
+        ASSERT_TRUE(serialized.has_value());
+
+        // sanitize result
+        ASSERT_TRUE(std::all_of(serialized->begin(), serialized->end(), isSanitized))
+                << "The result serialized data is not properly sanitized";
     }
 }
 
@@ -323,6 +406,7 @@
                                    const std::vector<Request>& requests) {
     ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, requests));
     ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, requests));
+    ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, requests));
 }
 
 }  // namespace functional
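
Two hunks above replace index-based loops with std::transform plus std::back_inserter when building the keys vector. A self-contained sketch of that pattern on generic types:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    // Collect the address of each element without an explicit index loop;
    // reserve() avoids reallocation while back_inserter appends each result.
    std::vector<const int*> addressesOf(const std::vector<int>& pools) {
        std::vector<const int*> keys;
        keys.reserve(pools.size());
        std::transform(pools.begin(), pools.end(), std::back_inserter(keys),
                       [](const int& pool) { return &pool; });
        return keys;
    }
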
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 93182f1..4ddefe8 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -29,7 +29,6 @@
 namespace vts {
 namespace functional {
 
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
 using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using V1_1::ExecutionPreference;
@@ -127,7 +126,7 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
     validateModel(model);
 
     // create IPreparedModel
@@ -137,8 +136,8 @@
         return;
     }
 
-    validateRequests(preparedModel, request);
-    validateBurst(preparedModel, request);
+    validateRequests(preparedModel, requests);
+    validateBurst(preparedModel, requests);
 }
 
 sp<IPreparedModel> getPreparedModel_1_2(
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 36e73a4..8d1acbe 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -72,7 +72,7 @@
 // Tag for the validation tests
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
-     void validateEverything(const Model& model, const std::vector<Request>& request);
+     void validateEverything(const Model& model, const std::vector<Request>& requests);
 
    private:
      void validateModel(const Model& model);