Merge "Camera: Allow out-of-order ZSL shutter notify" into qt-dev
diff --git a/automotive/OWNERS b/automotive/OWNERS
index 4a94494..3cf4489 100644
--- a/automotive/OWNERS
+++ b/automotive/OWNERS
@@ -1,3 +1,4 @@
 randolphs@google.com
 pirozzoj@google.com
 twasilczyk@google.com
+pfg@google.com
diff --git a/automotive/evs/1.0/vts/functional/FormatConvert.cpp b/automotive/evs/1.0/vts/functional/FormatConvert.cpp
index 1e8929d..3d82d32 100644
--- a/automotive/evs/1.0/vts/functional/FormatConvert.cpp
+++ b/automotive/evs/1.0/vts/functional/FormatConvert.cpp
@@ -38,7 +38,8 @@
 }
 
 
-static uint32_t yuvToRgbx(const unsigned char Y, const unsigned char Uin, const unsigned char Vin) {
+static uint32_t yuvToRgbx(const unsigned char Y, const unsigned char Uin, const unsigned char Vin,
+                          bool bgrxFormat = false) {
     // Don't use this if you want to see the best performance.  :)
     // Better to do this in a pixel shader if we really have to, but on actual
     // embedded hardware we expect to be able to texture directly from the YUV data
@@ -52,16 +53,24 @@
     unsigned char G = (unsigned char)clamp(Gf, 0.0f, 255.0f);
     unsigned char B = (unsigned char)clamp(Bf, 0.0f, 255.0f);
 
-    return (R      ) |
-           (G <<  8) |
-           (B << 16) |
-           0xFF000000;  // Fill the alpha channel with ones
+    if (!bgrxFormat) {
+        return (R      ) |
+               (G <<  8) |
+               (B << 16) |
+               0xFF000000;  // Fill the alpha channel with ones
+    } else {
+        return (R << 16) |
+               (G <<  8) |
+               (B      ) |
+               0xFF000000;  // Fill the alpha channel with ones
+    }
 }
 
 
 void copyNV21toRGB32(unsigned width, unsigned height,
                      uint8_t* src,
-                     uint32_t* dst, unsigned dstStridePixels)
+                     uint32_t* dst, unsigned dstStridePixels,
+                     bool bgrxFormat)
 {
     // The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
     // U/V array.  It assumes an even width and height for the overall image, and a horizontal
@@ -84,7 +93,7 @@
         for (unsigned c = 0; c < width; c++) {
             unsigned uCol = (c & ~1);   // uCol is always even and repeats 1:2 with Y values
             unsigned vCol = uCol | 1;   // vCol is always odd
-            rowDest[c] = yuvToRgbx(rowY[c], rowUV[uCol], rowUV[vCol]);
+            rowDest[c] = yuvToRgbx(rowY[c], rowUV[uCol], rowUV[vCol], bgrxFormat);
         }
     }
 }
@@ -92,7 +101,8 @@
 
 void copyYV12toRGB32(unsigned width, unsigned height,
                      uint8_t* src,
-                     uint32_t* dst, unsigned dstStridePixels)
+                     uint32_t* dst, unsigned dstStridePixels,
+                     bool bgrxFormat)
 {
     // The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
     // by another 1/2 x 1/2 V array.  It assumes an even width and height for the overall image,
@@ -118,7 +128,7 @@
         uint32_t* rowDest = dst + r*dstStridePixels;
 
         for (unsigned c = 0; c < width; c++) {
-            rowDest[c] = yuvToRgbx(rowY[c], rowU[c], rowV[c]);
+            rowDest[c] = yuvToRgbx(rowY[c], rowU[c], rowV[c], bgrxFormat);
         }
     }
 }
@@ -126,7 +136,8 @@
 
 void copyYUYVtoRGB32(unsigned width, unsigned height,
                      uint8_t* src, unsigned srcStridePixels,
-                     uint32_t* dst, unsigned dstStridePixels)
+                     uint32_t* dst, unsigned dstStridePixels,
+                     bool bgrxFormat)
 {
     uint32_t* srcWords = (uint32_t*)src;
 
@@ -144,8 +155,8 @@
             uint8_t V  = (srcPixel >> 24) & 0xFF;
 
             // On the RGB output, we're writing one pixel at a time
-            *(dst+0) = yuvToRgbx(Y1, U, V);
-            *(dst+1) = yuvToRgbx(Y2, U, V);
+            *(dst+0) = yuvToRgbx(Y1, U, V, bgrxFormat);
+            *(dst+1) = yuvToRgbx(Y2, U, V, bgrxFormat);
             dst += 2;
         }
 
@@ -156,6 +167,30 @@
 }
 
 
+void copyNV21toBGR32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    return copyNV21toRGB32(width, height, src, dst, dstStridePixels, true);
+}
+
+
+void copyYV12toBGR32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    return copyYV12toRGB32(width, height, src, dst, dstStridePixels, true);
+}
+
+
+void copyYUYVtoBGR32(unsigned width, unsigned height,
+                     uint8_t* src, unsigned srcStridePixels,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    return copyYUYVtoRGB32(width, height, src, srcStridePixels, dst, dstStridePixels, true);
+}
+
+
 void copyMatchedInterleavedFormats(unsigned width, unsigned height,
                                    void* src, unsigned srcStridePixels,
                                    void* dst, unsigned dstStridePixels,
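
The new bgrxFormat flag only swaps the R and B byte positions in the packed output word; callers choose the ordering through the new copy*toBGR32 wrappers rather than passing the flag directly. A minimal usage sketch for the NV21 path, assuming caller-owned buffers and dimensions that already satisfy the 16-byte stride requirement documented in FormatConvert.h (the sizes here are illustrative only):

    #include <cstdint>
    #include <vector>
    #include "FormatConvert.h"

    void convertCameraFrameSketch() {
        const unsigned width = 640, height = 480;  // even dimensions, stride is a multiple of 16
        // NV21: full-resolution Y plane followed by a quarter-resolution interleaved V/U plane.
        std::vector<uint8_t> nv21(width * height * 3 / 2, 0);
        std::vector<uint32_t> bgra(width * height);

        // Same conversion routine as before; the wrapper sets the trailing flag to swap R and B.
        copyNV21toBGR32(width, height, nv21.data(), bgra.data(), /*dstStridePixels=*/width);
    }
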
diff --git a/automotive/evs/1.0/vts/functional/FormatConvert.h b/automotive/evs/1.0/vts/functional/FormatConvert.h
index 3ff1eec..4a94f99 100644
--- a/automotive/evs/1.0/vts/functional/FormatConvert.h
+++ b/automotive/evs/1.0/vts/functional/FormatConvert.h
@@ -21,31 +21,45 @@
 #include <stdint.h>
 
 
-// Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx values.
-// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
+// Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx/BGRx
+// values.  The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
 // U/V array.  It assumes an even width and height for the overall image, and a horizontal
 // stride that is an even multiple of 16 bytes for both the Y and UV arrays.
 void copyNV21toRGB32(unsigned width, unsigned height,
                      uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels,
+                     bool bgrxFormat = false);
+
+void copyNV21toBGR32(unsigned width, unsigned height,
+                     uint8_t* src,
                      uint32_t* dst, unsigned dstStridePixels);
 
 
-// Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx values.
+// Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx/BGRx values.
 // The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
 // by another 1/2 x 1/2 V array.  It assumes an even width and height for the overall image,
 // and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
 // and V arrays.
 void copyYV12toRGB32(unsigned width, unsigned height,
                      uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels,
+                     bool bgrxFormat = false);
+
+void copyYV12toBGR32(unsigned width, unsigned height,
+                     uint8_t* src,
                      uint32_t* dst, unsigned dstStridePixels);
 
-
-// Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx values.
-// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
+// Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx/BGRx
+// values.  The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
 // U/V array.  It assumes an even width and height for the overall image, and a horizontal
 // stride that is an even multiple of 16 bytes for both the Y and UV arrays.
 void copyYUYVtoRGB32(unsigned width, unsigned height,
                      uint8_t* src, unsigned srcStrideBytes,
+                     uint32_t* dst, unsigned dstStrideBytes,
+                     bool bgrxFormat = false);
+
+void copyYUYVtoBGR32(unsigned width, unsigned height,
+                     uint8_t* src, unsigned srcStrideBytes,
                      uint32_t* dst, unsigned dstStrideBytes);
 
 
diff --git a/automotive/evs/1.0/vts/functional/FrameHandler.cpp b/automotive/evs/1.0/vts/functional/FrameHandler.cpp
index a69f72b..d44ba41 100644
--- a/automotive/evs/1.0/vts/functional/FrameHandler.cpp
+++ b/automotive/evs/1.0/vts/functional/FrameHandler.cpp
@@ -231,16 +231,12 @@
     uint8_t* srcPixels = nullptr;
     src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void**)&srcPixels);
 
-    // Lock our target buffer for writing (should be RGBA8888 format)
+    // Lock our target buffer for writing (should be either RGBA8888 or BGRA8888 format)
     uint32_t* tgtPixels = nullptr;
     tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
 
     if (srcPixels && tgtPixels) {
-        if (tgtBuffer.format != HAL_PIXEL_FORMAT_RGBA_8888) {
-            // We always expect 32 bit RGB for the display output for now.  Is there a need for 565?
-            ALOGE("Diplay buffer is always expected to be 32bit RGBA");
-            success = false;
-        } else {
+        if (tgtBuffer.format == HAL_PIXEL_FORMAT_RGBA_8888) {
             if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) {   // 420SP == NV21
                 copyNV21toRGB32(width, height,
                                 srcPixels,
@@ -258,7 +254,36 @@
                                               srcPixels, srcBuffer.stride,
                                               tgtPixels, tgtBuffer.stride,
                                               tgtBuffer.pixelSize);
+            } else {
+                ALOGE("Camera buffer format is not supported");
+                success = false;
             }
+        } else if (tgtBuffer.format == HAL_PIXEL_FORMAT_BGRA_8888) {
+            if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) {   // 420SP == NV21
+                copyNV21toBGR32(width, height,
+                                srcPixels,
+                                tgtPixels, tgtBuffer.stride);
+            } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+                copyYV12toBGR32(width, height,
+                                srcPixels,
+                                tgtPixels, tgtBuffer.stride);
+            } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+                copyYUYVtoBGR32(width, height,
+                                srcPixels, srcBuffer.stride,
+                                tgtPixels, tgtBuffer.stride);
+            } else if (srcBuffer.format == tgtBuffer.format) {  // 32bit RGBA
+                copyMatchedInterleavedFormats(width, height,
+                                              srcPixels, srcBuffer.stride,
+                                              tgtPixels, tgtBuffer.stride,
+                                              tgtBuffer.pixelSize);
+            } else {
+                ALOGE("Camera buffer format is not supported");
+                success = false;
+            }
+        } else {
+            // We always expect 32 bit RGB for the display output for now.  Is there a need for 565?
+            ALOGE("Diplay buffer is always expected to be 32bit RGBA");
+            success = false;
         }
     } else {
         ALOGE("Failed to lock buffer contents for contents transfer");
diff --git a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
index 258dbd9..7082566 100644
--- a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
+++ b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
@@ -105,6 +105,24 @@
     std::vector<VmsAssociatedLayer> associated_layers;
 };
 
+// An enum to represent the result of parsing a START_SESSION message from the VMS service.
+enum VmsSessionStatus {
+    // New server session is received if the new client ID is -1 and the new server ID is not an
+    // invalid ID.
+    kNewServerSession,
+    // Ack to a new client session is received if the new client ID is the same as the old one and
+    // the new server ID is not an invalid ID.
+    kAckToNewClientSession,
+    // Error codes:
+    // Invalid message with either invalid format or unexpected data.
+    kInvalidMessage,
+    // Invalid server ID. New ID should always be greater than or equal to max_of(0, current server
+    // ID)
+    kInvalidServiceId,
+    // Invalid client ID. New ID should always be either -1 or the current client ID.
+    kInvalidClientId
+};
+
 // Creates an empty base VMS message with some pre-populated default fields.
 std::unique_ptr<VehiclePropValue> createBaseVmsMessage(size_t message_size);
 
@@ -146,11 +164,21 @@
 // Creates a VehiclePropValue containing a message of type VmsMessageType.DATA.
 // Returns a nullptr if the byte string in bytes is empty.
 //
-// For example, to build a VehiclePropMessage containing a proto, the caller
+// For example, to build a VehiclePropValue message containing a proto, the caller
 // should convert the proto to a byte string using the SerializeToString proto
 // API, then use this interface to build the VehicleProperty.
 std::unique_ptr<VehiclePropValue> createDataMessage(const std::string& bytes);
 
+// Creates a VehiclePropValue containing a message of type
+// VmsMessageType.PUBLISHER_ID_REQUEST with the given publisher information.
+// Returns a nullptr if the input is empty.
+std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
+        const std::string& vms_provider_description);
+
+// Creates a VehiclePropValue message of type VmsMessageType.START_SESSION.
+std::unique_ptr<VehiclePropValue> createStartSessionMessage(const int service_id,
+                                                            const int client_id);
+
 // Returns true if the VehiclePropValue pointed to by value contains a valid Vms
 // message, i.e. the VehicleProperty, VehicleArea, and VmsMessageType are all
 // valid. Note: If the VmsMessageType enum is extended, this function will
@@ -169,12 +197,6 @@
 // function to ParseFromString.
 std::string parseData(const VehiclePropValue& value);
 
-// Creates a VehiclePropValue containing a message of type
-// VmsMessageType.PUBLISHER_ID_REQUEST with the given publisher information.
-// Returns a nullptr if the input is empty.
-std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
-        const std::string& vms_provider_description);
-
 // Returns the publisher ID by parsing the VehiclePropValue containing the ID.
 // Returns null if the message is invalid.
 int32_t parsePublisherIdResponse(const VehiclePropValue& publisher_id_response);
@@ -204,6 +226,12 @@
 // has newly started or restarted.
 bool hasServiceNewlyStarted(const VehiclePropValue& availability_change);
 
+// Takes a start session message, the current service ID, and the current client ID, and returns
+// the type/status of the message. It also populates the new service ID with the correct value.
+VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session,
+                                          const int service_id, const int client_id,
+                                          int* new_service_id);
+
 }  // namespace vms
 }  // namespace V2_0
 }  // namespace vehicle
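
The new session helpers pair up: parseStartSessionMessage() classifies an incoming START_SESSION and reports the service ID to adopt, while createStartSessionMessage() builds the acknowledgement. A rough usage sketch for a client reacting to a service restart, assuming the vms namespace is in scope and that currentServiceId/currentClientId are state the client already tracks (the handler and send step are hypothetical):

    // Hypothetical handler; VehiclePropValue and the helpers come from VmsUtils.h.
    void onVmsPropertyEvent(const VehiclePropValue& value, int& currentServiceId,
                            int currentClientId) {
        int newServiceId = currentServiceId;
        const auto status = parseStartSessionMessage(value, currentServiceId, currentClientId,
                                                     &newServiceId);
        if (status == VmsSessionStatus::kNewServerSession) {
            currentServiceId = newServiceId;
            // Acknowledge by echoing both identifiers, per the handshake described in types.hal.
            auto ack = createStartSessionMessage(currentServiceId, currentClientId);
            // sendToVehicleHal(*ack);  // transport is outside the scope of these utilities
        }
    }
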
diff --git a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
index 1863191..111f6ea 100644
--- a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
+++ b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
@@ -31,6 +31,7 @@
 static constexpr int kLayerNumberSize = 1;
 static constexpr int kLayerSize = 3;
 static constexpr int kLayerAndPublisherSize = 4;
+static constexpr int kSessionIdsSize = 2;
 static constexpr int kPublisherIdIndex =
         toInt(VmsPublisherInformationIntegerValuesIndex::PUBLISHER_ID);
 static constexpr int kSubscriptionStateSequenceNumberIndex =
@@ -41,9 +42,9 @@
 // TODO(aditin): We should extend the VmsMessageType enum to include a first and
 // last, which would prevent breakages in this API. However, for all of the
 // functions in this module, we only need to guarantee that the message type is
-// between SUBSCRIBE and PUBLISHER_ID_RESPONSE.
+// between SUBSCRIBE and START_SESSION.
 static constexpr int kFirstMessageType = toInt(VmsMessageType::SUBSCRIBE);
-static constexpr int kLastMessageType = toInt(VmsMessageType::PUBLISHER_ID_RESPONSE);
+static constexpr int kLastMessageType = toInt(VmsMessageType::START_SESSION);
 
 std::unique_ptr<VehiclePropValue> createBaseVmsMessage(size_t message_size) {
     auto result = createVehiclePropValue(VehiclePropertyType::INT32, message_size);
@@ -132,6 +133,28 @@
     return result;
 }
 
+std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
+        const std::string& vms_provider_description) {
+    auto result = createBaseVmsMessage(kMessageTypeSize);
+    result->value.int32Values = hidl_vec<int32_t>{
+            toInt(VmsMessageType::PUBLISHER_ID_REQUEST),
+    };
+    result->value.bytes =
+            std::vector<uint8_t>(vms_provider_description.begin(), vms_provider_description.end());
+    return result;
+}
+
+std::unique_ptr<VehiclePropValue> createStartSessionMessage(const int service_id,
+                                                            const int client_id) {
+    auto result = createBaseVmsMessage(kMessageTypeSize + kSessionIdsSize);
+    result->value.int32Values = hidl_vec<int32_t>{
+            toInt(VmsMessageType::START_SESSION),
+            service_id,
+            client_id,
+    };
+    return result;
+}
+
 bool isValidVmsProperty(const VehiclePropValue& value) {
     return (value.prop == toInt(VehicleProperty::VEHICLE_MAP_SERVICE));
 }
@@ -159,17 +182,6 @@
     }
 }
 
-std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
-        const std::string& vms_provider_description) {
-    auto result = createBaseVmsMessage(kMessageTypeSize);
-    result->value.int32Values = hidl_vec<int32_t>{
-            toInt(VmsMessageType::PUBLISHER_ID_REQUEST),
-    };
-    result->value.bytes =
-            std::vector<uint8_t>(vms_provider_description.begin(), vms_provider_description.end());
-    return result;
-}
-
 int32_t parsePublisherIdResponse(const VehiclePropValue& publisher_id_response) {
     if (isValidVmsMessage(publisher_id_response) &&
         parseMessageType(publisher_id_response) == VmsMessageType::PUBLISHER_ID_RESPONSE &&
@@ -256,6 +268,31 @@
             availability_change.value.int32Values[kAvailabilitySequenceNumberIndex] == 0);
 }
 
+VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session,
+                                          const int service_id, const int client_id,
+                                          int* new_service_id) {
+    if (isValidVmsMessage(start_session) &&
+        parseMessageType(start_session) == VmsMessageType::START_SESSION &&
+        start_session.value.int32Values.size() == kSessionIdsSize + 1) {
+        *new_service_id = start_session.value.int32Values[1];
+        const int new_client_id = start_session.value.int32Values[2];
+        if (*new_service_id < std::max(0, service_id)) {
+            *new_service_id = service_id;
+            return VmsSessionStatus::kInvalidServiceId;
+        }
+        if (new_client_id == -1) {
+            return VmsSessionStatus::kNewServerSession;
+        }
+        if (new_client_id == client_id) {
+            return VmsSessionStatus::kAckToNewClientSession;
+        }
+        *new_service_id = service_id;
+        return VmsSessionStatus::kInvalidClientId;
+    }
+    *new_service_id = service_id;
+    return VmsSessionStatus::kInvalidMessage;
+}
+
 }  // namespace vms
 }  // namespace V2_0
 }  // namespace vehicle
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index 08cdffa..39fe991 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -530,6 +530,7 @@
     {.config = {.prop = toInt(VehicleProperty::HVAC_TEMPERATURE_DISPLAY_UNITS),
                 .access = VehiclePropertyAccess::READ_WRITE,
                 .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+                .configArray = {(int)VehicleUnit::FAHRENHEIT, (int)VehicleUnit::CELSIUS},
                 .areaConfigs = {VehicleAreaConfig{.areaId = (0)}}},
      .initialValue = {.int32Values = {(int)VehicleUnit::FAHRENHEIT}}},
 
diff --git a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
index 5ea5bd4..2b3efc7 100644
--- a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
+++ b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
@@ -158,7 +158,7 @@
 TEST(VmsUtilsTest, invalidMessageType) {
     VmsLayer layer(1, 0, 2);
     auto message = createSubscribeMessage(layer);
-    message->value.int32Values[0] = 0;
+    message->value.int32Values[0] = -1;
 
     EXPECT_FALSE(isValidVmsMessage(*message));
 }
@@ -325,6 +325,98 @@
     EXPECT_FALSE(hasServiceNewlyStarted(*message));
 }
 
+TEST(VmsUtilsTest, startSessionRequest) {
+    auto message = createStartSessionMessage(123, 456);
+    ASSERT_NE(message, nullptr);
+    EXPECT_TRUE(isValidVmsMessage(*message));
+    EXPECT_EQ(message->prop, toInt(VehicleProperty::VEHICLE_MAP_SERVICE));
+    EXPECT_EQ(message->value.int32Values.size(), 0x3ul);
+    EXPECT_EQ(parseMessageType(*message), VmsMessageType::START_SESSION);
+    EXPECT_EQ(message->value.int32Values[1], 123);
+    EXPECT_EQ(message->value.int32Values[2], 456);
+}
+
+TEST(VmsUtilsTest, startSessionServiceNewlyStarted) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, -1};
+    EXPECT_EQ(parseStartSessionMessage(*message, 122, 456, &new_service_id),
+              VmsSessionStatus::kNewServerSession);
+    EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionServiceNewlyStartedEdgeCase) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 0, -1};
+    EXPECT_EQ(parseStartSessionMessage(*message, -1, 0, &new_service_id),
+              VmsSessionStatus::kNewServerSession);
+    EXPECT_EQ(new_service_id, 0);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStarted) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 456};
+    EXPECT_EQ(parseStartSessionMessage(*message, -1, 456, &new_service_id),
+              VmsSessionStatus::kAckToNewClientSession);
+    EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStartedWithSameServerId) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 456};
+    EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+              VmsSessionStatus::kAckToNewClientSession);
+    EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStartedEdgeCase) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 0, 0};
+    EXPECT_EQ(parseStartSessionMessage(*message, 0, 0, &new_service_id),
+              VmsSessionStatus::kAckToNewClientSession);
+    EXPECT_EQ(new_service_id, 0);
+}
+
+TEST(VmsUtilsTest, startSessionOldServiceId) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 120, 456};
+    EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+              VmsSessionStatus::kInvalidServiceId);
+    EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidServiceIdEdgeCase) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), -1, 456};
+    EXPECT_EQ(parseStartSessionMessage(*message, -1, 456, &new_service_id),
+              VmsSessionStatus::kInvalidServiceId);
+    EXPECT_EQ(new_service_id, -1);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidClientId) {
+    auto message = createBaseVmsMessage(3);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 457};
+    EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+              VmsSessionStatus::kInvalidClientId);
+    EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidMessageFormat) {
+    auto message = createBaseVmsMessage(2);
+    int new_service_id;
+    message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123};
+    EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+              VmsSessionStatus::kInvalidMessage);
+    EXPECT_EQ(new_service_id, 123);
+}
+
 }  // namespace
 
 }  // namespace vms
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index b04d096..e36468a 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -1120,9 +1120,9 @@
      *
      * Distance units are defined in VehicleUnit.
      * VehiclePropConfig.configArray is used to indicate the supported distance display units.
-     * For example: configArray[0] = 0x21 // METER
-     *              configArray[1] = 0x23 // KILOMETER
-     *              configArray[2] = 0x24 // MILE
+     * For example: configArray[0] = METER
+     *              configArray[1] = KILOMETER
+     *              configArray[2] = MILE
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ_WRITE
      * @data_enum VehicleUnit
@@ -1141,8 +1141,8 @@
      *
      * VehiclePropConfig.configArray is used to indicate the supported fuel volume display units.
      * Volume units are defined in VehicleUnit.
-     * For example: configArray[0] = 0x41 // LITER
-     *              configArray[1] = 0x42 // GALLON
+     * For example: configArray[0] = LITER
+     *              configArray[1] = GALLON
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ_WRITE
      * @data_enum VehicleUnit
@@ -1161,9 +1161,9 @@
      *
      * VehiclePropConfig.configArray is used to indicate the supported pressure display units.
      * Pressure units are defined in VehicleUnit.
-     * For example: configArray[0] = 0x70 // KILOPASCAL
-     *              configArray[1] = 0x71 // PSI
-     *              configArray[2] = 0x72 // BAR
+     * For example: configArray[0] = KILOPASCAL
+     *              configArray[1] = PSI
+     *              configArray[2] = BAR
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ_WRITE
      * @data_enum VehicleUnit
@@ -1182,9 +1182,9 @@
      *
      * VehiclePropConfig.configArray is used to indicate the supported electrical energy units.
      * Electrical energy units are defined in VehicleUnit.
-     * For example: configArray[0] = 0x60 // watt-hours
-     *              configArray[1] = 0x64 // ampere-hours
-     *              configArray[2] = 0x65 // kilowatt-hours
+     * For example: configArray[0] = WATT_HOUR
+     *              configArray[1] = AMPERE_HOURS
+     *              configArray[2] = KILOWATT_HOUR
      * @change_mode VehiclePropertyChangeMode:ON_CHANGE
      * @access VehiclePropertyAccess:READ_WRITE
      * @data_enum VehicleUnit
@@ -1212,6 +1212,25 @@
         | VehicleArea:GLOBAL),
 
     /**
+     * Speed units for display
+     *
+     * Indicates type of units the car is using to display speed to user. Eg. m/s, km/h, or mph.
+     *
+     * VehiclePropConfig.configArray is used to indicate the supported speed display units.
+     * Speed units are defined in VehicleUnit.
+     * For example: configArray[0] = METER_PER_SEC
+     *              configArray[1] = MILES_PER_HOUR
+     *              configArray[2] = KILOMETERS_PER_HOUR
+     * @change_mode VehiclePropertyChangeMode:ON_CHANGE
+     * @access VehiclePropertyAccess:READ_WRITE
+     */
+    VEHICLE_SPEED_DISPLAY_UNITS = (
+        0x0605
+        | VehiclePropertyGroup:SYSTEM
+        | VehiclePropertyType:INT32
+        | VehicleArea:GLOBAL),
+
+    /**
      * Outside temperature
      *
      * @change_mode VehiclePropertyChangeMode:CONTINUOUS
@@ -2586,42 +2605,45 @@
  * Units used for int or float type with no attached enum types.
  */
 enum VehicleUnit : int32_t {
-    SHOULD_NOT_USE = 0x000,
+    SHOULD_NOT_USE      = 0x000,
 
-    METER_PER_SEC  = 0x01,
-    RPM            = 0x02,
-    HERTZ          = 0x03,
-    PERCENTILE     = 0x10,
-    MILLIMETER     = 0x20,
-    METER          = 0x21,
-    KILOMETER      = 0x23,
-    MILE           = 0x24,
-    CELSIUS        = 0x30,
-    FAHRENHEIT     = 0x31,
-    KELVIN         = 0x32,
-    MILLILITER     = 0x40,
-    LITER          = 0x41,
+    METER_PER_SEC       = 0x01,
+    RPM                 = 0x02,
+    HERTZ               = 0x03,
+    PERCENTILE          = 0x10,
+    MILLIMETER          = 0x20,
+    METER               = 0x21,
+    KILOMETER           = 0x23,
+    MILE                = 0x24,
+    CELSIUS             = 0x30,
+    FAHRENHEIT          = 0x31,
+    KELVIN              = 0x32,
+    MILLILITER          = 0x40,
+    LITER               = 0x41,
 
     /** deprecated. Use US_GALLON instead. */
-    GALLON         = 0x42,
-    US_GALLON      = 0x42,
-    IMPERIAL_GALLON= 0x43,
-    NANO_SECS      = 0x50,
-    SECS           = 0x53,
-    YEAR           = 0x59,
+    GALLON              = 0x42,
+    US_GALLON           = 0x42,
+    IMPERIAL_GALLON     = 0x43,
+    NANO_SECS           = 0x50,
+    SECS                = 0x53,
+    YEAR                = 0x59,
 
     // Electrical Units
-    WATT_HOUR      = 0x60,
-    MILLIAMPERE    = 0x61,
-    MILLIVOLT      = 0x62,
-    MILLIWATTS     = 0x63,
-    AMPERE_HOURS   = 0x64,
-    KILOWATT_HOUR  = 0x65,
+    WATT_HOUR           = 0x60,
+    MILLIAMPERE         = 0x61,
+    MILLIVOLT           = 0x62,
+    MILLIWATTS          = 0x63,
+    AMPERE_HOURS        = 0x64,
+    KILOWATT_HOUR       = 0x65,
 
-    KILOPASCAL     = 0x70,
-    PSI            = 0x71,
-    BAR            = 0x72,
-    DEGREES        = 0x80,
+    KILOPASCAL          = 0x70,
+    PSI                 = 0x71,
+    BAR                 = 0x72,
+    DEGREES             = 0x80,
+
+    MILES_PER_HOUR      = 0x90,
+    KILOMETERS_PER_HOUR = 0x91,
 };
 
 /**
@@ -3249,6 +3271,16 @@
  */
 enum VmsMessageType : int32_t {
     /**
+     * A notification indicating that the sender has been reset.
+     *
+     * The receiving party must reset its internal state and respond to the
+     * sender with a START_SESSION message as acknowledgement.
+     *
+     * This message type uses enum VmsStartSessionMessageIntegerValuesIndex.
+     */
+    START_SESSION = 17,
+
+    /**
      * A request from the subscribers to the VMS service to subscribe to a layer.
      *
      * This message type uses enum VmsMessageWithLayerIntegerValuesIndex.
@@ -3364,7 +3396,7 @@
      */
     PUBLISHER_INFORMATION_RESPONSE = 16,
 
-    LAST_VMS_MESSAGE_TYPE = PUBLISHER_INFORMATION_RESPONSE,
+    LAST_VMS_MESSAGE_TYPE = START_SESSION,
 };
 
 /**
@@ -3378,6 +3410,30 @@
 };
 
 /*
+ * Handshake data sent as part of a VmsMessageType.START_SESSION message.
+ *
+ * A new session is initiated by sending a START_SESSION message with the
+ * sender's identifier populated and the receiver's identifier set to -1.
+ *
+ * Identifier values are independently generated, but must be non-negative, and
+ * increase monotonically between reboots.
+ *
+ * Upon receiving a START_SESSION with a mismatched identifier, the receiver
+ * must clear any cached VMS offering or subscription state and acknowledge the
+ * new session by responding with a START_SESSION message that populates both
+ * identifier fields.
+ *
+ * Any VMS messages received between initiation and completion of the handshake
+ * must be discarded.
+ */
+enum VmsStartSessionMessageIntegerValuesIndex : VmsBaseMessageIntegerValuesIndex {
+    /* Identifier field for the Android system service. */
+    SERVICE_ID = 1,
+    /* Identifier field for the HAL client process. */
+    CLIENT_ID = 2,
+};
+
+/*
  * A VMS message with a layer is sent as part of a VmsMessageType.SUBSCRIBE or
  * VmsMessageType.UNSUBSCRIBE messages.
  *
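
As laid out in the handshake comment above, a full session negotiation is just two three-element int32 payloads whose fields follow VmsStartSessionMessageIntegerValuesIndex. A minimal sketch of the values exchanged (the concrete identifiers are illustrative):

    #include <cstdint>
    #include <vector>

    // Server announces a new session; CLIENT_ID (index 2) is -1 because the client is unknown.
    const std::vector<int32_t> serverStartSession = {/*START_SESSION*/ 17, /*SERVICE_ID*/ 42,
                                                     /*CLIENT_ID*/ -1};

    // Client acknowledges by echoing the service ID and filling in its own identifier.
    const std::vector<int32_t> clientAck = {/*START_SESSION*/ 17, /*SERVICE_ID*/ 42,
                                            /*CLIENT_ID*/ 7};
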
diff --git a/broadcastradio/common/tests/Android.bp b/broadcastradio/common/tests/Android.bp
index ef8733c..0ace941 100644
--- a/broadcastradio/common/tests/Android.bp
+++ b/broadcastradio/common/tests/Android.bp
@@ -58,6 +58,7 @@
         "android.hardware.broadcastradio@common-utils-2x-lib",
     ],
     shared_libs: [
+        "libhidlbase",
         "android.hardware.broadcastradio@2.0",
     ],
     test_suites: ["general-tests"],
diff --git a/camera/device/3.2/default/CameraDeviceSession.cpp b/camera/device/3.2/default/CameraDeviceSession.cpp
index f2d7a47..99cdccb 100644
--- a/camera/device/3.2/default/CameraDeviceSession.cpp
+++ b/camera/device/3.2/default/CameraDeviceSession.cpp
@@ -99,11 +99,20 @@
         return true;
     }
 
-    int32_t reqFMQSize = property_get_int32("ro.camera.req.fmq.size", /*default*/-1);
+    // "ro.camera" properties are no longer supported on vendor side.
+    //  Support a fall back for the fmq size override that uses "ro.vendor.camera"
+    //  properties.
+    int32_t reqFMQSize = property_get_int32("ro.vendor.camera.req.fmq.size", /*default*/-1);
     if (reqFMQSize < 0) {
-        reqFMQSize = CAMERA_REQUEST_METADATA_QUEUE_SIZE;
+        reqFMQSize = property_get_int32("ro.camera.req.fmq.size", /*default*/-1);
+        if (reqFMQSize < 0) {
+            reqFMQSize = CAMERA_REQUEST_METADATA_QUEUE_SIZE;
+        } else {
+            ALOGV("%s: request FMQ size overridden to %d", __FUNCTION__, reqFMQSize);
+        }
     } else {
-        ALOGV("%s: request FMQ size overridden to %d", __FUNCTION__, reqFMQSize);
+        ALOGV("%s: request FMQ size overridden to %d via fallback property", __FUNCTION__,
+                reqFMQSize);
     }
 
     mRequestMetadataQueue = std::make_unique<RequestMetadataQueue>(
@@ -114,12 +123,22 @@
         return true;
     }
 
-    int32_t resFMQSize = property_get_int32("ro.camera.res.fmq.size", /*default*/-1);
+    // "ro.camera" properties are no longer supported on vendor side.
+    //  Support a fall back for the fmq size override that uses "ro.vendor.camera"
+    //  properties.
+    int32_t resFMQSize = property_get_int32("ro.vendor.camera.res.fmq.size", /*default*/-1);
     if (resFMQSize < 0) {
-        resFMQSize = CAMERA_RESULT_METADATA_QUEUE_SIZE;
+        resFMQSize = property_get_int32("ro.camera.res.fmq.size", /*default*/-1);
+        if (resFMQSize < 0) {
+            resFMQSize = CAMERA_RESULT_METADATA_QUEUE_SIZE;
+        } else {
+            ALOGV("%s: result FMQ size overridden to %d", __FUNCTION__, resFMQSize);
+        }
     } else {
-        ALOGV("%s: result FMQ size overridden to %d", __FUNCTION__, resFMQSize);
+        ALOGV("%s: result FMQ size overridden to %d via fallback property", __FUNCTION__,
+                resFMQSize);
     }
+
     mResultMetadataQueue = std::make_shared<RequestMetadataQueue>(
             static_cast<size_t>(resFMQSize),
             false /* non blocking */);
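
The request- and result-queue paths now duplicate the same two-property lookup. One possible way to factor that out, sketched under the assumption that cutils' property_get_int32() is already available in this file; the helper name is hypothetical and the per-property logging that distinguishes which override won is omitted for brevity:

    // Hypothetical helper: prefer the vendor property, then the legacy "ro.camera" property,
    // then the compile-time default.
    static int32_t getFmqSizeOverride(const char* vendorProp, const char* legacyProp,
                                      int32_t defaultSize) {
        int32_t size = property_get_int32(vendorProp, /*default*/ -1);
        if (size < 0) {
            size = property_get_int32(legacyProp, /*default*/ -1);
        }
        return size < 0 ? defaultSize : size;
    }

    // e.g. getFmqSizeOverride("ro.vendor.camera.req.fmq.size", "ro.camera.req.fmq.size",
    //                         CAMERA_REQUEST_METADATA_QUEUE_SIZE);
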
diff --git a/camera/device/3.4/default/ExternalCameraDevice.cpp b/camera/device/3.4/default/ExternalCameraDevice.cpp
index 3f04751..9a2fddf 100644
--- a/camera/device/3.4/default/ExternalCameraDevice.cpp
+++ b/camera/device/3.4/default/ExternalCameraDevice.cpp
@@ -342,8 +342,7 @@
                                                   256, 144,
                                                   240, 160,
                                                   256, 154,
-                                                  240, 240,
-                                                  320, 240};
+                                                  240, 180};
     UPDATE(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, jpegAvailableThumbnailSizes,
            ARRAY_SIZE(jpegAvailableThumbnailSizes));
 
diff --git a/compatibility_matrices/compatibility_matrix.4.xml b/compatibility_matrices/compatibility_matrix.4.xml
index 6c28761..7d6fc60 100644
--- a/compatibility_matrices/compatibility_matrix.4.xml
+++ b/compatibility_matrices/compatibility_matrix.4.xml
@@ -181,7 +181,12 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.gnss</name>
-        <version>1.0-1</version>
+        <!--
+         - Both versions are listed here as a workaround for libvintf since 2.0 extends 1.1.
+         - Devices launched with Q must support gnss@2.0, see VtsTrebleVendorVintfTest
+         - test DeviceManifestTest#GnssHalVersionCompatibility.
+        -->
+        <version>1.1</version>
         <version>2.0</version>
         <interface>
             <name>IGnss</name>
diff --git a/current.txt b/current.txt
index cd51ff5..d97c0b3 100644
--- a/current.txt
+++ b/current.txt
@@ -394,16 +394,18 @@
 23780340c686ee86986aa5a9755c2d8566224fed177bbb22a5ebf06be574b60c android.hardware.camera.metadata@3.3::types
 05d1ee760d81cdd2dc7a70ce0241af9fa830edae33b4be83d9bf5fffe05ddc6f android.hardware.camera.provider@2.4::ICameraProvider
 da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
+ede69710c3a95c2cbe818e6c8bb72c7816823face5fc21c17731b26f41d94d65 android.hardware.gnss@1.0::IGnss
 21165b8e30c4b2d52980e4728f661420adc16e38bbe73476c06b2085be908f4c android.hardware.gnss@1.0::IGnssCallback
 d702fb01dc2a0733aa820b7eb65435ee3334f75632ef880bafd2fb8803a20a58 android.hardware.gnss@1.0::IGnssMeasurementCallback
+b5f1f4c1bd6de71a8e71d70f57cdab904ac024a12f3dee3e2173770a4583bcc2 android.hardware.gnss@1.1::IGnss
 7c7721c0f773fcf422b71a4f558545e9e36acc973e58ca51e5bd53905cf46bc0 android.hardware.graphics.bufferqueue@1.0::IGraphicBufferProducer
 d4fea995378bb4f421b4e24ccf68cad2734ab07fe4f874a126ba558b99df5766 android.hardware.graphics.composer@2.1::IComposerClient
 f7d7cb747dc01a9fdb2d39a80003b4d8df9be733d65f5842198802eb6209db69 android.hardware.graphics.mapper@2.0::IMapper
 65a021fa89085b62fc96b2b6d3bef2f9103cf4d63379c68bc154fd9eef672852 android.hardware.health@1.0::types
 b7ecf29927055ec422ec44bf776223f07d79ad9f92ccf9becf167e62c2607e7a android.hardware.keymaster@4.0::IKeymasterDevice
 574e8f1499436fb4075894dcae0b36682427956ecb114f17f1fe22d116a83c6b android.hardware.neuralnetworks@1.0::IPreparedModel
-567de4ebb3a224721eabae40c4484fad2cd1608eb0e66ec9214eb88e9b15d3c9 android.hardware.neuralnetworks@1.0::types
-d51937a3567a50f239589e40300264c4b57f2c3582c6fc6df082f45eb74d90e3 android.hardware.neuralnetworks@1.1::types
+1e3576c07006d82ba5bc6ddbf87c101414d361c41afe7a82713750844c488725 android.hardware.neuralnetworks@1.0::types
+eb754b58c93e5591613208b4c972811288b0fa16a82430d602f107c91a908b22 android.hardware.neuralnetworks@1.1::types
 1d4a5776614c08b5d794a5ec5ab04697260cbd4b3441d5935cd53ee71d19da02 android.hardware.radio@1.0::IRadioResponse
 ed9da80ec0c96991fd03f0a46107815d0e50f764656e49dba4980fa5c31d5bc3 android.hardware.radio@1.0::types
 1d19720d4fd38b1095f0f555a4bd92b3b12c9b1d0f560b0e9a474cd6dcc20db6 android.hardware.radio@1.2::IRadio
@@ -460,15 +462,15 @@
 5b1f4a4fb88c239e07d76026467a1f2ee0d08f4d52c1805bd93bd7c05e3fe69c android.hardware.drm@1.2::ICryptoFactory
 4895f98e9ef210e9acb01982f5d07b654538377e1404b8db5e19e7858835e9d8 android.hardware.drm@1.2::ICryptoPlugin
 976116b9033b2c222b940109fdf0ffcc29b77cbe631ef6b4fcc2ad5ce8e605f7 android.hardware.drm@1.2::IDrmFactory
-b2efccc6425085f84795a2ca15a09d9a81ffd02f9dc3d30ba21d1a59bdfa253f android.hardware.drm@1.2::IDrmPlugin
-39ca9e88404b6c090f7650455a7ed3fdee9cce4e3a356c9d547f8ff02f2e7fc8 android.hardware.drm@1.2::IDrmPluginListener
-f27baaa587bc3dd9b740cb6928ab812b9b7d105b5187663938aee578105f3c39 android.hardware.drm@1.2::types
+8ef1caf921c3e83a00180f770e3b8e8ff65d8a5c806482e51aa45e6d55f1aec1 android.hardware.drm@1.2::IDrmPlugin
+b778fcce93eb6294446a940e1bae0200da7bd97b91b91977be2dcd31ca58374f android.hardware.drm@1.2::IDrmPluginListener
+564732cbfe5c0895cfbd2bdf84c3f2b0f760ea20f2237c0d388aaeeaef2dd0a9 android.hardware.drm@1.2::types
 44480c912e4ab90b9ed17e56569cd5ca98413a8a2372efb028f4181204b6b73e android.hardware.fastboot@1.0::IFastboot
 7b2989744e3c555292d4b5b829acd09a7b40f96ead62ce54174cd959503b64bb android.hardware.fastboot@1.0::types
 7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss
 2e5ad983734069e84a760004b32da0d09e4170c05380abe27e6eb80e4aa70d5a android.hardware.gnss@2.0::IAGnssCallback
 1f4ac068a88a72360280d94a7f6fd7c63813c1eea4891a0eb01394d3e7e775f2 android.hardware.gnss@2.0::IAGnssRil
-4deafcdcffa2d002119e7f58810b767a84666e76475aae68e757ec2845d9756d android.hardware.gnss@2.0::IGnss
+f5605f48c2fb9f231615dd932bf730ae9540f4f98b5b7ae2b269975f452f6d73 android.hardware.gnss@2.0::IGnss
 db6bdf6dfc5edf6c85d2944976db899227abb51079c893874353c322342c50b6 android.hardware.gnss@2.0::IGnssBatching
 1f89392f1ebb693d8fa6f50324b1635fc79fab246d31900e63998e1b0e17511c android.hardware.gnss@2.0::IGnssBatchingCallback
 64232037109a5e5f53ab0377e755ec494ae93fcb5279e6eea71dec2e7ac6fbfc android.hardware.gnss@2.0::IGnssCallback
@@ -516,7 +518,7 @@
 92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
 36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
 e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-73e995644b1bb2678ec3ab850feb7a1b4495501953951c22316cefd67b900b3e android.hardware.neuralnetworks@1.2::types
+d18bba0b6c8d2d1da3cfb52b14f556d2e04eb91551d97ee60a3524cf993a3e0e android.hardware.neuralnetworks@1.2::types
 cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
 abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
 4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/drm/1.2/IDrmPlugin.hal b/drm/1.2/IDrmPlugin.hal
index 7d266f4..df09ccf 100644
--- a/drm/1.2/IDrmPlugin.hal
+++ b/drm/1.2/IDrmPlugin.hal
@@ -226,4 +226,22 @@
      * @param sessionId identifies the session the event originated from
      */
     sendSessionLostState(SessionId sessionId);
+
+    /**
+     * Send a keys change event to the listener. The keys change event
+     * indicates the status of each key in the session. Keys can be
+     * indicated as being usable, expired, outputnotallowed or statuspending.
+     *
+     * This method only differs from the @1.0 version by the addition of new
+     * KeyStatusType(s) in keyStatusList.
+     *
+     * @param sessionId identifies the session the event originated from
+     * @param keyStatusList indicates the status for each key ID in the
+     * session.
+     * @param hasNewUsableKey indicates if the event includes at least one
+     * key that has become usable.
+     */
+    sendKeysChange_1_2(SessionId sessionId, vec<KeyStatus> keyStatusList,
+            bool hasNewUsableKey);
+
 };
diff --git a/drm/1.2/IDrmPluginListener.hal b/drm/1.2/IDrmPluginListener.hal
index a6bd6c9..e8cb91a 100644
--- a/drm/1.2/IDrmPluginListener.hal
+++ b/drm/1.2/IDrmPluginListener.hal
@@ -36,4 +36,22 @@
      * @param sessionId identifies the session that has been invalidated
      */
      oneway sendSessionLostState(SessionId sessionId);
+
+    /**
+     * Send a keys change event to the listener. The keys change event
+     * indicates the status of each key in the session. Keys can be
+     * indicated as being usable, expired, outputnotallowed or statuspending.
+     *
+     * This method only differs from the @1.0 version by the addition of new
+     * KeyStatusType(s) in keyStatusList.
+     *
+     * @param sessionId identifies the session the event originated from
+     * @param keyStatusList indicates the status for each key ID in the
+     * session.
+     * @param hasNewUsableKey indicates if the event includes at least one
+     * key that has become usable.
+     */
+    oneway sendKeysChange_1_2(SessionId sessionId, vec<KeyStatus> keyStatusList,
+            bool hasNewUsableKey);
+
 };
diff --git a/drm/1.2/types.hal b/drm/1.2/types.hal
index 28c8e67..87218a4 100644
--- a/drm/1.2/types.hal
+++ b/drm/1.2/types.hal
@@ -16,6 +16,7 @@
 
 package android.hardware.drm@1.2;
 
+import @1.0::KeyStatusType;
 import @1.0::Status;
 import @1.1::HdcpLevel;
 
@@ -93,3 +94,25 @@
  * set in methods that take a KeySetId as an input parameter.
  */
 typedef vec<uint8_t> KeySetId;
+
+enum KeyStatusType : @1.0::KeyStatusType {
+    /**
+     * The key is not yet usable to decrypt media because the start
+     * time is in the future. The key must become usable when
+     * its start time is reached.
+     */
+    USABLEINFUTURE
+};
+
+/**
+ * Used by sendKeysChange_1_2 to report the usability status of each key to the
+ * app.
+ *
+ * This struct only differs from the @1.0 version by the addition of new
+ * KeyStatusType(s).
+ *
+ */
+struct KeyStatus {
+    KeySetId keyId;
+    KeyStatusType type;
+};
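
The new KeyStatus struct plus the extended KeyStatusType let a plugin report the 1.2-only USABLEINFUTURE state through sendKeysChange_1_2(). A minimal sketch of building such a status list, mirroring what the VTS test further down does (the key IDs and the commented send call are illustrative):

    using ::android::hardware::drm::V1_2::KeyStatus;
    using ::android::hardware::drm::V1_2::KeyStatusType;
    using ::android::hardware::hidl_vec;

    // One entry per key: a KeySetId (vec<uint8_t>) plus the @1.2 status type.
    const hidl_vec<KeyStatus> keyStatusList = {
        {{1}, KeyStatusType::USABLE},
        {{2}, KeyStatusType::USABLEINFUTURE},  // new in @1.2
    };
    // drmPlugin->sendKeysChange_1_2(sessionId, keyStatusList, /*hasNewUsableKey=*/true);
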
diff --git a/drm/1.2/vts/functional/drm_hal_common.cpp b/drm/1.2/vts/functional/drm_hal_common.cpp
index b9a8425..bfffbe8 100644
--- a/drm/1.2/vts/functional/drm_hal_common.cpp
+++ b/drm/1.2/vts/functional/drm_hal_common.cpp
@@ -56,6 +56,7 @@
 namespace vts {
 
 const char *kCallbackLostState = "LostState";
+const char *kCallbackKeysChange = "KeysChange";
 
 drm_vts::VendorModules *DrmHalTest::gVendorModules = nullptr;
 
@@ -64,7 +65,19 @@
  */
 
 Return<void> DrmHalPluginListener::sendSessionLostState(const hidl_vec<uint8_t>& sessionId) {
-    NotifyFromCallback(kCallbackLostState, sessionId);
+    ListenerEventArgs args;
+    args.sessionId = sessionId;
+    NotifyFromCallback(kCallbackLostState, args);
+    return Void();
+}
+
+Return<void> DrmHalPluginListener::sendKeysChange_1_2(const hidl_vec<uint8_t>& sessionId,
+        const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey) {
+    ListenerEventArgs args;
+    args.sessionId = sessionId;
+    args.keyStatusList = keyStatusList;
+    args.hasNewUsableKey = hasNewUsableKey;
+    NotifyFromCallback(kCallbackKeysChange, args);
     return Void();
 }
 
diff --git a/drm/1.2/vts/functional/drm_hal_common.h b/drm/1.2/vts/functional/drm_hal_common.h
index 1b95dde..e348664 100644
--- a/drm/1.2/vts/functional/drm_hal_common.h
+++ b/drm/1.2/vts/functional/drm_hal_common.h
@@ -37,7 +37,7 @@
 
 using ::android::hardware::drm::V1_0::EventType;
 using ::android::hardware::drm::V1_0::KeyedVector;
-using ::android::hardware::drm::V1_0::KeyStatus;
+using KeyStatusV1_0 = ::android::hardware::drm::V1_0::KeyStatus;
 using ::android::hardware::drm::V1_0::KeyType;
 using ::android::hardware::drm::V1_0::Mode;
 using ::android::hardware::drm::V1_0::Pattern;
@@ -46,10 +46,6 @@
 
 using ::android::hardware::drm::V1_1::ICryptoFactory;
 
-using ::android::hardware::drm::V1_2::ICryptoPlugin;
-using ::android::hardware::drm::V1_2::IDrmFactory;
-using ::android::hardware::drm::V1_2::IDrmPlugin;
-using ::android::hardware::drm::V1_2::IDrmPluginListener;
 using StatusV1_2 = ::android::hardware::drm::V1_2::Status;
 
 using ::android::hardware::hidl_array;
@@ -166,9 +162,16 @@
  *  Event Handling tests
  */
 extern const char *kCallbackLostState;
+extern const char *kCallbackKeysChange;
+
+struct ListenerEventArgs {
+    SessionId sessionId;
+    hidl_vec<KeyStatus> keyStatusList;
+    bool hasNewUsableKey;
+};
 
 class DrmHalPluginListener
-    : public ::testing::VtsHalHidlTargetCallbackBase<SessionId>,
+    : public ::testing::VtsHalHidlTargetCallbackBase<ListenerEventArgs>,
       public IDrmPluginListener {
 public:
     DrmHalPluginListener() {
@@ -183,10 +186,13 @@
             int64_t) override { return Void(); }
 
     virtual Return<void> sendKeysChange(const hidl_vec<uint8_t>&,
-            const hidl_vec<KeyStatus>&, bool) override { return Void(); }
+            const hidl_vec<KeyStatusV1_0>&, bool) override { return Void(); }
 
     virtual Return<void> sendSessionLostState(const hidl_vec<uint8_t>& sessionId) override;
 
+    virtual Return<void> sendKeysChange_1_2(const hidl_vec<uint8_t>&,
+            const hidl_vec<KeyStatus>&, bool) override;
+
 };
 
 }  // namespace vts
diff --git a/drm/1.2/vts/functional/drm_hal_test.cpp b/drm/1.2/vts/functional/drm_hal_test.cpp
index 252ebb9..37ecc25 100644
--- a/drm/1.2/vts/functional/drm_hal_test.cpp
+++ b/drm/1.2/vts/functional/drm_hal_test.cpp
@@ -28,6 +28,8 @@
 using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::drm::V1_2::HdcpLevel;
 using ::android::hardware::drm::V1_2::KeySetId;
+using ::android::hardware::drm::V1_2::KeyStatus;
+using ::android::hardware::drm::V1_2::KeyStatusType;
 using ::android::hardware::drm::V1_2::OfflineLicenseState;
 
 using ::android::hardware::drm::V1_2::vts::DrmHalClearkeyTest;
@@ -35,6 +37,7 @@
 using ::android::hardware::drm::V1_2::vts::DrmHalTest;
 using ::android::hardware::drm::V1_2::vts::DrmHidlEnvironment;
 using ::android::hardware::drm::V1_2::vts::kCallbackLostState;
+using ::android::hardware::drm::V1_2::vts::kCallbackKeysChange;
 
 using ::android::hardware::hidl_string;
 
@@ -275,6 +278,35 @@
 }
 
 /**
+ * Simulate the plugin sending keys change and make sure
+ * the listener gets them.
+ */
+TEST_P(DrmHalTest, ListenerKeysChange) {
+    RETURN_IF_SKIPPED;
+    sp<DrmHalPluginListener> listener = new DrmHalPluginListener();
+    auto res = drmPlugin->setListener(listener);
+    EXPECT_OK(res);
+
+    auto sessionId = openSession();
+    const hidl_vec<KeyStatus> keyStatusList = {
+        {{1}, KeyStatusType::USABLE},
+        {{2}, KeyStatusType::EXPIRED},
+        {{3}, KeyStatusType::OUTPUTNOTALLOWED},
+        {{4}, KeyStatusType::STATUSPENDING},
+        {{5}, KeyStatusType::INTERNALERROR},
+        {{6}, KeyStatusType::USABLEINFUTURE},
+    };
+
+    drmPlugin->sendKeysChange_1_2(sessionId, keyStatusList, true);
+    auto result = listener->WaitForCallback(kCallbackKeysChange);
+    EXPECT_TRUE(result.no_timeout);
+    EXPECT_TRUE(result.args);
+    EXPECT_EQ(sessionId, result.args->sessionId);
+    EXPECT_EQ(keyStatusList, result.args->keyStatusList);
+    closeSession(sessionId);
+}
+
+/**
  *  CryptoPlugin Decrypt tests
  */
 
@@ -452,7 +484,7 @@
     auto result = listener->WaitForCallback(kCallbackLostState);
     EXPECT_TRUE(result.no_timeout);
     EXPECT_TRUE(result.args);
-    EXPECT_EQ(sessionId, *(result.args));
+    EXPECT_EQ(sessionId, result.args->sessionId);
 }
 
 /**
diff --git a/gnss/1.0/IGnss.hal b/gnss/1.0/IGnss.hal
index 602c615..d32bc63 100644
--- a/gnss/1.0/IGnss.hal
+++ b/gnss/1.0/IGnss.hal
@@ -75,8 +75,13 @@
     };
 
     /**
-     * Opens the interface and provides the callback routines
-     * to the implementation of this interface.
+     * Opens the interface and provides the callback routines to the implementation of this
+     * interface.
+     *
+     * The framework calls this method to instruct the GPS engine to prepare for serving requests
+     * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+     * framework upon successful return from this method until the cleanup() method is called to
+     * close this interface.
      *
      * @param callback Callback interface for IGnss.
      *
@@ -105,6 +110,18 @@
 
     /**
      * Closes the interface.
+     *
+     * The cleanup() method is called by the framework to tell the GNSS HAL implementation to
+     * not expect any GNSS requests in the immediate future - e.g. this may be called when
+     * location is disabled by a user setting or low battery conditions. The GNSS HAL
+     * implementation must immediately stop responding to any existing requests until the
+     * setCallback() method is called again and the requests are re-initiated by the framework.
+     *
+     * After this method is called, the GNSS HAL implementation may choose to modify GNSS hardware
+     * states to save power. It is expected that when the setCallback() method is called again to
+     * reopen this interface and serve requests, there may be some minor delays in GNSS responses
+     * as hardware readiness states are restored, not to exceed those that occur on normal
+     * device boot up.
      */
     cleanup();
 
@@ -153,7 +170,7 @@
      * @param mode  Parameter must be one of MS_BASED or STANDALONE.
      * It is allowed by the platform (and it is recommended) to fallback to
      * MS_BASED if MS_ASSISTED is passed in, and MS_BASED is supported.
-     * @recurrence GNSS postion recurrence value, either periodic or single.
+     * @recurrence GNSS position recurrence value, either periodic or single.
      * @param minIntervalMs Represents the time between fixes in milliseconds.
      * @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
      * @param preferredTimeMs Represents the requested time to first fix in milliseconds.
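
The expanded setCallback()/cleanup() documentation describes a strict lifecycle: the HAL serves requests only between a successful setCallback() and the next cleanup(). A framework-side sketch of that sequence, assuming an already-obtained IGnss proxy and callback object (error handling and return-value checks elided):

    // gnssHal: sp<IGnss>, gnssCallback: sp<IGnssCallback> -- both assumed to exist already.
    gnssHal->setCallback(gnssCallback);   // HAL must now respond to GNSS requests
    gnssHal->start();                     // e.g. begin delivering location fixes
    // ... requests and callbacks flow while the interface is open ...
    gnssHal->stop();
    gnssHal->cleanup();                   // HAL may power down hardware; no further requests expected
    // A later setCallback() reopens the interface, possibly with minor readiness delays.
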
diff --git a/gnss/1.1/IGnss.hal b/gnss/1.1/IGnss.hal
index 672f742..3400807 100644
--- a/gnss/1.1/IGnss.hal
+++ b/gnss/1.1/IGnss.hal
@@ -29,6 +29,11 @@
      * Opens the interface and provides the callback routines to the implementation of this
      * interface.
      *
+     * The framework calls this method to instruct the GPS engine to prepare for serving requests
+     * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+     * framework upon successful return from this method until the cleanup() method is called to
+     * close this interface.
+     *
      * @param callback Callback interface for IGnss.
      *
      * @return success Returns true on success.
@@ -42,7 +47,7 @@
      * @param mode Parameter must be one of MS_BASED or STANDALONE. It is allowed by the platform
      *     (and it is recommended) to fallback to MS_BASED if MS_ASSISTED is passed in, and MS_BASED
      *     is supported.
-     * @param recurrence GNSS postion recurrence value, either periodic or single.
+     * @param recurrence GNSS position recurrence value, either periodic or single.
      * @param minIntervalMs Represents the time between fixes in milliseconds.
      * @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
      * @param preferredTimeMs Represents the requested time to first fix in milliseconds.
diff --git a/gnss/2.0/IGnss.hal b/gnss/2.0/IGnss.hal
index f19f8d0..9935bf9 100644
--- a/gnss/2.0/IGnss.hal
+++ b/gnss/2.0/IGnss.hal
@@ -36,13 +36,18 @@
  * the interface @1.0::IGnssNi.hal and @1.0::IGnssNiCallback.hal are deprecated in this version
  * and are not supported by the framework. The GNSS HAL implementation of this interface
  * must return nullptr for the following @1.0::IGnss method.
- *      getExtensionGnssNi() generates (IGnssNi gnssNiIface);
+ *     getExtensionGnssNi() generates (IGnssNi gnssNiIface);
  */
 interface IGnss extends @1.1::IGnss {
     /**
      * Opens the interface and provides the callback routines to the implementation of this
      * interface.
      *
+     * The framework calls this method to instruct the GPS engine to prepare for serving requests
+     * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+     * framework upon successful return from this method until the cleanup() method is called to
+     * close this interface.
+     *
      * @param callback Callback interface for IGnss.
      *
      * @return success Returns true on success.
@@ -83,8 +88,9 @@
     /**
      * This method returns the IGnssMeasurement interface.
      *
-     * Exactly one of getExtensionGnssMeasurement_1_1() and getExtensionGnssMeasurement_2_0() must
-     * return a non-null handle, and the other method must return nullptr.
+     * Exactly one of getExtensionGnssMeasurement(), getExtensionGnssMeasurement_1_1(), and
+     * getExtensionGnssMeasurement_2_0() methods must return a non-null handle, and the other
+     * methods must return nullptr.
      *
      * @return gnssMeasurementIface Handle to the IGnssMeasurement interface.
      */
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.cpp b/gnss/2.0/vts/functional/gnss_hal_test.cpp
index da6092b..a9f858c 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test.cpp
@@ -23,42 +23,26 @@
 using ::android::hardware::gnss::common::Utils;
 
 // Implementations for the main test class for GNSS HAL
-GnssHalTest::GnssHalTest()
-    : info_called_count_(0),
-      capabilities_called_count_(0),
-      measurement_corrections_capabilities_called_count_(0),
-      location_called_count_(0),
-      name_called_count_(0),
-      notify_count_(0) {}
-
 void GnssHalTest::SetUp() {
     gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
         GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
-    list_vec_gnss_sv_info_.clear();
     ASSERT_NE(gnss_hal_, nullptr);
 
     SetUpGnssCallback();
 }
 
 void GnssHalTest::TearDown() {
-    // Reset counters
-    info_called_count_ = 0;
-    capabilities_called_count_ = 0;
-    measurement_corrections_capabilities_called_count_ = 0;
-    location_called_count_ = 0;
-    name_called_count_ = 0;
-    measurement_called_count_ = 0;
-
     if (gnss_hal_ != nullptr) {
         gnss_hal_->cleanup();
+        gnss_hal_ = nullptr;
     }
-    if (notify_count_ > 0) {
-        ALOGW("%d unprocessed callbacks discarded", notify_count_);
-    }
+
+    // Set to nullptr to destroy the callback event queues and warn of any unprocessed events.
+    gnss_cb_ = nullptr;
 }
 
 void GnssHalTest::SetUpGnssCallback() {
-    gnss_cb_ = new GnssCallback(*this);
+    gnss_cb_ = new GnssCallback();
     ASSERT_NE(gnss_cb_, nullptr);
 
     auto result = gnss_hal_->setCallback_2_0(gnss_cb_);
@@ -72,13 +56,13 @@
     /*
      * All capabilities, name and systemInfo callbacks should trigger
      */
-    EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
-    EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
-    EXPECT_EQ(std::cv_status::no_timeout, wait(TIMEOUT_SEC));
+    EXPECT_TRUE(gnss_cb_->capabilities_cbq_.retrieve(gnss_cb_->last_capabilities_, TIMEOUT_SEC));
+    EXPECT_TRUE(gnss_cb_->info_cbq_.retrieve(gnss_cb_->last_info_, TIMEOUT_SEC));
+    EXPECT_TRUE(gnss_cb_->name_cbq_.retrieve(gnss_cb_->last_name_, TIMEOUT_SEC));
 
-    EXPECT_EQ(capabilities_called_count_, 1);
-    EXPECT_EQ(info_called_count_, 1);
-    EXPECT_EQ(name_called_count_, 1);
+    EXPECT_EQ(gnss_cb_->capabilities_cbq_.calledCount(), 1);
+    EXPECT_EQ(gnss_cb_->info_cbq_.calledCount(), 1);
+    EXPECT_EQ(gnss_cb_->name_cbq_.calledCount(), 1);
 }
 
 void GnssHalTest::StopAndClearLocations() {
@@ -92,9 +76,9 @@
      * the last reply for final startup messages to arrive (esp. system
      * info.)
      */
-    while (wait(TIMEOUT_SEC) == std::cv_status::no_timeout) {
+    while (gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_, TIMEOUT_SEC)) {
     }
-    location_called_count_ = 0;
+    gnss_cb_->location_cbq_.reset();
 }
 
 void GnssHalTest::SetPositionMode(const int min_interval_msec, const bool low_power_mode) {
@@ -121,19 +105,22 @@
      */
     const int kFirstGnssLocationTimeoutSeconds = 75;
 
-    wait(kFirstGnssLocationTimeoutSeconds);
-    EXPECT_EQ(location_called_count_, 1);
+    EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
+                                                 kFirstGnssLocationTimeoutSeconds));
+    int locationCalledCount = gnss_cb_->location_cbq_.calledCount();
+    EXPECT_EQ(locationCalledCount, 1);
 
-    if (location_called_count_ > 0) {
+    if (locationCalledCount > 0) {
         // don't require speed on first fix
-        CheckLocation(last_location_, false);
+        CheckLocation(gnss_cb_->last_location_, false);
         return true;
     }
     return false;
 }
 
 void GnssHalTest::CheckLocation(const GnssLocation_2_0& location, bool check_speed) {
-    const bool check_more_accuracies = (info_called_count_ > 0 && last_info_.yearOfHw >= 2017);
+    const bool check_more_accuracies =
+            (gnss_cb_->info_cbq_.calledCount() > 0 && gnss_cb_->last_info_.yearOfHw >= 2017);
 
     Utils::checkLocation(location.v1_0, check_speed, check_more_accuracies);
 }
@@ -148,77 +135,47 @@
     EXPECT_TRUE(StartAndCheckFirstLocation());
 
     for (int i = 1; i < count; i++) {
-        EXPECT_EQ(std::cv_status::no_timeout, wait(kLocationTimeoutSubsequentSec));
-        EXPECT_EQ(location_called_count_, i + 1);
+        EXPECT_TRUE(gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_,
+                                                     kLocationTimeoutSubsequentSec));
+        int locationCalledCount = gnss_cb_->location_cbq_.calledCount();
+        EXPECT_EQ(locationCalledCount, i + 1);
         // Don't cause confusion by checking details if no location yet
-        if (location_called_count_ > 0) {
+        if (locationCalledCount > 0) {
             // Should be more than 1 location by now, but if not, still don't check first fix speed
-            CheckLocation(last_location_, location_called_count_ > 1);
+            CheckLocation(gnss_cb_->last_location_, locationCalledCount > 1);
         }
     }
 }
 
-void GnssHalTest::notify() {
-    {
-        std::unique_lock<std::mutex> lock(mtx_);
-        notify_count_++;
-    }
-    cv_.notify_one();
-}
-
-std::cv_status GnssHalTest::wait(int timeout_seconds) {
-    std::unique_lock<std::mutex> lock(mtx_);
-
-    auto status = std::cv_status::no_timeout;
-    while (notify_count_ == 0) {
-        status = cv_.wait_for(lock, std::chrono::seconds(timeout_seconds));
-        if (status == std::cv_status::timeout) return status;
-    }
-    notify_count_--;
-    return status;
-}
-
-std::cv_status GnssHalTest::waitForMeasurementCorrectionsCapabilities(int timeout_seconds) {
-    std::unique_lock<std::mutex> lock(mtx_);
-    auto status = std::cv_status::no_timeout;
-    while (measurement_corrections_capabilities_called_count_ == 0) {
-        status = cv_.wait_for(lock, std::chrono::seconds(timeout_seconds));
-        if (status == std::cv_status::timeout) return status;
-    }
-    notify_count_--;
-    return status;
-}
+GnssHalTest::GnssCallback::GnssCallback()
+    : info_cbq_("system_info"),
+      name_cbq_("name"),
+      capabilities_cbq_("capabilities"),
+      location_cbq_("location"),
+      sv_info_cbq_("sv_info") {}
 
 Return<void> GnssHalTest::GnssCallback::gnssSetSystemInfoCb(
         const IGnssCallback_1_0::GnssSystemInfo& info) {
     ALOGI("Info received, year %d", info.yearOfHw);
-    parent_.info_called_count_++;
-    parent_.last_info_ = info;
-    parent_.notify();
+    info_cbq_.store(info);
     return Void();
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssSetCapabilitesCb(uint32_t capabilities) {
     ALOGI("Capabilities received %d", capabilities);
-    parent_.capabilities_called_count_++;
-    parent_.last_capabilities_ = capabilities;
-    parent_.notify();
+    capabilities_cbq_.store(capabilities);
     return Void();
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssSetCapabilitiesCb_2_0(uint32_t capabilities) {
     ALOGI("Capabilities (v2.0) received %d", capabilities);
-    parent_.capabilities_called_count_++;
-    parent_.last_capabilities_ = capabilities;
-    parent_.notify();
+    capabilities_cbq_.store(capabilities);
     return Void();
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssNameCb(const android::hardware::hidl_string& name) {
     ALOGI("Name received: %s", name.c_str());
-    parent_.name_called_count_++;
-    parent_.last_name_ = name;
-    parent_.notify();
+    name_cbq_.store(name);
     return Void();
 }
 
@@ -235,40 +192,32 @@
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssLocationCbImpl(const GnssLocation_2_0& location) {
-    parent_.location_called_count_++;
-    parent_.last_location_ = location;
-    parent_.notify();
+    location_cbq_.store(location);
     return Void();
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb(const IGnssCallback_1_0::GnssSvStatus&) {
     ALOGI("gnssSvStatusCb");
-
-    return Void();
-}
-
-Return<void> GnssHalTest::GnssMeasurementCallback::gnssMeasurementCb_2_0(
-    const IGnssMeasurementCallback_2_0::GnssData& data) {
-    ALOGD("GnssMeasurement received. Size = %d", (int)data.measurements.size());
-    parent_.measurement_called_count_++;
-    parent_.last_measurement_ = data;
-    parent_.notify();
-    return Void();
-}
-
-Return<void> GnssHalTest::GnssMeasurementCorrectionsCallback::setCapabilitiesCb(
-        uint32_t capabilities) {
-    ALOGI("GnssMeasurementCorrectionsCallback capabilities received %d", capabilities);
-    parent_.measurement_corrections_capabilities_called_count_++;
-    parent_.last_measurement_corrections_capabilities_ = capabilities;
-    parent_.notify();
     return Void();
 }
 
 Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb_2_0(
         const hidl_vec<IGnssCallback_2_0::GnssSvInfo>& svInfoList) {
     ALOGI("gnssSvStatusCb_2_0. Size = %d", (int)svInfoList.size());
-    parent_.list_vec_gnss_sv_info_.emplace_back(svInfoList);
-    parent_.notify();
+    sv_info_cbq_.store(svInfoList);
+    return Void();
+}
+
+Return<void> GnssHalTest::GnssMeasurementCallback::gnssMeasurementCb_2_0(
+    const IGnssMeasurementCallback_2_0::GnssData& data) {
+    ALOGD("GnssMeasurement received. Size = %d", (int)data.measurements.size());
+    measurement_cbq_.store(data);
+    return Void();
+}
+
+Return<void> GnssHalTest::GnssMeasurementCorrectionsCallback::setCapabilitiesCb(
+        uint32_t capabilities) {
+    ALOGI("GnssMeasurementCorrectionsCallback capabilities received %d", capabilities);
+    capabilities_cbq_.store(capabilities);
     return Void();
 }
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.h b/gnss/2.0/vts/functional/gnss_hal_test.h
index 737815f..05e37d3 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.h
+++ b/gnss/2.0/vts/functional/gnss_hal_test.h
@@ -22,7 +22,7 @@
 #include <VtsHalHidlTargetTestEnvBase.h>
 
 #include <condition_variable>
-#include <list>
+#include <deque>
 #include <mutex>
 
 using android::hardware::hidl_vec;
@@ -65,27 +65,61 @@
 // The main test class for GNSS HAL.
 class GnssHalTest : public ::testing::VtsHalHidlTargetTestBase {
    public:
-    GnssHalTest();
-
     virtual void SetUp() override;
 
     virtual void TearDown() override;
 
-    /* Used as a mechanism to inform the test that a callback has occurred */
-    void notify();
+    /* Producer/consumer queue for storing/retrieving callback events from GNSS HAL */
+    template <class T>
+    class CallbackQueue {
+      public:
+        CallbackQueue(const std::string& name) : name_(name), called_count_(0){};
+        ~CallbackQueue() { reset(); }
 
-    /* Test code calls this function to wait for a callback */
-    std::cv_status wait(int timeout_seconds);
+        /* Adds callback event to the end of the queue. */
+        void store(const T& event);
 
-    std::cv_status waitForMeasurementCorrectionsCapabilities(int timeout_seconds);
+        /*
+         * Removes the callback event at the front of the queue, stores it in the event parameter
+         * and returns true. Returns false on timeout, in which case event is not populated.
+         */
+        bool retrieve(T& event, int timeout_seconds);
+
+        /* Returns the number of events pending to be retrieved from the callback event queue. */
+        int size() const;
+
+        /* Returns the number of callback events received since last reset(). */
+        int calledCount() const;
+
+        /* Clears the callback event queue and resets the calledCount() to 0. */
+        void reset();
+
+      private:
+        CallbackQueue(const CallbackQueue&) = delete;
+        CallbackQueue& operator=(const CallbackQueue&) = delete;
+
+        std::string name_;
+        int called_count_;
+        mutable std::recursive_mutex mtx_;
+        std::condition_variable_any cv_;
+        std::deque<T> events_;
+    };
 
     /* Callback class for data & Event. */
     class GnssCallback : public IGnssCallback_2_0 {
       public:
-        GnssHalTest& parent_;
+        IGnssCallback_1_0::GnssSystemInfo last_info_;
+        android::hardware::hidl_string last_name_;
+        uint32_t last_capabilities_;
+        GnssLocation_2_0 last_location_;
 
-        GnssCallback(GnssHalTest& parent) : parent_(parent){};
+        CallbackQueue<IGnssCallback_1_0::GnssSystemInfo> info_cbq_;
+        CallbackQueue<android::hardware::hidl_string> name_cbq_;
+        CallbackQueue<uint32_t> capabilities_cbq_;
+        CallbackQueue<GnssLocation_2_0> location_cbq_;
+        CallbackQueue<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> sv_info_cbq_;
 
+        GnssCallback();
         virtual ~GnssCallback() = default;
 
         // Dummy callback handlers
@@ -125,9 +159,10 @@
 
     /* Callback class for GnssMeasurement. */
     class GnssMeasurementCallback : public IGnssMeasurementCallback_2_0 {
-       public:
-        GnssHalTest& parent_;
-        GnssMeasurementCallback(GnssHalTest& parent) : parent_(parent){};
+      public:
+        CallbackQueue<IGnssMeasurementCallback_2_0::GnssData> measurement_cbq_;
+
+        GnssMeasurementCallback() : measurement_cbq_("measurement"){};
         virtual ~GnssMeasurementCallback() = default;
 
         // Methods from V1_0::IGnssMeasurementCallback follow.
@@ -147,8 +182,10 @@
     /* Callback class for GnssMeasurementCorrections. */
     class GnssMeasurementCorrectionsCallback : public IMeasurementCorrectionsCallback {
       public:
-        GnssHalTest& parent_;
-        GnssMeasurementCorrectionsCallback(GnssHalTest& parent) : parent_(parent){};
+        uint32_t last_capabilities_;
+        CallbackQueue<uint32_t> capabilities_cbq_;
+
+        GnssMeasurementCorrectionsCallback() : capabilities_cbq_("capabilities"){};
         virtual ~GnssMeasurementCorrectionsCallback() = default;
 
         // Methods from V1_0::IMeasurementCorrectionsCallback follow.
@@ -203,32 +240,51 @@
     void SetPositionMode(const int min_interval_msec, const bool low_power_mode);
 
     sp<IGnss> gnss_hal_;         // GNSS HAL to call into
-    sp<IGnssCallback_2_0> gnss_cb_;  // Primary callback interface
-
-    // TODO: make these variables thread-safe.
-    /* Count of calls to set the following items, and the latest item (used by
-     * test.)
-     */
-    int info_called_count_;
-    int capabilities_called_count_;
-    int measurement_corrections_capabilities_called_count_;
-    int location_called_count_;
-    int measurement_called_count_;
-    int name_called_count_;
-
-    IGnssCallback_1_0::GnssSystemInfo last_info_;
-    uint32_t last_capabilities_;
-    uint32_t last_measurement_corrections_capabilities_;
-    GnssLocation_2_0 last_location_;
-    IGnssMeasurementCallback_2_0::GnssData last_measurement_;
-    android::hardware::hidl_string last_name_;
-
-    list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> list_vec_gnss_sv_info_;
-
-  private:
-    std::mutex mtx_;
-    std::condition_variable cv_;
-    int notify_count_;
+    sp<GnssCallback> gnss_cb_;   // Primary callback interface
 };
 
+template <class T>
+void GnssHalTest::CallbackQueue<T>::store(const T& event) {
+    std::unique_lock<std::recursive_mutex> lock(mtx_);
+    events_.push_back(event);
+    ++called_count_;
+    lock.unlock();
+    cv_.notify_all();
+}
+
+template <class T>
+bool GnssHalTest::CallbackQueue<T>::retrieve(T& event, int timeout_seconds) {
+    std::unique_lock<std::recursive_mutex> lock(mtx_);
+    cv_.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] { return !events_.empty(); });
+    if (events_.empty()) {
+        return false;
+    }
+    event = events_.front();
+    events_.pop_front();
+    return true;
+}
+
+template <class T>
+int GnssHalTest::CallbackQueue<T>::size() const {
+    std::unique_lock<std::recursive_mutex> lock(mtx_);
+    return events_.size();
+}
+
+template <class T>
+int GnssHalTest::CallbackQueue<T>::calledCount() const {
+    std::unique_lock<std::recursive_mutex> lock(mtx_);
+    return called_count_;
+}
+
+template <class T>
+void GnssHalTest::CallbackQueue<T>::reset() {
+    std::unique_lock<std::recursive_mutex> lock(mtx_);
+    if (!events_.empty()) {
+        ALOGW("%u unprocessed events discarded in callback queue %s", (unsigned int)events_.size(),
+              name_.c_str());
+    }
+    events_.clear();
+    called_count_ = 0;
+}
+
 #endif  // GNSS_HAL_TEST_H_
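
The CallbackQueue contract declared and implemented above boils down to a small producer/consumer pattern. A self-contained usage sketch with an arbitrary event type:

    // Producer/consumer usage of CallbackQueue<T> (sketch).
    GnssHalTest::CallbackQueue<int> queue("example");
    queue.store(42);                                   // callback thread: enqueue an event
    int event = 0;
    bool ok = queue.retrieve(event, /*timeout_seconds=*/1);   // test thread: wait up to 1s
    // ok == true, event == 42, queue.calledCount() == 1, queue.size() == 0
    queue.reset();                                     // drops pending events, resets calledCount()
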
diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
index be182a9..155afd6 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
@@ -29,6 +29,7 @@
 using IGnssMeasurement_1_1 = android::hardware::gnss::V1_1::IGnssMeasurement;
 using IGnssMeasurement_1_0 = android::hardware::gnss::V1_0::IGnssMeasurement;
 using IAGnssRil_2_0 = android::hardware::gnss::V2_0::IAGnssRil;
+using IAGnssRil_1_0 = android::hardware::gnss::V1_0::IAGnssRil;
 using IAGnss_2_0 = android::hardware::gnss::V2_0::IAGnss;
 using IAGnss_1_0 = android::hardware::gnss::V1_0::IAGnss;
 using IAGnssCallback_2_0 = android::hardware::gnss::V2_0::IAGnssCallback;
@@ -53,10 +54,10 @@
 TEST_F(GnssHalTest, SetupTeardownCreateCleanup) {}
 
 /*
- * TestGnssMeasurementCallback:
+ * TestGnssMeasurementExtension:
  * Gets the GnssMeasurementExtension and verifies that it returns an actual extension.
  */
-TEST_F(GnssHalTest, TestGnssMeasurementCallback) {
+TEST_F(GnssHalTest, TestGnssMeasurementExtension) {
     auto gnssMeasurement_2_0 = gnss_hal_->getExtensionGnssMeasurement_2_0();
     auto gnssMeasurement_1_1 = gnss_hal_->getExtensionGnssMeasurement_1_1();
     auto gnssMeasurement_1_0 = gnss_hal_->getExtensionGnssMeasurement();
@@ -125,16 +126,21 @@
  * TestAGnssRilExtension:
  * Gets the AGnssRilExtension and verifies that it returns an actual extension.
  *
- * The GNSS HAL 2.0 implementation must support @2.0::IAGnssRil interface due to the deprecation
- * of framework network API methods needed to support the @1.0::IAGnssRil interface.
- *
- * TODO (b/121287858): Enforce gnss@2.0 HAL package is supported on devices launched with Q or later
+ * If IAGnssRil interface is supported, then the GNSS HAL 2.0 implementation must support
+ * @2.0::IAGnssRil interface due to the deprecation of framework network API methods needed
+ * to support the @1.0::IAGnssRil interface.
  */
 TEST_F(GnssHalTest, TestAGnssRilExtension) {
-    auto agnssRil = gnss_hal_->getExtensionAGnssRil_2_0();
-    ASSERT_TRUE(agnssRil.isOk());
-    sp<IAGnssRil_2_0> iAGnssRil = agnssRil;
-    ASSERT_NE(iAGnssRil, nullptr);
+    auto agnssRil_2_0 = gnss_hal_->getExtensionAGnssRil_2_0();
+    ASSERT_TRUE(agnssRil_2_0.isOk());
+    sp<IAGnssRil_2_0> iAGnssRil_2_0 = agnssRil_2_0;
+    if (iAGnssRil_2_0 == nullptr) {
+        // Verify IAGnssRil 1.0 is not supported.
+        auto agnssRil_1_0 = gnss_hal_->getExtensionAGnssRil();
+        ASSERT_TRUE(agnssRil_1_0.isOk());
+        sp<IAGnssRil_1_0> iAGnssRil_1_0 = agnssRil_1_0;
+        ASSERT_EQ(iAGnssRil_1_0, nullptr);
+    }
 }
 
 /*
@@ -146,7 +152,9 @@
     auto agnssRil = gnss_hal_->getExtensionAGnssRil_2_0();
     ASSERT_TRUE(agnssRil.isOk());
     sp<IAGnssRil_2_0> iAGnssRil = agnssRil;
-    ASSERT_NE(iAGnssRil, nullptr);
+    if (iAGnssRil == nullptr) {
+        return;
+    }
 
     // Update GNSS HAL that a network has connected.
     IAGnssRil_2_0::NetworkAttributes networkAttributes = {
@@ -185,16 +193,17 @@
         return;
     }
 
-    sp<IGnssMeasurementCallback_2_0> callback = new GnssMeasurementCallback(*this);
-
+    sp<GnssMeasurementCallback> callback = new GnssMeasurementCallback();
     auto result = iGnssMeasurement->setCallback_2_0(callback, /* enableFullTracking= */ true);
     ASSERT_TRUE(result.isOk());
     EXPECT_EQ(result, IGnssMeasurement_1_0::GnssMeasurementStatus::SUCCESS);
 
-    wait(kFirstGnssMeasurementTimeoutSeconds);
-    EXPECT_EQ(measurement_called_count_, 1);
-    ASSERT_TRUE(last_measurement_.measurements.size() > 0);
-    for (auto measurement : last_measurement_.measurements) {
+    IGnssMeasurementCallback_2_0::GnssData lastMeasurement;
+    ASSERT_TRUE(callback->measurement_cbq_.retrieve(lastMeasurement,
+                                                    kFirstGnssMeasurementTimeoutSeconds));
+    EXPECT_EQ(callback->measurement_cbq_.calledCount(), 1);
+    ASSERT_TRUE(lastMeasurement.measurements.size() > 0);
+    for (auto measurement : lastMeasurement.measurements) {
         // Verify CodeType is valid.
         ASSERT_NE(measurement.codeType, "");
 
@@ -219,44 +228,35 @@
 
 /*
  * TestAGnssExtension:
- * Gets the AGnssExtension and verifies that it supports @2.0::IAGnss interface by invoking
- * a method.
+ * Gets the AGnssExtension and verifies that it returns an actual extension.
  *
- * The GNSS HAL 2.0 implementation must support @2.0::IAGnss interface due to the deprecation
- * of framework network API methods needed to support the @1.0::IAGnss interface.
- *
- * TODO (b/121287858): Enforce gnss@2.0 HAL package is supported on devices launched with Q or later
+ * If IAGnss interface is supported, then the GNSS HAL 2.0 implementation must support
+ * @2.0::IAGnss interface due to the deprecation of framework network API methods needed
+ * to support the @1.0::IAGnss interface.
  */
 TEST_F(GnssHalTest, TestAGnssExtension) {
-    // Verify IAGnss 2.0 is supported.
-    auto agnss = gnss_hal_->getExtensionAGnss_2_0();
-    ASSERT_TRUE(agnss.isOk());
-    sp<IAGnss_2_0> iAGnss = agnss;
-    ASSERT_NE(iAGnss, nullptr);
+    auto agnss_2_0 = gnss_hal_->getExtensionAGnss_2_0();
+    ASSERT_TRUE(agnss_2_0.isOk());
+    sp<IAGnss_2_0> iAGnss_2_0 = agnss_2_0;
+    if (iAGnss_2_0 == nullptr) {
+        // Verify IAGnss 1.0 is not supported.
+        auto agnss_1_0 = gnss_hal_->getExtensionAGnss();
+        ASSERT_TRUE(agnss_1_0.isOk());
+        sp<IAGnss_1_0> iAGnss_1_0 = agnss_1_0;
+        ASSERT_EQ(iAGnss_1_0, nullptr);
+        return;
+    }
 
     // Set SUPL server host/port
-    auto result = iAGnss->setServer(IAGnssCallback_2_0::AGnssType::SUPL, "supl.google.com", 7275);
+    auto result =
+            iAGnss_2_0->setServer(IAGnssCallback_2_0::AGnssType::SUPL, "supl.google.com", 7275);
     ASSERT_TRUE(result.isOk());
     EXPECT_TRUE(result);
 }
 
 /*
- * TestAGnssExtension_1_0_Deprecation:
- * Gets the @1.0::IAGnss extension and verifies that it is a nullptr.
- *
- * TODO (b/121287858): Enforce gnss@2.0 HAL package is supported on devices launched with Q or later
- */
-TEST_F(GnssHalTest, TestAGnssExtension_1_0_Deprecation) {
-    // Verify IAGnss 1.0 is not supported.
-    auto agnss_1_0 = gnss_hal_->getExtensionAGnss();
-    ASSERT_TRUE(!agnss_1_0.isOk() || ((sp<IAGnss_1_0>)agnss_1_0) == nullptr);
-}
-
-/*
  * TestGnssNiExtension_Deprecation:
  * Gets the @1.0::IGnssNi extension and verifies that it is a nullptr.
- *
- * TODO (b/121287858): Enforce gnss@2.0 HAL package is supported on devices launched with Q or later
  */
 TEST_F(GnssHalTest, TestGnssNiExtension_Deprecation) {
     // Verify IGnssNi 1.0 is not supported.
@@ -266,22 +266,19 @@
 
 /*
  * TestGnssVisibilityControlExtension:
- * Gets the GnssVisibilityControlExtension and verifies that it supports the
- * gnss.visibility_control@1.0::IGnssVisibilityControl interface by invoking a method.
- *
- * The GNSS HAL 2.0 implementation must support gnss.visibility_control@1.0::IGnssVisibilityControl.
- *
- * TODO (b/121287858): Enforce gnss@2.0 HAL package is supported on devices launched with Q or later
+ * Gets the GnssVisibilityControlExtension and if it is not null, verifies that it supports
+ * the gnss.visibility_control@1.0::IGnssVisibilityControl interface by invoking a method.
  */
 TEST_F(GnssHalTest, TestGnssVisibilityControlExtension) {
-    // Verify IGnssVisibilityControl is supported.
     auto gnssVisibilityControl = gnss_hal_->getExtensionVisibilityControl();
     ASSERT_TRUE(gnssVisibilityControl.isOk());
     sp<IGnssVisibilityControl> iGnssVisibilityControl = gnssVisibilityControl;
-    ASSERT_NE(iGnssVisibilityControl, nullptr);
+    if (iGnssVisibilityControl == nullptr) {
+        return;
+    }
 
     // Set non-framework proxy apps.
-    hidl_vec<hidl_string> proxyApps{"ims.example.com", "mdt.example.com"};
+    hidl_vec<hidl_string> proxyApps{"com.example.ims", "com.example.mdt"};
     auto result = iGnssVisibilityControl->enableNfwLocationAccess(proxyApps);
     ASSERT_TRUE(result.isOk());
     EXPECT_TRUE(result);
@@ -294,7 +291,7 @@
  * capability flag is set.
  */
 TEST_F(GnssHalTest, TestGnssMeasurementCorrectionsCapabilities) {
-    if (!(last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
+    if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
         return;
     }
 
@@ -304,15 +301,15 @@
     ASSERT_NE(iMeasurementCorrections, nullptr);
 
     // Setup measurement corrections callback.
-    sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
-            new GnssMeasurementCorrectionsCallback(*this);
-    iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
+    sp<GnssMeasurementCorrectionsCallback> callback = new GnssMeasurementCorrectionsCallback();
+    iMeasurementCorrections->setCallback(callback);
 
     const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5;
-    waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
-    ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0);
+    callback->capabilities_cbq_.retrieve(callback->last_capabilities_,
+                                         kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
+    ASSERT_TRUE(callback->capabilities_cbq_.calledCount() > 0);
     using Capabilities = IMeasurementCorrectionsCallback::Capabilities;
-    ASSERT_TRUE((last_measurement_corrections_capabilities_ &
+    ASSERT_TRUE((callback->last_capabilities_ &
                  (Capabilities::LOS_SATS | Capabilities::EXCESS_PATH_LENGTH)) != 0);
 }
 
@@ -322,7 +319,7 @@
  * gnss.measurement_corrections@1.0::IMeasurementCorrections interface by invoking a method.
  */
 TEST_F(GnssHalTest, TestGnssMeasurementCorrections) {
-    if (!(last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
+    if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
         return;
     }
 
@@ -332,13 +329,14 @@
     sp<IMeasurementCorrections> iMeasurementCorrections = measurementCorrections;
     ASSERT_NE(iMeasurementCorrections, nullptr);
 
-    sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
-            new GnssMeasurementCorrectionsCallback(*this);
-    iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
+    sp<GnssMeasurementCorrectionsCallback> callback = new GnssMeasurementCorrectionsCallback();
+    iMeasurementCorrections->setCallback(callback);
 
     const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5;
-    waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
-    ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0);
+    callback->capabilities_cbq_.retrieve(callback->last_capabilities_,
+                                         kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
+    ASSERT_TRUE(callback->capabilities_cbq_.calledCount() > 0);
+
     // Set a mock MeasurementCorrections.
     auto result = iMeasurementCorrections->setCorrections(Utils::getMockMeasurementCorrections());
     ASSERT_TRUE(result.isOk());
@@ -363,22 +361,23 @@
         return;
     }
 
-    sp<IGnssMeasurementCallback_2_0> callback = new GnssMeasurementCallback(*this);
-
+    sp<GnssMeasurementCallback> callback = new GnssMeasurementCallback();
     auto result = iGnssMeasurement->setCallback_2_0(callback, /* enableFullTracking= */ true);
     ASSERT_TRUE(result.isOk());
     EXPECT_EQ(result, IGnssMeasurement_1_0::GnssMeasurementStatus::SUCCESS);
 
-    wait(kFirstGnssMeasurementTimeoutSeconds);
-    EXPECT_EQ(measurement_called_count_, 1);
+    IGnssMeasurementCallback_2_0::GnssData lastMeasurement;
+    ASSERT_TRUE(callback->measurement_cbq_.retrieve(lastMeasurement,
+                                                    kFirstGnssMeasurementTimeoutSeconds));
+    EXPECT_EQ(callback->measurement_cbq_.calledCount(), 1);
 
-    ASSERT_TRUE((int)last_measurement_.elapsedRealtime.flags <=
+    ASSERT_TRUE((int)lastMeasurement.elapsedRealtime.flags <=
                 (int)(ElapsedRealtimeFlags::HAS_TIMESTAMP_NS |
                       ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS));
 
     // We expect a non-zero timestamp when set.
-    if (last_measurement_.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
-        ASSERT_TRUE(last_measurement_.elapsedRealtime.timestampNs != 0);
+    if (lastMeasurement.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
+        ASSERT_TRUE(lastMeasurement.elapsedRealtime.timestampNs != 0);
     }
 
     iGnssMeasurement->close();
@@ -387,13 +386,13 @@
 TEST_F(GnssHalTest, TestGnssLocationElapsedRealtime) {
     StartAndCheckFirstLocation();
 
-    ASSERT_TRUE((int)last_location_.elapsedRealtime.flags <=
+    ASSERT_TRUE((int)gnss_cb_->last_location_.elapsedRealtime.flags <=
                 (int)(ElapsedRealtimeFlags::HAS_TIMESTAMP_NS |
                       ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS));
 
     // We expect a non-zero timestamp when set.
-    if (last_location_.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
-        ASSERT_TRUE(last_location_.elapsedRealtime.timestampNs != 0);
+    if (gnss_cb_->last_location_.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
+        ASSERT_TRUE(gnss_cb_->last_location_.elapsedRealtime.timestampNs != 0);
     }
 
     StopAndClearLocations();
@@ -402,23 +401,16 @@
 // This test only verify that injectBestLocation_2_0 does not crash.
 TEST_F(GnssHalTest, TestInjectBestLocation_2_0) {
     StartAndCheckFirstLocation();
-    gnss_hal_->injectBestLocation_2_0(last_location_);
+    gnss_hal_->injectBestLocation_2_0(gnss_cb_->last_location_);
     StopAndClearLocations();
 }
 
 /*
  * TestGnssBatchingExtension:
- * Gets the GnssBatchingExtension and verifies that it supports either the @1.0::IGnssBatching
- * or @2.0::IGnssBatching extension.
+ * Gets the @2.0::IGnssBatching extension and verifies that it doesn't return an error. Support
+ * for this interface is optional.
  */
 TEST_F(GnssHalTest, TestGnssBatchingExtension) {
-    auto gnssBatching_V2_0 = gnss_hal_->getExtensionGnssBatching_2_0();
-    ASSERT_TRUE(gnssBatching_V2_0.isOk());
-
-    auto gnssBatching_V1_0 = gnss_hal_->getExtensionGnssBatching();
-    ASSERT_TRUE(gnssBatching_V1_0.isOk());
-
-    sp<IGnssBatching_V1_0> iGnssBatching_V1_0 = gnssBatching_V1_0;
-    sp<IGnssBatching_V2_0> iGnssBatching_V2_0 = gnssBatching_V2_0;
-    ASSERT_TRUE(iGnssBatching_V1_0 != nullptr || iGnssBatching_V2_0 != nullptr);
+    auto gnssBatching_2_0 = gnss_hal_->getExtensionGnssBatching_2_0();
+    ASSERT_TRUE(gnssBatching_2_0.isOk());
 }
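
The relaxed AGNSS checks above encode a single rule for implementations: a gnss@2.0 HAL that supports AGNSS must expose only the @2.0 variants. A hedged sketch of the service side, with the Gnss/AGnssRil class names and namespace aliases assumed:

    // Hypothetical gnss@2.0 service honoring the AGnssRil deprecation rule (sketch).
    Return<sp<V1_0::IAGnssRil>> Gnss::getExtensionAGnssRil() {
        return nullptr;                // the 1.0 variant must not be exposed
    }
    Return<sp<V2_0::IAGnssRil>> Gnss::getExtensionAGnssRil_2_0() {
        return new AGnssRil();         // hypothetical 2.0 implementation
    }
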
diff --git a/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp b/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
index 4018aea..3c408b7 100644
--- a/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
+++ b/graphics/composer/2.1/vts/functional/VtsHalGraphicsComposerV2_1TargetTest.cpp
@@ -850,10 +850,37 @@
     ASSERT_NO_FATAL_FAILURE(layer =
                                 mComposerClient->createLayer(mPrimaryDisplay, kBufferSlotCount));
 
+    auto handle = allocate();
+    ASSERT_NE(nullptr, handle);
+    IComposerClient::Rect displayFrame{0, 0, mDisplayWidth, mDisplayHeight};
+
     mWriter->selectDisplay(mPrimaryDisplay);
     mWriter->selectLayer(layer);
+    mWriter->setLayerBuffer(0, handle, -1);
+    mWriter->setLayerCompositionType(IComposerClient::Composition::CURSOR);
+    mWriter->setLayerDisplayFrame(displayFrame);
+    mWriter->setLayerPlaneAlpha(1);
+    mWriter->setLayerSourceCrop({0, 0, (float)mDisplayWidth, (float)mDisplayHeight});
+    mWriter->setLayerTransform(static_cast<Transform>(0));
+    mWriter->setLayerVisibleRegion(std::vector<IComposerClient::Rect>(1, displayFrame));
+    mWriter->setLayerZOrder(10);
+    mWriter->setLayerBlendMode(IComposerClient::BlendMode::NONE);
+    mWriter->setLayerSurfaceDamage(std::vector<IComposerClient::Rect>(1, displayFrame));
+    mWriter->setLayerDataspace(Dataspace::UNKNOWN);
+    mWriter->validateDisplay();
+
+    execute();
+    if (mReader->mCompositionChanges.size() != 0) {
+        GTEST_SUCCEED() << "Composition change requested, skipping test";
+        return;
+    }
+    mWriter->presentDisplay();
+    ASSERT_EQ(0, mReader->mErrors.size());
+
     mWriter->setLayerCursorPosition(1, 1);
     mWriter->setLayerCursorPosition(0, 0);
+    mWriter->validateDisplay();
+    mWriter->presentDisplay();
     execute();
 }
 
diff --git a/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp b/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
index 7834b94..9c80f4d 100644
--- a/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
+++ b/graphics/composer/2.2/vts/functional/VtsHalGraphicsComposerV2_2TargetTest.cpp
@@ -246,7 +246,19 @@
  * Test IComposerClient::getPerFrameMetadataKeys.
  */
 TEST_F(GraphicsComposerHidlTest, GetPerFrameMetadataKeys) {
-    mComposerClient->getPerFrameMetadataKeys(mPrimaryDisplay);
+    std::vector<IComposerClient::PerFrameMetadataKey> keys;
+    Error error = Error::NONE;
+    mComposerClient->getRaw()->getPerFrameMetadataKeys(
+            mPrimaryDisplay, [&](const auto& tmpError, const auto& tmpKeys) {
+                error = tmpError;
+                keys = tmpKeys;
+            });
+    if (error == Error::UNSUPPORTED) {
+        GTEST_SUCCEED() << "getPerFrameMetadataKeys is not supported";
+        return;
+    }
+    ASSERT_EQ(Error::NONE, error);
+    ASSERT_TRUE(keys.size() >= 0);
 }
 
 /**
diff --git a/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc b/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
index 08e32d8..81ce890 100644
--- a/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
+++ b/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
@@ -4,3 +4,4 @@
     group graphics drmrpc
     capabilities SYS_NICE
     onrestart restart surfaceflinger
+    writepid /dev/cpuset/system-background/tasks
diff --git a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
index 3792c2e..b289b6a 100644
--- a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
+++ b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerClient.h
@@ -95,7 +95,7 @@
 
     Return<void> getDisplayCapabilities(
         Display display, IComposerClient::getDisplayCapabilities_cb hidl_cb) override {
-        hidl_vec<IComposerClient::DisplayCapability> capabilities;
+        std::vector<IComposerClient::DisplayCapability> capabilities;
         Error error = mHal->getDisplayCapabilities(display, &capabilities);
         hidl_cb(error, capabilities);
         return Void();
diff --git a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
index 186b004..c3c4887 100644
--- a/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
+++ b/graphics/composer/2.3/utils/hal/include/composer-hal/2.3/ComposerHal.h
@@ -115,7 +115,7 @@
                                             hidl_vec<uint64_t>& sampleComponent2,
                                             hidl_vec<uint64_t>& sampleComponent3) = 0;
     virtual Error getDisplayCapabilities(
-        Display display, hidl_vec<IComposerClient::DisplayCapability>* outCapabilities) = 0;
+            Display display, std::vector<IComposerClient::DisplayCapability>* outCapabilities) = 0;
     virtual Error setLayerPerFrameMetadataBlobs(
         Display display, Layer layer,
         std::vector<IComposerClient::PerFrameMetadataBlob>& blobs) = 0;
diff --git a/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h b/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
index 4829e24..d3b29bb 100644
--- a/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
+++ b/graphics/composer/2.3/utils/passthrough/include/composer-passthrough/2.3/HwcHal.h
@@ -220,7 +220,8 @@
     }
 
     Error getDisplayCapabilities(
-        Display display, hidl_vec<IComposerClient::DisplayCapability>* outCapabilities) override {
+            Display display,
+            std::vector<IComposerClient::DisplayCapability>* outCapabilities) override {
         uint32_t count = 0;
         int32_t error = mDispatch.getDisplayCapabilities(mDevice, display, &count, nullptr);
         if (error != HWC2_ERROR_NONE) {
@@ -232,7 +233,7 @@
             reinterpret_cast<std::underlying_type<IComposerClient::DisplayCapability>::type*>(
                 outCapabilities->data()));
         if (error != HWC2_ERROR_NONE) {
-            *outCapabilities = hidl_vec<IComposerClient::DisplayCapability>();
+            *outCapabilities = std::vector<IComposerClient::DisplayCapability>();
             return static_cast<Error>(error);
         }
         return Error::NONE;
@@ -267,6 +268,19 @@
 
     Error getDisplayBrightnessSupport(Display display, bool* outSupport) {
         if (!mDispatch.getDisplayBrightnessSupport) {
+            // Preemptively set to false.
+            *outSupport = false;
+            // Try to query from getDisplayCapabilities.
+            std::vector<IComposerClient::DisplayCapability> capabilities;
+            Error error = getDisplayCapabilities(display, &capabilities);
+            if (error != Error::NONE) {
+                // The getDisplayCapabilities query failed; report UNSUPPORTED.
+                return Error::UNSUPPORTED;
+            }
+            *outSupport =
+                    std::find(capabilities.begin(), capabilities.end(),
+                              IComposerClient::DisplayCapability::BRIGHTNESS) != capabilities.end();
+            // getDisplayBrightnessSupport itself is not registered, so still return UNSUPPORTED.
             return Error::UNSUPPORTED;
         }
         bool support = false;
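
From the caller's perspective, the fallback added above means an UNSUPPORTED return no longer implies the output flag is meaningless. A brief caller-side sketch where hal and display are placeholders:

    // Caller-side view of the brightness-support fallback (sketch).
    bool support = false;
    Error err = hal->getDisplayBrightnessSupport(display, &support);
    // err may still be UNSUPPORTED when the hwc2 hook is missing, but 'support'
    // now reflects whether DisplayCapability::BRIGHTNESS was reported.
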
diff --git a/graphics/composer/2.3/utils/vts/ComposerVts.cpp b/graphics/composer/2.3/utils/vts/ComposerVts.cpp
index b763209..d4f5b3a 100644
--- a/graphics/composer/2.3/utils/vts/ComposerVts.cpp
+++ b/graphics/composer/2.3/utils/vts/ComposerVts.cpp
@@ -192,10 +192,8 @@
 
 bool ComposerClient::getDisplayBrightnessSupport(Display display) {
     bool support = false;
-    mClient->getDisplayBrightnessSupport(display, [&](const auto& error, const auto& tmpSupport) {
-        ASSERT_EQ(Error::NONE, error) << "failed to get brightness support";
-        support = tmpSupport;
-    });
+    mClient->getDisplayBrightnessSupport(
+            display, [&](const auto& /*error*/, const auto& tmpSupport) { support = tmpSupport; });
     return support;
 }
 
diff --git a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
index b704fdb..18fbb6d 100644
--- a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
+++ b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
@@ -34,34 +34,48 @@
 template <typename Hal>
 class Gralloc0HalImpl : public V2_0::passthrough::detail::Gralloc0HalImpl<Hal> {
    public:
-    Error validateBufferSize(const native_handle_t* /*bufferHandle*/,
-                             const IMapper::BufferDescriptorInfo& /*descriptorInfo*/,
-                             uint32_t /*stride*/) override {
-        // need a gralloc0 extension to really validate
-        return Error::NONE;
-    }
+     Error validateBufferSize(const native_handle_t* bufferHandle,
+                              const IMapper::BufferDescriptorInfo& descriptorInfo,
+                              uint32_t stride) override {
+         if (!mModule->validateBufferSize) {
+             return Error::NONE;
+         }
 
-    Error getTransportSize(const native_handle_t* bufferHandle, uint32_t* outNumFds,
-                           uint32_t* outNumInts) override {
-        // need a gralloc0 extension to get the transport size
-        *outNumFds = bufferHandle->numFds;
-        *outNumInts = bufferHandle->numInts;
-        return Error::NONE;
+         int32_t ret = mModule->validateBufferSize(
+                 mModule, bufferHandle, descriptorInfo.width, descriptorInfo.height,
+                 static_cast<int32_t>(descriptorInfo.format),
+                 static_cast<uint64_t>(descriptorInfo.usage), stride);
+         return static_cast<Error>(ret);
+     }
+     Error getTransportSize(const native_handle_t* bufferHandle, uint32_t* outNumFds,
+                            uint32_t* outNumInts) override {
+         if (!mModule->getTransportSize) {
+             *outNumFds = bufferHandle->numFds;
+             *outNumInts = bufferHandle->numInts;
+             return Error::NONE;
+         }
+
+         int32_t ret = mModule->getTransportSize(mModule, bufferHandle, outNumFds, outNumInts);
+         return static_cast<Error>(ret);
     }
 
     Error createDescriptor_2_1(const IMapper::BufferDescriptorInfo& descriptorInfo,
                                BufferDescriptor* outDescriptor) override {
         return createDescriptor(
-            V2_0::IMapper::BufferDescriptorInfo{
-                descriptorInfo.width, descriptorInfo.height, descriptorInfo.layerCount,
-                static_cast<common::V1_0::PixelFormat>(descriptorInfo.format), descriptorInfo.usage,
-            },
-            outDescriptor);
+                V2_0::IMapper::BufferDescriptorInfo{
+                        descriptorInfo.width,
+                        descriptorInfo.height,
+                        descriptorInfo.layerCount,
+                        static_cast<common::V1_0::PixelFormat>(descriptorInfo.format),
+                        descriptorInfo.usage,
+                },
+                outDescriptor);
     }
 
    private:
     using BaseType2_0 = V2_0::passthrough::detail::Gralloc0HalImpl<Hal>;
     using BaseType2_0::createDescriptor;
+    using BaseType2_0::mModule;
 };
 
 }  // namespace detail
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index fc96724..3d37e9f 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -371,7 +371,7 @@
             strptime(date.c_str(), "%Y-%m-%d", &time);
 
             // Day of the month (0-31)
-            EXPECT_GT(time.tm_mday, 0);
+            EXPECT_GE(time.tm_mday, 0);
             EXPECT_LT(time.tm_mday, 32);
             // Months since Jan (0-11)
             EXPECT_GE(time.tm_mon, 0);
@@ -414,7 +414,7 @@
     EXPECT_NE(strcmp(property_value, "nogood"), 0);
     string prop_string(property_value);
     EXPECT_EQ(prop_string.size(), 64);
-    EXPECT_EQ(0, memcmp(verified_boot_hash.data(), prop_string.data(), verified_boot_hash.size()));
+    EXPECT_EQ(prop_string, bin2hex(verified_boot_hash));
 
     property_get("ro.boot.vbmeta.device_state", property_value, "nogood");
     EXPECT_NE(property_value, "nogood");
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index a358946..02db063 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -858,20 +858,21 @@
      *   elements of the input matrices.
      *
      * The operation has the following independently optional inputs:
+     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+     *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+     *   have values or neither of them have values (i.e., all set to null). If
+     *   they have values, the peephole optimization is used.
      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
-     *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
-     *   bias (\f$b_i\f$) either all have values, or none of them have values
-     *   (i.e., all set to null). If they have no values, coupling of input and
-     *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
-     *   is calculated using the following equation instead.
+     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+     *   or none of them have values. If they have no values, coupling of input
+     *   and forget gates (CIFG) is used, in which case the input gate
+     *   (\f$i_t\f$) is calculated using the following equation instead.
      *   \f{eqnarray*}{
      *   i_t = 1 - f_t
      *   \f}
-     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
-     *   (\f$W_{co}\f$) either both have values or neither of them have values.
-     *   If they have values, the peephole optimization is used. Additionally,
-     *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
-     *   required to have values for peephole optimization.
+     *   In case peephole optimization is used and CIFG is not used, the
+     *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+     *   cell-to-input weights must have no value.
      * * The projection weights (\f$W_{proj}\f$) is required only for the
      *   recurrent projection layer, and should otherwise have no value.
      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
@@ -984,8 +985,8 @@
      * Outputs:
      * * 0: The scratch buffer.
      *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
-     *      [batch_size, num_units * 4] with CIFG, or
-     *      [batch_size, num_units * 3] without CIFG.
+     *      [batch_size, num_units * 3] with CIFG, or
+     *      [batch_size, num_units * 4] without CIFG.
      * * 1: The output state (out) (\f$h_t\f$).
      *      A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
      *      [batch_size, output_size].
@@ -1225,9 +1226,9 @@
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
      *      the input.
      * * 1: An {@link OperandType::INT32} scalar, specifying the output
-     *      height of the output tensor.
-     * * 2: An {@link OperandType::INT32} scalar, specifying the output
      *      width of the output tensor.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
      *
      * Outputs:
      * * 0: The output 4-D tensor, of shape
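
The CIFG and scratch-buffer corrections in the LSTM documentation above fit together as follows; this is a restatement for clarity, not additional normative text:

    \[
      i_t = 1 - f_t \qquad \text{(input gate derived from the forget gate under CIFG)}
    \]
    \[
      \text{scratch shape} =
      \begin{cases}
        [\textit{batch\_size},\ 3 \cdot \textit{num\_units}] & \text{with CIFG (forget, cell, output gates)} \\
        [\textit{batch\_size},\ 4 \cdot \textit{num\_units}] & \text{without CIFG (input, forget, cell, output gates)}
      \end{cases}
    \]
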
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 72a5007..f0c93b7 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -42,53 +41,6 @@
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
-                                sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations call returns a list of operations that are
-    // guaranteed not to fail if prepareModel is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request. Note that the request here is passed by value, and any
@@ -237,15 +189,8 @@
     return requests;
 }
 
-void ValidationTest::validateRequests(const V1_0::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                       const std::vector<Request>& requests) {
-    // create IPreparedModel
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-
     // validate each request
     for (const Request& request : requests) {
         removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 8883057..aee2f85 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -25,6 +29,55 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations call returns a list of operations that are
+    // guaranteed not to fail if prepareModel is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -68,6 +121,19 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+    validateModel(model);
+
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequests(preparedModel, requests);
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_0
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index d4c114d..22285be 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -63,8 +63,12 @@
 // Tag for the validation tests
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
-    void validateModel(const Model& model);
-    void validateRequests(const Model& model, const std::vector<Request>& request);
+     void validateEverything(const Model& model, const std::vector<Request>& request);
+
+   private:
+     void validateModel(const Model& model);
+     void validateRequests(const sp<IPreparedModel>& preparedModel,
+                           const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
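
With validateModel() and validateRequests() now private, generated validation tests are expected to go through the single public entry point. A minimal sketch using hypothetical model/request builders:

    // Hypothetical generated validation test built on the new entry point (sketch).
    TEST_F(ValidationTest, ExampleModel) {
        Model model = createExampleModel();                        // placeholder builder
        std::vector<Request> requests = createExampleRequests();   // placeholder builder
        validateEverything(model, requests);   // validateModel + prepare + validateRequests
    }
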
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 335b803..73705bb 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -138,7 +138,7 @@
      *
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT32}
-     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (the pad value is undefined)
      *
      * Supported tensor rank: up to 4
      *
@@ -161,6 +161,9 @@
      *          output0.dimension[i] =
      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
      *
+     *      NOTE: The pad value for {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      is undefined.
+     *
      * Available since API level 28.
      */
     PAD = 32,
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index 5225bf7..f4adbab 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
 using test_helper::for_all;
 using test_helper::MixedTyped;
@@ -42,54 +41,6 @@
 
 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
 
-static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
-                                sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
-        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = preparedModelCallback->getPreparedModel();
-
-    // The getSupportedOperations_1_1 call returns a list of operations that are
-    // guaranteed not to fail if prepareModel_1_1 is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request. Note that the request here is passed by value, and any
@@ -238,15 +189,8 @@
     return requests;
 }
 
-void ValidationTest::validateRequests(const V1_1::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
                                       const std::vector<Request>& requests) {
-    // create IPreparedModel
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-
     // validate each request
     for (const Request& request : requests) {
         removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 224a51d..08069f2 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -25,6 +29,56 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = preparedModelCallback->getPreparedModel();
+
+    // The getSupportedOperations_1_1 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_1 is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -68,6 +122,19 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+    validateModel(model);
+
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequests(preparedModel, requests);
+}
+
 }  // namespace functional
 }  // namespace vts
 }  // namespace V1_1
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 1c8c0e1..f3f587b 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -72,8 +72,12 @@
 // Tag for the validation tests
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
-    void validateModel(const Model& model);
-    void validateRequests(const Model& model, const std::vector<Request>& request);
+    void validateEverything(const Model& model, const std::vector<Request>& requests);
+
+   private:
+    void validateModel(const Model& model);
+    void validateRequests(const sp<IPreparedModel>& preparedModel,
+                          const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 087249a..f368ce2 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -1177,20 +1177,21 @@
      * https://arxiv.org/pdf/1607.06450.pdf
      *
      * The operation has the following independently optional inputs:
+     * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+     *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+     *   have values or none of them have values (i.e., all set to null). If
+     *   they have values, the peephole optimization is used.
      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
-     *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
-     *   bias (\f$b_i\f$) either all have values, or none of them have values
-     *   (i.e., all set to null). If they have no values, coupling of input and
-     *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
-     *   is calculated using the following equation instead.
+     *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+     *   or none of them have values. If they have no values, coupling of input
+     *   and forget gates (CIFG) is used, in which case the input gate
+     *   (\f$i_t\f$) is calculated using the following equation instead.
      *   \f{eqnarray*}{
      *   i_t = 1 - f_t
      *   \f}
-     * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
-     *   (\f$W_{co}\f$) either both have values or neither of them have values.
-     *   If they have values, the peephole optimization is used. Additionally,
-     *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
-     *   required to have values for peephole optimization.
+     *   If the peephole optimization is used and CIFG is not used, the
+     *   cell-to-input weights (\f$W_{ci}\f$) must be present. Otherwise, the
+     *   cell-to-input weights must have no value.
      * * The projection weights (\f$W_{proj}\f$) is required only for the
      *   recurrent projection layer, and should otherwise have no value.
      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
@@ -1593,9 +1594,9 @@
      *      the input. Since API level 29, zero batches is supported for this
      *      tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the output
-     *      height of the output tensor.
-     * * 2: An {@link OperandType::INT32} scalar, specifying the output
      *      width of the output tensor.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
      * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
      *      Set to true to specify NCHW data layout for input0 and output0.
      *      Available since API level 29.
@@ -1603,15 +1604,15 @@
      * Inputs (resizing by scale, since API level 29):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
      *      the input. Zero batches is supported for this tensor.
-     * * 1: A scalar, specifying height_scale, the scaling factor of the height
+     * * 1: A scalar, specifying width_scale, the scaling factor of the width
      *      dimension from the input tensor to the output tensor. The output
-     *      height is calculated as new_height = floor(height * height_scale).
+     *      width is calculated as new_width = floor(width * width_scale).
      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
      *      of {@link OperandType::TENSOR_FLOAT16} and of
      *      {@link OperandType::FLOAT32} otherwise.
-     * * 2: A scalar, specifying width_scale, the scaling factor of the width
+     * * 2: A scalar, specifying height_scale, the scaling factor of the height
      *      dimension from the input tensor to the output tensor. The output
-     *      width is calculated as new_width = floor(width * width_scale).
+     *      height is calculated as new_height = floor(height * height_scale).
      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
      *      of {@link OperandType::TENSOR_FLOAT16} and of
      *      {@link OperandType::FLOAT32} otherwise.
@@ -1999,7 +2000,8 @@
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
      * * {@link OperandType::TENSOR_FLOAT32}
-     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (full support since API
+     *   level 29, see the output section)
      *
      * Supported tensor rank: up to 4
      *
@@ -2022,6 +2024,10 @@
      *          output0.dimension[i] =
      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
      *
+     *      NOTE: Before API level 29, the pad value for
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+     *      Since API level 29, the pad value is always the logical zero.
+     *
      * Available since API level 28.
      */
     PAD = @1.1::OperationType:PAD,
@@ -3531,6 +3537,8 @@
      * * {@link OperandType::TENSOR_INT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: A tensor.
      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
@@ -3552,6 +3560,8 @@
      * * {@link OperandType::TENSOR_INT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
      *
+     * Supported tensor rank: from 1.
+     *
      * Inputs:
      * * 0: A tensor.
      * * 1: A tensor of the same {@link OperandType} and compatible dimensions
@@ -4659,24 +4669,24 @@
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
      *      the input. Zero batches is supported for this tensor.
      * * 1: An {@link OperandType::INT32} scalar, specifying the output
-     *      height of the output tensor.
-     * * 2: An {@link OperandType::INT32} scalar, specifying the output
      *      width of the output tensor.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the output
+     *      height of the output tensor.
      * * 3: An {@link OperandType::BOOL} scalar, default to false.
      *      Set to true to specify NCHW data layout for input0 and output0.
      *
      * Inputs (resizing by scale):
      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
      *      the input. Zero batches is supported for this tensor.
-     * * 1: A scalar, specifying height_scale, the scaling factor of the height
+     * * 1: A scalar, specifying width_scale, the scaling factor of the width
      *      dimension from the input tensor to the output tensor. The output
-     *      height is calculated as new_height = floor(height * height_scale).
+     *      width is calculated as new_width = floor(width * width_scale).
      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
      *      of {@link OperandType::TENSOR_FLOAT16} and of
      *      {@link OperandType::FLOAT32} otherwise.
-     * * 2: A scalar, specifying width_scale, the scaling factor of the width
+     * * 2: A scalar, specifying height_scale, the scaling factor of the height
      *      dimension from the input tensor to the output tensor. The output
-     *      width is calculated as new_width = floor(width * width_scale).
+     *      height is calculated as new_height = floor(height * height_scale).
      *      The scalar must be of {@link OperandType::FLOAT16} if input0 is
      *      of {@link OperandType::TENSOR_FLOAT16} and of
      *      {@link OperandType::FLOAT32} otherwise.
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 891b414..6c26820 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -20,6 +20,7 @@
     defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
     srcs: [
         "GeneratedTestsV1_0.cpp",
+        "ValidateBurst.cpp",
     ],
     cflags: [
         "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
@@ -32,6 +33,7 @@
     defaults: ["VtsHalNeuralNetworksTargetTestDefaults"],
     srcs: [
         "GeneratedTestsV1_1.cpp",
+        "ValidateBurst.cpp",
     ],
     cflags: [
         "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
@@ -46,6 +48,7 @@
         "BasicTests.cpp",
         "CompilationCachingTests.cpp",
         "GeneratedTests.cpp",
+        "ValidateBurst.cpp",
     ],
     cflags: [
         "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE"
@@ -58,6 +61,7 @@
     srcs: [
         "BasicTests.cpp",
         "GeneratedTests.cpp",
+        "ValidateBurst.cpp",
     ],
     cflags: [
         "-DNN_TEST_DYNAMIC_OUTPUT_SHAPE",
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 167fc09..4411b90 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -16,21 +16,22 @@
 
 #define LOG_TAG "neuralnetworks_hidl_hal_test"
 
-#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <ftw.h>
+#include <gtest/gtest.h>
+#include <hidlmemory/mapping.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <random>
 
 #include "Callbacks.h"
 #include "GeneratedTestHarness.h"
 #include "TestHarness.h"
 #include "Utils.h"
-
-#include <android-base/logging.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidlmemory/mapping.h>
-#include <cstdio>
-#include <cstdlib>
-#include <random>
-
-#include <gtest/gtest.h>
+#include "VtsHalNeuralnetworks.h"
 
 namespace android {
 namespace hardware {
@@ -44,9 +45,9 @@
 using ::android::nn::allocateSharedMemory;
 using ::test_helper::MixedTypedExample;
 
-namespace {
+namespace float32_model {
 
-// In frameworks/ml/nn/runtime/tests/generated/, creates a hidl model of mobilenet.
+// Creates a HIDL model of float32 mobilenet from frameworks/ml/nn/runtime/test/generated/.
 #include "examples/mobilenet_224_gender_basic_fixed.example.cpp"
 #include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp"
 
@@ -54,6 +55,44 @@
 [[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
 [[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
 
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
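+// The chain starts from input 1.0f and each of the len ADD operations adds the constant 1.0f,
+// so the expected output value is 1.0f + len.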
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+    float outputValue = 1.0f + static_cast<float>(len);
+    return {{.operands = {
+                     // Input
+                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
+                     // Output
+                     {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
+}
+
+}  // namespace float32_model
+
+namespace quant8_model {
+
+// Creates a HIDL model of quant8 mobilenet from frameworks/ml/nn/runtime/test/generated/.
+#include "examples/mobilenet_quantized.example.cpp"
+#include "vts_models/mobilenet_quantized.model.cpp"
+
+// Prevent the compiler from complaining about an otherwise unused function.
+[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
+[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
+
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
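+// The chain starts from input 1 and each of the len ADD operations adds the constant 1,
+// so the expected output value is 1 + len (computed in uint8_t).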
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+    uint8_t outputValue = 1 + static_cast<uint8_t>(len);
+    return {{.operands = {// Input
+                          {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
+                          // Output
+                          {.operandDimensions = {{0, {1}}},
+                           .quant8AsymmOperands = {{0, {outputValue}}}}}}};
+}
+
+}  // namespace quant8_model
+
+namespace {
+
 enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
 
 // Creates cache handles based on provided file groups.
@@ -89,11 +128,137 @@
     createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles);
 }
 
+// Create a chain of broadcast operations. The second operand is always constant tensor [1].
+// For simplicity, the activation scalar is shared. The second operand is not shared
+// in the model, so that the driver must maintain a non-trivial amount of constant data and the
+// corresponding data locations in the cache.
+//
+//                --------- activation --------
+//                ↓      ↓      ↓             ↓
+// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output
+//                ↑      ↑      ↑             ↑
+//               [1]    [1]    [1]           [1]
+//
+// This function assumes the operation is either ADD or MUL.
+template <typename CppType, OperandType operandType>
+Model createLargeTestModelImpl(OperationType op, uint32_t len) {
+    EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
+
+    // Model operations and operands.
+    std::vector<Operation> operations(len);
+    std::vector<Operand> operands(len * 2 + 2);
+
+    // The constant buffer pool. This contains the activation scalar, followed by the
+    // per-operation constant operands.
+    std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
+
+    // The activation scalar, value = 0.
+    operands[0] = {
+            .type = OperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = len,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
+    };
+    memset(operandValues.data(), 0, sizeof(int32_t));
+
+    // The buffer value of the constant second operand. The logical value is always 1.0f.
+    CppType bufferValue;
+    // The scale of the first and second operand.
+    float scale1, scale2;
+    if (operandType == OperandType::TENSOR_FLOAT32) {
+        bufferValue = 1.0f;
+        scale1 = 0.0f;
+        scale2 = 0.0f;
+    } else if (op == OperationType::ADD) {
+        bufferValue = 1;
+        scale1 = 1.0f;
+        scale2 = 1.0f;
+    } else {
+        // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale,
+        // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point.
+        bufferValue = 2;
+        scale1 = 1.0f;
+        scale2 = 0.5f;
+    }
+
+    for (uint32_t i = 0; i < len; i++) {
+        const uint32_t firstInputIndex = i * 2 + 1;
+        const uint32_t secondInputIndex = firstInputIndex + 1;
+        const uint32_t outputIndex = secondInputIndex + 1;
+
+        // The first operation input.
+        operands[firstInputIndex] = {
+                .type = operandType,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = scale1,
+                .zeroPoint = 0,
+                .lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
+                                    : OperandLifeTime::TEMPORARY_VARIABLE),
+                .location = {},
+        };
+
+        // The second operation input, value = 1.
+        operands[secondInputIndex] = {
+                .type = operandType,
+                .dimensions = {1},
+                .numberOfConsumers = 1,
+                .scale = scale2,
+                .zeroPoint = 0,
+                .lifetime = OperandLifeTime::CONSTANT_COPY,
+                .location = {.poolIndex = 0,
+                             .offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
+                             .length = sizeof(CppType)},
+        };
+        memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
+               sizeof(CppType));
+
+        // The operation. All operations share the same activation scalar.
+        // For all but the last operation in the chain, the output operand is created as an input
+        // in the next iteration of the loop; for the last operation, it is created after the loop
+        // as the model output.
+        operations[i] = {
+                .type = op,
+                .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0},
+                .outputs = {outputIndex},
+        };
+    }
+
+    // The model output.
+    operands.back() = {
+            .type = operandType,
+            .dimensions = {1},
+            .numberOfConsumers = 0,
+            .scale = scale1,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {},
+    };
+
+    const std::vector<uint32_t> inputIndexes = {1};
+    const std::vector<uint32_t> outputIndexes = {len * 2 + 1};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+            .operands = operands,
+            .operations = operations,
+            .inputIndexes = inputIndexes,
+            .outputIndexes = outputIndexes,
+            .operandValues = operandValues,
+            .pools = pools,
+    };
+}
+
 }  // namespace
 
 // Tag for the compilation caching tests.
-class CompilationCachingTest : public NeuralnetworksHidlTest {
+class CompilationCachingTestBase : public NeuralnetworksHidlTest {
   protected:
+    CompilationCachingTestBase(OperandType type) : kOperandType(type) {}
+
     void SetUp() override {
         NeuralnetworksHidlTest::SetUp();
         ASSERT_NE(device.get(), nullptr);
@@ -139,21 +304,53 @@
     }
 
     void TearDown() override {
-        // The tmp directory is only removed when the driver reports caching not supported,
-        // otherwise it is kept for debugging purpose.
-        if (!mIsCachingSupported) {
-            remove(mTmpCache.c_str());
-            rmdir(mCacheDir.c_str());
+        // If the test passes, remove the tmp directory.  Otherwise, keep it for debugging purposes.
+        if (!::testing::Test::HasFailure()) {
+            // Recursively remove the cache directory specified by mCacheDir.
+            auto callback = [](const char* entry, const struct stat*, int, struct FTW*) {
+                return remove(entry);
+            };
+            nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
         }
         NeuralnetworksHidlTest::TearDown();
     }
 
-    void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
-                          const hidl_vec<hidl_handle>& dataCache, bool* supported,
-                          sp<IPreparedModel>* preparedModel = nullptr) {
-        if (preparedModel != nullptr) *preparedModel = nullptr;
+    // Model and example creators. Depending on kOperandType, the following methods return either
+    // the float32 or the quant8 variant of the model and examples.
+    Model createTestModel() {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::createTestModel();
+        } else {
+            return quant8_model::createTestModel();
+        }
+    }
 
-        // See if service can handle model.
+    std::vector<MixedTypedExample> get_examples() {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::get_examples();
+        } else {
+            return quant8_model::get_examples();
+        }
+    }
+
+    Model createLargeTestModel(OperationType op, uint32_t len) {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
+        } else {
+            return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
+        }
+    }
+
+    std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+        if (kOperandType == OperandType::TENSOR_FLOAT32) {
+            return float32_model::getLargeModelExamples(len);
+        } else {
+            return quant8_model::getLargeModelExamples(len);
+        }
+    }
+
+    // See if the service can handle the model.
+    bool isModelFullySupported(const V1_2::Model& model) {
         bool fullySupportsModel = false;
         Return<void> supportedCall = device->getSupportedOperations_1_2(
                 model,
@@ -163,9 +360,14 @@
                     fullySupportsModel = std::all_of(supported.begin(), supported.end(),
                                                      [](bool valid) { return valid; });
                 });
-        ASSERT_TRUE(supportedCall.isOk());
-        *supported = fullySupportsModel;
-        if (!fullySupportsModel) return;
+        EXPECT_TRUE(supportedCall.isOk());
+        return fullySupportsModel;
+    }
+
+    void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
+                          const hidl_vec<hidl_handle>& dataCache,
+                          sp<IPreparedModel>* preparedModel = nullptr) {
+        if (preparedModel != nullptr) *preparedModel = nullptr;
 
         // Launch prepare model.
         sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
@@ -199,8 +401,8 @@
         return false;
     }
 
-    bool checkEarlyTermination(bool supported) {
-        if (!supported) {
+    bool checkEarlyTermination(const V1_2::Model& model) {
+        if (!isModelFullySupported(model)) {
             LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                          "prepare model that it does not support.";
             std::cout << "[          ]   Early termination of test because vendor service cannot "
@@ -250,21 +452,31 @@
     uint32_t mNumModelCache;
     uint32_t mNumDataCache;
     uint32_t mIsCachingSupported;
+
+    // The primary data type of the testModel.
+    const OperandType kOperandType;
 };
 
-TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
+// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
+// pass running with float32 models and the second pass running with quant8 models.
+class CompilationCachingTest : public CompilationCachingTestBase,
+                               public ::testing::WithParamInterface<OperandType> {
+  protected:
+    CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
+};
+
+TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
     sp<IPreparedModel> preparedModel = nullptr;
 
     // Save the compilation to cache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache);
     }
 
     // Retrieve preparedModel from cache.
@@ -294,14 +506,14 @@
                                            /*testDynamicOutputShape=*/false);
 }
 
-TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
+TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
     sp<IPreparedModel> preparedModel = nullptr;
 
     // Save the compilation to cache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
@@ -318,8 +530,7 @@
                     write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
                     sizeof(dummyBytes));
         }
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache);
     }
 
     // Retrieve preparedModel from cache.
@@ -358,13 +569,13 @@
                                            /*testDynamicOutputShape=*/false);
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
 
     // Test with number of model cache files greater than mNumModelCache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an additional cache file for model cache.
         mModelCache.push_back({mTmpCache});
@@ -372,8 +583,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mModelCache.pop_back();
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -392,7 +602,6 @@
 
     // Test with number of model cache files smaller than mNumModelCache.
     if (mModelCache.size() > 0) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pop out the last cache file.
         auto tmp = mModelCache.back();
@@ -401,8 +610,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mModelCache.push_back(tmp);
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -421,7 +629,6 @@
 
     // Test with number of data cache files greater than mNumDataCache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an additional cache file for data cache.
         mDataCache.push_back({mTmpCache});
@@ -429,8 +636,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mDataCache.pop_back();
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -449,7 +655,6 @@
 
     // Test with number of data cache files smaller than mNumDataCache.
     if (mDataCache.size() > 0) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pop out the last cache file.
         auto tmp = mDataCache.back();
@@ -458,8 +663,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mDataCache.push_back(tmp);
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -477,18 +681,17 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
 
     // Save the compilation to cache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache);
     }
 
     // Test with number of model cache files greater than mNumModelCache.
@@ -558,13 +761,13 @@
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
 
     // Go through each handle in model cache, test with NumFd greater than 1.
     for (uint32_t i = 0; i < mNumModelCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an invalid number of fds for handle i.
         mModelCache[i].push_back(mTmpCache);
@@ -572,8 +775,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mModelCache[i].pop_back();
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -592,7 +794,6 @@
 
     // Go through each handle in model cache, test with NumFd equal to 0.
     for (uint32_t i = 0; i < mNumModelCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an invalid number of fds for handle i.
         auto tmp = mModelCache[i].back();
@@ -601,8 +802,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mModelCache[i].push_back(tmp);
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -621,7 +821,6 @@
 
     // Go through each handle in data cache, test with NumFd greater than 1.
     for (uint32_t i = 0; i < mNumDataCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an invalid number of fds for handle i.
         mDataCache[i].push_back(mTmpCache);
@@ -629,8 +828,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mDataCache[i].pop_back();
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -649,7 +847,6 @@
 
     // Go through each handle in data cache, test with NumFd equal to 0.
     for (uint32_t i = 0; i < mNumDataCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         // Pass an invalid number of fds for handle i.
         auto tmp = mDataCache[i].back();
@@ -658,8 +855,7 @@
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
         mDataCache[i].push_back(tmp);
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -677,18 +873,17 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
 
     // Save the compilation to cache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache);
     }
 
     // Go through each handle in model cache, test with NumFd greater than 1.
@@ -758,23 +953,22 @@
     }
 }
 
-TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
     std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
     std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
 
     // Go through each handle in model cache, test with invalid access mode.
     for (uint32_t i = 0; i < mNumModelCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         modelCacheMode[i] = AccessMode::READ_ONLY;
         createCacheHandles(mModelCache, modelCacheMode, &modelCache);
         createCacheHandles(mDataCache, dataCacheMode, &dataCache);
         modelCacheMode[i] = AccessMode::READ_WRITE;
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -793,15 +987,13 @@
 
     // Go through each handle in data cache, test with invalid access mode.
     for (uint32_t i = 0; i < mNumDataCache; i++) {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         dataCacheMode[i] = AccessMode::READ_ONLY;
         createCacheHandles(mModelCache, modelCacheMode, &modelCache);
         createCacheHandles(mDataCache, dataCacheMode, &dataCache);
         dataCacheMode[i] = AccessMode::READ_WRITE;
         sp<IPreparedModel> preparedModel = nullptr;
-        saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
         generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
@@ -819,20 +1011,19 @@
     }
 }
 
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
     // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    const Model testModel = createTestModel();
+    if (checkEarlyTermination(testModel)) return;
     std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
     std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
 
     // Save the compilation to cache.
     {
-        bool supported;
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModel, modelCache, dataCache);
     }
 
     // Go through each handle in model cache, test with invalid access mode.
@@ -864,129 +1055,198 @@
     }
 }
 
-class CompilationCachingSecurityTest : public CompilationCachingTest,
-                                       public ::testing::WithParamInterface<uint32_t> {
-  protected:
-    void SetUp() {
-        CompilationCachingTest::SetUp();
-        generator.seed(kSeed);
-    }
+// Copy file contents between file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+// The outer vector sizes must match and the inner vectors must have size = 1.
+static void copyCacheFiles(const std::vector<std::vector<std::string>>& from,
+                           const std::vector<std::vector<std::string>>& to) {
+    constexpr size_t kBufferSize = 1000000;
+    uint8_t buffer[kBufferSize];
 
-    // Get a random integer within a closed range [lower, upper].
-    template <typename T>
-    T getRandomInt(T lower, T upper) {
-        std::uniform_int_distribution<T> dis(lower, upper);
-        return dis(generator);
-    }
+    ASSERT_EQ(from.size(), to.size());
+    for (uint32_t i = 0; i < from.size(); i++) {
+        ASSERT_EQ(from[i].size(), 1u);
+        ASSERT_EQ(to[i].size(), 1u);
+        int fromFd = open(from[i][0].c_str(), O_RDONLY);
+        int toFd = open(to[i][0].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+        ASSERT_GE(fromFd, 0);
+        ASSERT_GE(toFd, 0);
 
-    const uint32_t kSeed = GetParam();
-    std::mt19937 generator;
-};
-
-TEST_P(CompilationCachingSecurityTest, CorruptedSecuritySensitiveCache) {
-    if (!mIsCachingSupported) return;
-
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-
-    for (uint32_t i = 0; i < mNumModelCache; i++) {
-        // Save the compilation to cache.
-        {
-            bool supported;
-            hidl_vec<hidl_handle> modelCache, dataCache;
-            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
-            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-            saveModelToCache(testModel, modelCache, dataCache, &supported);
-            if (checkEarlyTermination(supported)) return;
+        ssize_t readBytes;
+        while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) {
+            ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes);
         }
+        ASSERT_GE(readBytes, 0);
 
-        // Randomly flip one single bit of the cache entry.
-        FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
-        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
-        long int fileSize = ftell(pFile);
-        if (fileSize == 0) {
-            fclose(pFile);
-            continue;
-        }
-        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
-        int readByte = fgetc(pFile);
-        ASSERT_NE(readByte, EOF);
-        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
-        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
-        fclose(pFile);
-
-        // Retrieve preparedModel from cache, expect failure.
-        {
-            sp<IPreparedModel> preparedModel = nullptr;
-            ErrorStatus status;
-            hidl_vec<hidl_handle> modelCache, dataCache;
-            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
-            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
-            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-            ASSERT_EQ(preparedModel, nullptr);
-        }
+        close(fromFd);
+        close(toFd);
     }
 }
 
-TEST_P(CompilationCachingSecurityTest, WrongLengthSecuritySensitiveCache) {
+// Number of operations in the large test model.
+constexpr uint32_t kLargeModelSize = 100;
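+// Number of iterations of the probabilistic TOCTOU tests below.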
+constexpr uint32_t kNumIterationsTOCTOU = 100;
+
+TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
     if (!mIsCachingSupported) return;
 
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
+    // Create test models and check if fully supported by the service.
+    const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    if (checkEarlyTermination(testModelMul)) return;
+    const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    if (checkEarlyTermination(testModelAdd)) return;
 
-    for (uint32_t i = 0; i < mNumModelCache; i++) {
-        // Save the compilation to cache.
-        {
-            bool supported;
-            hidl_vec<hidl_handle> modelCache, dataCache;
-            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
-            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-            saveModelToCache(testModel, modelCache, dataCache, &supported);
-            if (checkEarlyTermination(supported)) return;
-        }
-
-        // Randomly append bytes to the cache entry.
-        FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
-        uint32_t appendLength = getRandomInt(1, 256);
-        for (uint32_t i = 0; i < appendLength; i++) {
-            ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
-        }
-        fclose(pFile);
-
-        // Retrieve preparedModel from cache, expect failure.
-        {
-            sp<IPreparedModel> preparedModel = nullptr;
-            ErrorStatus status;
-            hidl_vec<hidl_handle> modelCache, dataCache;
-            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
-            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
-            ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
-            ASSERT_EQ(preparedModel, nullptr);
-        }
+    // Save the testModelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
     }
-}
-
-TEST_P(CompilationCachingSecurityTest, WrongToken) {
-    if (!mIsCachingSupported) return;
-
-    // Create test HIDL model and compile.
-    Model testModel = createTestModel();
-
-    // Save the compilation to cache.
     {
-        bool supported;
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(testModelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for testModelAdd.
+    mToken[0]++;
+
+    // This test is probabilistic, so we run it multiple times.
+    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+        // Save the testModelAdd compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+            // Spawn a thread to copy the cache content concurrently while saving to cache.
+            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+            saveModelToCache(testModelAdd, modelCache, dataCache);
+            thread.join();
+        }
+
+        // Retrieve preparedModel from cache.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+            // The preparation may fail or succeed, but it must not crash. If it succeeds, executing
+            // the prepared model must produce the correct result and must not crash.
+            if (status != ErrorStatus::NONE) {
+                ASSERT_EQ(preparedModel, nullptr);
+            } else {
+                ASSERT_NE(preparedModel, nullptr);
+                generated_tests::EvaluatePreparedModel(
+                        preparedModel, [](int) { return false; },
+                        getLargeModelExamples(kLargeModelSize),
+                        testModelAdd.relaxComputationFloat32toFloat16,
+                        /*testDynamicOutputShape=*/false);
+            }
+        }
+    }
+}
+
+TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
+    if (!mIsCachingSupported) return;
+
+    // Create test models and check if fully supported by the service.
+    const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    if (checkEarlyTermination(testModelMul)) return;
+    const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    if (checkEarlyTermination(testModelAdd)) return;
+
+    // Save the testModelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
+    }
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(testModelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for testModelAdd.
+    mToken[0]++;
+
+    // This test is probabilistic, so we run it multiple times.
+    for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+        // Save the testModelAdd compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModelAdd, modelCache, dataCache);
+        }
+
+        // Retrieve preparedModel from cache.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+            // Spawn a thread to copy the cache content concurrently while preparing from cache.
+            std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+            thread.join();
+
+            // The preparation may fail or succeed, but it must not crash. If it succeeds, executing
+            // the prepared model must produce the correct result and must not crash.
+            if (status != ErrorStatus::NONE) {
+                ASSERT_EQ(preparedModel, nullptr);
+            } else {
+                ASSERT_NE(preparedModel, nullptr);
+                generated_tests::EvaluatePreparedModel(
+                        preparedModel, [](int) { return false; },
+                        getLargeModelExamples(kLargeModelSize),
+                        testModelAdd.relaxComputationFloat32toFloat16,
+                        /*testDynamicOutputShape=*/false);
+            }
+        }
+    }
+}
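// Illustrative sketch (not part of this patch): the time-of-check-to-time-of-use
// race exercised above, in miniature and with only the standard library. One
// thread keeps rewriting a file while another thread reads it; the reader may
// observe either version or a mix of both, and must validate what it actually
// read rather than trusting an earlier check -- which is what the driver must
// do when the cache files change between saveModelToCache and
// prepareModelFromCache. The file path and iteration count are arbitrary.
#include <fstream>
#include <iterator>
#include <string>
#include <thread>

static void toctouSketch() {
    const std::string path = "/tmp/toctou_demo";            // hypothetical scratch file
    std::ofstream(path) << std::string(1024, 'A');          // initial, "checked" content

    std::thread writer([&path] {
        for (int i = 0; i < 1000; ++i) {
            std::ofstream(path) << std::string(1024, 'B');  // concurrent overwrite
        }
    });

    // The consumer (prepareModelFromCache in the real test) reads concurrently
    // and must not crash regardless of which bytes it ends up seeing.
    std::ifstream in(path);
    const std::string contents((std::istreambuf_iterator<char>(in)),
                               std::istreambuf_iterator<char>());
    writer.join();
    (void)contents;
}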
+
+TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
+    if (!mIsCachingSupported) return;
+
+    // Create test models and check if fully supported by the service.
+    const Model testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+    if (checkEarlyTermination(testModelMul)) return;
+    const Model testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+    if (checkEarlyTermination(testModelAdd)) return;
+
+    // Save the testModelMul compilation to cache.
+    auto modelCacheMul = mModelCache;
+    for (auto& cache : modelCacheMul) {
+        cache[0].append("_mul");
+    }
+    {
+        hidl_vec<hidl_handle> modelCache, dataCache;
+        createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+        createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+        saveModelToCache(testModelMul, modelCache, dataCache);
+    }
+
+    // Use a different token for testModelAdd.
+    mToken[0]++;
+
+    // Save the testModelAdd compilation to cache.
+    {
         hidl_vec<hidl_handle> modelCache, dataCache;
         createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
         createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
-        saveModelToCache(testModel, modelCache, dataCache, &supported);
-        if (checkEarlyTermination(supported)) return;
+        saveModelToCache(testModelAdd, modelCache, dataCache);
     }
 
-    // Randomly flip one single bit in mToken.
-    uint32_t ind = getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
-    mToken[ind] ^= (1U << getRandomInt(0, 7));
+    // Replace the model cache of testModelAdd with testModelMul.
+    copyCacheFiles(modelCacheMul, mModelCache);
 
     // Retrieve the preparedModel from cache, expect failure.
     {
@@ -1001,8 +1261,153 @@
     }
 }
 
+static const auto kOperandTypeChoices =
+        ::testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
+
+class CompilationCachingSecurityTest
+    : public CompilationCachingTestBase,
+      public ::testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
+  protected:
+    CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
+
+    void SetUp() {
+        CompilationCachingTestBase::SetUp();
+        generator.seed(kSeed);
+    }
+
+    // Get a random integer within a closed range [lower, upper].
+    template <typename T>
+    T getRandomInt(T lower, T upper) {
+        std::uniform_int_distribution<T> dis(lower, upper);
+        return dis(generator);
+    }
+
+    // Randomly flip one single bit of the cache entry.
+    void flipOneBitOfCache(const std::string& filename, bool* skip) {
+        FILE* pFile = fopen(filename.c_str(), "r+");
+        ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+        long int fileSize = ftell(pFile);
+        if (fileSize == 0) {
+            fclose(pFile);
+            *skip = true;
+            return;
+        }
+        ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+        int readByte = fgetc(pFile);
+        ASSERT_NE(readByte, EOF);
+        ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+        ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+        fclose(pFile);
+        *skip = false;
+    }
+
+    // Randomly append bytes to the cache entry.
+    void appendBytesToCache(const std::string& filename, bool* skip) {
+        FILE* pFile = fopen(filename.c_str(), "a");
+        uint32_t appendLength = getRandomInt(1, 256);
+        for (uint32_t i = 0; i < appendLength; i++) {
+            ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
+        }
+        fclose(pFile);
+        *skip = false;
+    }
+
+    enum class ExpectedResult { GENERAL_FAILURE, NOT_CRASH };
+
+    // Test if the driver behaves as expected when given corrupted cache or token.
+    // The modifier will be invoked after save to cache but before prepare from cache.
+    // The modifier takes a single output parameter "skip", which it sets to indicate
+    // whether the test should be skipped.
+    void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
+        const Model testModel = createTestModel();
+        if (checkEarlyTermination(testModel)) return;
+
+        // Save the compilation to cache.
+        {
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            saveModelToCache(testModel, modelCache, dataCache);
+        }
+
+        bool skip = false;
+        modifier(&skip);
+        if (skip) return;
+
+        // Retrieve preparedModel from cache.
+        {
+            sp<IPreparedModel> preparedModel = nullptr;
+            ErrorStatus status;
+            hidl_vec<hidl_handle> modelCache, dataCache;
+            createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+            createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+            prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+            switch (expected) {
+                case ExpectedResult::GENERAL_FAILURE:
+                    ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+                    ASSERT_EQ(preparedModel, nullptr);
+                    break;
+                case ExpectedResult::NOT_CRASH:
+                    ASSERT_EQ(preparedModel == nullptr, status != ErrorStatus::NONE);
+                    break;
+                default:
+                    FAIL();
+            }
+        }
+    }
+
+    const uint32_t kSeed = std::get<1>(GetParam());
+    std::mt19937 generator;
+};
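// Illustrative sketch (not part of this patch): the fixture above seeds its
// std::mt19937 engine with a value taken from the test parameter, so the
// "random" bit flips and appended bytes are reproducible -- re-running the same
// parameterized instance corrupts exactly the same bytes. Minimal demonstration:
#include <cassert>
#include <cstdint>
#include <random>

static void seededGeneratorsAreReproducible() {
    constexpr uint32_t kDemoSeed = 7;  // stands in for the seed test parameter
    std::mt19937 a(kDemoSeed);
    std::mt19937 b(kDemoSeed);
    std::uniform_int_distribution<int> bitIndex(0, 7);
    for (int i = 0; i < 100; ++i) {
        assert(bitIndex(a) == bitIndex(b));  // identical seeds give identical sequences
    }
}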
+
+TEST_P(CompilationCachingSecurityTest, CorruptedModelCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+                           [this, i](bool* skip) { flipOneBitOfCache(mModelCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthModelCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumModelCache; i++) {
+        testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+                           [this, i](bool* skip) { appendBytesToCache(mModelCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, CorruptedDataCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        testCorruptedCache(ExpectedResult::NOT_CRASH,
+                           [this, i](bool* skip) { flipOneBitOfCache(mDataCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthDataCache) {
+    if (!mIsCachingSupported) return;
+    for (uint32_t i = 0; i < mNumDataCache; i++) {
+        testCorruptedCache(ExpectedResult::NOT_CRASH,
+                           [this, i](bool* skip) { appendBytesToCache(mDataCache[i][0], skip); });
+    }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongToken) {
+    if (!mIsCachingSupported) return;
+    testCorruptedCache(ExpectedResult::GENERAL_FAILURE, [this](bool* skip) {
+        // Randomly flip one single bit in mToken.
+        uint32_t ind =
+                getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
+        mToken[ind] ^= (1U << getRandomInt(0, 7));
+        *skip = false;
+    });
+}
+
 INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
-                        ::testing::Range(0U, 10U));
+                        ::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
 
 }  // namespace functional
 }  // namespace vts
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
new file mode 100644
index 0000000..8c6391e
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "ExecutionBurstServer.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <cstring>
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+using ::android::nn::ExecutionBurstController;
+using ::android::nn::RequestChannelSender;
+using ::android::nn::ResultChannelReceiver;
+using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;
+
+// This constant value represents the length of an FMQ that is large enough to
+// return a result from a burst execution for all of the generated test cases.
+constexpr size_t kExecutionBurstChannelLength = 1024;
+
+// This constant value represents a length of an FMQ that is not large enough
+// to return a result from a burst execution for some of the generated test
+// cases.
+constexpr size_t kExecutionBurstChannelSmallLength = 8;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static bool badTiming(Timing timing) {
+    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
+static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurstCallback>& callback,
+                        std::unique_ptr<RequestChannelSender>* sender,
+                        std::unique_ptr<ResultChannelReceiver>* receiver,
+                        sp<IBurstContext>* context,
+                        size_t resultChannelLength = kExecutionBurstChannelLength) {
+    ASSERT_NE(nullptr, preparedModel.get());
+    ASSERT_NE(nullptr, sender);
+    ASSERT_NE(nullptr, receiver);
+    ASSERT_NE(nullptr, context);
+
+    // create FMQ objects
+    auto [fmqRequestChannel, fmqRequestDescriptor] =
+            RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
+    auto [fmqResultChannel, fmqResultDescriptor] =
+            ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
+    ASSERT_NE(nullptr, fmqRequestChannel.get());
+    ASSERT_NE(nullptr, fmqResultChannel.get());
+    ASSERT_NE(nullptr, fmqRequestDescriptor);
+    ASSERT_NE(nullptr, fmqResultDescriptor);
+
+    // configure burst
+    ErrorStatus errorStatus;
+    sp<IBurstContext> burstContext;
+    const Return<void> ret = preparedModel->configureExecutionBurst(
+            callback, *fmqRequestDescriptor, *fmqResultDescriptor,
+            [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+                errorStatus = status;
+                burstContext = context;
+            });
+    ASSERT_TRUE(ret.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, errorStatus);
+    ASSERT_NE(nullptr, burstContext.get());
+
+    // return values
+    *sender = std::move(fmqRequestChannel);
+    *receiver = std::move(fmqResultChannel);
+    *context = burstContext;
+}
+
+static void createBurstWithResultChannelLength(
+        const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+        std::shared_ptr<ExecutionBurstController>* controller) {
+    ASSERT_NE(nullptr, preparedModel.get());
+    ASSERT_NE(nullptr, controller);
+
+    // create FMQ objects
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
+                                        resultChannelLength));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // return values
+    *controller = std::make_shared<ExecutionBurstController>(std::move(sender), std::move(receiver),
+                                                             context, callback);
+}
+
+// Primary validation function. This function will take a valid serialized
+// request, apply a mutation to it to invalidate the serialized request, then
+// pass it to interface calls that use the serialized request. Note that the
+// serialized request here is passed by value, and any mutation to the
+// serialized request does not leave this function.
+static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                     const std::string& message, std::vector<FmqRequestDatum> serialized,
+                     const std::function<void(std::vector<FmqRequestDatum>*)>& mutation) {
+    mutation(&serialized);
+
+    // skip if packet is too large to send
+    if (serialized.size() > kExecutionBurstChannelLength) {
+        return;
+    }
+
+    SCOPED_TRACE(message);
+
+    // send invalid packet
+    ASSERT_TRUE(sender->sendPacket(serialized));
+
+    // receive error
+    auto results = receiver->getBlocking();
+    ASSERT_TRUE(results.has_value());
+    const auto [status, outputShapes, timing] = std::move(*results);
+    EXPECT_NE(ErrorStatus::NONE, status);
+    EXPECT_EQ(0u, outputShapes.size());
+    EXPECT_TRUE(badTiming(timing));
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// creates pre-set invalid packet entries for convenience.
+static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
+    const FmqRequestDatum::PacketInformation packetInformation = {
+            /*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
+            /*.numberOfPools=*/10};
+    const FmqRequestDatum::OperandInformation operandInformation = {
+            /*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
+    const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
+    std::vector<FmqRequestDatum> bad(7);
+    bad[0].packetInformation(packetInformation);
+    bad[1].inputOperandInformation(operandInformation);
+    bad[2].inputOperandDimensionValue(0);
+    bad[3].outputOperandInformation(operandInformation);
+    bad[4].outputOperandDimensionValue(0);
+    bad[5].poolIdentifier(invalidPoolIdentifier);
+    bad[6].measureTiming(MeasureTiming::YES);
+    return bad;
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// retrieves pre-set invalid packet entries for convenience. This function
+// caches these data so they can be reused on subsequent validation checks.
+static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
+    static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
+    return bad;
+}
+
+///////////////////////// REMOVE DATUM ////////////////////////////////////
+
+static void removeDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                            const std::vector<FmqRequestDatum>& serialized) {
+    for (size_t index = 0; index < serialized.size(); ++index) {
+        const std::string message = "removeDatum: removed datum at index " + std::to_string(index);
+        validate(sender, receiver, message, serialized,
+                 [index](std::vector<FmqRequestDatum>* serialized) {
+                     serialized->erase(serialized->begin() + index);
+                 });
+    }
+}
+
+///////////////////////// ADD DATUM ////////////////////////////////////
+
+static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                         const std::vector<FmqRequestDatum>& serialized) {
+    const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
+    for (size_t index = 0; index <= serialized.size(); ++index) {
+        for (size_t type = 0; type < extra.size(); ++type) {
+            const std::string message = "addDatum: added datum type " + std::to_string(type) +
+                                        " at index " + std::to_string(index);
+            validate(sender, receiver, message, serialized,
+                     [index, type, &extra](std::vector<FmqRequestDatum>* serialized) {
+                         serialized->insert(serialized->begin() + index, extra[type]);
+                     });
+        }
+    }
+}
+
+///////////////////////// MUTATE DATUM ////////////////////////////////////
+
+// A replacement is interesting for validation only if it actually changes the datum and does
+// not merely swap one dimension value for another of the same kind; such a swap can still
+// deserialize to a well-formed request, so a failure cannot be reliably expected.
+static bool interestingCase(const FmqRequestDatum& lhs, const FmqRequestDatum& rhs) {
+    using Discriminator = FmqRequestDatum::hidl_discriminator;
+
+    const bool differentValues = (lhs != rhs);
+    const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
+    const auto discriminator = rhs.getDiscriminator();
+    const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
+                                   discriminator == Discriminator::outputOperandDimensionValue);
+
+    return differentValues && !(sameDiscriminator && isDimensionValue);
+}
+
+static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+                            const std::vector<FmqRequestDatum>& serialized) {
+    const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
+    for (size_t index = 0; index < serialized.size(); ++index) {
+        for (size_t type = 0; type < change.size(); ++type) {
+            if (interestingCase(serialized[index], change[type])) {
+                const std::string message = "mutateDatum: changed datum at index " +
+                                            std::to_string(index) + " to datum type " +
+                                            std::to_string(type);
+                validate(sender, receiver, message, serialized,
+                         [index, type, &change](std::vector<FmqRequestDatum>* serialized) {
+                             (*serialized)[index] = change[type];
+                         });
+            }
+        }
+    }
+}
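// Illustrative sketch (not part of this patch) of the negative-testing pattern
// shared by removeDatumTest, addDatumTest and mutateDatumTest above: start from
// a known-valid serialized packet, apply one single-element mutation, and
// require the consumer to reject the result. The packet format and parser below
// are toy stand-ins for the FMQ request serialization.
#include <cassert>
#include <cstdint>
#include <vector>

// Toy packet: element 0 holds the payload size, the rest is the payload.
static bool parseToyPacket(const std::vector<uint32_t>& packet) {
    return !packet.empty() && packet[0] == packet.size() - 1;
}

static void removeDatumToySketch() {
    const std::vector<uint32_t> valid = {3, 10, 20, 30};  // known-valid packet
    for (size_t index = 0; index < valid.size(); ++index) {
        std::vector<uint32_t> mutated = valid;
        mutated.erase(mutated.begin() + index);  // drop one datum, as removeDatumTest does
        // Every single removal breaks the size header, so the consumer must
        // reject the packet -- mirroring EXPECT_NE(ErrorStatus::NONE, status).
        assert(!parseToyPacket(mutated));
    }
}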
+
+///////////////////////// BURST VALIDATION TESTS ///////////////////////////////////
+
+static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
+                                       const std::vector<Request>& requests) {
+    // create burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // validate each request
+    for (const Request& request : requests) {
+        // load memory into callback slots
+        std::vector<intptr_t> keys;
+        keys.reserve(request.pools.size());
+        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+        const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+        // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
+        // subsequent slot validation testing)
+        ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+            return slot != std::numeric_limits<int32_t>::max();
+        }));
+
+        // serialize the request
+        const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
+
+        // validations
+        removeDatumTest(sender.get(), receiver.get(), serialized);
+        addDatumTest(sender.get(), receiver.get(), serialized);
+        mutateDatumTest(sender.get(), receiver.get(), serialized);
+    }
+}
+
+// This test validates that when the Result message size exceeds the length of
+// the result FMQ, the service instance fails gracefully and returns an error.
+static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
+                                   const std::vector<Request>& requests) {
+    // create regular burst
+    std::shared_ptr<ExecutionBurstController> controllerRegular;
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelLength, &controllerRegular));
+    ASSERT_NE(nullptr, controllerRegular.get());
+
+    // create burst with small output channel
+    std::shared_ptr<ExecutionBurstController> controllerSmall;
+    ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+            preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
+    ASSERT_NE(nullptr, controllerSmall.get());
+
+    // validate each request
+    for (const Request& request : requests) {
+        // load memory into callback slots
+        std::vector<intptr_t> keys(request.pools.size());
+        for (size_t i = 0; i < keys.size(); ++i) {
+            keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+        }
+
+        // collect serialized result by running regular burst
+        const auto [statusRegular, outputShapesRegular, timingRegular] =
+                controllerRegular->compute(request, MeasureTiming::NO, keys);
+
+        // skip test if regular burst output isn't useful for testing a failure
+        // caused by a result FMQ that is too short
+        const std::vector<FmqResultDatum> serialized =
+                ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+        if (statusRegular != ErrorStatus::NONE ||
+            serialized.size() <= kExecutionBurstChannelSmallLength) {
+            continue;
+        }
+
+        // by this point, execution should fail because the result channel isn't
+        // large enough to return the serialized result
+        const auto [statusSmall, outputShapesSmall, timingSmall] =
+                controllerSmall->compute(request, MeasureTiming::NO, keys);
+        EXPECT_NE(ErrorStatus::NONE, statusSmall);
+        EXPECT_EQ(0u, outputShapesSmall.size());
+        EXPECT_TRUE(badTiming(timingSmall));
+    }
+}
+
+static bool isSanitized(const FmqResultDatum& datum) {
+    using Discriminator = FmqResultDatum::hidl_discriminator;
+
+    // check to ensure the padding values in the returned
+    // FmqResultDatum::OperandInformation are initialized to 0
+    if (datum.getDiscriminator() == Discriminator::operandInformation) {
+        static_assert(
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) == 0,
+                "unexpected value for offset of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(
+                sizeof(FmqResultDatum::OperandInformation::isSufficient) == 1,
+                "unexpected value for size of FmqResultDatum::OperandInformation::isSufficient");
+        static_assert(offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) == 4,
+                      "unexpected value for offset of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation::numberOfDimensions) == 4,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation::numberOfDimensions");
+        static_assert(sizeof(FmqResultDatum::OperandInformation) == 8,
+                      "unexpected value for size of "
+                      "FmqResultDatum::OperandInformation");
+
+        constexpr size_t paddingOffset =
+                offsetof(FmqResultDatum::OperandInformation, isSufficient) +
+                sizeof(FmqResultDatum::OperandInformation::isSufficient);
+        constexpr size_t paddingSize =
+                offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) - paddingOffset;
+
+        // Value-initialization alone does not guarantee zeroed padding bytes, so
+        // explicitly clear the whole reference object before comparing.
+        FmqResultDatum::OperandInformation initialized{};
+        std::memset(&initialized, 0, sizeof(initialized));
+
+        const char* initializedPaddingStart =
+                reinterpret_cast<const char*>(&initialized) + paddingOffset;
+        const char* datumPaddingStart =
+                reinterpret_cast<const char*>(&datum.operandInformation()) + paddingOffset;
+
+        return std::memcmp(datumPaddingStart, initializedPaddingStart, paddingSize) == 0;
+    }
+
+    // there are no other padding initialization checks required, so return true
+    // for any sum-type that isn't FmqResultDatum::OperandInformation
+    return true;
+}
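// Illustrative sketch (not part of this patch) of the technique isSanitized()
// uses above: padding bytes between struct members are not guaranteed to be
// zero after value-initialization, so the padding region is located with
// offsetof() and compared byte-for-byte against a memset-cleared reference.
#include <cstddef>
#include <cstdint>
#include <cstring>

struct PaddedExample {
    bool flag;       // 1 byte, typically followed by 3 padding bytes
    uint32_t count;  // aligned to a 4-byte boundary on common ABIs
};

static bool paddingIsZeroed(const PaddedExample& e) {
    constexpr size_t paddingOffset = offsetof(PaddedExample, flag) + sizeof(PaddedExample::flag);
    constexpr size_t paddingSize = offsetof(PaddedExample, count) - paddingOffset;

    PaddedExample zeroed;
    std::memset(&zeroed, 0, sizeof(zeroed));  // memset clears the padding bytes as well

    return std::memcmp(reinterpret_cast<const char*>(&e) + paddingOffset,
                       reinterpret_cast<const char*>(&zeroed) + paddingOffset,
                       paddingSize) == 0;
}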
+
+static void validateBurstSanitized(const sp<IPreparedModel>& preparedModel,
+                                   const std::vector<Request>& requests) {
+    // create burst
+    std::unique_ptr<RequestChannelSender> sender;
+    std::unique_ptr<ResultChannelReceiver> receiver;
+    sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+    sp<IBurstContext> context;
+    ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+    ASSERT_NE(nullptr, sender.get());
+    ASSERT_NE(nullptr, receiver.get());
+    ASSERT_NE(nullptr, context.get());
+
+    // validate each request
+    for (const Request& request : requests) {
+        // load memory into callback slots
+        std::vector<intptr_t> keys;
+        keys.reserve(request.pools.size());
+        std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+                       [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+        const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+        // send valid request
+        ASSERT_TRUE(sender->send(request, MeasureTiming::YES, slots));
+
+        // receive valid result
+        auto serialized = receiver->getPacketBlocking();
+        ASSERT_TRUE(serialized.has_value());
+
+        // sanitize result
+        ASSERT_TRUE(std::all_of(serialized->begin(), serialized->end(), isSanitized))
+                << "The result serialized data is not properly sanitized";
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+void ValidationTest::validateBurst(const sp<IPreparedModel>& preparedModel,
+                                   const std::vector<Request>& requests) {
+    ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, requests));
+    ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, requests));
+    ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, requests));
+}
+
+}  // namespace functional
+}  // namespace vts
+}  // namespace V1_2
+}  // namespace neuralnetworks
+}  // namespace hardware
+}  // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 870d017..9703c2d 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -35,9 +35,7 @@
 namespace functional {
 
 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
 using ::android::hidl::memory::V1_0::IMemory;
-using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
 using test_helper::for_all;
 using test_helper::MixedTyped;
 using test_helper::MixedTypedExample;
@@ -48,55 +46,6 @@
     return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
 }
 
-static void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                                sp<IPreparedModel>* preparedModel) {
-    ASSERT_NE(nullptr, preparedModel);
-
-    // see if service can handle model
-    bool fullySupportsModel = false;
-    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_2(
-        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
-            ASSERT_EQ(ErrorStatus::NONE, status);
-            ASSERT_NE(0ul, supported.size());
-            fullySupportsModel =
-                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
-        });
-    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
-    // launch prepare model
-    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
-    ASSERT_NE(nullptr, preparedModelCallback.get());
-    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
-            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
-            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk());
-    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
-    // retrieve prepared model
-    preparedModelCallback->wait();
-    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
-    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
-
-    // The getSupportedOperations_1_2 call returns a list of operations that are
-    // guaranteed not to fail if prepareModel_1_2 is called, and
-    // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
-    // If a driver has any doubt that it can prepare an operation, it must
-    // return false. So here, if a driver isn't sure if it can support an
-    // operation, but reports that it successfully prepared the model, the test
-    // can continue.
-    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
-        ASSERT_EQ(nullptr, preparedModel->get());
-        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
-                     "prepare model that it does not support.";
-        std::cout << "[          ]   Unable to test Request validation because vendor service "
-                     "cannot prepare model that it does not support."
-                  << std::endl;
-        return;
-    }
-    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
-    ASSERT_NE(nullptr, preparedModel->get());
-}
-
 // Primary validation function. This function will take a valid request, apply a
 // mutation to it to invalidate the request, then pass it to interface calls
 // that use the request. Note that the request here is passed by value, and any
@@ -316,14 +265,8 @@
     return requests;
 }
 
-void ValidationTest::validateRequests(const Model& model, const std::vector<Request>& requests) {
-    // create IPreparedModel
-    sp<IPreparedModel> preparedModel;
-    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
-    if (preparedModel == nullptr) {
-        return;
-    }
-
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
+                                      const std::vector<Request>& requests) {
     // validate each request
     for (const Request& request : requests) {
         removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4728c28..4ddefe8 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
 
 #include "VtsHalNeuralnetworks.h"
 
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
 namespace android {
 namespace hardware {
 namespace neuralnetworks {
@@ -25,6 +29,60 @@
 namespace vts {
 namespace functional {
 
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_1::ExecutionPreference;
+
+// internal helper function
+static void createPreparedModel(const sp<IDevice>& device, const Model& model,
+                                sp<IPreparedModel>* preparedModel) {
+    ASSERT_NE(nullptr, preparedModel);
+
+    // see if service can handle model
+    bool fullySupportsModel = false;
+    Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_2(
+            model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+                ASSERT_EQ(ErrorStatus::NONE, status);
+                ASSERT_NE(0ul, supported.size());
+                fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+                                                 [](bool valid) { return valid; });
+            });
+    ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+    // launch prepare model
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+    ASSERT_NE(nullptr, preparedModelCallback.get());
+    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+            hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+    // retrieve prepared model
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    *preparedModel = getPreparedModel_1_2(preparedModelCallback);
+
+    // The getSupportedOperations_1_2 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_2 is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is guaranteed.
+    // If a driver has any doubt that it can prepare an operation, it must
+    // return false. So here, if a driver isn't sure if it can support an
+    // operation, but reports that it successfully prepared the model, the test
+    // can continue.
+    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+        ASSERT_EQ(nullptr, preparedModel->get());
+        LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+                     "prepare model that it does not support.";
+        std::cout << "[          ]   Unable to test Request validation because vendor service "
+                     "cannot prepare model that it does not support."
+                  << std::endl;
+        return;
+    }
+    ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+    ASSERT_NE(nullptr, preparedModel->get());
+}
+
 // A class for test environment setup
 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
 
@@ -68,6 +126,20 @@
     ::testing::VtsHalHidlTargetTestBase::TearDown();
 }
 
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
+    validateModel(model);
+
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    validateRequests(preparedModel, requests);
+    validateBurst(preparedModel, requests);
+}
+
 sp<IPreparedModel> getPreparedModel_1_2(
     const sp<V1_2::implementation::PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 404eec0..8d1acbe 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -72,8 +72,14 @@
 // Tag for the validation tests
 class ValidationTest : public NeuralnetworksHidlTest {
    protected:
-    void validateModel(const Model& model);
-    void validateRequests(const Model& model, const std::vector<Request>& request);
+     void validateEverything(const Model& model, const std::vector<Request>& requests);
+
+   private:
+     void validateModel(const Model& model);
+     void validateRequests(const sp<IPreparedModel>& preparedModel,
+                           const std::vector<Request>& requests);
+     void validateBurst(const sp<IPreparedModel>& preparedModel,
+                        const std::vector<Request>& requests);
 };
 
 // Tag for the generated tests
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index 730d969..a3073ac 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -19,19 +19,25 @@
 
 #define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
 
+namespace {
+const RadioAccessSpecifier GERAN_SPECIFIER_P900 = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
+                                                   .geranBands = {GeranBands::BAND_P900},
+                                                   .channels = {1, 2}};
+const RadioAccessSpecifier GERAN_SPECIFIER_850 = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
+                                                  .geranBands = {GeranBands::BAND_850},
+                                                  .channels = {128, 129}};
+}  // namespace
+
 /*
  * Test IRadio.startNetworkScan() for the response returned.
  */
 TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT, .interval = 60, .specifiers = {specifier}};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850}};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -89,18 +95,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 4,
-        .specifiers = {specifier},
-        .maxSearchTime = 60,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 1};
+            .type = ScanType::ONE_SHOT,
+            .interval = 4,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 60,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 1};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -126,18 +127,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 301,
-        .specifiers = {specifier},
-        .maxSearchTime = 60,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 1};
+            .type = ScanType::ONE_SHOT,
+            .interval = 301,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 60,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 1};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -163,18 +159,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        .maxSearchTime = 59,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 1};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 59,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 1};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -200,18 +191,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        .maxSearchTime = 3601,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 1};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 3601,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 1};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -237,18 +223,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        .maxSearchTime = 600,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 0};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 600,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 0};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -274,18 +255,13 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        .maxSearchTime = 600,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 11};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            .maxSearchTime = 600,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 11};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -311,20 +287,15 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        // Some vendor may not support max search time of 360s.
-        // This issue is tracked in b/112205669.
-        .maxSearchTime = 300,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 10};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            // Some vendors may not support a max search time of 360s.
+            // This issue is tracked in b/112205669.
+            .maxSearchTime = 300,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 10};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -350,21 +321,16 @@
 TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
     serial = GetRandomSerialNumber();
 
-    RadioAccessSpecifier specifier = {
-        .radioAccessNetwork = RadioAccessNetworks::GERAN,
-        .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
-        .channels = {1,2}};
-
     ::android::hardware::radio::V1_2::NetworkScanRequest request = {
-        .type = ScanType::ONE_SHOT,
-        .interval = 60,
-        .specifiers = {specifier},
-        // Some vendor may not support max search time of 360s.
-        // This issue is tracked in b/112205669.
-        .maxSearchTime = 300,
-        .incrementalResults = false,
-        .incrementalResultsPeriodicity = 10,
-        .mccMncs = {"310410"}};
+            .type = ScanType::ONE_SHOT,
+            .interval = 60,
+            .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+            // Some vendors may not support a max search time of 360s.
+            // This issue is tracked in b/112205669.
+            .maxSearchTime = 300,
+            .incrementalResults = false,
+            .incrementalResultsPeriodicity = 10,
+            .mccMncs = {"310410"}};
 
     Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
     ASSERT_OK(res);
@@ -757,6 +723,7 @@
     // Check the mcc [0, 999] and mnc [0, 999].
     string hidl_mcc;
     string hidl_mnc;
+    bool checkMccMnc = true;
     int totalIdentitySizeExpected = 1;
     ::android::hardware::radio::V1_2::CellIdentity cellIdentities =
         radioRsp_v1_2->dataRegResp.cellIdentity;
@@ -765,6 +732,7 @@
     if (cellInfoType == CellInfoType::NONE) {
         // All the fields are 0
         totalIdentitySizeExpected = 0;
+        checkMccMnc = false;
     } else if (cellInfoType == CellInfoType::GSM) {
         EXPECT_EQ(1, cellIdentities.cellIdentityGsm.size());
         ::android::hardware::radio::V1_2::CellIdentityGsm cig = cellIdentities.cellIdentityGsm[0];
@@ -791,6 +759,7 @@
         // CellIdentityCdma has no mcc and mnc.
         EXPECT_EQ(CellInfoType::CDMA, cellInfoType);
         EXPECT_EQ(1, cellIdentities.cellIdentityCdma.size());
+        checkMccMnc = false;
     }
 
     // Check only one CellIdentity is size 1, and others must be 0.
@@ -799,10 +768,13 @@
                   cellIdentities.cellIdentityLte.size() + cellIdentities.cellIdentityWcdma.size() +
                   cellIdentities.cellIdentityTdscdma.size());
 
-    int mcc = stoi(hidl_mcc);
-    int mnc = stoi(hidl_mnc);
-    EXPECT_TRUE(mcc >= 0 && mcc <= 999);
-    EXPECT_TRUE(mnc >= 0 && mnc <= 999);
+    // A 32-bit system might return invalid mcc and mnc HIDL strings such as "\xff\xff...".
+    if (checkMccMnc && hidl_mcc.size() < 4 && hidl_mnc.size() < 4) {
+        int mcc = stoi(hidl_mcc);
+        int mnc = stoi(hidl_mnc);
+        EXPECT_TRUE(mcc >= 0 && mcc <= 999);
+        EXPECT_TRUE(mnc >= 0 && mnc <= 999);
+    }
 }
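// Illustrative sketch (not part of this patch): a stricter pre-check for the
// reported MCC/MNC strings -- accept only one to three decimal digits before
// calling std::stoi, so malformed values (for example, short non-digit byte
// sequences) are rejected instead of making stoi throw.
#include <algorithm>
#include <cctype>
#include <string>

static bool looksLikeMccMnc(const std::string& s) {
    return !s.empty() && s.size() <= 3 &&
           std::all_of(s.begin(), s.end(),
                       [](unsigned char c) { return std::isdigit(c) != 0; });
}

// Usage sketch: if (looksLikeMccMnc(hidl_mcc) && looksLikeMccMnc(hidl_mnc)) {
//     ... safe to call stoi and check the [0, 999] ranges ...
// }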
 
 /*
diff --git a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
index 030f489..5b7a06d 100644
--- a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
@@ -33,9 +33,9 @@
     EXPECT_EQ(serial, radioRsp_v1_3->rspInfo.serial);
     ALOGI("getModemStackStatus, rspInfo.error = %s\n",
           toString(radioRsp_v1_3->rspInfo.error).c_str());
-    ASSERT_TRUE(CheckAnyOfErrors(
-            radioRsp_v1_3->rspInfo.error,
-            {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::MODEM_ERR}));
+    ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_3->rspInfo.error,
+                                 {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE,
+                                  RadioError::MODEM_ERR, RadioError::REQUEST_NOT_SUPPORTED}));
 
     // checking if getModemStackStatus returns true, as modem was enabled above
     if (RadioError::NONE == radioRsp_v1_3->rspInfo.error) {
@@ -50,9 +50,9 @@
         EXPECT_EQ(serial, radioRsp_v1_3->rspInfo.serial);
         ALOGI("getModemStackStatus, rspInfo.error = %s\n",
               toString(radioRsp_v1_3->rspInfo.error).c_str());
-        ASSERT_TRUE(CheckAnyOfErrors(
-                radioRsp_v1_3->rspInfo.error,
-                {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::MODEM_ERR}));
+        ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_3->rspInfo.error,
+                                     {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE,
+                                      RadioError::MODEM_ERR, RadioError::REQUEST_NOT_SUPPORTED}));
         // verify that enableModem did set isEnabled correctly
         EXPECT_EQ(true, radioRsp_v1_3->isModemEnabled);
     }
diff --git a/soundtrigger/2.2/vts/functional/Android.bp b/soundtrigger/2.2/vts/functional/Android.bp
new file mode 100644
index 0000000..08ccd7b
--- /dev/null
+++ b/soundtrigger/2.2/vts/functional/Android.bp
@@ -0,0 +1,27 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "VtsHalSoundtriggerV2_2TargetTest",
+    defaults: ["VtsHalTargetTestDefaults"],
+    srcs: ["VtsHalSoundtriggerV2_2TargetTest.cpp"],
+    static_libs: [
+        "android.hardware.soundtrigger@2.0",
+        "android.hardware.soundtrigger@2.1",
+        "android.hardware.soundtrigger@2.2",
+    ],
+    test_suites: ["general-tests"],
+}