Merge changes from topic "quant_coupling"

* changes:
  Add QUANT8_ASYMM_SIGNED support to SELECT op
  Add quantization coupling test
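
Background sketch (illustrative only; helper names are hypothetical, not part
of this patch): quantization coupling relies on the fact that
TENSOR_QUANT8_ASYMM (uint8, zeroPoint in [0, 255]) and
TENSOR_QUANT8_ASYMM_SIGNED (int8, zeroPoint in [-128, 127]) both encode
real = scale * (q - zeroPoint), so shifting the stored values and the
zeroPoint by -128 preserves every represented value:

    #include <cstdint>

    struct QuantParams {
        float scale;
        int32_t zeroPoint;
    };

    // Convert one TENSOR_QUANT8_ASYMM element and its parameters to the
    // equivalent TENSOR_QUANT8_ASYMM_SIGNED representation.
    inline int8_t toSignedValue(uint8_t q) {
        return static_cast<int8_t>(static_cast<int32_t>(q) - 128);
    }

    inline QuantParams toSignedParams(QuantParams p) {
        return {p.scale, p.zeroPoint - 128};
    }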
diff --git a/current.txt b/current.txt
index 8c7b4da..f176ab2 100644
--- a/current.txt
+++ b/current.txt
@@ -579,7 +579,7 @@
 9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
 fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
 40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
+72de91c3feba4b19c159cd1c413cbea596b78240caa43e31194e20e6f5b05c49 android.hardware.neuralnetworks@1.2::types
 a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
 1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
 fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -602,7 +602,7 @@
 9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
 4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
 94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-c511b1427b1c3f76af90967bbddaaf250db983a8d3abb9ff189fb5a807cf3d4d android.hardware.neuralnetworks@1.3::types
+554baa3b317e077b850afcbaac99daeef56861b1786540e56275a4fcad1f43e3 android.hardware.neuralnetworks@1.3::types
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
 44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 837ced5..ef71ea8 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -2448,15 +2448,17 @@
      *       then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 50: The clipping threshold for the output from the
      *       projection layer, such that values are bound within
      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 51: merge_outputs
      *       An {@link OperandType::BOOL} scalar specifying if the outputs
      *       from forward and backward cells should be merged.
@@ -4124,7 +4126,6 @@
      * * 0: A tensor of the same type and shape as input1 and input2.
      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
-     *
      */
     SELECT = 84,
 
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 7df14b1..f959e45 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -2375,15 +2375,17 @@
      *       then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 50: The clipping threshold for the output from the
      *       projection layer, such that values are bound within
      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
      *       If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
      *       this scalar must be of the type {@link OperandType::FLOAT32},
-     *       otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
-     *       this scalar must be of type {@link OperandType::FLOAT16}.
+     *       otherwise if all the input tensors have the type
+     *       {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+     *       of type {@link OperandType::FLOAT16}.
      * * 51: merge_outputs
      *       An {@link OperandType::BOOL} scalar specifying if the outputs
      *       from forward and backward cells should be merged.
@@ -4034,6 +4036,7 @@
      * * {@link OperandType::TENSOR_FLOAT32}
      * * {@link OperandType::TENSOR_INT32}
      * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
      *
      * Supported tensor rank: from 1
      *
@@ -4044,14 +4047,14 @@
      *      true) or input2 (if false).
      * * 1: An input tensor of the same shape as input0.
      * * 2: An input tensor of the same shape and type as input1.
-     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM}
+     *      and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
      *
      * Outputs:
      * * 0: A tensor of the same type and shape as input1 and input2.
      *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
-     *
      */
     SELECT = @1.2::OperationType:SELECT,
 
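For context, a minimal reference sketch (an illustration under the documented
semantics above, not driver code) of SELECT over TENSOR_QUANT8_ASYMM_SIGNED
operands: output[i] = cond[i] ? input1[i] : input2[i], dequantizing and
requantizing because the scale/zeroPoint of the inputs and the output may all
differ. The function name and signature are hypothetical:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    struct QuantParams { float scale; int32_t zeroPoint; };

    // Element-wise SELECT over TENSOR_QUANT8_ASYMM_SIGNED data. Each selected
    // element is dequantized with its input's parameters, then requantized
    // with the output's parameters and clamped to the int8 range.
    std::vector<int8_t> selectQuant8Signed(const std::vector<bool>& cond,
                                           const std::vector<int8_t>& in1, QuantParams p1,
                                           const std::vector<int8_t>& in2, QuantParams p2,
                                           QuantParams pOut) {
        std::vector<int8_t> out(cond.size());
        for (size_t i = 0; i < cond.size(); ++i) {
            const QuantParams& p = cond[i] ? p1 : p2;
            const int8_t q = cond[i] ? in1[i] : in2[i];
            const float real = p.scale * (q - p.zeroPoint);  // dequantize
            const int32_t requant =
                    static_cast<int32_t>(std::lround(real / pOut.scale)) + pOut.zeroPoint;
            out[i] = static_cast<int8_t>(std::clamp(requant, -128, 127));  // requantize
        }
        return out;
    }
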
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index d8a7534..60992d5 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -456,8 +456,7 @@
     }
 
     // Execute and verify results.
-    EvaluatePreparedModel(preparedModel, testModel,
-                          /*testDynamicOutputShape=*/false);
+    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@@ -519,8 +518,7 @@
     }
 
     // Execute and verify results.
-    EvaluatePreparedModel(preparedModel, testModel,
-                          /*testDynamicOutputShape=*/false);
+    EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@@ -541,8 +539,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -566,8 +563,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -590,8 +586,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -615,8 +610,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -727,8 +721,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -752,8 +745,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -776,8 +768,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -801,8 +792,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -914,8 +904,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -937,8 +926,7 @@
         saveModelToCache(model, modelCache, dataCache, &preparedModel);
         ASSERT_NE(preparedModel, nullptr);
         // Execute and verify results.
-        EvaluatePreparedModel(preparedModel, testModel,
-                              /*testDynamicOutputShape=*/false);
+        EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
         // Check if prepareModelFromCache fails.
         preparedModel = nullptr;
         ErrorStatus status;
@@ -1082,8 +1070,7 @@
                 ASSERT_EQ(preparedModel, nullptr);
             } else {
                 ASSERT_NE(preparedModel, nullptr);
-                EvaluatePreparedModel(preparedModel, testModelAdd,
-                                      /*testDynamicOutputShape=*/false);
+                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
             }
         }
     }
@@ -1144,8 +1131,7 @@
                 ASSERT_EQ(preparedModel, nullptr);
             } else {
                 ASSERT_NE(preparedModel, nullptr);
-                EvaluatePreparedModel(preparedModel, testModelAdd,
-                                      /*testDynamicOutputShape=*/false);
+                EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
             }
         }
     }
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 2ec2988..3e947f5 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -79,6 +79,21 @@
     Executor executor;
     MeasureTiming measureTiming;
     OutputType outputType;
+    // `reportSkipping` indicates whether a test should print an info message
+    // when it is skipped. It defaults to true and is set to false in
+    // quantization coupling tests, which report skipping themselves.
+    bool reportSkipping;
+    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          reportSkipping(true) {}
+    TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
+               bool reportSkipping)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          reportSkipping(reportSkipping) {}
 };
 
 }  // namespace
@@ -219,7 +234,10 @@
 }
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                           const TestConfig& testConfig) {
+                           const TestConfig& testConfig, bool* skipped = nullptr) {
+    if (skipped != nullptr) {
+        *skipped = false;
+    }
     // If output0 does not have size larger than one byte, we can not test with insufficient buffer.
     if (testConfig.outputType == OutputType::INSUFFICIENT &&
         !isOutputSizeGreaterThanOne(testModel, 0)) {
@@ -290,6 +308,12 @@
 
     if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
         executionStatus == ErrorStatus::GENERAL_FAILURE) {
+        if (skipped != nullptr) {
+            *skipped = true;
+        }
+        if (!testConfig.reportSkipping) {
+            return;
+        }
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                      "execute model that it does not support.";
         std::cout << "[          ]   Early termination of test because vendor service cannot "
@@ -343,44 +367,117 @@
 }
 
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                           bool testDynamicOutputShape) {
+                           TestKind testKind) {
     std::initializer_list<OutputType> outputTypesList;
     std::initializer_list<MeasureTiming> measureTimingList;
     std::initializer_list<Executor> executorList;
 
-    if (testDynamicOutputShape) {
-        outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
-        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
-    } else {
-        outputTypesList = {OutputType::FULLY_SPECIFIED};
-        measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
-        executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+    switch (testKind) {
+        case TestKind::GENERAL: {
+            outputTypesList = {OutputType::FULLY_SPECIFIED};
+            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        } break;
+        case TestKind::DYNAMIC_SHAPE: {
+            outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+            measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+            executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        } break;
+        case TestKind::QUANTIZATION_COUPLING: {
+            LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
+            return;
+        } break;
     }
 
     for (const OutputType outputType : outputTypesList) {
         for (const MeasureTiming measureTiming : measureTimingList) {
             for (const Executor executor : executorList) {
-                const TestConfig testConfig = {.executor = executor,
-                                               .measureTiming = measureTiming,
-                                               .outputType = outputType};
+                const TestConfig testConfig(executor, measureTiming, outputType);
                 EvaluatePreparedModel(preparedModel, testModel, testConfig);
             }
         }
     }
 }
 
-void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
+                                   const TestModel& testModel,
+                                   const sp<IPreparedModel>& preparedCoupledModel,
+                                   const TestModel& coupledModel) {
+    std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
+    std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
+                                                              MeasureTiming::YES};
+    std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
+                                                    Executor::BURST};
+
+    for (const OutputType outputType : outputTypesList) {
+        for (const MeasureTiming measureTiming : measureTimingList) {
+            for (const Executor executor : executorList) {
+                const TestConfig testConfig(executor, measureTiming, outputType,
+                                            /*reportSkipping=*/false);
+                bool baseSkipped = false;
+                EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
+                bool coupledSkipped = false;
+                EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
+                                      &coupledSkipped);
+                ASSERT_EQ(baseSkipped, coupledSkipped);
+                if (baseSkipped) {
+                    LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                                 "execute model that it does not support.";
+                    std::cout << "[          ]   Early termination of test because vendor service "
+                                 "cannot "
+                                 "execute model that it does not support."
+                              << std::endl;
+                    GTEST_SKIP();
+                }
+            }
+        }
+    }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
     Model model = createModel(testModel);
-    if (testDynamicOutputShape) {
+    if (testKind == TestKind::DYNAMIC_SHAPE) {
         makeOutputDimensionsUnspecified(&model);
     }
 
     sp<IPreparedModel> preparedModel;
-    createPreparedModel(device, model, &preparedModel);
-    if (preparedModel == nullptr) return;
-
-    EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+    switch (testKind) {
+        case TestKind::GENERAL: {
+            createPreparedModel(device, model, &preparedModel);
+            if (preparedModel == nullptr) return;
+            EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
+        } break;
+        case TestKind::DYNAMIC_SHAPE: {
+            createPreparedModel(device, model, &preparedModel);
+            if (preparedModel == nullptr) return;
+            EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
+        } break;
+        case TestKind::QUANTIZATION_COUPLING: {
+            ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
+            createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/false);
+            TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
+            sp<IPreparedModel> preparedCoupledModel;
+            createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
+                                /*reportSkipping=*/false);
+            // If we couldn't prepare a model with unsigned quantization, we must
+            // fail to prepare a model with signed quantization as well.
+            if (preparedModel == nullptr) {
+                ASSERT_EQ(preparedCoupledModel, nullptr);
+                // If we failed to prepare both of the models, we can safely skip
+                // the test.
+                LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+                             "prepare model that it does not support.";
+                std::cout
+                        << "[          ]   Early termination of test because vendor service cannot "
+                           "prepare model that it does not support."
+                        << std::endl;
+                GTEST_SKIP();
+            }
+            ASSERT_NE(preparedCoupledModel, nullptr);
+            EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
+                                          signedQuantizedModel);
+        } break;
+    }
 }
 
 void GeneratedTestBase::SetUp() {
@@ -403,12 +500,19 @@
 // Tag for the dynamic output shape tests
 class DynamicOutputShapeTest : public GeneratedTest {};
 
+// Tag for the quantization coupling tests
+class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
+
 TEST_P(GeneratedTest, Test) {
-    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
 }
 
 TEST_P(DynamicOutputShapeTest, Test) {
-    Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
+}
+
+TEST_P(DISABLED_QuantizationCouplingTest, Test) {
+    Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
 }
 
 INSTANTIATE_GENERATED_TEST(GeneratedTest,
@@ -417,4 +521,8 @@
 INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
                            [](const TestModel& testModel) { return !testModel.expectFailure; });
 
+INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
+    return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
+});
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 45cff5b..ad6323f 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -57,8 +57,19 @@
 
 void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
 
+enum class TestKind {
+    // Runs a test model and compares the results against golden data
+    GENERAL,
+    // Same as GENERAL but sets the dimensions of the output tensors to zero
+    DYNAMIC_SHAPE,
+    // Tests whether a quantized model with TENSOR_QUANT8_ASYMM operands
+    // produces the same result (OK/SKIPPED/FAILED) as the same model with all
+    // such tensors converted to TENSOR_QUANT8_ASYMM_SIGNED.
+    QUANTIZATION_COUPLING
+};
+
 void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
-                           const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+                           const test_helper::TestModel& testModel, TestKind testKind);
 
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
 
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 625913d..92d8fa7 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -37,7 +37,7 @@
 
 // internal helper function
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                         sp<IPreparedModel>* preparedModel) {
+                         sp<IPreparedModel>* preparedModel, bool reportSkipping) {
     ASSERT_NE(nullptr, preparedModel);
     *preparedModel = nullptr;
 
@@ -74,6 +74,9 @@
     // can continue.
     if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
         ASSERT_EQ(nullptr, preparedModel->get());
+        if (!reportSkipping) {
+            return;
+        }
         LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
                      "model that it does not support.";
         std::cout << "[          ]   Early termination of test because vendor service cannot "
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 8cb42d4..4e51052 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -47,7 +47,7 @@
 // Create an IPreparedModel object. If the model cannot be prepared,
 // "preparedModel" will be nullptr instead.
 void createPreparedModel(const sp<IDevice>& device, const Model& model,
-                         sp<IPreparedModel>* preparedModel);
+                         sp<IPreparedModel>* preparedModel, bool reportSkipping = true);
 
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
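
Illustrative call pattern for the new parameter (a self-contained sketch with
hypothetical stand-in types; the real signatures are in the headers above):
the default keeps existing call sites source-compatible, while quantization
coupling tests pass false and handle an unsupported model themselves.

    // Hypothetical minimal stand-ins, for illustration only.
    struct IDevice {};
    struct Model {};
    struct IPreparedModel {};

    void createPreparedModel(const IDevice& /*device*/, const Model& /*model*/,
                             IPreparedModel** preparedModel, bool reportSkipping = true) {
        // Stub: the real helper calls IDevice::prepareModel_1_3 and, when
        // reportSkipping is true, logs the early-termination message if the
        // vendor service cannot prepare an unsupported model.
        (void)reportSkipping;
        *preparedModel = nullptr;
    }

    void exampleCaller(const IDevice& device, const Model& model) {
        IPreparedModel* prepared = nullptr;
        // Existing callers compile unchanged: reportSkipping defaults to true,
        // so the helper still prints the skip message itself.
        createPreparedModel(device, model, &prepared);
        // Coupling tests suppress the helper's message and decide on their own
        // whether to skip, after comparing both models' outcomes.
        createPreparedModel(device, model, &prepared, /*reportSkipping=*/false);
        if (prepared == nullptr) {
            // Caller-side skip handling goes here.
        }
    }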