Add additional DVBT settings to TunerTestingConfig am: 8fa4cff4ce

Original change: https://android-review.googlesource.com/c/platform/hardware/interfaces/+/1705148

Change-Id: Ia337384e925b7d7109a7985c5e2a582c3b97461d
diff --git a/current.txt b/current.txt
index 2bd03ba..cd1446d 100644
--- a/current.txt
+++ b/current.txt
@@ -780,6 +780,8 @@
 550619f876cadbea1f718edce120f0e1dd4a6f4bd4c28b59d479677dc86b0aec android.hardware.neuralnetworks@1.3::types
 c3fec5bd470984402997f78a74b6511efc4063b270f2bd9ee7b78f48b683a1bb android.hardware.neuralnetworks@1.3::IDevice
 0fdfad62c2ec33b52e6687004e5a1971c02d10b93ee4d26df5ccff7ce032494a android.hardware.neuralnetworks@1.3::IPreparedModel
+b40c13f9a9affc806c778c1f8c78e90d4acb50f1d6a6be185d933d7a04b91c5b android.hardware.sensors@1.0::ISensors
+432086950205f5876da85dbd42004b0d0d05b429b9494b4f76a4d888758c5bd8 android.hardware.sensors@1.0::types
 e8c86c69c438da8d1549856c1bb3e2d1b8da52722f8235ff49a30f2cce91742c android.hardware.soundtrigger@2.1::ISoundTriggerHwCallback
 b9fbb6e2e061ed0960939d48b785e9700210add1f13ed32ecd688d0f1ca20ef7 android.hardware.renderscript@1.0::types
 0f53d70e1eadf8d987766db4bf6ae2048004682168f4cab118da576787def3fa android.hardware.radio@1.0::types
diff --git a/identity/aidl/default/libeic/EicCbor.c b/identity/aidl/default/libeic/EicCbor.c
index fe131eb..0e2684f 100644
--- a/identity/aidl/default/libeic/EicCbor.c
+++ b/identity/aidl/default/libeic/EicCbor.c
@@ -114,7 +114,7 @@
         data[4] = size & 0xff;
         eicCborAppend(cbor, data, 5);
     } else {
-        data[0] = (majorType << 5) | 24;
+        data[0] = (majorType << 5) | 27;
         data[1] = (((uint64_t)size) >> 56) & 0xff;
         data[2] = (((uint64_t)size) >> 48) & 0xff;
         data[3] = (((uint64_t)size) >> 40) & 0xff;
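
The fix above changes the CBOR initial byte for 8-byte lengths: per RFC 8949 the initial byte packs the major type into its top three bits and an "additional information" value into the low five, where 24, 25, 26 and 27 mean the length follows in 1, 2, 4 or 8 big-endian bytes. Writing 24 while emitting eight length bytes therefore produced malformed CBOR. A standalone sketch of the full head-encoding rule (illustrative only, not part of the patch and not the libeic helper):

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative encoder for a CBOR "head" (major type + length), RFC 8949.
// Additional-information values 24/25/26/27 mean the length follows in
// 1/2/4/8 big-endian bytes; values 0..23 carry the length inline.
static std::vector<uint8_t> encodeCborHead(uint8_t majorType, uint64_t size) {
    std::vector<uint8_t> out;
    if (size < 24) {
        out.push_back((majorType << 5) | static_cast<uint8_t>(size));
    } else if (size <= 0xff) {
        out.push_back((majorType << 5) | 24);  // 1 length byte follows
        out.push_back(size & 0xff);
    } else if (size <= 0xffff) {
        out.push_back((majorType << 5) | 25);  // 2 length bytes follow
        for (int shift = 8; shift >= 0; shift -= 8) out.push_back((size >> shift) & 0xff);
    } else if (size <= 0xffffffff) {
        out.push_back((majorType << 5) | 26);  // 4 length bytes follow
        for (int shift = 24; shift >= 0; shift -= 8) out.push_back((size >> shift) & 0xff);
    } else {
        out.push_back((majorType << 5) | 27);  // 8 length bytes follow (the case fixed above)
        for (int shift = 56; shift >= 0; shift -= 8) out.push_back((size >> shift) & 0xff);
    }
    return out;
}

int main() {
    // A byte string (major type 2) longer than 4 GiB needs the 8-byte form.
    for (uint8_t b : encodeCborHead(2, 0x100000000ULL)) printf("%02x ", b);
    printf("\n");
    return 0;
}
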
diff --git a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
index e46cb48..0639da8 100644
--- a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
+++ b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
@@ -243,7 +243,9 @@
 
     EXPECT_EQ(ErrorCode::OK, result);
     EXPECT_EQ(2U, cert_chain.size());
-    if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+    if (dumpAttestations) {
+      for (auto cert_ : cert_chain) dumpContent(bin2hex(cert_));
+    }
     auto [err, attestation] = parse_attestation_record(cert_chain[0]);
     ASSERT_EQ(ErrorCode::OK, err);
 
@@ -287,7 +289,9 @@
 
     EXPECT_EQ(ErrorCode::OK, result);
     EXPECT_EQ(2U, cert_chain.size());
-    if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+    if (dumpAttestations) {
+      for (auto cert_ : cert_chain) dumpContent(bin2hex(cert_));
+    }
     auto [err, attestation] = parse_attestation_record(cert_chain[0]);
     ASSERT_EQ(ErrorCode::OK, err);
 
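
With dumpAttestations set, the test now dumps every certificate in cert_chain instead of only the leaf at cert_chain[0]. bin2hex and dumpContent are helpers defined in the VTS sources; the standalone approximation below uses stand-in helpers over std::vector (the real ones take hidl_vec) purely to show what the new loop emits:

#include <cstdio>
#include <string>
#include <vector>

// Stand-ins for the test helpers: hex-encode a DER certificate and print it.
static std::string bin2hex(const std::vector<uint8_t>& data) {
    static const char kHexDigits[] = "0123456789abcdef";
    std::string out;
    out.reserve(data.size() * 2);
    for (uint8_t byte : data) {
        out.push_back(kHexDigits[byte >> 4]);
        out.push_back(kHexDigits[byte & 0x0f]);
    }
    return out;
}

static void dumpContent(const std::string& hex) {
    printf("%s\n", hex.c_str());
}

int main() {
    // The chain has two entries in these tests; the loop now dumps both, not
    // just cert_chain[0] (the attestation certificate).
    const std::vector<std::vector<uint8_t>> cert_chain = {{0x30, 0x82, 0x01, 0x00},
                                                          {0x30, 0x81, 0xff}};
    for (const auto& cert : cert_chain) dumpContent(bin2hex(cert));
    return 0;
}
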
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 7987ab4..00970c0 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -65,7 +65,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
 
@@ -111,7 +112,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation));
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index 8e82d25..b4b6f68 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -328,7 +328,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
@@ -373,7 +374,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     // clear pools field of request, as they will be provided via slots
     const auto requestWithoutPools = nn::Request{
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 1d87937..d0ef36e 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -97,7 +97,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -140,7 +141,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     auto hidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index cb56bdc..1623de5 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -143,7 +143,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -184,7 +185,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     const auto hidlRequest = NN_TRY(convert(requestInShared));
     const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
@@ -236,7 +238,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto hidlRequest = NN_TRY(convert(requestInShared));
     auto hidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index 3cbba4d..87cd0e4 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -178,7 +178,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -248,7 +249,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto aidlRequest = NN_TRY(convert(requestInShared));
     const auto aidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 1915607..18e7636 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -80,7 +80,8 @@
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared =
             NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
-                    &request, &maybeRequestInShared, &relocation)));
+                    &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+                    &maybeRequestInShared, &relocation)));
 
     const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
     const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -127,7 +128,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     const auto aidlRequest = NN_TRY(convert(requestInShared));
     const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -197,7 +199,8 @@
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
     const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
-            &request, &maybeRequestInShared, &relocation));
+            &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+            &maybeRequestInShared, &relocation));
 
     auto aidlRequest = NN_TRY(convert(requestInShared));
     auto aidlMeasure = NN_TRY(convert(measure));
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index fdc90df..702ee92 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -122,8 +122,8 @@
 // Unlike `flushDataFromPointerToShared`, this method will not copy the input pointer data to the
 // shared memory pool. Use `relocationOut` to flush the input or output data after the call.
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
-        RequestRelocation* relocationOut);
+        const nn::Request* request, uint32_t alignment, uint32_t padding,
+        std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut);
 
 nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
         size_t numberOfOperands, const std::vector<nn::Operation>& operations);
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index eaeb9ad..8e55bf0 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -220,8 +220,8 @@
 }
 
 nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
-        const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut,
-        RequestRelocation* relocationOut) {
+        const nn::Request* request, uint32_t alignment, uint32_t padding,
+        std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut) {
     CHECK(request != nullptr);
     CHECK(maybeRequestInSharedOut != nullptr);
     CHECK(relocationOut != nullptr);
@@ -249,7 +249,7 @@
         const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
                                       location.pointer);
         CHECK(data != nullptr);
-        input.location = inputBuilder.append(location.length);
+        input.location = inputBuilder.append(location.length, alignment, padding);
         inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
     }
 
@@ -273,7 +273,7 @@
         output.lifetime = nn::Request::Argument::LifeTime::POOL;
         void* data = std::get<void*>(location.pointer);
         CHECK(data != nullptr);
-        output.location = outputBuilder.append(location.length);
+        output.location = outputBuilder.append(location.length, alignment, padding);
         outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
     }
 
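
The new alignment and padding arguments are threaded from the call sites above (the HIDL utils pass nn::kDefaultRequestMemoryAlignment and nn::kMinMemoryPadding, the AIDL utils nn::kDefaultRequestMemoryPadding) into the pool builders, so each pointer-backed argument copied into the shared memory pool starts at an aligned offset and leaves slack after its last byte. The builder below is a hypothetical stand-in for those builders, assuming "padding" here means rounding each reserved span up to a multiple of the padding value; it only illustrates the round-up arithmetic and is not part of the patch:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the pool builders used by
// convertRequestFromPointerToShared(); it only shows the round-up arithmetic
// implied by the new alignment/padding parameters.
struct MemoryLocation {
    uint32_t offset;
    uint32_t length;
};

class PoolBuilder {
  public:
    // Reserve space for one pointer-backed argument: its offset is rounded up
    // to `alignment`, and the reserved span is rounded up to `padding` so the
    // next argument never starts immediately after this one's last byte.
    MemoryLocation append(uint32_t length, uint32_t alignment, uint32_t padding) {
        assert(alignment != 0 && padding != 0);
        const uint32_t offset = roundUp(mSize, alignment);
        mSize = roundUp(offset + length, padding);
        return {offset, length};
    }

    uint32_t size() const { return mSize; }  // total pool size to allocate

  private:
    static uint32_t roundUp(uint32_t value, uint32_t multiple) {
        return ((value + multiple - 1) / multiple) * multiple;
    }
    uint32_t mSize = 0;
};

int main() {
    PoolBuilder builder;
    const MemoryLocation a = builder.append(/*length=*/100, /*alignment=*/64, /*padding=*/64);
    const MemoryLocation b = builder.append(/*length=*/10, /*alignment=*/64, /*padding=*/64);
    printf("a.offset=%u b.offset=%u pool size=%u\n", a.offset, b.offset, builder.size());
    return 0;
}
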
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index 7c05984..27e641a 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -498,6 +498,8 @@
                  ::android::hardware::radio::V1_6::RadioError::NONE,
                  ::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
     }
+
+    sleep(1);
 }
 
 /*
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
index 4222441..8c99d92 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
@@ -45,10 +45,8 @@
                ::android::hardware::radio::V1_0::RadioError::NONE) {
         static const std::regex kOperatorNumericRe("^[0-9]{5,6}$");
         for (OperatorInfo info : radioRsp_v1_6->networkInfos) {
-            if (info.operatorNumeric != nullptr) {
-                ASSERT_TRUE(
-                        std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
-            }
+            ASSERT_TRUE(
+                    std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
         }
     }
 
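
The dropped null check is unnecessary for a HIDL string field, so operatorNumeric is now always validated against ^[0-9]{5,6}$, i.e. a 3-digit MCC followed by a 2- or 3-digit MNC. A self-contained check using the same pattern (the sample values are illustrative, not from the test):

#include <cstdio>
#include <regex>
#include <string>

int main() {
    // Same pattern as the VTS check: 5 or 6 decimal digits (MCC + MNC).
    static const std::regex kOperatorNumericRe("^[0-9]{5,6}$");

    // Sample values only: the first two are well-formed PLMN codes, the last
    // two are rejected (too short / non-numeric).
    for (const char* candidate : {"310260", "46001", "310", "31026a"}) {
        const bool ok = std::regex_match(std::string(candidate), kOperatorNumericRe);
        printf("%-8s -> %s\n", candidate, ok ? "valid" : "invalid");
    }
    return 0;
}
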
diff --git a/sensors/1.0/ISensors.hal b/sensors/1.0/ISensors.hal
index 8d41de2..0e172ef 100644
--- a/sensors/1.0/ISensors.hal
+++ b/sensors/1.0/ISensors.hal
@@ -103,7 +103,7 @@
      * Flush adds a FLUSH_COMPLETE metadata event to the end of the "batch mode"
      * FIFO for the specified sensor and flushes the FIFO.  If the FIFO is empty
      * or if the sensor doesn't support batching (FIFO size zero), return
-     * SUCCESS and add a trivial FLUSH_COMPLETE event added to the event stream.
+     * OK and add a trivial FLUSH_COMPLETE event to the event stream.
      * This applies to all sensors other than one-shot sensors. If the sensor
      * is a one-shot sensor, flush must return BAD_VALUE and not generate any
      * flush complete metadata.  If the sensor is not active at the time flush()
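
The hunk only swaps the status name (the 1.0 Result enum has OK, not SUCCESS), but the surrounding comment is the whole flush() contract: non-one-shot sensors always get a trivial FLUSH_COMPLETE metadata event appended, even with an empty FIFO or no batching support, while one-shot sensors must return BAD_VALUE without emitting flush-complete metadata. A self-contained sketch of that contract with stand-in types (not part of the patch; the real HAL uses the generated V1_0 Event/MetaDataEventType types and delivers events through poll()):

#include <cstdint>
#include <cstdio>
#include <deque>
#include <unordered_map>

// Stand-in types only; they mirror the 1.0 HAL shapes, not the generated code.
enum class Result { OK, BAD_VALUE };

struct Event {
    int32_t sensorHandle;
    bool isFlushComplete;  // stands in for META_DATA / FLUSH_COMPLETE
};

struct SensorState {
    bool oneShot;
    bool active;
    std::deque<Event> fifo;  // "batch mode" FIFO, possibly empty
};

struct SensorsSketch {
    Result flush(int32_t sensorHandle) {
        auto it = mSensors.find(sensorHandle);
        if (it == mSensors.end()) return Result::BAD_VALUE;
        SensorState& s = it->second;

        // One-shot sensors must not produce flush-complete metadata.
        if (s.oneShot) return Result::BAD_VALUE;
        // An inactive sensor is rejected as well (per the rest of the comment).
        if (!s.active) return Result::BAD_VALUE;

        // Drain the FIFO into the event stream, then append the marker.  Even
        // when the FIFO is empty (or batching is unsupported), the trivial
        // FLUSH_COMPLETE event is still appended and OK is returned.
        while (!s.fifo.empty()) {
            mEventStream.push_back(s.fifo.front());
            s.fifo.pop_front();
        }
        mEventStream.push_back(Event{sensorHandle, /*isFlushComplete=*/true});
        return Result::OK;
    }

    std::unordered_map<int32_t, SensorState> mSensors;
    std::deque<Event> mEventStream;  // what poll() would hand to the framework
};

int main() {
    SensorsSketch hal;
    hal.mSensors[7] = SensorState{/*oneShot=*/false, /*active=*/true, {}};
    hal.mSensors[8] = SensorState{/*oneShot=*/true, /*active=*/true, {}};
    printf("flush(7) -> %s\n", hal.flush(7) == Result::OK ? "OK" : "BAD_VALUE");
    printf("flush(8) -> %s\n", hal.flush(8) == Result::OK ? "OK" : "BAD_VALUE");
    return 0;
}
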
diff --git a/sensors/1.0/types.hal b/sensors/1.0/types.hal
index cbbe92f..ac7be06 100644
--- a/sensors/1.0/types.hal
+++ b/sensors/1.0/types.hal
@@ -1130,7 +1130,7 @@
 
     /**
      * High performance mode hint. Device is able to use up more power and take
-     * more reources to improve throughput and latency in high performance mode.
+     * more resources to improve throughput and latency in high performance mode.
      * One possible use case is virtual reality, when sensor latency need to be
      * carefully controlled.
      * int32_t: 1 or 0, denote if device is in/out of high performance mode,
diff --git a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
index 56bc9cf..1f579ba 100644
--- a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
+++ b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
@@ -190,7 +190,7 @@
     });
 }
 
-// Test if sensor list returned is valid
+// Test if sensor hal can switch to different operation modes
 TEST_P(SensorsHidlTest, SetOperationMode) {
     std::vector<SensorInfo> sensorList = getSensorsList();
 
@@ -208,7 +208,7 @@
     ASSERT_EQ(Result::OK, S()->setOperationMode(OperationMode::NORMAL));
 }
 
-// Test if sensor list returned is valid
+// Test if sensor hal can receive injected events in loopback mode
 TEST_P(SensorsHidlTest, InjectSensorEventData) {
     std::vector<SensorInfo> sensorList = getSensorsList();
     std::vector<SensorInfo> sensorSupportInjection;
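
The corrected comments describe what these tests actually exercise: SetOperationMode toggles the HAL between NORMAL and DATA_INJECTION, and InjectSensorEventData feeds synthetic events back through the HAL while in injection mode. A minimal helper sketch of that round trip against the V1_0 interface (not part of the patch; DATA_INJECTION support is device-dependent, and the Return<Result> values convert implicitly as in the test's own ASSERT_EQ calls):

#include <android/hardware/sensors/1.0/ISensors.h>
#include <android/hardware/sensors/1.0/types.h>

using ::android::sp;
using ::android::hardware::sensors::V1_0::Event;
using ::android::hardware::sensors::V1_0::ISensors;
using ::android::hardware::sensors::V1_0::OperationMode;
using ::android::hardware::sensors::V1_0::Result;

// Sketch of the loopback sequence the InjectSensorEventData test exercises:
// switch the HAL into DATA_INJECTION mode, hand it a synthetic event, then
// restore NORMAL mode.  Errors are reduced to a bool here.
bool injectOneEvent(const sp<ISensors>& sensors, const Event& event) {
    if (sensors->setOperationMode(OperationMode::DATA_INJECTION) != Result::OK) {
        return false;  // injection not supported on this device
    }
    const bool injected = (sensors->injectSensorData(event) == Result::OK);
    // Always try to restore the normal mode, even if the injection failed.
    const bool restored = (sensors->setOperationMode(OperationMode::NORMAL) == Result::OK);
    return injected && restored;
}
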