Merge "Camera: Add fall back properties for FMQ size override" into qt-dev
diff --git a/automotive/OWNERS b/automotive/OWNERS
index 4a94494..3cf4489 100644
--- a/automotive/OWNERS
+++ b/automotive/OWNERS
@@ -1,3 +1,4 @@
randolphs@google.com
pirozzoj@google.com
twasilczyk@google.com
+pfg@google.com
diff --git a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
index 258dbd9..7082566 100644
--- a/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
+++ b/automotive/vehicle/2.0/default/common/include/vhal_v2_0/VmsUtils.h
@@ -105,6 +105,24 @@
std::vector<VmsAssociatedLayer> associated_layers;
};
+// An enum representing the result of parsing a START_SESSION message from the VMS service.
+enum VmsSessionStatus {
+ // A new server session is received if the new client ID is -1 and the new server ID is not an
+ // invalid ID.
+ kNewServerSession,
+ // An ack to a new client session is received if the new client ID is the same as the old one
+ // and the new server ID is not an invalid ID.
+ kAckToNewClientSession,
+ // Error codes:
+ // Invalid message with either an invalid format or unexpected data.
+ kInvalidMessage,
+ // Invalid server ID. The new ID should always be greater than or equal to max_of(0, current
+ // server ID).
+ kInvalidServiceId,
+ // Invalid client ID. The new ID should always be either -1 or the current client ID.
+ kInvalidClientId
+};
+
// Creates an empty base VMS message with some pre-populated default fields.
std::unique_ptr<VehiclePropValue> createBaseVmsMessage(size_t message_size);
@@ -146,11 +164,21 @@
// Creates a VehiclePropValue containing a message of type VmsMessageType.DATA.
// Returns a nullptr if the byte string in bytes is empty.
//
-// For example, to build a VehiclePropMessage containing a proto, the caller
+// For example, to build a VehiclePropValue message containing a proto, the caller
// should convert the proto to a byte string using the SerializeToString proto
// API, then use this interface to build the VehicleProperty.
std::unique_ptr<VehiclePropValue> createDataMessage(const std::string& bytes);
+// Creates a VehiclePropValue containing a message of type
+// VmsMessageType.PUBLISHER_ID_REQUEST with the given publisher information.
+// Returns a nullptr if the input is empty.
+std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
+ const std::string& vms_provider_description);
+
+// Creates a VehiclePropValue message of type VmsMessageType.START_SESSION.
+std::unique_ptr<VehiclePropValue> createStartSessionMessage(const int service_id,
+ const int client_id);
+
// Returns true if the VehiclePropValue pointed to by value contains a valid Vms
// message, i.e. the VehicleProperty, VehicleArea, and VmsMessageType are all
// valid. Note: If the VmsMessageType enum is extended, this function will
@@ -169,12 +197,6 @@
// function to ParseFromString.
std::string parseData(const VehiclePropValue& value);
-// Creates a VehiclePropValue containing a message of type
-// VmsMessageType.PUBLISHER_ID_REQUEST with the given publisher information.
-// Returns a nullptr if the input is empty.
-std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
- const std::string& vms_provider_description);
-
// Returns the publisher ID by parsing the VehiclePropValue containing the ID.
// Returns null if the message is invalid.
int32_t parsePublisherIdResponse(const VehiclePropValue& publisher_id_response);
@@ -204,6 +226,12 @@
// has newly started or restarted.
bool hasServiceNewlyStarted(const VehiclePropValue& availability_change);
+// Takes a start session message, the current service ID, and the current client ID, and returns
+// the type/status of the message. It also populates the new service ID output parameter with the
+// correct value.
+VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session,
+ const int service_id, const int client_id,
+ int* new_service_id);
+
} // namespace vms
} // namespace V2_0
} // namespace vehicle
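A minimal usage sketch of the session handshake from a HAL-side VMS client, using the helpers declared above. The VmsClient wrapper, its publish() method, and the cached service_id_/client_id_ members are assumptions for illustration; only the VmsUtils functions and VmsSessionStatus come from this header.

// Illustrative only: VmsClient, publish(), service_id_ and client_id_ are assumed.
void VmsClient::initiateSession() {
    // Announce our (non-negative, monotonically increasing) client ID; the service ID
    // is not yet known to us, so send -1.
    publish(*createStartSessionMessage(/*service_id=*/-1, /*client_id=*/client_id_));
}

void VmsClient::handleStartSession(const VehiclePropValue& value) {
    int new_service_id;
    switch (parseStartSessionMessage(value, service_id_, client_id_, &new_service_id)) {
        case VmsSessionStatus::kNewServerSession:
            // The service (re)started: drop any cached offering/subscription state and ack.
            service_id_ = new_service_id;
            publish(*createStartSessionMessage(service_id_, client_id_));
            break;
        case VmsSessionStatus::kAckToNewClientSession:
            service_id_ = new_service_id;  // The handshake we initiated is complete.
            break;
        default:
            break;  // kInvalidMessage / kInvalidServiceId / kInvalidClientId: ignore.
    }
}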
diff --git a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
index 1863191..111f6ea 100644
--- a/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
+++ b/automotive/vehicle/2.0/default/common/src/VmsUtils.cpp
@@ -31,6 +31,7 @@
static constexpr int kLayerNumberSize = 1;
static constexpr int kLayerSize = 3;
static constexpr int kLayerAndPublisherSize = 4;
+static constexpr int kSessionIdsSize = 2;
static constexpr int kPublisherIdIndex =
toInt(VmsPublisherInformationIntegerValuesIndex::PUBLISHER_ID);
static constexpr int kSubscriptionStateSequenceNumberIndex =
@@ -41,9 +42,9 @@
// TODO(aditin): We should extend the VmsMessageType enum to include a first and
// last, which would prevent breakages in this API. However, for all of the
// functions in this module, we only need to guarantee that the message type is
-// between SUBSCRIBE and PUBLISHER_ID_RESPONSE.
+// between SUBSCRIBE and START_SESSION.
static constexpr int kFirstMessageType = toInt(VmsMessageType::SUBSCRIBE);
-static constexpr int kLastMessageType = toInt(VmsMessageType::PUBLISHER_ID_RESPONSE);
+static constexpr int kLastMessageType = toInt(VmsMessageType::START_SESSION);
std::unique_ptr<VehiclePropValue> createBaseVmsMessage(size_t message_size) {
auto result = createVehiclePropValue(VehiclePropertyType::INT32, message_size);
@@ -132,6 +133,28 @@
return result;
}
+std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
+ const std::string& vms_provider_description) {
+ auto result = createBaseVmsMessage(kMessageTypeSize);
+ result->value.int32Values = hidl_vec<int32_t>{
+ toInt(VmsMessageType::PUBLISHER_ID_REQUEST),
+ };
+ result->value.bytes =
+ std::vector<uint8_t>(vms_provider_description.begin(), vms_provider_description.end());
+ return result;
+}
+
+std::unique_ptr<VehiclePropValue> createStartSessionMessage(const int service_id,
+ const int client_id) {
+ auto result = createBaseVmsMessage(kMessageTypeSize + kSessionIdsSize);
+ result->value.int32Values = hidl_vec<int32_t>{
+ toInt(VmsMessageType::START_SESSION),
+ service_id,
+ client_id,
+ };
+ return result;
+}
+
bool isValidVmsProperty(const VehiclePropValue& value) {
return (value.prop == toInt(VehicleProperty::VEHICLE_MAP_SERVICE));
}
@@ -159,17 +182,6 @@
}
}
-std::unique_ptr<VehiclePropValue> createPublisherIdRequest(
- const std::string& vms_provider_description) {
- auto result = createBaseVmsMessage(kMessageTypeSize);
- result->value.int32Values = hidl_vec<int32_t>{
- toInt(VmsMessageType::PUBLISHER_ID_REQUEST),
- };
- result->value.bytes =
- std::vector<uint8_t>(vms_provider_description.begin(), vms_provider_description.end());
- return result;
-}
-
int32_t parsePublisherIdResponse(const VehiclePropValue& publisher_id_response) {
if (isValidVmsMessage(publisher_id_response) &&
parseMessageType(publisher_id_response) == VmsMessageType::PUBLISHER_ID_RESPONSE &&
@@ -256,6 +268,31 @@
availability_change.value.int32Values[kAvailabilitySequenceNumberIndex] == 0);
}
+VmsSessionStatus parseStartSessionMessage(const VehiclePropValue& start_session,
+ const int service_id, const int client_id,
+ int* new_service_id) {
+ if (isValidVmsMessage(start_session) &&
+ parseMessageType(start_session) == VmsMessageType::START_SESSION &&
+ start_session.value.int32Values.size() == kSessionIdsSize + 1) {
+ *new_service_id = start_session.value.int32Values[1];
+ const int new_client_id = start_session.value.int32Values[2];
+ if (*new_service_id < std::max(0, service_id)) {
+ *new_service_id = service_id;
+ return VmsSessionStatus::kInvalidServiceId;
+ }
+ if (new_client_id == -1) {
+ return VmsSessionStatus::kNewServerSession;
+ }
+ if (new_client_id == client_id) {
+ return VmsSessionStatus::kAckToNewClientSession;
+ }
+ *new_service_id = service_id;
+ return VmsSessionStatus::kInvalidClientId;
+ }
+ *new_service_id = service_id;
+ return VmsSessionStatus::kInvalidMessage;
+}
+
} // namespace vms
} // namespace V2_0
} // namespace vehicle
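A rough sketch of the publisher data path that the header comments describe: serialize a proto, wrap it with createDataMessage(), and unwrap it with parseData(). The VmsPacket proto type is a placeholder; SerializeToString() and ParseFromString() are the standard protobuf calls.

// Illustrative only: VmsPacket stands in for whatever proto the publisher uses.
std::unique_ptr<VehiclePropValue> packProto(const VmsPacket& packet) {
    std::string bytes;
    packet.SerializeToString(&bytes);  // proto -> byte string
    return createDataMessage(bytes);   // byte string -> VMS DATA message (null if empty)
}

bool unpackProto(const VehiclePropValue& value, VmsPacket* packet) {
    const std::string bytes = parseData(value);  // empty string if the message is invalid
    return !bytes.empty() && packet->ParseFromString(bytes);
}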
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index 08cdffa..39fe991 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -530,6 +530,7 @@
{.config = {.prop = toInt(VehicleProperty::HVAC_TEMPERATURE_DISPLAY_UNITS),
.access = VehiclePropertyAccess::READ_WRITE,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {(int)VehicleUnit::FAHRENHEIT, (int)VehicleUnit::CELSIUS},
.areaConfigs = {VehicleAreaConfig{.areaId = (0)}}},
.initialValue = {.int32Values = {(int)VehicleUnit::FAHRENHEIT}}},
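A small sketch of how a writer of HVAC_TEMPERATURE_DISPLAY_UNITS might honor the configArray added here; the getPropConfig() lookup is assumed, and std::find needs <algorithm>.

// Illustrative only: accept a display unit only if the property's configArray
// (here {FAHRENHEIT, CELSIUS}) advertises it. getPropConfig() is an assumed lookup.
bool isSupportedDisplayUnit(int32_t prop, int32_t unit) {
    const VehiclePropConfig config = getPropConfig(prop);
    return std::find(config.configArray.begin(), config.configArray.end(), unit) !=
           config.configArray.end();
}
// e.g. check isSupportedDisplayUnit(toInt(VehicleProperty::HVAC_TEMPERATURE_DISPLAY_UNITS),
//                                   toInt(VehicleUnit::CELSIUS)) before writing the property.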
diff --git a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
index 5ea5bd4..2b3efc7 100644
--- a/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
+++ b/automotive/vehicle/2.0/default/tests/VmsUtils_test.cpp
@@ -158,7 +158,7 @@
TEST(VmsUtilsTest, invalidMessageType) {
VmsLayer layer(1, 0, 2);
auto message = createSubscribeMessage(layer);
- message->value.int32Values[0] = 0;
+ message->value.int32Values[0] = -1;
EXPECT_FALSE(isValidVmsMessage(*message));
}
@@ -325,6 +325,98 @@
EXPECT_FALSE(hasServiceNewlyStarted(*message));
}
+TEST(VmsUtilsTest, startSessionRequest) {
+ auto message = createStartSessionMessage(123, 456);
+ ASSERT_NE(message, nullptr);
+ EXPECT_TRUE(isValidVmsMessage(*message));
+ EXPECT_EQ(message->prop, toInt(VehicleProperty::VEHICLE_MAP_SERVICE));
+ EXPECT_EQ(message->value.int32Values.size(), 0x3ul);
+ EXPECT_EQ(parseMessageType(*message), VmsMessageType::START_SESSION);
+ EXPECT_EQ(message->value.int32Values[1], 123);
+ EXPECT_EQ(message->value.int32Values[2], 456);
+}
+
+TEST(VmsUtilsTest, startSessionServiceNewlyStarted) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, -1};
+ EXPECT_EQ(parseStartSessionMessage(*message, 122, 456, &new_service_id),
+ VmsSessionStatus::kNewServerSession);
+ EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionServiceNewlyStartedEdgeCase) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 0, -1};
+ EXPECT_EQ(parseStartSessionMessage(*message, -1, 0, &new_service_id),
+ VmsSessionStatus::kNewServerSession);
+ EXPECT_EQ(new_service_id, 0);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStarted) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 456};
+ EXPECT_EQ(parseStartSessionMessage(*message, -1, 456, &new_service_id),
+ VmsSessionStatus::kAckToNewClientSession);
+ EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStartedWithSameServerId) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 456};
+ EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+ VmsSessionStatus::kAckToNewClientSession);
+ EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionClientNewlyStartedEdgeCase) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 0, 0};
+ EXPECT_EQ(parseStartSessionMessage(*message, 0, 0, &new_service_id),
+ VmsSessionStatus::kAckToNewClientSession);
+ EXPECT_EQ(new_service_id, 0);
+}
+
+TEST(VmsUtilsTest, startSessionOldServiceId) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 120, 456};
+ EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+ VmsSessionStatus::kInvalidServiceId);
+ EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidServiceIdEdgeCase) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), -1, 456};
+ EXPECT_EQ(parseStartSessionMessage(*message, -1, 456, &new_service_id),
+ VmsSessionStatus::kInvalidServiceId);
+ EXPECT_EQ(new_service_id, -1);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidClientId) {
+ auto message = createBaseVmsMessage(3);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123, 457};
+ EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+ VmsSessionStatus::kInvalidClientId);
+ EXPECT_EQ(new_service_id, 123);
+}
+
+TEST(VmsUtilsTest, startSessionInvalidMessageFormat) {
+ auto message = createBaseVmsMessage(2);
+ int new_service_id;
+ message->value.int32Values = hidl_vec<int32_t>{toInt(VmsMessageType::START_SESSION), 123};
+ EXPECT_EQ(parseStartSessionMessage(*message, 123, 456, &new_service_id),
+ VmsSessionStatus::kInvalidMessage);
+ EXPECT_EQ(new_service_id, 123);
+}
+
} // namespace
} // namespace vms
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index b04d096..e36468a 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -1120,9 +1120,9 @@
*
* Distance units are defined in VehicleUnit.
* VehiclePropConfig.configArray is used to indicate the supported distance display units.
- * For example: configArray[0] = 0x21 // METER
- * configArray[1] = 0x23 // KILOMETER
- * configArray[2] = 0x24 // MILE
+ * For example: configArray[0] = METER
+ * configArray[1] = KILOMETER
+ * configArray[2] = MILE
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
* @access VehiclePropertyAccess:READ_WRITE
* @data_enum VehicleUnit
@@ -1141,8 +1141,8 @@
*
* VehiclePropConfig.configArray is used to indicate the supported fuel volume display units.
* Volume units are defined in VehicleUnit.
- * For example: configArray[0] = 0x41 // LITER
- * configArray[1] = 0x42 // GALLON
+ * For example: configArray[0] = LITER
+ * configArray[1] = GALLON
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
* @access VehiclePropertyAccess:READ_WRITE
* @data_enum VehicleUnit
@@ -1161,9 +1161,9 @@
*
* VehiclePropConfig.configArray is used to indicate the supported pressure display units.
* Pressure units are defined in VehicleUnit.
- * For example: configArray[0] = 0x70 // KILOPASCAL
- * configArray[1] = 0x71 // PSI
- * configArray[2] = 0x72 // BAR
+ * For example: configArray[0] = KILOPASCAL
+ * configArray[1] = PSI
+ * configArray[2] = BAR
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
* @access VehiclePropertyAccess:READ_WRITE
* @data_enum VehicleUnit
@@ -1182,9 +1182,9 @@
*
* VehiclePropConfig.configArray is used to indicate the supported electrical energy units.
* Electrical energy units are defined in VehicleUnit.
- * For example: configArray[0] = 0x60 // watt-hours
- * configArray[1] = 0x64 // ampere-hours
- * configArray[2] = 0x65 // kilowatt-hours
+ * For example: configArray[0] = WATT_HOUR
+ * configArray[1] = AMPERE_HOURS
+ * configArray[2] = KILOWATT_HOUR
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
* @access VehiclePropertyAccess:READ_WRITE
* @data_enum VehicleUnit
@@ -1212,6 +1212,25 @@
| VehicleArea:GLOBAL),
/**
+ * Speed units for display
+ *
+ * Indicates the type of units the car is using to display speed to the user, e.g. m/s, km/h, or mph.
+ *
+ * VehiclePropConfig.configArray is used to indicate the supported speed display units.
+ * Speed units are defined in VehicleUnit.
+ * For example: configArray[0] = METER_PER_SEC
+ * configArray[1] = MILES_PER_HOUR
+ * configArray[2] = KILOMETERS_PER_HOUR
+ * @change_mode VehiclePropertyChangeMode:ON_CHANGE
+ * @access VehiclePropertyAccess:READ_WRITE
+ */
+ VEHICLE_SPEED_DISPLAY_UNITS = (
+ 0x0605
+ | VehiclePropertyGroup:SYSTEM
+ | VehiclePropertyType:INT32
+ | VehicleArea:GLOBAL),
+
+ /**
* Outside temperature
*
* @change_mode VehiclePropertyChangeMode:CONTINUOUS
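A short sketch of what a consumer of the new VEHICLE_SPEED_DISPLAY_UNITS value might do with it; the conversion helper and its factors are illustrative and not part of the HAL.

// Illustrative only: convert a raw speed in meters per second to the unit the
// user selected via VEHICLE_SPEED_DISPLAY_UNITS.
float toDisplaySpeed(float metersPerSec, VehicleUnit displayUnit) {
    switch (displayUnit) {
        case VehicleUnit::KILOMETERS_PER_HOUR: return metersPerSec * 3.6f;
        case VehicleUnit::MILES_PER_HOUR:      return metersPerSec * 2.23694f;
        case VehicleUnit::METER_PER_SEC:
        default:                               return metersPerSec;
    }
}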
@@ -2586,42 +2605,45 @@
* Units used for int or float type with no attached enum types.
*/
enum VehicleUnit : int32_t {
- SHOULD_NOT_USE = 0x000,
+ SHOULD_NOT_USE = 0x000,
- METER_PER_SEC = 0x01,
- RPM = 0x02,
- HERTZ = 0x03,
- PERCENTILE = 0x10,
- MILLIMETER = 0x20,
- METER = 0x21,
- KILOMETER = 0x23,
- MILE = 0x24,
- CELSIUS = 0x30,
- FAHRENHEIT = 0x31,
- KELVIN = 0x32,
- MILLILITER = 0x40,
- LITER = 0x41,
+ METER_PER_SEC = 0x01,
+ RPM = 0x02,
+ HERTZ = 0x03,
+ PERCENTILE = 0x10,
+ MILLIMETER = 0x20,
+ METER = 0x21,
+ KILOMETER = 0x23,
+ MILE = 0x24,
+ CELSIUS = 0x30,
+ FAHRENHEIT = 0x31,
+ KELVIN = 0x32,
+ MILLILITER = 0x40,
+ LITER = 0x41,
/** deprecated. Use US_GALLON instead. */
- GALLON = 0x42,
- US_GALLON = 0x42,
- IMPERIAL_GALLON= 0x43,
- NANO_SECS = 0x50,
- SECS = 0x53,
- YEAR = 0x59,
+ GALLON = 0x42,
+ US_GALLON = 0x42,
+ IMPERIAL_GALLON = 0x43,
+ NANO_SECS = 0x50,
+ SECS = 0x53,
+ YEAR = 0x59,
// Electrical Units
- WATT_HOUR = 0x60,
- MILLIAMPERE = 0x61,
- MILLIVOLT = 0x62,
- MILLIWATTS = 0x63,
- AMPERE_HOURS = 0x64,
- KILOWATT_HOUR = 0x65,
+ WATT_HOUR = 0x60,
+ MILLIAMPERE = 0x61,
+ MILLIVOLT = 0x62,
+ MILLIWATTS = 0x63,
+ AMPERE_HOURS = 0x64,
+ KILOWATT_HOUR = 0x65,
- KILOPASCAL = 0x70,
- PSI = 0x71,
- BAR = 0x72,
- DEGREES = 0x80,
+ KILOPASCAL = 0x70,
+ PSI = 0x71,
+ BAR = 0x72,
+ DEGREES = 0x80,
+
+ MILES_PER_HOUR = 0x90,
+ KILOMETERS_PER_HOUR = 0x91,
};
/**
@@ -3249,6 +3271,16 @@
*/
enum VmsMessageType : int32_t {
/**
+ * A notification indicating that the sender has been reset.
+ *
+ * The receiving party must reset its internal state and respond to the
+ * sender with a START_SESSION message as acknowledgement.
+ *
+ * This message type uses enum VmsStartSessionMessageIntegerValuesIndex.
+ */
+ START_SESSION = 17,
+
+ /**
* A request from the subscribers to the VMS service to subscribe to a layer.
*
* This message type uses enum VmsMessageWithLayerIntegerValuesIndex.
@@ -3364,7 +3396,7 @@
*/
PUBLISHER_INFORMATION_RESPONSE = 16,
- LAST_VMS_MESSAGE_TYPE = PUBLISHER_INFORMATION_RESPONSE,
+ LAST_VMS_MESSAGE_TYPE = START_SESSION,
};
/**
@@ -3378,6 +3410,30 @@
};
/*
+ * Handshake data sent as part of a VmsMessageType.START_SESSION message.
+ *
+ * A new session is initiated by sending a START_SESSION message with the
+ * sender's identifier populated and the receiver's identifier set to -1.
+ *
+ * Identifier values are independently generated, but must be non-negative, and
+ * increase monotonically between reboots.
+ *
+ * Upon receiving a START_SESSION with a mismatched identifier, the receiver
+ * must clear any cached VMS offering or subscription state and acknowledge the
+ * new session by responding with a START_SESSION message that populates both
+ * identifier fields.
+ *
+ * Any VMS messages received between initiation and completion of the handshake
+ * must be discarded.
+ */
+enum VmsStartSessionMessageIntegerValuesIndex : VmsBaseMessageIntegerValuesIndex {
+ /* Identifier field for the Android system service. */
+ SERVICE_ID = 1,
+ /* Identifier field for the HAL client process. */
+ CLIENT_ID = 2,
+};
+
+/*
* A VMS message with a layer is sent as part of a VmsMessageType.SUBSCRIBE or
* VmsMessageType.UNSUBSCRIBE messages.
*
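For a HAL that decodes the payload by hand rather than through VmsUtils, a minimal sketch of indexing a START_SESSION message with the enum above, using the toInt() helper seen elsewhere in this change; readSessionIds() itself is hypothetical.

// Illustrative only: int32Values layout is { MESSAGE_TYPE, SERVICE_ID, CLIENT_ID }.
std::pair<int32_t, int32_t> readSessionIds(const VehiclePropValue& value) {
    const auto& ints = value.value.int32Values;
    return {ints[toInt(VmsStartSessionMessageIntegerValuesIndex::SERVICE_ID)],
            ints[toInt(VmsStartSessionMessageIntegerValuesIndex::CLIENT_ID)]};
}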
diff --git a/current.txt b/current.txt
index 078a9df..64fd4ae 100644
--- a/current.txt
+++ b/current.txt
@@ -393,15 +393,17 @@
23780340c686ee86986aa5a9755c2d8566224fed177bbb22a5ebf06be574b60c android.hardware.camera.metadata@3.3::types
05d1ee760d81cdd2dc7a70ce0241af9fa830edae33b4be83d9bf5fffe05ddc6f android.hardware.camera.provider@2.4::ICameraProvider
da33234403ff5d60f3473711917b9948e6484a4260b5247acdafb111193a9de2 android.hardware.configstore@1.0::ISurfaceFlingerConfigs
+ede69710c3a95c2cbe818e6c8bb72c7816823face5fc21c17731b26f41d94d65 android.hardware.gnss@1.0::IGnss
21165b8e30c4b2d52980e4728f661420adc16e38bbe73476c06b2085be908f4c android.hardware.gnss@1.0::IGnssCallback
d702fb01dc2a0733aa820b7eb65435ee3334f75632ef880bafd2fb8803a20a58 android.hardware.gnss@1.0::IGnssMeasurementCallback
+b5f1f4c1bd6de71a8e71d70f57cdab904ac024a12f3dee3e2173770a4583bcc2 android.hardware.gnss@1.1::IGnss
7c7721c0f773fcf422b71a4f558545e9e36acc973e58ca51e5bd53905cf46bc0 android.hardware.graphics.bufferqueue@1.0::IGraphicBufferProducer
d4fea995378bb4f421b4e24ccf68cad2734ab07fe4f874a126ba558b99df5766 android.hardware.graphics.composer@2.1::IComposerClient
f7d7cb747dc01a9fdb2d39a80003b4d8df9be733d65f5842198802eb6209db69 android.hardware.graphics.mapper@2.0::IMapper
65a021fa89085b62fc96b2b6d3bef2f9103cf4d63379c68bc154fd9eef672852 android.hardware.health@1.0::types
b7ecf29927055ec422ec44bf776223f07d79ad9f92ccf9becf167e62c2607e7a android.hardware.keymaster@4.0::IKeymasterDevice
574e8f1499436fb4075894dcae0b36682427956ecb114f17f1fe22d116a83c6b android.hardware.neuralnetworks@1.0::IPreparedModel
-e75759b40a1c5f97b463b30aab91954012c9ea9e454dde308db853a56796e5a6 android.hardware.neuralnetworks@1.0::types
+1e3576c07006d82ba5bc6ddbf87c101414d361c41afe7a82713750844c488725 android.hardware.neuralnetworks@1.0::types
eb754b58c93e5591613208b4c972811288b0fa16a82430d602f107c91a908b22 android.hardware.neuralnetworks@1.1::types
1d4a5776614c08b5d794a5ec5ab04697260cbd4b3441d5935cd53ee71d19da02 android.hardware.radio@1.0::IRadioResponse
ed9da80ec0c96991fd03f0a46107815d0e50f764656e49dba4980fa5c31d5bc3 android.hardware.radio@1.0::types
@@ -467,7 +469,7 @@
7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss
2e5ad983734069e84a760004b32da0d09e4170c05380abe27e6eb80e4aa70d5a android.hardware.gnss@2.0::IAGnssCallback
1f4ac068a88a72360280d94a7f6fd7c63813c1eea4891a0eb01394d3e7e775f2 android.hardware.gnss@2.0::IAGnssRil
-4deafcdcffa2d002119e7f58810b767a84666e76475aae68e757ec2845d9756d android.hardware.gnss@2.0::IGnss
+f5605f48c2fb9f231615dd932bf730ae9540f4f98b5b7ae2b269975f452f6d73 android.hardware.gnss@2.0::IGnss
db6bdf6dfc5edf6c85d2944976db899227abb51079c893874353c322342c50b6 android.hardware.gnss@2.0::IGnssBatching
1f89392f1ebb693d8fa6f50324b1635fc79fab246d31900e63998e1b0e17511c android.hardware.gnss@2.0::IGnssBatchingCallback
64232037109a5e5f53ab0377e755ec494ae93fcb5279e6eea71dec2e7ac6fbfc android.hardware.gnss@2.0::IGnssCallback
@@ -515,7 +517,7 @@
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-e3b6176e3bf235c4e0e4e451b0166e396c7ee176cfe167c9147c3d46d7b34f0c android.hardware.neuralnetworks@1.2::types
+d18bba0b6c8d2d1da3cfb52b14f556d2e04eb91551d97ee60a3524cf993a3e0e android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
diff --git a/gnss/1.0/IGnss.hal b/gnss/1.0/IGnss.hal
index 602c615..d32bc63 100644
--- a/gnss/1.0/IGnss.hal
+++ b/gnss/1.0/IGnss.hal
@@ -75,8 +75,13 @@
};
/**
- * Opens the interface and provides the callback routines
- * to the implementation of this interface.
+ * Opens the interface and provides the callback routines to the implementation of this
+ * interface.
+ *
+ * The framework calls this method to instruct the GPS engine to prepare for serving requests
+ * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+ * framework upon successful return from this method until the cleanup() method is called to
+ * close this interface.
*
* @param callback Callback interface for IGnss.
*
@@ -105,6 +110,18 @@
/**
* Closes the interface.
+ *
+ * The cleanup() method is called by the framework to tell the GNSS HAL implementation not
+ * to expect any GNSS requests in the immediate future, e.g. when location is disabled by a
+ * user setting or under low-battery conditions. The GNSS HAL implementation must
+ * immediately stop responding to any existing requests until the setCallback() method is
+ * called again and the requests are re-initiated by the framework.
+ *
+ * After this method is called, the GNSS HAL implementation may choose to modify GNSS
+ * hardware states to save power. It is expected that when the setCallback() method is called
+ * again to reopen this interface and serve requests, there may be some minor delays in GNSS
+ * responses as hardware readiness states are restored, not to exceed those that occur on
+ * normal device boot up.
*/
cleanup();
@@ -153,7 +170,7 @@
* @param mode Parameter must be one of MS_BASED or STANDALONE.
* It is allowed by the platform (and it is recommended) to fallback to
* MS_BASED if MS_ASSISTED is passed in, and MS_BASED is supported.
- * @recurrence GNSS postion recurrence value, either periodic or single.
+ * @recurrence GNSS position recurrence value, either periodic or single.
* @param minIntervalMs Represents the time between fixes in milliseconds.
* @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
* @param preferredTimeMs Represents the requested time to first fix in milliseconds.
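A condensed sketch of the framework-side lifecycle that the expanded setCallback()/cleanup() documentation describes; gnssHal is an assumed IGnss proxy and GnssCallback an assumed framework-side IGnssCallback implementation, with error handling trimmed.

// Illustrative only.
void runGnssSessionOnce(const sp<IGnss>& gnssHal) {
    sp<IGnssCallback> callback = new GnssCallback();  // assumed framework-side implementation
    auto ret = gnssHal->setCallback(callback);
    if (ret.isOk() && static_cast<bool>(ret)) {
        // From here until cleanup(), the HAL must serve GNSS requests.
        gnssHal->setPositionMode(IGnss::GnssPositionMode::MS_BASED,
                                 IGnss::GnssPositionRecurrence::RECURRENCE_PERIODIC,
                                 /*minIntervalMs=*/1000, /*preferredAccuracyMeters=*/0,
                                 /*preferredTimeMs=*/0);
        gnssHal->start();
        // ... fixes arrive through the callback ...
        gnssHal->stop();
    }
    // Location turned off (user setting, low battery): the HAL may power down GNSS
    // hardware and must stop serving requests until setCallback() is called again.
    gnssHal->cleanup();
}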
diff --git a/gnss/1.1/IGnss.hal b/gnss/1.1/IGnss.hal
index 672f742..3400807 100644
--- a/gnss/1.1/IGnss.hal
+++ b/gnss/1.1/IGnss.hal
@@ -29,6 +29,11 @@
* Opens the interface and provides the callback routines to the implementation of this
* interface.
*
+ * The framework calls this method to instruct the GPS engine to prepare for serving requests
+ * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+ * framework upon successful return from this method until the cleanup() method is called to
+ * close this interface.
+ *
* @param callback Callback interface for IGnss.
*
* @return success Returns true on success.
@@ -42,7 +47,7 @@
* @param mode Parameter must be one of MS_BASED or STANDALONE. It is allowed by the platform
* (and it is recommended) to fallback to MS_BASED if MS_ASSISTED is passed in, and MS_BASED
* is supported.
- * @param recurrence GNSS postion recurrence value, either periodic or single.
+ * @param recurrence GNSS position recurrence value, either periodic or single.
* @param minIntervalMs Represents the time between fixes in milliseconds.
* @param preferredAccuracyMeters Represents the requested fix accuracy in meters.
* @param preferredTimeMs Represents the requested time to first fix in milliseconds.
diff --git a/gnss/2.0/IGnss.hal b/gnss/2.0/IGnss.hal
index f19f8d0..9935bf9 100644
--- a/gnss/2.0/IGnss.hal
+++ b/gnss/2.0/IGnss.hal
@@ -36,13 +36,18 @@
* the interface @1.0::IGnssNi.hal and @1.0::IGnssNiCallback.hal are deprecated in this version
* and are not supported by the framework. The GNSS HAL implementation of this interface
* must return nullptr for the following @1.0::IGnss method.
- * getExtensionGnssNi() generates (IGnssNi gnssNiIface);
+ * getExtensionGnssNi() generates (IGnssNi gnssNiIface);
*/
interface IGnss extends @1.1::IGnss {
/**
* Opens the interface and provides the callback routines to the implementation of this
* interface.
*
+ * The framework calls this method to instruct the GPS engine to prepare for serving requests
+ * from the framework. The GNSS HAL implementation must respond to all GNSS requests from the
+ * framework upon successful return from this method until the cleanup() method is called to
+ * close this interface.
+ *
* @param callback Callback interface for IGnss.
*
* @return success Returns true on success.
@@ -83,8 +88,9 @@
/**
* This method returns the IGnssMeasurement interface.
*
- * Exactly one of getExtensionGnssMeasurement_1_1() and getExtensionGnssMeasurement_2_0() must
- * return a non-null handle, and the other method must return nullptr.
+ * Exactly one of the getExtensionGnssMeasurement(), getExtensionGnssMeasurement_1_1(), and
+ * getExtensionGnssMeasurement_2_0() methods must return a non-null handle, and the other
+ * methods must return nullptr.
*
* @return gnssMeasurementIface Handle to the IGnssMeasurement interface.
*/
diff --git a/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc b/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
index 08e32d8..81ce890 100644
--- a/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
+++ b/graphics/composer/2.3/default/android.hardware.graphics.composer@2.3-service.rc
@@ -4,3 +4,4 @@
group graphics drmrpc
capabilities SYS_NICE
onrestart restart surfaceflinger
+ writepid /dev/cpuset/system-background/tasks
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index b0a1c1a..02db063 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -858,20 +858,21 @@
* elements of the input matrices.
*
* The operation has the following independently optional inputs:
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+ * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+ * have values or none of them have values (i.e., all set to null). If
+ * they have values, the peephole optimization is used.
* * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
- * (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
- * bias (\f$b_i\f$) either all have values, or none of them have values
- * (i.e., all set to null). If they have no values, coupling of input and
- * forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
- * is calculated using the following equation instead.
+ * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values. If they have no values, coupling of input
+ * and forget gates (CIFG) is used, in which case the input gate
+ * (\f$i_t\f$) is calculated using the following equation instead.
* \f{eqnarray*}{
* i_t = 1 - f_t
* \f}
- * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
- * (\f$W_{co}\f$) either both have values or neither of them have values.
- * If they have values, the peephole optimization is used. Additionally,
- * if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
- * required to have values for peephole optimization.
+ * In case peephole optimization is used and CIFG is not used,
+ * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+ * cell-to-input weights must have no value.
* * The projection weights (\f$W_{proj}\f$) is required only for the
* recurrent projection layer, and should otherwise have no value.
* * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
@@ -984,8 +985,8 @@
* Outputs:
* * 0: The scratch buffer.
* A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units * 4] with CIFG, or
- * [batch_size, num_units * 3] without CIFG.
+ * [batch_size, num_units * 3] with CIFG, or
+ * [batch_size, num_units * 4] without CIFG.
* * 1: The output state (out) (\f$h_t\f$).
* A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
* [batch_size, output_size].
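The corrected shapes follow from the CIFG coupling described above: with CIFG the input gate is derived as i_t = 1 - f_t, so only three gate activations need scratch space, while the non-CIFG case needs all four. A trivial helper sketch (not part of the HAL):

// Illustrative only: scratch buffer shape for the LSTM op as documented above.
std::pair<uint32_t, uint32_t> lstmScratchShape(uint32_t batchSize, uint32_t numUnits,
                                               bool useCifg) {
    // [batch_size, num_units * 3] with CIFG, [batch_size, num_units * 4] without.
    return {batchSize, numUnits * (useCifg ? 3u : 4u)};
}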
diff --git a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
index 72a5007..f0c93b7 100644
--- a/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.0/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,53 +41,6 @@
///////////////////////// UTILITY FUNCTIONS /////////////////////////
-static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
- sp<IPreparedModel>* preparedModel) {
- ASSERT_NE(nullptr, preparedModel);
-
- // see if service can handle model
- bool fullySupportsModel = false;
- Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel =
- std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
- });
- ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
- // launch prepare model
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- // retrieve prepared model
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- *preparedModel = preparedModelCallback->getPreparedModel();
-
- // The getSupportedOperations call returns a list of operations that are
- // guaranteed not to fail if prepareModel is called, and
- // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
- // If a driver has any doubt that it can prepare an operation, it must
- // return false. So here, if a driver isn't sure if it can support an
- // operation, but reports that it successfully prepared the model, the test
- // can continue.
- if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel->get());
- LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
- "prepare model that it does not support.";
- std::cout << "[ ] Unable to test Request validation because vendor service "
- "cannot prepare model that it does not support."
- << std::endl;
- return;
- }
- ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel->get());
-}
-
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -237,15 +189,8 @@
return requests;
}
-void ValidationTest::validateRequests(const V1_0::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
- // create IPreparedModel
- sp<IPreparedModel> preparedModel;
- ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
- if (preparedModel == nullptr) {
- return;
- }
-
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 31638c4..aee2f85 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,55 @@
namespace vts {
namespace functional {
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_0::Model& model,
+ sp<IPreparedModel>* preparedModel) {
+ ASSERT_NE(nullptr, preparedModel);
+
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedOpsLaunchStatus = device->getSupportedOperations(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ *preparedModel = preparedModelCallback->getPreparedModel();
+
+ // The getSupportedOperations call returns a list of operations that are
+ // guaranteed not to fail if prepareModel is called, and
+ // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+ // If a driver has any doubt that it can prepare an operation, it must
+ // return false. So here, if a driver isn't sure if it can support an
+ // operation, but reports that it successfully prepared the model, the test
+ // can continue.
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Unable to test Request validation because vendor service "
+ "cannot prepare model that it does not support."
+ << std::endl;
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
+}
+
// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -68,9 +121,17 @@
::testing::VtsHalHidlTargetTestBase::TearDown();
}
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
validateModel(model);
- validateRequests(model, request);
+
+ // create IPreparedModel
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
+
+ validateRequests(preparedModel, requests);
}
} // namespace functional
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 559d678..22285be 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -67,7 +67,8 @@
private:
void validateModel(const Model& model);
- void validateRequests(const Model& model, const std::vector<Request>& request);
+ void validateRequests(const sp<IPreparedModel>& preparedModel,
+ const std::vector<Request>& requests);
};
// Tag for the generated tests
diff --git a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
index 5225bf7..f4adbab 100644
--- a/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.1/vts/functional/ValidateRequest.cpp
@@ -34,7 +34,6 @@
namespace functional {
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
-using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
using test_helper::for_all;
using test_helper::MixedTyped;
@@ -42,54 +41,6 @@
///////////////////////// UTILITY FUNCTIONS /////////////////////////
-static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
- sp<IPreparedModel>* preparedModel) {
- ASSERT_NE(nullptr, preparedModel);
-
- // see if service can handle model
- bool fullySupportsModel = false;
- Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
- model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
- ASSERT_EQ(ErrorStatus::NONE, status);
- ASSERT_NE(0ul, supported.size());
- fullySupportsModel =
- std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
- });
- ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
-
- // launch prepare model
- sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
- ASSERT_NE(nullptr, preparedModelCallback.get());
- Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
- ASSERT_TRUE(prepareLaunchStatus.isOk());
- ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
-
- // retrieve prepared model
- preparedModelCallback->wait();
- ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
- *preparedModel = preparedModelCallback->getPreparedModel();
-
- // The getSupportedOperations_1_1 call returns a list of operations that are
- // guaranteed not to fail if prepareModel_1_1 is called, and
- // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
- // If a driver has any doubt that it can prepare an operation, it must
- // return false. So here, if a driver isn't sure if it can support an
- // operation, but reports that it successfully prepared the model, the test
- // can continue.
- if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
- ASSERT_EQ(nullptr, preparedModel->get());
- LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
- "prepare model that it does not support.";
- std::cout << "[ ] Unable to test Request validation because vendor service "
- "cannot prepare model that it does not support."
- << std::endl;
- return;
- }
- ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
- ASSERT_NE(nullptr, preparedModel->get());
-}
-
// Primary validation function. This function will take a valid request, apply a
// mutation to it to invalidate the request, then pass it to interface calls
// that use the request. Note that the request here is passed by value, and any
@@ -238,15 +189,8 @@
return requests;
}
-void ValidationTest::validateRequests(const V1_1::Model& model,
+void ValidationTest::validateRequests(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
- // create IPreparedModel
- sp<IPreparedModel> preparedModel;
- ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
- if (preparedModel == nullptr) {
- return;
- }
-
// validate each request
for (const Request& request : requests) {
removeInputTest(preparedModel, request);
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index 11fa693..08069f2 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,6 +18,10 @@
#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+
+#include "Callbacks.h"
+
namespace android {
namespace hardware {
namespace neuralnetworks {
@@ -25,6 +29,56 @@
namespace vts {
namespace functional {
+using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+
+static void createPreparedModel(const sp<IDevice>& device, const V1_1::Model& model,
+ sp<IPreparedModel>* preparedModel) {
+ ASSERT_NE(nullptr, preparedModel);
+
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_1(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedOpsLaunchStatus.isOk());
+
+ // launch prepare model
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ *preparedModel = preparedModelCallback->getPreparedModel();
+
+ // The getSupportedOperations_1_1 call returns a list of operations that are
+ // guaranteed not to fail if prepareModel_1_1 is called, and
+ // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed.
+ // If a driver has any doubt that it can prepare an operation, it must
+ // return false. So here, if a driver isn't sure if it can support an
+ // operation, but reports that it successfully prepared the model, the test
+ // can continue.
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Unable to test Request validation because vendor service "
+ "cannot prepare model that it does not support."
+ << std::endl;
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
+}
+
// A class for test environment setup
NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
@@ -68,9 +122,17 @@
::testing::VtsHalHidlTargetTestBase::TearDown();
}
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
validateModel(model);
- validateRequests(model, request);
+
+ // create IPreparedModel
+ sp<IPreparedModel> preparedModel;
+ ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+ if (preparedModel == nullptr) {
+ return;
+ }
+
+ validateRequests(preparedModel, requests);
}
} // namespace functional
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index cea2b54..f3f587b 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -76,7 +76,8 @@
private:
void validateModel(const Model& model);
- void validateRequests(const Model& model, const std::vector<Request>& request);
+ void validateRequests(const sp<IPreparedModel>& preparedModel,
+ const std::vector<Request>& requests);
};
// Tag for the generated tests
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index c2e8f22..f368ce2 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -1177,20 +1177,21 @@
* https://arxiv.org/pdf/1607.06450.pdf
*
* The operation has the following independently optional inputs:
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+ * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+ * have values or none of them have values (i.e., all set to null). If
+ * they have values, the peephole optimization is used.
* * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
- * (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
- * bias (\f$b_i\f$) either all have values, or none of them have values
- * (i.e., all set to null). If they have no values, coupling of input and
- * forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
- * is calculated using the following equation instead.
+ * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values. If they have no values, coupling of input
+ * and forget gates (CIFG) is used, in which case the input gate
+ * (\f$i_t\f$) is calculated using the following equation instead.
* \f{eqnarray*}{
* i_t = 1 - f_t
* \f}
- * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
- * (\f$W_{co}\f$) either both have values or neither of them have values.
- * If they have values, the peephole optimization is used. Additionally,
- * if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
- * required to have values for peephole optimization.
+ * In case peephole optimization is used and CIFG is not used,
+ * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+ * cell-to-input weights must have no value.
* * The projection weights (\f$W_{proj}\f$) is required only for the
* recurrent projection layer, and should otherwise have no value.
* * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index bf91560..4411b90 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -45,9 +45,9 @@
using ::android::nn::allocateSharedMemory;
using ::test_helper::MixedTypedExample;
-namespace {
+namespace float32_model {
-// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of mobilenet.
+// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of float32 mobilenet.
#include "examples/mobilenet_224_gender_basic_fixed.example.cpp"
#include "vts_models/mobilenet_224_gender_basic_fixed.model.cpp"
@@ -55,6 +55,44 @@
[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+ float outputValue = 1.0f + static_cast<float>(len);
+ return {{.operands = {
+ // Input
+ {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
+ // Output
+ {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
+}
+
+} // namespace float32_model
+
+namespace quant8_model {
+
+// In frameworks/ml/nn/runtime/test/generated/, creates a hidl model of quant8 mobilenet.
+#include "examples/mobilenet_quantized.example.cpp"
+#include "vts_models/mobilenet_quantized.model.cpp"
+
+// Prevent the compiler from complaining about an otherwise unused function.
+[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
+[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
+
+// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
+// This function assumes the operation is always ADD.
+std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+ uint8_t outputValue = 1 + static_cast<uint8_t>(len);
+ return {{.operands = {// Input
+ {.operandDimensions = {{0, {1}}}, .quant8AsymmOperands = {{0, {1}}}},
+ // Output
+ {.operandDimensions = {{0, {1}}},
+ .quant8AsymmOperands = {{0, {outputValue}}}}}}};
+}
+
+} // namespace quant8_model
+
+namespace {
+
enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
// Creates cache handles based on provided file groups.
@@ -101,14 +139,18 @@
// ↑ ↑ ↑ ↑
// [1] [1] [1] [1]
//
-Model createLargeTestModel(OperationType op, uint32_t len) {
+// This function assumes the operation is either ADD or MUL.
+template <typename CppType, OperandType operandType>
+Model createLargeTestModelImpl(OperationType op, uint32_t len) {
+ EXPECT_TRUE(op == OperationType::ADD || op == OperationType::MUL);
+
// Model operations and operands.
std::vector<Operation> operations(len);
std::vector<Operand> operands(len * 2 + 2);
// The constant buffer pool. This contains the activation scalar, followed by the
// per-operation constant operands.
- std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(float));
+ std::vector<uint8_t> operandValues(sizeof(int32_t) + len * sizeof(CppType));
// The activation scalar, value = 0.
operands[0] = {
@@ -122,7 +164,26 @@
};
memset(operandValues.data(), 0, sizeof(int32_t));
- const float floatBufferValue = 1.0f;
+ // The buffer value of the constant second operand. The logical value is always 1.0f.
+ CppType bufferValue;
+ // The scale of the first and second operand.
+ float scale1, scale2;
+ if (operandType == OperandType::TENSOR_FLOAT32) {
+ bufferValue = 1.0f;
+ scale1 = 0.0f;
+ scale2 = 0.0f;
+ } else if (op == OperationType::ADD) {
+ bufferValue = 1;
+ scale1 = 1.0f;
+ scale2 = 1.0f;
+ } else {
+ // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale,
+ // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point.
+ bufferValue = 2;
+ scale1 = 1.0f;
+ scale2 = 0.5f;
+ }
+
for (uint32_t i = 0; i < len; i++) {
const uint32_t firstInputIndex = i * 2 + 1;
const uint32_t secondInputIndex = firstInputIndex + 1;
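A quick worked check of the scale choice above (zeroPoint is 0 throughout): the dequantized second operand is scale2 * bufferValue = 0.5f * 2 = 1.0f, so its logical value stays 1.0f, and input0.scale * input1.scale = 1.0f * 0.5f = 0.5f, strictly less than the output scale of 1.0f as quant8 MUL requires.

// Sanity check of the quant8 MUL scale choice (illustrative, not part of the test):
static_assert(0.5f * 2 == 1.0f, "second operand still represents 1.0f");
static_assert(1.0f * 0.5f < 1.0f, "input0.scale * input1.scale < output.scale");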
@@ -130,10 +191,10 @@
// The first operation input.
operands[firstInputIndex] = {
- .type = OperandType::TENSOR_FLOAT32,
+ .type = operandType,
.dimensions = {1},
.numberOfConsumers = 1,
- .scale = 0.0f,
+ .scale = scale1,
.zeroPoint = 0,
.lifetime = (i == 0 ? OperandLifeTime::MODEL_INPUT
: OperandLifeTime::TEMPORARY_VARIABLE),
@@ -142,18 +203,18 @@
// The second operation input, value = 1.
operands[secondInputIndex] = {
- .type = OperandType::TENSOR_FLOAT32,
+ .type = operandType,
.dimensions = {1},
.numberOfConsumers = 1,
- .scale = 0.0f,
+ .scale = scale2,
.zeroPoint = 0,
.lifetime = OperandLifeTime::CONSTANT_COPY,
.location = {.poolIndex = 0,
- .offset = static_cast<uint32_t>(i * sizeof(float) + sizeof(int32_t)),
- .length = sizeof(float)},
+ .offset = static_cast<uint32_t>(i * sizeof(CppType) + sizeof(int32_t)),
+ .length = sizeof(CppType)},
};
- memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(float), &floatBufferValue,
- sizeof(float));
+ memcpy(operandValues.data() + sizeof(int32_t) + i * sizeof(CppType), &bufferValue,
+ sizeof(CppType));
// The operation. All operations share the same activation scalar.
// The output operand is created as an input in the next iteration of the loop, in the case
@@ -168,10 +229,10 @@
// The model output.
operands.back() = {
- .type = OperandType::TENSOR_FLOAT32,
+ .type = operandType,
.dimensions = {1},
.numberOfConsumers = 0,
- .scale = 0.0f,
+ .scale = scale1,
.zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_OUTPUT,
.location = {},
@@ -191,22 +252,13 @@
};
}
-// MixedTypedExample is defined in frameworks/ml/nn/tools/test_generator/include/TestHarness.h.
-// This function assumes the operation is always ADD.
-std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
- float outputValue = 1.0f + static_cast<float>(len);
- return {{.operands = {
- // Input
- {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {1.0f}}}},
- // Output
- {.operandDimensions = {{0, {1}}}, .float32Operands = {{0, {outputValue}}}}}}};
-};
-
} // namespace
// Tag for the compilation caching tests.
-class CompilationCachingTest : public NeuralnetworksHidlTest {
+class CompilationCachingTestBase : public NeuralnetworksHidlTest {
protected:
+ CompilationCachingTestBase(OperandType type) : kOperandType(type) {}
+
void SetUp() override {
NeuralnetworksHidlTest::SetUp();
ASSERT_NE(device.get(), nullptr);
@@ -263,6 +315,40 @@
NeuralnetworksHidlTest::TearDown();
}
+ // Model and examples creators. According to kOperandType, the following methods will return
+ // either float32 model/examples or the quant8 variant.
+ Model createTestModel() {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return float32_model::createTestModel();
+ } else {
+ return quant8_model::createTestModel();
+ }
+ }
+
+ std::vector<MixedTypedExample> get_examples() {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return float32_model::get_examples();
+ } else {
+ return quant8_model::get_examples();
+ }
+ }
+
+ Model createLargeTestModel(OperationType op, uint32_t len) {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return createLargeTestModelImpl<float, OperandType::TENSOR_FLOAT32>(op, len);
+ } else {
+ return createLargeTestModelImpl<uint8_t, OperandType::TENSOR_QUANT8_ASYMM>(op, len);
+ }
+ }
+
+ std::vector<MixedTypedExample> getLargeModelExamples(uint32_t len) {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return float32_model::getLargeModelExamples(len);
+ } else {
+ return quant8_model::getLargeModelExamples(len);
+ }
+ }
+
// See if the service can handle the model.
bool isModelFullySupported(const V1_2::Model& model) {
bool fullySupportsModel = false;
@@ -366,9 +452,20 @@
uint32_t mNumModelCache;
uint32_t mNumDataCache;
uint32_t mIsCachingSupported;
+
+ // The primary data type of the testModel.
+ const OperandType kOperandType;
};
-TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
+// A parameterized fixture of CompilationCachingTestBase. Every test runs twice: the first
+// pass uses float32 models and the second pass uses quant8 models.
+class CompilationCachingTest : public CompilationCachingTestBase,
+ public ::testing::WithParamInterface<OperandType> {
+ protected:
+ CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
+};
+
+TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -409,7 +506,7 @@
/*testDynamicOutputShape=*/false);
}
-TEST_F(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
+TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -472,7 +569,7 @@
/*testDynamicOutputShape=*/false);
}
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -584,7 +681,7 @@
}
}
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -664,7 +761,7 @@
}
}
-TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -776,7 +873,7 @@
}
}
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -856,7 +953,7 @@
}
}
-TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -914,7 +1011,7 @@
}
}
-TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
// Create test HIDL model and compile.
const Model testModel = createTestModel();
if (checkEarlyTermination(testModel)) return;
@@ -990,7 +1087,7 @@
constexpr uint32_t kLargeModelSize = 100;
constexpr uint32_t kNumIterationsTOCTOU = 100;
-TEST_F(CompilationCachingTest, SaveToCache_TOCTOU) {
+TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
@@ -1053,7 +1150,7 @@
}
}
-TEST_F(CompilationCachingTest, PrepareFromCache_TOCTOU) {
+TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
@@ -1116,7 +1213,7 @@
}
}
-TEST_F(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
+TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
if (!mIsCachingSupported) return;
// Create test models and check if fully supported by the service.
@@ -1164,11 +1261,19 @@
}
}
-class CompilationCachingSecurityTest : public CompilationCachingTest,
- public ::testing::WithParamInterface<uint32_t> {
+static const auto kOperandTypeChoices =
+ ::testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
+
+class CompilationCachingSecurityTest
+ : public CompilationCachingTestBase,
+ public ::testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
protected:
+ CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
+
void SetUp() {
- CompilationCachingTest::SetUp();
+ CompilationCachingTestBase::SetUp();
generator.seed(kSeed);
}
@@ -1254,7 +1359,7 @@
}
}
- const uint32_t kSeed = GetParam();
+ const uint32_t kSeed = std::get<1>(GetParam());
std::mt19937 generator;
};
@@ -1302,7 +1407,7 @@
}
INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
- ::testing::Range(0U, 10U));
+ ::testing::Combine(kOperandTypeChoices, ::testing::Range(0U, 10U)));
} // namespace functional
} // namespace vts
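
For readers unfamiliar with the gtest pattern the refactor above adopts, here is a minimal, self-contained sketch of a value-parameterized fixture whose base class takes the operand type in its constructor and whose security variant combines it with a seed via ::testing::Combine. All names below are illustrative stand-ins, not the VTS classes.

    #include <gtest/gtest.h>
    #include <cstdint>
    #include <tuple>

    enum class OperandKind { Float32, Quant8 };  // stand-in for OperandType

    class CachingTestBase : public ::testing::Test {
      protected:
        explicit CachingTestBase(OperandKind kind) : kKind(kind) {}
        const OperandKind kKind;  // drives which model variant a test builds
    };

    class CachingSecurityTest
        : public CachingTestBase,
          public ::testing::WithParamInterface<std::tuple<OperandKind, uint32_t>> {
      protected:
        CachingSecurityTest() : CachingTestBase(std::get<0>(GetParam())) {}
        const uint32_t kSeed = std::get<1>(GetParam());
    };

    TEST_P(CachingSecurityTest, RunsOncePerCombination) {
        // Each (operand kind, seed) pair produces one independent test run.
        EXPECT_TRUE(kKind == OperandKind::Float32 || kKind == OperandKind::Quant8);
    }

    INSTANTIATE_TEST_CASE_P(
            Demo, CachingSecurityTest,
            ::testing::Combine(::testing::Values(OperandKind::Float32, OperandKind::Quant8),
                               ::testing::Range(0U, 10U)));
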
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index 8bb4934..8c6391e 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,7 +39,13 @@
using ::android::nn::ResultChannelReceiver;
using ExecutionBurstCallback = ::android::nn::ExecutionBurstController::ExecutionBurstCallback;
+// This constant value represents the length of an FMQ that is large enough to
+// return a result from a burst execution for all of the generated test cases.
constexpr size_t kExecutionBurstChannelLength = 1024;
+
+// This constant value represents a length of an FMQ that is not large enough
+// to return a result from a burst execution for some of the generated test
+// cases.
constexpr size_t kExecutionBurstChannelSmallLength = 8;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -51,7 +57,8 @@
static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurstCallback>& callback,
std::unique_ptr<RequestChannelSender>* sender,
std::unique_ptr<ResultChannelReceiver>* receiver,
- sp<IBurstContext>* context) {
+ sp<IBurstContext>* context,
+ size_t resultChannelLength = kExecutionBurstChannelLength) {
ASSERT_NE(nullptr, preparedModel.get());
ASSERT_NE(nullptr, sender);
ASSERT_NE(nullptr, receiver);
@@ -61,7 +68,7 @@
auto [fmqRequestChannel, fmqRequestDescriptor] =
RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
auto [fmqResultChannel, fmqResultDescriptor] =
- ResultChannelReceiver::create(kExecutionBurstChannelLength, /*blocking=*/true);
+ ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
ASSERT_NE(nullptr, fmqRequestChannel.get());
ASSERT_NE(nullptr, fmqResultChannel.get());
ASSERT_NE(nullptr, fmqRequestDescriptor);
@@ -87,38 +94,25 @@
}
static void createBurstWithResultChannelLength(
- const sp<IPreparedModel>& preparedModel,
- std::shared_ptr<ExecutionBurstController>* controller, size_t resultChannelLength) {
+ const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+ std::shared_ptr<ExecutionBurstController>* controller) {
ASSERT_NE(nullptr, preparedModel.get());
ASSERT_NE(nullptr, controller);
// create FMQ objects
- auto [fmqRequestChannel, fmqRequestDescriptor] =
- RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
- auto [fmqResultChannel, fmqResultDescriptor] =
- ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
- ASSERT_NE(nullptr, fmqRequestChannel.get());
- ASSERT_NE(nullptr, fmqResultChannel.get());
- ASSERT_NE(nullptr, fmqRequestDescriptor);
- ASSERT_NE(nullptr, fmqResultDescriptor);
-
- // configure burst
+ std::unique_ptr<RequestChannelSender> sender;
+ std::unique_ptr<ResultChannelReceiver> receiver;
sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
- ErrorStatus errorStatus;
- sp<IBurstContext> burstContext;
- const Return<void> ret = preparedModel->configureExecutionBurst(
- callback, *fmqRequestDescriptor, *fmqResultDescriptor,
- [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
- errorStatus = status;
- burstContext = context;
- });
- ASSERT_TRUE(ret.isOk());
- ASSERT_EQ(ErrorStatus::NONE, errorStatus);
- ASSERT_NE(nullptr, burstContext.get());
+ sp<IBurstContext> context;
+ ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
+ resultChannelLength));
+ ASSERT_NE(nullptr, sender.get());
+ ASSERT_NE(nullptr, receiver.get());
+ ASSERT_NE(nullptr, context.get());
// return values
- *controller = std::make_shared<ExecutionBurstController>(
- std::move(fmqRequestChannel), std::move(fmqResultChannel), burstContext, callback);
+ *controller = std::make_shared<ExecutionBurstController>(std::move(sender), std::move(receiver),
+ context, callback);
}
// Primary validation function. This function will take a valid serialized
@@ -139,7 +133,7 @@
SCOPED_TRACE(message);
// send invalid packet
- sender->sendPacket(serialized);
+ ASSERT_TRUE(sender->sendPacket(serialized));
// receive error
auto results = receiver->getBlocking();
@@ -150,27 +144,34 @@
EXPECT_TRUE(badTiming(timing));
}
-static std::vector<FmqRequestDatum> createUniqueDatum() {
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// creates pre-set invalid packet entries for convenience.
+static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
const FmqRequestDatum::PacketInformation packetInformation = {
/*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
/*.numberOfPools=*/10};
const FmqRequestDatum::OperandInformation operandInformation = {
/*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
- std::vector<FmqRequestDatum> unique(7);
- unique[0].packetInformation(packetInformation);
- unique[1].inputOperandInformation(operandInformation);
- unique[2].inputOperandDimensionValue(0);
- unique[3].outputOperandInformation(operandInformation);
- unique[4].outputOperandDimensionValue(0);
- unique[5].poolIdentifier(invalidPoolIdentifier);
- unique[6].measureTiming(MeasureTiming::YES);
- return unique;
+ std::vector<FmqRequestDatum> bad(7);
+ bad[0].packetInformation(packetInformation);
+ bad[1].inputOperandInformation(operandInformation);
+ bad[2].inputOperandDimensionValue(0);
+ bad[3].outputOperandInformation(operandInformation);
+ bad[4].outputOperandDimensionValue(0);
+ bad[5].poolIdentifier(invalidPoolIdentifier);
+ bad[6].measureTiming(MeasureTiming::YES);
+ return bad;
}
-static const std::vector<FmqRequestDatum>& getUniqueDatum() {
- static const std::vector<FmqRequestDatum> unique = createUniqueDatum();
- return unique;
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// retrieves pre-set invalid packet entries for convenience, and caches them
+// so they can be reused on subsequent validation checks.
+static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
+ static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
+ return bad;
}
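
The retrieval function above leans on a common C++ idiom: a function-local static is initialized exactly once (thread-safely since C++11), so the bad-packet entries are built on first use and reused afterwards. A minimal sketch of the idiom with placeholder names:

    #include <string>
    #include <vector>

    static std::vector<std::string> buildExpensiveList() {
        return {"built", "exactly", "once"};
    }

    // Subsequent calls return the same cached object; initialization is
    // thread-safe under C++11 "magic statics".
    static const std::vector<std::string>& getExpensiveList() {
        static const std::vector<std::string> cached = buildExpensiveList();
        return cached;
    }
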
///////////////////////// REMOVE DATUM ////////////////////////////////////
@@ -190,7 +191,7 @@
static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
const std::vector<FmqRequestDatum>& serialized) {
- const std::vector<FmqRequestDatum>& extra = getUniqueDatum();
+ const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
for (size_t index = 0; index <= serialized.size(); ++index) {
for (size_t type = 0; type < extra.size(); ++type) {
const std::string message = "addDatum: added datum type " + std::to_string(type) +
@@ -209,17 +210,17 @@
using Discriminator = FmqRequestDatum::hidl_discriminator;
const bool differentValues = (lhs != rhs);
- const bool sameSumType = (lhs.getDiscriminator() == rhs.getDiscriminator());
+ const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
const auto discriminator = rhs.getDiscriminator();
const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
discriminator == Discriminator::outputOperandDimensionValue);
- return differentValues && !(sameSumType && isDimensionValue);
+ return differentValues && !(sameDiscriminator && isDimensionValue);
}
static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
const std::vector<FmqRequestDatum>& serialized) {
- const std::vector<FmqRequestDatum>& change = getUniqueDatum();
+ const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
for (size_t index = 0; index < serialized.size(); ++index) {
for (size_t type = 0; type < change.size(); ++type) {
if (interestingCase(serialized[index], change[type])) {
@@ -252,17 +253,17 @@
// validate each request
for (const Request& request : requests) {
// load memory into callback slots
- std::vector<intptr_t> keys(request.pools.size());
- for (size_t i = 0; i < keys.size(); ++i) {
- keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
- }
+ std::vector<intptr_t> keys;
+ keys.reserve(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+ [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
// ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
// subsequent slot validation testing)
- const auto maxElement = std::max_element(slots.begin(), slots.end());
- ASSERT_NE(slots.end(), maxElement);
- ASSERT_NE(std::numeric_limits<int32_t>::max(), *maxElement);
+ ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+ return slot != std::numeric_limits<int32_t>::max();
+ }));
// serialize the request
const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
@@ -274,18 +275,20 @@
}
}
+// This test validates that when the result message size exceeds the length of the
+// result FMQ, the service instance fails gracefully and returns an error.
static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
const std::vector<Request>& requests) {
// create regular burst
std::shared_ptr<ExecutionBurstController> controllerRegular;
- ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerRegular,
- kExecutionBurstChannelLength));
+ ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+ preparedModel, kExecutionBurstChannelLength, &controllerRegular));
ASSERT_NE(nullptr, controllerRegular.get());
// create burst with small output channel
std::shared_ptr<ExecutionBurstController> controllerSmall;
- ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(preparedModel, &controllerSmall,
- kExecutionBurstChannelSmallLength));
+ ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+ preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
ASSERT_NE(nullptr, controllerSmall.get());
// validate each request
@@ -297,24 +300,25 @@
}
// collect serialized result by running regular burst
- const auto [status1, outputShapes1, timing1] =
+ const auto [statusRegular, outputShapesRegular, timingRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
- // skip test if synchronous output isn't useful
+ // skip the test if the regular burst output isn't useful for testing a failure
+ // caused by a result FMQ that is too small
const std::vector<FmqResultDatum> serialized =
- ::android::nn::serialize(status1, outputShapes1, timing1);
- if (status1 != ErrorStatus::NONE ||
+ ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+ if (statusRegular != ErrorStatus::NONE ||
serialized.size() <= kExecutionBurstChannelSmallLength) {
continue;
}
// by this point, execution should fail because the result channel isn't
// large enough to return the serialized result
- const auto [status2, outputShapes2, timing2] =
+ const auto [statusSmall, outputShapesSmall, timingSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
- EXPECT_NE(ErrorStatus::NONE, status2);
- EXPECT_EQ(0u, outputShapes2.size());
- EXPECT_TRUE(badTiming(timing2));
+ EXPECT_NE(ErrorStatus::NONE, statusSmall);
+ EXPECT_EQ(0u, outputShapesSmall.size());
+ EXPECT_TRUE(badTiming(timingSmall));
}
}
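
Two standard-library idioms appear in the refactored validation code above: std::transform into a std::back_inserter to derive one container from another, and std::all_of to assert a property over every element. A self-contained sketch using illustrative types (not the VTS request/pool types):

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <limits>
    #include <vector>

    int main() {
        const std::vector<int> pools = {10, 20, 30};

        // Map each pool to a key derived from its address, appending as we go.
        std::vector<std::intptr_t> keys;
        keys.reserve(pools.size());
        std::transform(pools.begin(), pools.end(), std::back_inserter(keys),
                       [](const int& pool) { return reinterpret_cast<std::intptr_t>(&pool); });

        // Verify no key collides with a reserved sentinel value.
        const bool ok = std::all_of(keys.begin(), keys.end(), [](std::intptr_t key) {
            return key != std::numeric_limits<std::intptr_t>::max();
        });
        return ok ? 0 : 1;
    }
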
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 93182f1..4ddefe8 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -29,7 +29,6 @@
namespace vts {
namespace functional {
-using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using V1_1::ExecutionPreference;
@@ -127,7 +126,7 @@
::testing::VtsHalHidlTargetTestBase::TearDown();
}
-void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& request) {
+void ValidationTest::validateEverything(const Model& model, const std::vector<Request>& requests) {
validateModel(model);
// create IPreparedModel
@@ -137,8 +136,8 @@
return;
}
- validateRequests(preparedModel, request);
- validateBurst(preparedModel, request);
+ validateRequests(preparedModel, requests);
+ validateBurst(preparedModel, requests);
}
sp<IPreparedModel> getPreparedModel_1_2(
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 36e73a4..8d1acbe 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -72,7 +72,7 @@
// Tag for the validation tests
class ValidationTest : public NeuralnetworksHidlTest {
protected:
- void validateEverything(const Model& model, const std::vector<Request>& request);
+ void validateEverything(const Model& model, const std::vector<Request>& requests);
private:
void validateModel(const Model& model);
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index 730d969..a3073ac 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -19,19 +19,25 @@
#define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
+namespace {
+const RadioAccessSpecifier GERAN_SPECIFIER_P900 = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
+ .geranBands = {GeranBands::BAND_P900},
+ .channels = {1, 2}};
+const RadioAccessSpecifier GERAN_SPECIFIER_850 = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
+ .geranBands = {GeranBands::BAND_850},
+ .channels = {128, 129}};
+} // namespace
+
/*
* Test IRadio.startNetworkScan() for the response returned.
*/
TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT, .interval = 60, .specifiers = {specifier}};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850}};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -89,18 +95,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 4,
- .specifiers = {specifier},
- .maxSearchTime = 60,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 1};
+ .type = ScanType::ONE_SHOT,
+ .interval = 4,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 60,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -126,18 +127,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 301,
- .specifiers = {specifier},
- .maxSearchTime = 60,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 1};
+ .type = ScanType::ONE_SHOT,
+ .interval = 301,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 60,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -163,18 +159,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- .maxSearchTime = 59,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 1};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 59,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -200,18 +191,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- .maxSearchTime = 3601,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 1};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 3601,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -237,18 +223,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- .maxSearchTime = 600,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 0};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 600,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 0};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -274,18 +255,13 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- .maxSearchTime = 600,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 11};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 600,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 11};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -311,20 +287,15 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- // Some vendor may not support max search time of 360s.
- // This issue is tracked in b/112205669.
- .maxSearchTime = 300,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 10};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ // Some vendors may not support a max search time of 360s.
+ // This issue is tracked in b/112205669.
+ .maxSearchTime = 300,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 10};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -350,21 +321,16 @@
TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
serial = GetRandomSerialNumber();
- RadioAccessSpecifier specifier = {
- .radioAccessNetwork = RadioAccessNetworks::GERAN,
- .geranBands = {GeranBands::BAND_450, GeranBands::BAND_480},
- .channels = {1,2}};
-
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT,
- .interval = 60,
- .specifiers = {specifier},
- // Some vendor may not support max search time of 360s.
- // This issue is tracked in b/112205669.
- .maxSearchTime = 300,
- .incrementalResults = false,
- .incrementalResultsPeriodicity = 10,
- .mccMncs = {"310410"}};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ // Some vendors may not support a max search time of 360s.
+ // This issue is tracked in b/112205669.
+ .maxSearchTime = 300,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 10,
+ .mccMncs = {"310410"}};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
@@ -757,6 +723,7 @@
// Check the mcc [0, 999] and mnc [0, 999].
string hidl_mcc;
string hidl_mnc;
+ bool checkMccMnc = true;
int totalIdentitySizeExpected = 1;
::android::hardware::radio::V1_2::CellIdentity cellIdentities =
radioRsp_v1_2->dataRegResp.cellIdentity;
@@ -765,6 +732,7 @@
if (cellInfoType == CellInfoType::NONE) {
// All the fields are 0
totalIdentitySizeExpected = 0;
+ checkMccMnc = false;
} else if (cellInfoType == CellInfoType::GSM) {
EXPECT_EQ(1, cellIdentities.cellIdentityGsm.size());
::android::hardware::radio::V1_2::CellIdentityGsm cig = cellIdentities.cellIdentityGsm[0];
@@ -791,6 +759,7 @@
// CellIdentityCdma has no mcc and mnc.
EXPECT_EQ(CellInfoType::CDMA, cellInfoType);
EXPECT_EQ(1, cellIdentities.cellIdentityCdma.size());
+ checkMccMnc = false;
}
// Check only one CellIdentity is size 1, and others must be 0.
@@ -799,10 +768,13 @@
cellIdentities.cellIdentityLte.size() + cellIdentities.cellIdentityWcdma.size() +
cellIdentities.cellIdentityTdscdma.size());
- int mcc = stoi(hidl_mcc);
- int mnc = stoi(hidl_mnc);
- EXPECT_TRUE(mcc >= 0 && mcc <= 999);
- EXPECT_TRUE(mnc >= 0 && mnc <= 999);
+ // A 32-bit system might return an invalid mcc/mnc hidl string such as "\xff\xff..."
+ if (checkMccMnc && hidl_mcc.size() < 4 && hidl_mnc.size() < 4) {
+ int mcc = stoi(hidl_mcc);
+ int mnc = stoi(hidl_mnc);
+ EXPECT_TRUE(mcc >= 0 && mcc <= 999);
+ EXPECT_TRUE(mnc >= 0 && mnc <= 999);
+ }
}
/*
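
The guard above skips the stoi-based range check when the returned mcc/mnc strings are implausibly long (some 32-bit builds return garbage such as "\xff\xff..."). Below is a slightly stricter, standalone sketch of this kind of defensive parse; the helper name is illustrative and not part of the VTS code:

    #include <string>

    // Returns true only when the string is a plausible 1-3 digit MCC/MNC value.
    static bool isValidMccMnc(const std::string& s) {
        if (s.empty() || s.size() >= 4) return false;
        for (char c : s) {
            if (c < '0' || c > '9') return false;  // reject non-numeric garbage before stoi
        }
        const int value = std::stoi(s);
        return value >= 0 && value <= 999;
    }
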
diff --git a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
index 030f489..5b7a06d 100644
--- a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
@@ -33,9 +33,9 @@
EXPECT_EQ(serial, radioRsp_v1_3->rspInfo.serial);
ALOGI("getModemStackStatus, rspInfo.error = %s\n",
toString(radioRsp_v1_3->rspInfo.error).c_str());
- ASSERT_TRUE(CheckAnyOfErrors(
- radioRsp_v1_3->rspInfo.error,
- {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::MODEM_ERR}));
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_3->rspInfo.error,
+ {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE,
+ RadioError::MODEM_ERR, RadioError::REQUEST_NOT_SUPPORTED}));
// checking if getModemStackStatus returns true, as modem was enabled above
if (RadioError::NONE == radioRsp_v1_3->rspInfo.error) {
@@ -50,9 +50,9 @@
EXPECT_EQ(serial, radioRsp_v1_3->rspInfo.serial);
ALOGI("getModemStackStatus, rspInfo.error = %s\n",
toString(radioRsp_v1_3->rspInfo.error).c_str());
- ASSERT_TRUE(CheckAnyOfErrors(
- radioRsp_v1_3->rspInfo.error,
- {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE, RadioError::MODEM_ERR}));
+ ASSERT_TRUE(CheckAnyOfErrors(radioRsp_v1_3->rspInfo.error,
+ {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE,
+ RadioError::MODEM_ERR, RadioError::REQUEST_NOT_SUPPORTED}));
// verify that enableModem did set isEnabled correctly
EXPECT_EQ(true, radioRsp_v1_3->isModemEnabled);
}
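
The last two hunks widen the set of errors the assertions accept to include REQUEST_NOT_SUPPORTED. Conceptually such checks reduce to a set-membership test; a hypothetical sketch of that idea (not the actual CheckAnyOfErrors helper, whose real signature may differ):

    #include <algorithm>
    #include <initializer_list>

    enum class RadioError { NONE, RADIO_NOT_AVAILABLE, MODEM_ERR, REQUEST_NOT_SUPPORTED };

    // Hypothetical stand-in: returns true when the reported error is one of the
    // errors the test is willing to accept.
    static bool errorIsOneOf(RadioError actual, std::initializer_list<RadioError> allowed) {
        return std::any_of(allowed.begin(), allowed.end(),
                           [actual](RadioError e) { return e == actual; });
    }

    // Usage mirroring the assertions above:
    //   ASSERT_TRUE(errorIsOneOf(rspError,
    //                            {RadioError::NONE, RadioError::RADIO_NOT_AVAILABLE,
    //                             RadioError::MODEM_ERR, RadioError::REQUEST_NOT_SUPPORTED}));
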