Merge "Use safe_union in FieldSupportedValues"
diff --git a/audio/5.0/IStreamIn.hal b/audio/5.0/IStreamIn.hal
index b042960..e15b034 100644
--- a/audio/5.0/IStreamIn.hal
+++ b/audio/5.0/IStreamIn.hal
@@ -169,6 +169,10 @@
/**
* Specifies the logical microphone (for processing).
*
+ * If the feature is not supported, an error should be returned.
+ * If multiple microphones are present, this should be treated as a preference
+ * for their combined direction.
+ *
* Optional method
*
* @param Direction constant
@@ -180,6 +184,10 @@
/**
* Specifies the zoom factor for the selected microphone (for processing).
*
+ * If the feature is not supported, an error should be returned.
+ * If multiple microphones are present, this should be treated as a preference
+ * for their combined field dimension.
+ *
* Optional method
*
* @param the desired field dimension of microphone capture. Range is from -1 (wide angle),
diff --git a/audio/common/5.0/types.hal b/audio/common/5.0/types.hal
index 0cbf35e..e1279ee 100644
--- a/audio/common/5.0/types.hal
+++ b/audio/common/5.0/types.hal
@@ -146,6 +146,7 @@
*/
ECHO_REFERENCE = 1997,
FM_TUNER = 1998,
+ HOTWORD = 1999,
};
typedef int32_t AudioSession;
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index 435e19c..b04d096 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -1139,7 +1139,6 @@
* Indicates which units the car is using to display fuel volume to the user. Eg. Liter or
* Gallon.
*
- * Distance units are defined in VehicleUnit.
* VehiclePropConfig.configArray is used to indicate the supported fuel volume display units.
* Volume units are defined in VehicleUnit.
* For example: configArray[0] = 0x41 // LITER
@@ -1160,7 +1159,6 @@
* Indicates which units the car is using to display tire pressure to the user. Eg. PSI, Bar or
* Kilopascal.
*
- * Distance units are defined in VehicleUnit.
* VehiclePropConfig.configArray is used to indicate the supported pressure display units.
* Pressure units are defined in VehicleUnit.
* For example: configArray[0] = 0x70 // KILOPASCAL
@@ -1182,7 +1180,6 @@
* Indicates which units the car is using to display EV battery information to the user. Eg.
* watt-hours(Wh), kilowatt-hours(kWh) or ampere-hours(Ah).
*
- * Distance units are defined in VehicleUnit.
* VehiclePropConfig.configArray is used to indicate the supported electrical energy units.
* Electrical energy units are defined in VehicleUnit.
* For example: configArray[0] = 0x60 // watt-hours
@@ -1199,6 +1196,22 @@
| VehicleArea:GLOBAL),
/**
+ * Fuel consumption units for display
+ *
+ * Indicates the type of units the car is using to display fuel consumption information to the user.
+ * True indicates units are distance over volume such as MPG.
+ * False indicates units are volume over distance such as L/100KM.
+ *
+ * @change_mode VehiclePropertyChangeMode:ON_CHANGE
+ * @access VehiclePropertyAccess:READ_WRITE
+ */
+ FUEL_CONSUMPTION_UNITS_DISTANCE_OVER_VOLUME = (
+ 0x0604
+ | VehiclePropertyGroup:SYSTEM
+ | VehiclePropertyType:BOOLEAN
+ | VehicleArea:GLOBAL),
+
+ /**
* Outside temperature
*
* @change_mode VehiclePropertyChangeMode:CONTINUOUS
@@ -2588,7 +2601,11 @@
KELVIN = 0x32,
MILLILITER = 0x40,
LITER = 0x41,
+
+ /** Deprecated. Use US_GALLON instead. */
GALLON = 0x42,
+ US_GALLON = 0x42,
+ IMPERIAL_GALLON = 0x43,
NANO_SECS = 0x50,
SECS = 0x53,
YEAR = 0x59,
diff --git a/broadcastradio/2.0/default/BroadcastRadio.cpp b/broadcastradio/2.0/default/BroadcastRadio.cpp
index 28a0dd5..88a726f 100644
--- a/broadcastradio/2.0/default/BroadcastRadio.cpp
+++ b/broadcastradio/2.0/default/BroadcastRadio.cpp
@@ -49,6 +49,7 @@
static_cast<uint32_t>(IdentifierType::AMFM_FREQUENCY),
static_cast<uint32_t>(IdentifierType::RDS_PI),
static_cast<uint32_t>(IdentifierType::HD_STATION_ID_EXT),
+ static_cast<uint32_t>(IdentifierType::DAB_SID_EXT),
});
prop.vendorInfo = hidl_vec<VendorKeyValue>({
{"com.google.dummy", "dummy"},
diff --git a/broadcastradio/2.0/default/VirtualRadio.cpp b/broadcastradio/2.0/default/VirtualRadio.cpp
index 0b65979..c59fd8f 100644
--- a/broadcastradio/2.0/default/VirtualRadio.cpp
+++ b/broadcastradio/2.0/default/VirtualRadio.cpp
@@ -28,6 +28,7 @@
using std::mutex;
using std::vector;
using utils::make_selector_amfm;
+using utils::make_selector_dab;
VirtualRadio gAmFmRadio(
"AM/FM radio mock",
@@ -41,6 +42,16 @@
{make_selector_amfm(106100), "106 KMEL", "Drake", "Marvins Room"},
});
+// clang-format off
+VirtualRadio gDabRadio(
+ "DAB radio mock",
+ {
+ {make_selector_dab(12345, 225648), "BBC Radio 1", "Khalid", "Talk"}, // 12B
+ {make_selector_dab(22345, 222064), "Classic FM", "Jean Sibelius", "Andante Festivo"}, // 11D
+ {make_selector_dab(32345, 222064), "Absolute Radio", "Coldplay", "Clocks"}, // 11D
+ });
+// clang-format on
+
VirtualRadio::VirtualRadio(const std::string& name, const vector<VirtualProgram>& initialList)
: mName(name), mPrograms(initialList) {}
diff --git a/broadcastradio/2.0/default/VirtualRadio.h b/broadcastradio/2.0/default/VirtualRadio.h
index 9c07816..6fa70c5 100644
--- a/broadcastradio/2.0/default/VirtualRadio.h
+++ b/broadcastradio/2.0/default/VirtualRadio.h
@@ -52,6 +52,9 @@
/** AM/FM virtual radio space. */
extern VirtualRadio gAmFmRadio;
+/** DAB virtual radio space. */
+extern VirtualRadio gDabRadio;
+
} // namespace implementation
} // namespace V2_0
} // namespace broadcastradio
diff --git a/broadcastradio/2.0/default/service.cpp b/broadcastradio/2.0/default/service.cpp
index af96dad..349aba2 100644
--- a/broadcastradio/2.0/default/service.cpp
+++ b/broadcastradio/2.0/default/service.cpp
@@ -23,6 +23,7 @@
using android::hardware::joinRpcThreadpool;
using android::hardware::broadcastradio::V2_0::implementation::BroadcastRadio;
using android::hardware::broadcastradio::V2_0::implementation::gAmFmRadio;
+using android::hardware::broadcastradio::V2_0::implementation::gDabRadio;
int main() {
android::base::SetDefaultTag("BcRadioDef");
@@ -30,8 +31,13 @@
configureRpcThreadpool(4, true);
BroadcastRadio broadcastRadio(gAmFmRadio);
- auto status = broadcastRadio.registerAsService();
- CHECK_EQ(status, android::OK) << "Failed to register Broadcast Radio HAL implementation";
+ auto amFmStatus = broadcastRadio.registerAsService("amfm");
+ CHECK_EQ(amFmStatus, android::OK)
+ << "Failed to register Broadcast Radio AM/FM HAL implementation";
+
+ BroadcastRadio dabRadio(gDabRadio);
+ auto dabStatus = dabRadio.registerAsService("dab");
+ CHECK_EQ(dabStatus, android::OK) << "Failed to register Broadcast Radio DAB HAL implementation";
joinRpcThreadpool();
return 1; // joinRpcThreadpool shouldn't exit
diff --git a/broadcastradio/common/utils2x/Utils.cpp b/broadcastradio/common/utils2x/Utils.cpp
index 7892653..43f272e 100644
--- a/broadcastradio/common/utils2x/Utils.cpp
+++ b/broadcastradio/common/utils2x/Utils.cpp
@@ -299,6 +299,20 @@
return sel;
}
+ProgramSelector make_selector_dab(uint32_t sidExt, uint32_t ensemble) {
+ ProgramSelector sel = {};
+ // TODO(maryabad): Have a helper function to create the sidExt instead of
+ // passing the whole identifier here. Something like make_dab_sid_ext.
+ sel.primaryId = make_identifier(IdentifierType::DAB_SID_EXT, sidExt);
+ hidl_vec<ProgramIdentifier> secondaryIds = {
+ make_identifier(IdentifierType::DAB_ENSEMBLE, ensemble),
+ // TODO(maryabad): Include frequency here when the helper method to
+ // translate between ensemble and frequency is implemented.
+ };
+ sel.secondaryIds = secondaryIds;
+ return sel;
+}
+
Metadata make_metadata(MetadataKey key, int64_t value) {
Metadata meta = {};
meta.key = static_cast<uint32_t>(key);
diff --git a/broadcastradio/common/utils2x/include/broadcastradio-utils-2x/Utils.h b/broadcastradio/common/utils2x/include/broadcastradio-utils-2x/Utils.h
index c4aecb2..f4e0732 100644
--- a/broadcastradio/common/utils2x/include/broadcastradio-utils-2x/Utils.h
+++ b/broadcastradio/common/utils2x/include/broadcastradio-utils-2x/Utils.h
@@ -126,6 +126,7 @@
V2_0::ProgramIdentifier make_identifier(V2_0::IdentifierType type, uint64_t value);
V2_0::ProgramSelector make_selector_amfm(uint32_t frequency);
+V2_0::ProgramSelector make_selector_dab(uint32_t sidExt, uint32_t ensemble);
V2_0::Metadata make_metadata(V2_0::MetadataKey key, int64_t value);
V2_0::Metadata make_metadata(V2_0::MetadataKey key, std::string value);
diff --git a/current.txt b/current.txt
index b818922..96c1bbf 100644
--- a/current.txt
+++ b/current.txt
@@ -416,11 +416,11 @@
0a911297821854985cfcdb17b63d7948af0f0f51ce8c68cc86367c185bbc772e android.hardware.audio@5.0::IDevicesFactory
ce2e8c6c8559fd42bd69e0dee27b4d9c93cd9b2eff487b4e6b6395b6a1a993d6 android.hardware.audio@5.0::IPrimaryDevice
4a4e5e5d9357004a1256bde8d36010ee00c51cea811a1c1e0dd969a9fc0bf862 android.hardware.audio@5.0::IStream
-e05e48c583de14c1e5a6fa9d48ea50244e3e0924b76b342374e7471dc8007ba9 android.hardware.audio@5.0::IStreamIn
+b9d41ff4031266de1ecef394a8a64de7d857634dd08dc6be855fca2fe3075975 android.hardware.audio@5.0::IStreamIn
9471b12b1c255bb530695720bc4174bd74987b75b1f820854af8944bc8c215c9 android.hardware.audio@5.0::IStreamOut
1b0500367ed2b32a841667ac3200edf3d3a164e8004aca445ff1b085ac831e93 android.hardware.audio@5.0::IStreamOutCallback
83e365479cc77d8717c155e1787ee668cd2ae4c557b467cf75b8e7cd53697ad8 android.hardware.audio@5.0::types
-a0df6961e65444e1ca40a206d7f31304d313e8b7e5b122855e3272ab02720cd4 android.hardware.audio.common@5.0::types
+07d17800b298331e90d4ea5d8ba19a1ae3fe9c1dbff08d9f75fd3ade09496d67 android.hardware.audio.common@5.0::types
f269297866765b95ddd1825676cc8a772f0c7c9863286df596fc302781a42ff5 android.hardware.audio.effect@5.0::IAcousticEchoCancelerEffect
fa187b602d8939644ef708ed7627f2e3deac97899a4bda1de07f2ff126abe243 android.hardware.audio.effect@5.0::IAutomaticGainControlEffect
e1bf864ccb8458c0da1dcc74a2e748b1dca8ac360df590591cf82d98292d7981 android.hardware.audio.effect@5.0::IBassBoostEffect
@@ -464,12 +464,15 @@
7f460e795f5d1ed5e378935f98c6db4d39497de988aef1b4c2a4a07a6c400392 android.hardware.gnss@2.0::IAGnss
2e5ad983734069e84a760004b32da0d09e4170c05380abe27e6eb80e4aa70d5a android.hardware.gnss@2.0::IAGnssCallback
1f4ac068a88a72360280d94a7f6fd7c63813c1eea4891a0eb01394d3e7e775f2 android.hardware.gnss@2.0::IAGnssRil
-6e2f9a44375a0ae0b49ca7d711cb88945189d398535078408269e1e85889061d android.hardware.gnss@2.0::IGnss
-782dfc724272f279985de348c824197357941382f73c0083f0344d8ec594d2a8 android.hardware.gnss@2.0::IGnssCallback
+4deafcdcffa2d002119e7f58810b767a84666e76475aae68e757ec2845d9756d android.hardware.gnss@2.0::IGnss
+db6bdf6dfc5edf6c85d2944976db899227abb51079c893874353c322342c50b6 android.hardware.gnss@2.0::IGnssBatching
+1f89392f1ebb693d8fa6f50324b1635fc79fab246d31900e63998e1b0e17511c android.hardware.gnss@2.0::IGnssBatchingCallback
+b11a5e4a1602d3f408716b6fe2c578a79f060d571aad8e828f9a4426d161fbcf android.hardware.gnss@2.0::IGnssCallback
ecc966c68bddbd95c8dae782b84204cf01c75734675e8769963f3b5106ec128b android.hardware.gnss@2.0::IGnssConfiguration
+b670bae2ab8517336290532e364502b4db9120340d75474ccc8442b1b15d6ab7 android.hardware.gnss@2.0::IGnssDebug
c67759f5d6387d273b66729180d03690e827f0b6b8d4e13ce2ff42d31b224065 android.hardware.gnss@2.0::IGnssMeasurement
-3dd30a3ca77ef5ab109a55ba603ff816ae5019436886093dccf8fd6a068f85f1 android.hardware.gnss@2.0::IGnssMeasurementCallback
-4bcd767dd05304b4722c6521c7ed8d4a05faf6022f228f2c088379c647871f7c android.hardware.gnss@2.0::types
+15e09903748857f4beb5f485784606931fa5a6277cd070baa6d584df485b7948 android.hardware.gnss@2.0::IGnssMeasurementCallback
+a49c973f21ddf41bc402de55d7c8dffacf4dce06b0bbca4f5ffd3b09a471317e android.hardware.gnss@2.0::types
d4cc8d91930d5a1a62deb0d97d398510a115ce3ede2d2978738651b9d01b11c3 android.hardware.gnss.measurement_corrections@1.0::IMeasurementCorrections
3eec9763db9b101644f14175b77c9954047445a468e9c743fd402d472d4aa97e android.hardware.gnss.measurement_corrections@1.0::IMeasurementCorrectionsCallback
6ef12cd95df73f8f80c25eb035d98ca4594f9cee571fdabea838a0b6016dd908 android.hardware.gnss.measurement_corrections@1.0::types
@@ -505,11 +508,11 @@
7d3c292ca75ec3e22a8fd4ae72d2edb0659d280257e763786e766f3429954dd1 android.hardware.media.c2@1.0::types
4880af120fc1640225abdc2c60bda6d79617d73484d5124913c7278af3b11e2d android.hardware.neuralnetworks@1.2::IBurstCallback
19877e466ad8c6ed42b38050b77bd010cf7800ff365fdc8574f45bbfda03a758 android.hardware.neuralnetworks@1.2::IBurstContext
-96249c852dabeefa3a9496ecdfc44681a071c665bfbf88527bf775c88bf1ab1b android.hardware.neuralnetworks@1.2::IDevice
+b83317b66721241887d2770b5ae95fd5af1e77c5daa7530ecb08fae8892f2b43 android.hardware.neuralnetworks@1.2::IDevice
92714960d1a53fc2ec557302b41c7cc93d2636d8364a44bd0f85be0c92927ff8 android.hardware.neuralnetworks@1.2::IExecutionCallback
-83885d366f22ada42c00d8854f0b7e7ba4cf73ddf80bb0d8e168ce132cec57ea android.hardware.neuralnetworks@1.2::IPreparedModel
+36e1064c869965dee533c537cefbe87e54db8bd8cd45be7e0e93e00e8a43863a android.hardware.neuralnetworks@1.2::IPreparedModel
e1c734d1545e1a4ae749ff1dd9704a8e594c59aea7c8363159dc258e93e0df3b android.hardware.neuralnetworks@1.2::IPreparedModelCallback
-114056b3b9303e0e858f28e718ba45722de5678d1d54eec0dcd10788604bf2bb android.hardware.neuralnetworks@1.2::types
+209a5ee694b94328afb2af2768f1fe6a69148e2cbb85ec3c340a36eed818c697 android.hardware.neuralnetworks@1.2::types
cf7a4ba516a638f9b82a249c91fb603042c2d9ca43fd5aad9cf6c0401ed2a5d7 android.hardware.nfc@1.2::INfc
abf98c2ae08bf765db54edc8068e36d52eb558cff6706b6fd7c18c65a1f3fc18 android.hardware.nfc@1.2::types
4cb252dc6372a874aef666b92a6e9529915aa187521a700f0789065c3c702ead android.hardware.power.stats@1.0::IPowerStats
@@ -541,9 +544,10 @@
61bc302e7c974c59b25898c585c6e9685e8a81021b1bed3eedf5224198f2785a android.hardware.usb@1.2::IUsb
46996cd2a1c66261a75a1f6ecada77eeb5861eb264fa39b996548fe0a7f22dd3 android.hardware.usb@1.2::IUsbCallback
3bbaa8cbc5d6b1da21f5509b2b641e05fc7eeca1354751eb1bb3cf37f89aa32f android.hardware.usb@1.2::types
-92c1a726c80970d623b891f7c2f9a989a40a15ee1244092b49f4eb6adcdce4e9 android.hardware.vibrator@1.3::IVibrator
+0f7ff73793548d5154014059b7e0fe9ef6355d32218ace157954d02055f5248b android.hardware.vibrator@1.3::IVibrator
+2e313dc27a1327a29862ab3e085917f75c9e996f7c8df5a0ce37b9a0ed076b80 android.hardware.vibrator@1.3::types
f19832856a3f53ced5ef91d3cc630a57fb7f4d4ce15f364dbed09099b89f6830 android.hardware.wifi@1.3::IWifi
-7c6799c19bfdb3dec016b751556fe246cf7d37191ee7bb82a0091ab9fbf6f2fb android.hardware.wifi@1.3::IWifiChip
+64be084b6e1ef330b75fa916593dc0b94b0ec7a16d5cfaa5a31e6c9143c8288d android.hardware.wifi@1.3::IWifiChip
3bef30e8b61ab050c0f6fd26572712be5ebb7707d624c9aa6c74bbb9d6a5b4a9 android.hardware.wifi@1.3::IWifiStaIface
f3dbd8dd0d6333c005610288a4785d0ef79a72a7bbe6d0a46d46fa89fc886f1e android.hardware.wifi@1.3::types
2fae61e962f68091335f7ff4581fcfe2e28ce7f6132d7a712fa13d7965543e4d android.hardware.wifi.hostapd@1.1::IHostapd
diff --git a/gnss/2.0/Android.bp b/gnss/2.0/Android.bp
index c01ec55..6cfd346 100644
--- a/gnss/2.0/Android.bp
+++ b/gnss/2.0/Android.bp
@@ -12,8 +12,11 @@
"IAGnssCallback.hal",
"IAGnssRil.hal",
"IGnss.hal",
+ "IGnssBatching.hal",
+ "IGnssBatchingCallback.hal",
"IGnssCallback.hal",
"IGnssConfiguration.hal",
+ "IGnssDebug.hal",
"IGnssMeasurement.hal",
"IGnssMeasurementCallback.hal",
],
diff --git a/gnss/2.0/IGnss.hal b/gnss/2.0/IGnss.hal
index 2c149b7..f19f8d0 100644
--- a/gnss/2.0/IGnss.hal
+++ b/gnss/2.0/IGnss.hal
@@ -23,9 +23,11 @@
import GnssLocation;
import IGnssCallback;
import IGnssConfiguration;
+import IGnssDebug;
import IGnssMeasurement;
import IAGnss;
import IAGnssRil;
+import IGnssBatching;
/**
* Represents the standard GNSS (Global Navigation Satellite System) interface.
@@ -55,6 +57,13 @@
getExtensionGnssConfiguration_2_0() generates (IGnssConfiguration gnssConfigurationIface);
/**
+ * This method returns the IGnssDebug interface.
+ *
+ * @return gnssDebugIface Handle to the IGnssDebug interface.
+ */
+ getExtensionGnssDebug_2_0() generates (IGnssDebug gnssDebugIface);
+
+ /**
* This method returns the IAGnss Interface.
*
* The getExtensionAGnss() must return nullptr as the @1.0::IAGnss interface is
@@ -97,6 +106,13 @@
getExtensionVisibilityControl() generates (IGnssVisibilityControl visibilityControlIface);
/**
+ * This method returns the IGnssBatching interface.
+ *
+ * @return batchingIface Handle to the IGnssBatching interface.
+ */
+ getExtensionGnssBatching_2_0() generates (IGnssBatching batchingIface);
+
+ /**
* Injects current location from the best available location provider.
*
* Unlike injectLocation, this method may inject a recent GNSS location from the HAL
diff --git a/gnss/2.0/IGnssBatching.hal b/gnss/2.0/IGnssBatching.hal
new file mode 100644
index 0000000..961fa69
--- /dev/null
+++ b/gnss/2.0/IGnssBatching.hal
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.gnss@2.0;
+
+import @1.0::IGnssBatching;
+import IGnssBatchingCallback;
+
+/**
+ * Extended interface for GNSS Batching support.
+ *
+ * If this interface is supported, this batching request must be able to run in
+ * parallel with, or without, non-batched location requested by the
+ * IGnss start() & stop() - i.e. both requests must be handled independently,
+ * and not interfere with each other.
+ *
+ * For example, if a 1Hz continuous output is underway on the IGnssCallback,
+ * due to an IGnss start() operation,
+ * and then a IGnssBatching start() is called for a location every 10
+ * seconds, the newly added batching request must not disrupt the 1Hz
+ * continuous location output on the IGnssCallback.
+ *
+ * As with GNSS Location outputs, source of location must be GNSS satellite
+ * measurements, optionally using inertial and baro sensors to improve
+ * relative motion filtering. No additional absolute positioning information,
+ * such as WiFi derived location, may be mixed with the GNSS information.
+ */
+interface IGnssBatching extends @1.0::IGnssBatching {
+ /**
+ * Opens the interface and provides the callback routines
+ * to the implementation of this interface.
+ *
+ * @param callback Callback interface for IGnssBatching.
+ *
+ * @return success Returns true on success.
+ */
+ init_2_0(IGnssBatchingCallback callback) generates (bool success);
+};
diff --git a/gnss/2.0/IGnssBatchingCallback.hal b/gnss/2.0/IGnssBatchingCallback.hal
new file mode 100644
index 0000000..4f8b4ec
--- /dev/null
+++ b/gnss/2.0/IGnssBatchingCallback.hal
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.gnss@2.0;
+
+/** The callback interface to report measurements from the HAL. */
+interface IGnssBatchingCallback {
+ /**
+ * Called when a batch of locations is output, by various means, including
+ * a flush request, as well as the buffer becoming full (if appropriate option
+ * is set.)
+ *
+ * All locations returned by this callback must be cleared from the hardware
+ * buffer, such that sequential calls of this callback do not return any
+ * redundant locations. (Same lat/lon, at a new time, is acceptable.)
+ *
+ * The GnssLocation struct in gnss@2.0 is extended to include elapsed realtime
+ * information.
+ *
+ * @param locations GNSS Location information from HAL.
+ */
+ gnssLocationBatchCb(vec<GnssLocation> locations);
+};
diff --git a/gnss/2.0/IGnssCallback.hal b/gnss/2.0/IGnssCallback.hal
index 4c31cf5..a96fd6c 100644
--- a/gnss/2.0/IGnssCallback.hal
+++ b/gnss/2.0/IGnssCallback.hal
@@ -19,6 +19,7 @@
import @1.0::IGnssCallback;
import @1.1::IGnssCallback;
import GnssLocation;
+import GnssConstellationType;
/**
* This interface is required for the HAL to communicate certain information
@@ -94,4 +95,26 @@
* during-call to E911, or up to 5 minutes after end-of-call or text to E911).
*/
gnssRequestLocationCb_2_0(bool independentFromGnss, bool isUserEmergency);
+
+ /** Extends a GnssSvInfo, replacing the GnssConstellationType. */
+ struct GnssSvInfo {
+ /**
+ * GNSS satellite information for a single satellite and frequency.
+ *
+ * In this version of the HAL, the field 'constellation' in the v1_0 struct is deprecated,
+ * and is no longer used by the framework. The constellation type is instead reported in
+ * @2.0::IGnssCallback.GnssSvInfo.constellation.
+ */
+ @1.0::IGnssCallback.GnssSvInfo v1_0;
+
+ /** Defines the constellation of the given SV. */
+ GnssConstellationType constellation;
+ };
+
+ /**
+ * Callback for the HAL to pass a vector of GnssSvInfo back to the client.
+ *
+ * @param svInfo SV status information from HAL.
+ */
+ gnssSvStatusCb_2_0(vec<GnssSvInfo> svInfoList);
};
diff --git a/gnss/2.0/IGnssDebug.hal b/gnss/2.0/IGnssDebug.hal
new file mode 100644
index 0000000..a3138ba
--- /dev/null
+++ b/gnss/2.0/IGnssDebug.hal
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.gnss@2.0;
+
+import @1.0::IGnssDebug;
+
+/** Extended interface for DEBUG support. */
+interface IGnssDebug extends @1.0::IGnssDebug {
+
+ /** Extending SatelliteData, replacing the GnssConstellationType. */
+ struct SatelliteData {
+ /**
+ * GNSS Satellite info.
+ *
+ * In this version of the HAL, the field 'constellation' in the v1_0 struct is deprecated,
+ * and is no longer used by the framework. The constellation type is instead reported in
+ * @2.0::IGnssDebug.SatelliteData.constellation.
+ */
+ @1.0::IGnssDebug.SatelliteData v1_0;
+
+ /** Defines the constellation type of the given SV. */
+ GnssConstellationType constellation;
+ };
+
+ /**
+ * Provides a set of debug information that is filled by the GNSS chipset when the method
+ * getDebugData() is invoked.
+ */
+ struct DebugData {
+ /** Current best known position. */
+ @1.0::IGnssDebug.PositionDebug position;
+
+ /** Current best known time estimate. */
+ @1.0::IGnssDebug.TimeDebug time;
+
+ /**
+ * Provides a list of the available satellite data, for all
+ * satellites and constellations the device can track,
+ * including GnssConstellationType UNKNOWN.
+ */
+ vec<SatelliteData> satelliteDataArray;
+ };
+
+ /**
+ * This method requests position, time and satellite ephemeris debug information from the HAL.
+ *
+ * @return ret debugData information from GNSS Hal that contains the current best known
+ * position, best known time estimate and a complete list of constellations that the device can
+ * track.
+ */
+ getDebugData_2_0() generates (DebugData debugData);
+};
diff --git a/gnss/2.0/IGnssMeasurementCallback.hal b/gnss/2.0/IGnssMeasurementCallback.hal
index d9751d3..e055f7a 100644
--- a/gnss/2.0/IGnssMeasurementCallback.hal
+++ b/gnss/2.0/IGnssMeasurementCallback.hal
@@ -19,6 +19,7 @@
import @1.0::IGnssMeasurementCallback;
import @1.1::IGnssMeasurementCallback;
import ElapsedRealtime;
+import GnssConstellationType;
/** The callback interface to report measurements from the HAL. */
interface IGnssMeasurementCallback extends @1.1::IGnssMeasurementCallback {
@@ -365,7 +366,8 @@
};
/**
- * Extends a GNSS Measurement, adding a GnssMeasurementCodeType.
+ * Extends a GNSS Measurement, adding a GnssMeasurementCodeType, a GnssMeasurementState, and
+ * replacing the GnssConstellationType.
*/
struct GnssMeasurement {
/**
@@ -380,6 +382,10 @@
* In this version of the HAL, the field 'state' in the v1_1.v1_0 struct is deprecated, and
* is no longer used by the framework. The satellite sync state is instead reported in
* @2.0::IGnssMeasurementCallback.GnssMeasurement.state.
+ *
+ * In this version of the HAL, the field 'constellation' in the v1_1.v1_0 struct is
+ * deprecated, and is no longer used by the framework. The constellation type is instead
+ * reported in @2.0::IGnssMeasurementCallback.GnssMeasurement.constellation.
*/
@1.1::IGnssMeasurementCallback.GnssMeasurement v1_1;
@@ -442,6 +448,11 @@
* This value is mandatory.
*/
bitfield<GnssMeasurementState> state;
+
+ /**
+ * The constellation type of the GNSS measurement.
+ */
+ GnssConstellationType constellation;
};
/**
diff --git a/gnss/2.0/default/Android.bp b/gnss/2.0/default/Android.bp
index 64187e2..0fcd764 100644
--- a/gnss/2.0/default/Android.bp
+++ b/gnss/2.0/default/Android.bp
@@ -25,6 +25,7 @@
"AGnss.cpp",
"AGnssRil.cpp",
"Gnss.cpp",
+ "GnssBatching.cpp",
"GnssMeasurement.cpp",
"GnssMeasurementCorrections.cpp",
"GnssVisibilityControl.cpp",
diff --git a/gnss/2.0/default/Gnss.cpp b/gnss/2.0/default/Gnss.cpp
index 1dfdadb..75c2385 100644
--- a/gnss/2.0/default/Gnss.cpp
+++ b/gnss/2.0/default/Gnss.cpp
@@ -23,6 +23,7 @@
#include "AGnss.h"
#include "AGnssRil.h"
+#include "GnssBatching.h"
#include "GnssConfiguration.h"
#include "GnssMeasurement.h"
#include "GnssMeasurementCorrections.h"
@@ -236,6 +237,11 @@
return new GnssConfiguration{};
}
+Return<sp<V2_0::IGnssDebug>> Gnss::getExtensionGnssDebug_2_0() {
+ // TODO(b/124012850): Implement function.
+ return sp<V2_0::IGnssDebug>{};
+}
+
Return<sp<V2_0::IAGnss>> Gnss::getExtensionAGnss_2_0() {
return new AGnss{};
}
@@ -260,6 +266,10 @@
return new GnssVisibilityControl();
}
+Return<sp<V2_0::IGnssBatching>> Gnss::getExtensionGnssBatching_2_0() {
+ return new GnssBatching();
+}
+
Return<bool> Gnss::setCallback_2_0(const sp<V2_0::IGnssCallback>& callback) {
ALOGD("Gnss::setCallback_2_0");
if (callback == nullptr) {
diff --git a/gnss/2.0/default/Gnss.h b/gnss/2.0/default/Gnss.h
index f02ab0a..72f7797 100644
--- a/gnss/2.0/default/Gnss.h
+++ b/gnss/2.0/default/Gnss.h
@@ -83,6 +83,7 @@
// Methods from V2_0::IGnss follow.
Return<sp<V2_0::IGnssConfiguration>> getExtensionGnssConfiguration_2_0() override;
+ Return<sp<V2_0::IGnssDebug>> getExtensionGnssDebug_2_0() override;
Return<sp<V2_0::IAGnss>> getExtensionAGnss_2_0() override;
Return<sp<V2_0::IAGnssRil>> getExtensionAGnssRil_2_0() override;
Return<sp<V2_0::IGnssMeasurement>> getExtensionGnssMeasurement_2_0() override;
@@ -91,6 +92,7 @@
getExtensionMeasurementCorrections() override;
Return<sp<visibility_control::V1_0::IGnssVisibilityControl>> getExtensionVisibilityControl()
override;
+ Return<sp<V2_0::IGnssBatching>> getExtensionGnssBatching_2_0() override;
Return<bool> injectBestLocation_2_0(const V2_0::GnssLocation& location) override;
private:
diff --git a/gnss/2.0/default/GnssBatching.cpp b/gnss/2.0/default/GnssBatching.cpp
new file mode 100644
index 0000000..d56cdfb
--- /dev/null
+++ b/gnss/2.0/default/GnssBatching.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "GnssBatching"
+
+#include "GnssBatching.h"
+
+namespace android {
+namespace hardware {
+namespace gnss {
+namespace V2_0 {
+namespace implementation {
+
+sp<V2_0::IGnssBatchingCallback> GnssBatching::sCallback = nullptr;
+
+// Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow.
+Return<bool> GnssBatching::init(const sp<V1_0::IGnssBatchingCallback>&) {
+ // TODO implement
+ return bool{};
+}
+
+Return<uint16_t> GnssBatching::getBatchSize() {
+ // TODO implement
+ return uint16_t{};
+}
+
+Return<bool> GnssBatching::start(const V1_0::IGnssBatching::Options&) {
+ // TODO implement
+ return bool{};
+}
+
+Return<void> GnssBatching::flush() {
+ // TODO implement
+ return Void();
+}
+
+Return<bool> GnssBatching::stop() {
+ // TODO implement
+ return bool{};
+}
+
+Return<void> GnssBatching::cleanup() {
+ // TODO implement
+ return Void();
+}
+
+// Methods from V2_0::IGnssBatching follow.
+Return<bool> GnssBatching::init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) {
+ sCallback = callback;
+ return true;
+}
+
+} // namespace implementation
+} // namespace V2_0
+} // namespace gnss
+} // namespace hardware
+} // namespace android
diff --git a/gnss/2.0/default/GnssBatching.h b/gnss/2.0/default/GnssBatching.h
new file mode 100644
index 0000000..62ac580
--- /dev/null
+++ b/gnss/2.0/default/GnssBatching.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/gnss/2.0/IGnssBatching.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace gnss {
+namespace V2_0 {
+namespace implementation {
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct GnssBatching : public IGnssBatching {
+ // Methods from ::android::hardware::gnss::V1_0::IGnssBatching follow.
+ Return<bool> init(const sp<V1_0::IGnssBatchingCallback>& callback) override;
+ Return<uint16_t> getBatchSize() override;
+ Return<bool> start(const V1_0::IGnssBatching::Options& options) override;
+ Return<void> flush() override;
+ Return<bool> stop() override;
+ Return<void> cleanup() override;
+
+ // Methods from V2_0::IGnssBatching follow.
+ Return<bool> init_2_0(const sp<V2_0::IGnssBatchingCallback>& callback) override;
+
+ private:
+ static sp<IGnssBatchingCallback> sCallback;
+};
+
+} // namespace implementation
+} // namespace V2_0
+} // namespace gnss
+} // namespace hardware
+} // namespace android
diff --git a/gnss/2.0/default/GnssConfiguration.cpp b/gnss/2.0/default/GnssConfiguration.cpp
index 4389dd2..6bf1712 100644
--- a/gnss/2.0/default/GnssConfiguration.cpp
+++ b/gnss/2.0/default/GnssConfiguration.cpp
@@ -33,13 +33,11 @@
}
Return<bool> GnssConfiguration::setSuplVersion(uint32_t) {
- // TODO implement
- return bool{};
+ return true;
}
Return<bool> GnssConfiguration::setSuplMode(hidl_bitfield<SuplMode>) {
- // TODO implement
- return bool{};
+ return true;
}
Return<bool> GnssConfiguration::setGpsLock(hidl_bitfield<GpsLock> gpsLock) {
@@ -49,18 +47,15 @@
}
Return<bool> GnssConfiguration::setLppProfile(hidl_bitfield<LppProfile>) {
- // TODO implement
- return bool{};
+ return true;
}
Return<bool> GnssConfiguration::setGlonassPositioningProtocol(hidl_bitfield<GlonassPosProtocol>) {
- // TODO implement
- return bool{};
+ return true;
}
Return<bool> GnssConfiguration::setEmergencySuplPdn(bool) {
- // TODO implement
- return bool{};
+ return true;
}
// Methods from ::android::hardware::gnss::V1_1::IGnssConfiguration follow.
diff --git a/gnss/2.0/default/GnssMeasurement.cpp b/gnss/2.0/default/GnssMeasurement.cpp
index a62c2dd..93de89c 100644
--- a/gnss/2.0/default/GnssMeasurement.cpp
+++ b/gnss/2.0/default/GnssMeasurement.cpp
@@ -26,7 +26,7 @@
namespace V2_0 {
namespace implementation {
-using GnssConstellationType = V1_0::GnssConstellationType;
+using GnssConstellationType = V2_0::GnssConstellationType;
using GnssMeasurementFlags = V1_0::IGnssMeasurementCallback::GnssMeasurementFlags;
using GnssMeasurementState = V2_0::IGnssMeasurementCallback::GnssMeasurementState;
@@ -46,6 +46,7 @@
}
Return<void> GnssMeasurement::close() {
+ ALOGD("close");
std::unique_lock<std::mutex> lock(mMutex);
stop();
sCallback = nullptr;
@@ -62,6 +63,7 @@
// Methods from V2_0::IGnssMeasurement follow.
Return<V1_0::IGnssMeasurement::GnssMeasurementStatus> GnssMeasurement::setCallback_2_0(
const sp<V2_0::IGnssMeasurementCallback>& callback, bool) {
+ ALOGD("setCallback_2_0");
std::unique_lock<std::mutex> lock(mMutex);
sCallback = callback;
@@ -75,6 +77,7 @@
}
void GnssMeasurement::start() {
+ ALOGD("start");
mIsActive = true;
mThread = std::thread([this]() {
while (mIsActive == true) {
@@ -87,6 +90,7 @@
}
void GnssMeasurement::stop() {
+ ALOGD("stop");
mIsActive = false;
if (mThread.joinable()) {
mThread.join();
@@ -95,26 +99,27 @@
GnssData GnssMeasurement::getMockMeasurement() {
V1_0::IGnssMeasurementCallback::GnssMeasurement measurement_1_0 = {
- .flags = (uint32_t)GnssMeasurementFlags::HAS_CARRIER_FREQUENCY,
- .svid = (int16_t)6,
- .constellation = GnssConstellationType::GLONASS,
- .timeOffsetNs = 0.0,
- .receivedSvTimeInNs = 8195997131077,
- .receivedSvTimeUncertaintyInNs = 15,
- .cN0DbHz = 30.0,
- .pseudorangeRateMps = -484.13739013671875,
- .pseudorangeRateUncertaintyMps = 1.0379999876022339,
- .accumulatedDeltaRangeState = (uint32_t)
- V1_0::IGnssMeasurementCallback::GnssAccumulatedDeltaRangeState::ADR_STATE_UNKNOWN,
- .accumulatedDeltaRangeM = 0.0,
- .accumulatedDeltaRangeUncertaintyM = 0.0,
- .carrierFrequencyHz = 1.59975e+09,
- .multipathIndicator =
- V1_0::IGnssMeasurementCallback::GnssMultipathIndicator::INDICATOR_UNKNOWN};
+ .flags = (uint32_t)GnssMeasurementFlags::HAS_CARRIER_FREQUENCY,
+ .svid = (int16_t)6,
+ .constellation = V1_0::GnssConstellationType::UNKNOWN,
+ .timeOffsetNs = 0.0,
+ .receivedSvTimeInNs = 8195997131077,
+ .receivedSvTimeUncertaintyInNs = 15,
+ .cN0DbHz = 30.0,
+ .pseudorangeRateMps = -484.13739013671875,
+ .pseudorangeRateUncertaintyMps = 1.0379999876022339,
+ .accumulatedDeltaRangeState = (uint32_t)V1_0::IGnssMeasurementCallback::
+ GnssAccumulatedDeltaRangeState::ADR_STATE_UNKNOWN,
+ .accumulatedDeltaRangeM = 0.0,
+ .accumulatedDeltaRangeUncertaintyM = 0.0,
+ .carrierFrequencyHz = 1.59975e+09,
+ .multipathIndicator =
+ V1_0::IGnssMeasurementCallback::GnssMultipathIndicator::INDICATOR_UNKNOWN};
V1_1::IGnssMeasurementCallback::GnssMeasurement measurement_1_1 = {.v1_0 = measurement_1_0};
V2_0::IGnssMeasurementCallback::GnssMeasurement measurement_2_0 = {
.v1_1 = measurement_1_1,
.codeType = "C",
+ .constellation = GnssConstellationType::GLONASS,
.state = GnssMeasurementState::STATE_CODE_LOCK | GnssMeasurementState::STATE_BIT_SYNC |
GnssMeasurementState::STATE_SUBFRAME_SYNC |
GnssMeasurementState::STATE_TOW_DECODED |
diff --git a/gnss/2.0/types.hal b/gnss/2.0/types.hal
index 21b64f9..3865727 100644
--- a/gnss/2.0/types.hal
+++ b/gnss/2.0/types.hal
@@ -72,4 +72,28 @@
* needs to be estimated by syncing the notion of time via PTP or some other mechanism.
*/
ElapsedRealtime elapsedRealtime;
-};
\ No newline at end of file
+};
+
+/**
+ * GNSS constellation type
+ *
+ * This is to specify the navigation satellite system, for example, as listed in Section 3.5 in
+ * RINEX Version 3.04.
+ */
+enum GnssConstellationType : uint8_t {
+ UNKNOWN = 0,
+ /** Global Positioning System. */
+ GPS = 1,
+ /** Satellite-Based Augmentation System. */
+ SBAS = 2,
+ /** Global Navigation Satellite System. */
+ GLONASS = 3,
+ /** Quasi-Zenith Satellite System. */
+ QZSS = 4,
+ /** BeiDou Navigation Satellite System. */
+ BEIDOU = 5,
+ /** Galileo Navigation Satellite System. */
+ GALILEO = 6,
+ /** Indian Regional Navigation Satellite System. */
+ IRNSS = 7,
+};
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.cpp b/gnss/2.0/vts/functional/gnss_hal_test.cpp
index b2b62fc..da6092b 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test.cpp
@@ -26,6 +26,7 @@
GnssHalTest::GnssHalTest()
: info_called_count_(0),
capabilities_called_count_(0),
+ measurement_corrections_capabilities_called_count_(0),
location_called_count_(0),
name_called_count_(0),
notify_count_(0) {}
@@ -33,7 +34,7 @@
void GnssHalTest::SetUp() {
gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
- list_gnss_sv_status_.clear();
+ list_vec_gnss_sv_info_.clear();
ASSERT_NE(gnss_hal_, nullptr);
SetUpGnssCallback();
@@ -43,6 +44,7 @@
// Reset counters
info_called_count_ = 0;
capabilities_called_count_ = 0;
+ measurement_corrections_capabilities_called_count_ = 0;
location_called_count_ = 0;
name_called_count_ = 0;
measurement_called_count_ = 0;
@@ -59,7 +61,7 @@
gnss_cb_ = new GnssCallback(*this);
ASSERT_NE(gnss_cb_, nullptr);
- auto result = gnss_hal_->setCallback_1_1(gnss_cb_);
+ auto result = gnss_hal_->setCallback_2_0(gnss_cb_);
if (!result.isOk()) {
ALOGE("result of failed setCallback %s", result.description().c_str());
}
@@ -77,16 +79,6 @@
EXPECT_EQ(capabilities_called_count_, 1);
EXPECT_EQ(info_called_count_, 1);
EXPECT_EQ(name_called_count_, 1);
-
- // Setup measurement corrections callback.
- auto measurementCorrections = gnss_hal_->getExtensionMeasurementCorrections();
- ASSERT_TRUE(measurementCorrections.isOk());
- sp<IMeasurementCorrections> iMeasurementCorrections = measurementCorrections;
- if (iMeasurementCorrections != nullptr) {
- sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
- new GnssMeasurementCorrectionsCallback(*this);
- iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
- }
}
void GnssHalTest::StopAndClearLocations() {
@@ -193,11 +185,12 @@
status = cv_.wait_for(lock, std::chrono::seconds(timeout_seconds));
if (status == std::cv_status::timeout) return status;
}
+ notify_count_--;
return status;
}
Return<void> GnssHalTest::GnssCallback::gnssSetSystemInfoCb(
- const IGnssCallback::GnssSystemInfo& info) {
+ const IGnssCallback_1_0::GnssSystemInfo& info) {
ALOGI("Info received, year %d", info.yearOfHw);
parent_.info_called_count_++;
parent_.last_info_ = info;
@@ -248,10 +241,9 @@
return Void();
}
-Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb(
- const IGnssCallback::GnssSvStatus& svStatus) {
- ALOGI("GnssSvStatus received");
- parent_.list_gnss_sv_status_.emplace_back(svStatus);
+Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb(const IGnssCallback_1_0::GnssSvStatus&) {
+ ALOGI("gnssSvStatusCb");
+
return Void();
}
@@ -272,3 +264,11 @@
parent_.notify();
return Void();
}
+
+Return<void> GnssHalTest::GnssCallback::gnssSvStatusCb_2_0(
+ const hidl_vec<IGnssCallback_2_0::GnssSvInfo>& svInfoList) {
+ ALOGI("gnssSvStatusCb_2_0. Size = %d", (int)svInfoList.size());
+ parent_.list_vec_gnss_sv_info_.emplace_back(svInfoList);
+ parent_.notify();
+ return Void();
+}
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.h b/gnss/2.0/vts/functional/gnss_hal_test.h
index 7354aea..737815f 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.h
+++ b/gnss/2.0/vts/functional/gnss_hal_test.h
@@ -25,17 +25,20 @@
#include <list>
#include <mutex>
+using android::hardware::hidl_vec;
using android::hardware::Return;
using android::hardware::Void;
using android::hardware::gnss::measurement_corrections::V1_0::IMeasurementCorrectionsCallback;
using android::hardware::gnss::V1_0::GnssLocationFlags;
using android::hardware::gnss::V2_0::IGnss;
-using android::hardware::gnss::V2_0::IGnssCallback;
using GnssLocation_1_0 = android::hardware::gnss::V1_0::GnssLocation;
using GnssLocation_2_0 = android::hardware::gnss::V2_0::GnssLocation;
+using IGnssCallback_1_0 = android::hardware::gnss::V1_0::IGnssCallback;
+using IGnssCallback_2_0 = android::hardware::gnss::V2_0::IGnssCallback;
+
using IGnssMeasurementCallback_1_0 = android::hardware::gnss::V1_0::IGnssMeasurementCallback;
using IGnssMeasurementCallback_1_1 = android::hardware::gnss::V1_1::IGnssMeasurementCallback;
using IGnssMeasurementCallback_2_0 = android::hardware::gnss::V2_0::IGnssMeasurementCallback;
@@ -77,8 +80,8 @@
std::cv_status waitForMeasurementCorrectionsCapabilities(int timeout_seconds);
/* Callback class for data & Event. */
- class GnssCallback : public IGnssCallback {
- public:
+ class GnssCallback : public IGnssCallback_2_0 {
+ public:
GnssHalTest& parent_;
GnssCallback(GnssHalTest& parent) : parent_(parent){};
@@ -86,7 +89,7 @@
virtual ~GnssCallback() = default;
// Dummy callback handlers
- Return<void> gnssStatusCb(const IGnssCallback::GnssStatusValue /* status */) override {
+ Return<void> gnssStatusCb(const IGnssCallback_1_0::GnssStatusValue /* status */) override {
return Void();
}
Return<void> gnssNmeaCb(int64_t /* timestamp */,
@@ -103,8 +106,8 @@
Return<void> gnssNameCb(const android::hardware::hidl_string& name) override;
Return<void> gnssLocationCb(const GnssLocation_1_0& location) override;
Return<void> gnssSetCapabilitesCb(uint32_t capabilities) override;
- Return<void> gnssSetSystemInfoCb(const IGnssCallback::GnssSystemInfo& info) override;
- Return<void> gnssSvStatusCb(const IGnssCallback::GnssSvStatus& svStatus) override;
+ Return<void> gnssSetSystemInfoCb(const IGnssCallback_1_0::GnssSystemInfo& info) override;
+ Return<void> gnssSvStatusCb(const IGnssCallback_1_0::GnssSvStatus& svStatus) override;
// New in v2.0
Return<void> gnssLocationCb_2_0(const GnssLocation_2_0& location) override;
@@ -113,6 +116,8 @@
return Void();
}
Return<void> gnssSetCapabilitiesCb_2_0(uint32_t capabilities) override;
+ Return<void> gnssSvStatusCb_2_0(
+ const hidl_vec<IGnssCallback_2_0::GnssSvInfo>& svInfoList) override;
private:
Return<void> gnssLocationCbImpl(const GnssLocation_2_0& location);
@@ -198,7 +203,7 @@
void SetPositionMode(const int min_interval_msec, const bool low_power_mode);
sp<IGnss> gnss_hal_; // GNSS HAL to call into
- sp<IGnssCallback> gnss_cb_; // Primary callback interface
+ sp<IGnssCallback_2_0> gnss_cb_; // Primary callback interface
// TODO: make these variables thread-safe.
/* Count of calls to set the following items, and the latest item (used by
@@ -211,16 +216,16 @@
int measurement_called_count_;
int name_called_count_;
- IGnssCallback::GnssSystemInfo last_info_;
+ IGnssCallback_1_0::GnssSystemInfo last_info_;
uint32_t last_capabilities_;
uint32_t last_measurement_corrections_capabilities_;
GnssLocation_2_0 last_location_;
IGnssMeasurementCallback_2_0::GnssData last_measurement_;
android::hardware::hidl_string last_name_;
- list<IGnssCallback::GnssSvStatus> list_gnss_sv_status_;
+ list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> list_vec_gnss_sv_info_;
- private:
+ private:
std::mutex mtx_;
std::condition_variable cv_;
int notify_count_;
diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
index f3559c5..0682f84 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
@@ -32,12 +32,15 @@
using IAGnss_2_0 = android::hardware::gnss::V2_0::IAGnss;
using IAGnss_1_0 = android::hardware::gnss::V1_0::IAGnss;
using IAGnssCallback_2_0 = android::hardware::gnss::V2_0::IAGnssCallback;
+using IGnssBatching_V1_0 = android::hardware::gnss::V1_0::IGnssBatching;
+using IGnssBatching_V2_0 = android::hardware::gnss::V2_0::IGnssBatching;
using android::hardware::gnss::common::Utils;
using android::hardware::gnss::measurement_corrections::V1_0::IMeasurementCorrections;
using android::hardware::gnss::measurement_corrections::V1_0::MeasurementCorrections;
using android::hardware::gnss::V1_0::IGnssNi;
using android::hardware::gnss::V2_0::ElapsedRealtimeFlags;
+using android::hardware::gnss::V2_0::GnssConstellationType;
using android::hardware::gnss::V2_0::IGnssCallback;
using android::hardware::gnss::visibility_control::V1_0::IGnssVisibilityControl;
@@ -163,10 +166,13 @@
}
/*
- * TestGnssMeasurementCodeType:
- * Sets a GnssMeasurementCallback, waits for a measurement, and verifies the codeType is valid.
+ * TestGnssMeasurementFields:
+ * Sets a GnssMeasurementCallback, waits for a measurement, and verifies
+ * 1. codeType is valid,
+ * 2. constellation is valid.
+ * 3. state is valid.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementCodeType) {
+TEST_F(GnssHalTest, TestGnssMeasurementFields) {
const int kFirstGnssMeasurementTimeoutSeconds = 10;
auto gnssMeasurement = gnss_hal_->getExtensionGnssMeasurement_2_0();
@@ -189,7 +195,23 @@
EXPECT_EQ(measurement_called_count_, 1);
ASSERT_TRUE(last_measurement_.measurements.size() > 0);
for (auto measurement : last_measurement_.measurements) {
+ // Verify CodeType is valid.
ASSERT_NE(measurement.codeType, "");
+
+ // Verify ConstellationType is valid.
+ ASSERT_TRUE(static_cast<uint8_t>(measurement.constellation) >=
+ static_cast<uint8_t>(GnssConstellationType::UNKNOWN) &&
+ static_cast<uint8_t>(measurement.constellation) <=
+ static_cast<uint8_t>(GnssConstellationType::IRNSS));
+
+ // Verify State is valid.
+ ASSERT_TRUE(
+ static_cast<uint32_t>(measurement.state) >=
+ static_cast<uint32_t>(IGnssMeasurementCallback_2_0::GnssMeasurementState::
+ STATE_UNKNOWN) &&
+ static_cast<uint32_t>(measurement.state) <=
+ static_cast<uint32_t>(IGnssMeasurementCallback_2_0::GnssMeasurementState::
+ STATE_2ND_CODE_LOCK));
}
iGnssMeasurement->close();
@@ -272,6 +294,7 @@
* capability flag is set.
*/
TEST_F(GnssHalTest, TestGnssMeasurementCorrectionsCapabilities) {
+ // Setup measurement corrections callback.
auto measurementCorrections = gnss_hal_->getExtensionMeasurementCorrections();
ASSERT_TRUE(measurementCorrections.isOk());
sp<IMeasurementCorrections> iMeasurementCorrections = measurementCorrections;
@@ -279,6 +302,10 @@
return;
}
+ sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
+ new GnssMeasurementCorrectionsCallback(*this);
+ iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
+
const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5;
waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0);
@@ -301,6 +328,10 @@
return;
}
+ sp<IMeasurementCorrectionsCallback> iMeasurementCorrectionsCallback =
+ new GnssMeasurementCorrectionsCallback(*this);
+ iMeasurementCorrections->setCallback(iMeasurementCorrectionsCallback);
+
const int kMeasurementCorrectionsCapabilitiesTimeoutSeconds = 5;
waitForMeasurementCorrectionsCapabilities(kMeasurementCorrectionsCapabilitiesTimeoutSeconds);
ASSERT_TRUE(measurement_corrections_capabilities_called_count_ > 0);
@@ -337,9 +368,9 @@
wait(kFirstGnssMeasurementTimeoutSeconds);
EXPECT_EQ(measurement_called_count_, 1);
- ASSERT_TRUE((int)last_measurement_.elapsedRealtime.flags >= 0 &&
- (int)last_measurement_.elapsedRealtime.flags <=
- (int)ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS);
+ ASSERT_TRUE((int)last_measurement_.elapsedRealtime.flags <=
+ (int)(ElapsedRealtimeFlags::HAS_TIMESTAMP_NS |
+ ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS));
// We expect a non-zero timestamp when set.
if (last_measurement_.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
@@ -352,9 +383,9 @@
TEST_F(GnssHalTest, TestGnssLocationElapsedRealtime) {
StartAndCheckFirstLocation();
- ASSERT_TRUE((int)last_location_.elapsedRealtime.flags >= 0 &&
- (int)last_location_.elapsedRealtime.flags <=
- (int)ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS);
+ ASSERT_TRUE((int)last_location_.elapsedRealtime.flags <=
+ (int)(ElapsedRealtimeFlags::HAS_TIMESTAMP_NS |
+ ElapsedRealtimeFlags::HAS_TIME_UNCERTAINTY_NS));
// We expect a non-zero timestamp when set.
if (last_location_.elapsedRealtime.flags & ElapsedRealtimeFlags::HAS_TIMESTAMP_NS) {
@@ -370,3 +401,20 @@
gnss_hal_->injectBestLocation_2_0(last_location_);
StopAndClearLocations();
}
+
+/*
+ * TestGnssBatchingExtension:
+ * Gets the GnssBatchingExtension and verifies that it supports either the @1.0::IGnssBatching
+ * or @2.0::IGnssBatching extension.
+ */
+TEST_F(GnssHalTest, TestGnssBatchingExtension) {
+ auto gnssBatching_V2_0 = gnss_hal_->getExtensionGnssBatching_2_0();
+ ASSERT_TRUE(gnssBatching_V2_0.isOk());
+
+ auto gnssBatching_V1_0 = gnss_hal_->getExtensionGnssBatching();
+ ASSERT_TRUE(gnssBatching_V1_0.isOk());
+
+ sp<IGnssBatching_V1_0> iGnssBatching_V1_0 = gnssBatching_V1_0;
+ sp<IGnssBatching_V2_0> iGnssBatching_V2_0 = gnssBatching_V2_0;
+ ASSERT_TRUE(iGnssBatching_V1_0 != nullptr || iGnssBatching_V2_0 != nullptr);
+}
diff --git a/keymaster/4.0/support/Keymaster.cpp b/keymaster/4.0/support/Keymaster.cpp
index 9325cc0..e8db074 100644
--- a/keymaster/4.0/support/Keymaster.cpp
+++ b/keymaster/4.0/support/Keymaster.cpp
@@ -106,6 +106,19 @@
return result;
}
+void Keymaster::logIfKeymasterVendorError(ErrorCode ec) const {
+ static constexpr int32_t k_keymaster_vendor_error_code_range_max = -10000;
+ if (static_cast<int32_t>(ec) <= k_keymaster_vendor_error_code_range_max) {
+ const auto& versionInfo = halVersion();
+ LOG(ERROR) << "Keymaster reported error: " << static_cast<int32_t>(ec) << "\n"
+ << "NOTE: This is an error in the vendor specific error range.\n"
+ << " Refer to the vendor of the implementation for details.\n"
+ << " Implementation name: " << versionInfo.keymasterName << "\n"
+ << " Vendor name: " << versionInfo.authorName << "\n"
+ << " MajorVersion: " << versionInfo.majorVersion;
+ }
+}
+
Keymaster::KeymasterSet Keymaster::enumerateAvailableDevices() {
auto serviceManager = IServiceManager::getService();
CHECK(serviceManager) << "Could not retrieve ServiceManager";
diff --git a/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h b/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
index 458053a..43a34b0 100644
--- a/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
+++ b/keymaster/4.0/support/include/keymasterV4_0/Keymaster.h
@@ -65,6 +65,12 @@
const hidl_string& instanceName() const { return instanceName_; }
/**
+ * If ec is in the vendor error code range (<= -10000), logs the fact to logcat.
+ * There are no side effects otherwise.
+ */
+ void logIfKeymasterVendorError(ErrorCode ec) const;
+
+ /**
* Returns all available Keymaster3 and Keymaster4 instances, in order of most secure to least
* secure (as defined by VersionResult::operator<).
*/
diff --git a/keymaster/4.0/support/keymaster_utils.cpp b/keymaster/4.0/support/keymaster_utils.cpp
index 729e1c1..e35fdd3 100644
--- a/keymaster/4.0/support/keymaster_utils.cpp
+++ b/keymaster/4.0/support/keymaster_utils.cpp
@@ -21,7 +21,9 @@
namespace hardware {
inline static bool operator<(const hidl_vec<uint8_t>& a, const hidl_vec<uint8_t>& b) {
- return memcmp(a.data(), b.data(), std::min(a.size(), b.size())) == -1;
+ auto result = memcmp(a.data(), b.data(), std::min(a.size(), b.size()));
+ if (!result) return a.size() < b.size();
+ return result < 0;
}
template <size_t SIZE>
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index f5cb0d7..106f332 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -52,6 +52,7 @@
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExample;
using ::test_helper::resize_accordingly;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
template <typename T>
void copy_back_(std::map<int, std::vector<T>>* dst, const std::vector<RequestArgument>& ra,
@@ -540,7 +541,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal
index b9fa388..d83f9e6 100644
--- a/neuralnetworks/1.2/IDevice.hal
+++ b/neuralnetworks/1.2/IDevice.hal
@@ -76,6 +76,17 @@
getType() generates (ErrorStatus status, DeviceType type);
/**
+ * Gets the capabilities of a driver.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_2() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
* Gets information about extensions supported by the driver implementation.
*
* All extension operations and operands must be fully supported for the
@@ -113,44 +124,83 @@
generates (ErrorStatus status, vec<bool> supportedOperations);
/**
- * Gets whether the driver supports compilation caching.
+ * Gets the caching requirements of the driver implementation.
*
- * isCachingSupported indicates whether the driver supports compilation caching.
- * Even if so, the driver may still choose not to cache certain compiled models.
+ * There are two types of cache file descriptors provided to the driver: model cache
+ * and data cache.
*
- * If the device reports the caching is not supported, the user may avoid calling
- * IDevice::prepareModelFromCache and IPreparedModel::saveToCache.
+ * The data cache is for caching constant data, possibly including preprocessed
+ * and transformed tensor buffers. Any modification to the data cache should
+ * have no worse effect than generating bad output values at execution time.
+ *
+ * The model cache is for caching security-sensitive data such as compiled
+ * executable machine code in the device's native binary format. A modification
+ * to the model cache may affect the driver's execution behavior, and a malicious
+ * client could make use of this to execute beyond the granted permission. Thus,
+ * the driver must always check whether the model cache is corrupted before
+ * preparing the model from cache.
+ *
+ * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver
+ * implementation needs to cache a single prepared model. Returning 0 for both types
+ * indicates compilation caching is not supported by this driver. The driver may
+ * still choose not to cache certain compiled models even if it reports that caching
+ * is supported.
+ *
+ * If the device reports that caching is not supported, the user may avoid calling
+ * IDevice::prepareModelFromCache or providing cache file descriptors to
+ * IDevice::prepareModel_1_2.
*
* @return status Error status of the call, must be:
* - NONE if successful
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
- * @return supported A boolean indicating whether the driver supports compilation
- * caching. Even on returning true, the driver may still choose
- * not to cache certain compiled models.
+ * @return numModelCache An unsigned integer indicating how many files for model cache
+ * the driver needs to cache a single prepared model. It must
+ * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
+ * @return numDataCache An unsigned integer indicating how many files for data cache
+ * the driver needs to cache a single prepared model. It must
+ * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES.
*/
- isCachingSupported() generates (ErrorStatus status, bool supported);
+ getNumberOfCacheFilesNeeded()
+ generates (ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache);
/**
- * Creates a prepared model for execution.
+ * Asynchronously creates a prepared model for execution and optionally saves it
+ * into cache files.
*
- * prepareModel is used to make any necessary transformations or alternative
+ * prepareModel is used to make any necessary transformations to or alternative
* representations to a model for execution, possibly including
* transformations on the constant data, optimization on the model's graph,
* or compilation into the device's native binary format. The model itself
* is not changed.
*
+ * Optionally, caching information may be provided for the driver to save
+ * the prepared model to cache files for faster model compilation time
+ * when the same model preparation is requested in the future. There are
+ * two types of cache file handles provided to the driver: model cache
+ * and data cache. For more information on the two types of cache handles,
+ * refer to getNumberOfCacheFilesNeeded.
+ *
+ * The file descriptors must be opened with read and write permission. A file may
+ * have any size, and the corresponding file descriptor may have any offset. The
+ * driver must truncate a file to zero size before writing to that file. The file
+ * descriptors may be closed by the client once the asynchronous preparation has
+ * finished. The driver must dup a file descriptor if it wants to get access to
+ * the cache file later.
+ *
* The model is prepared asynchronously with respect to the caller. The
- * prepareModel function must verify the inputs to the prepareModel function
- * are correct. If there is an error, prepareModel must immediately invoke
+ * prepareModel function must verify the inputs to the prepareModel function
+ * related to preparing the model (as opposed to saving the prepared model to
+ * cache) are correct. If there is an error, prepareModel must immediately invoke
* the callback with the appropriate ErrorStatus value and nullptr for the
- * IPreparedModel, then return with the same ErrorStatus. If the inputs to
- * the prepareModel function are valid and there is no error, prepareModel
- * must launch an asynchronous task to prepare the model in the background,
- * and immediately return from prepareModel with ErrorStatus::NONE. If the
- * asynchronous task fails to launch, prepareModel must immediately invoke
- * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
- * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
+ * IPreparedModel, then return with the same ErrorStatus. If the inputs to the
+ * prepareModel function that are related to preparing the model are valid and
+ * there is no error, prepareModel must launch an asynchronous task
+ * to prepare the model in the background, and immediately return from
+ * prepareModel with ErrorStatus::NONE. If the asynchronous task fails to launch,
+ * prepareModel must immediately invoke the callback with
+ * ErrorStatus::GENERAL_FAILURE and nullptr for the IPreparedModel, then return
+ * with ErrorStatus::GENERAL_FAILURE.
*
* When the asynchronous task has finished preparing the model, it must
* immediately invoke the callback function provided as an input to
@@ -160,6 +210,14 @@
* the callback object must be invoked with the appropriate ErrorStatus
* value and nullptr for the IPreparedModel.
*
+ * Optionally, the driver may save the prepared model to cache during the
+ * asynchronous preparation. Any error that occurs when saving to cache must
+ * not affect the status of preparing the model. Even if the input arguments
+ * related to the cache may be invalid, or the driver may fail to save to cache,
+ * the prepareModel function must finish preparing the model. The driver
+ * may choose not to save to cache even if the caching information is
+ * provided and valid.
+ *
* The only information that may be unknown to the model at this stage is
* the shape of the tensors, which may only be known at execution time. As
* such, some driver services may return partially prepared models, where
@@ -173,6 +231,26 @@
* @param model The model to be prepared for execution.
* @param preference Indicates the intended execution behavior of a prepared
* model.
+ * @param modelCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the security-sensitive cache. The length of
+ * the vector must either be 0 indicating that caching information is not provided,
+ * or match the numModelCache returned from getNumberOfCacheFilesNeeded. The cache
+ * handles will be provided in the same order when retrieving the
+ * preparedModel from cache files with prepareModelFromCache.
+ * @param dataCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the constants' cache. The length of
+ * the vector must either be 0 indicating that caching information is not provided,
+ * or match the numDataCache returned from getNumberOfCacheFilesNeeded. The cache
+ * handles will be provided in the same order when retrieving the
+ * preparedModel from cache files with prepareModelFromCache.
+ * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+ * identifying the prepared model. The same token will be provided when retrieving
+ * the prepared model from the cache files with prepareModelFromCache.
+ * Tokens should be chosen to have a low rate of collision for a particular
+ * application. The driver cannot detect a collision; a collision will result
+ * in a failed execution or in a successful execution that produces incorrect
+ * output values. If both modelCache and dataCache are empty indicating that
+ * caching information is not provided, this token must be ignored.
* @param callback A callback object used to return the error status of
* preparing the model for execution and the prepared model if
* successful, nullptr otherwise. The callback object's notify function
@@ -182,9 +260,12 @@
* - NONE if preparation task is successfully launched
* - DEVICE_UNAVAILABLE if driver is offline or busy
* - GENERAL_FAILURE if there is an unspecified error
- * - INVALID_ARGUMENT if one of the input arguments is invalid
+ * - INVALID_ARGUMENT if one of the input arguments related to preparing the
+ * model is invalid
*/
prepareModel_1_2(Model model, ExecutionPreference preference,
+ vec<handle> modelCache, vec<handle> dataCache,
+ uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
generates (ErrorStatus status);
@@ -192,22 +273,17 @@
* Creates a prepared model from cache files for execution.
*
* prepareModelFromCache is used to retrieve a prepared model directly from
- * cache files to avoid slow model compilation time. There are exactly two
- * cache file descriptors provided to the driver: modelCache and dataCache.
+ * cache files to avoid slow model compilation time. There are
+ * two types of cache file handles provided to the driver: model cache
+ * and data cache. For more information on the two types of cache handles,
+ * refer to getNumberOfCacheFilesNeeded.
*
- * The dataCache is for caching constant data, possibly including preprocessed
- * and transformed tensor buffers. Any modification to the dataCache should
- * have no worse effect than generating bad output values at execution time.
- *
- * The modelCache is for caching security-sensitive data such as compiled
- * executable machine code in the device's native binary format. A modification
- * to the modelCache may affect the driver's execution behavior, and a malicious
- * client could make use of this to execute beyond the granted permission. Thus,
- * the driver must always check whether the modelCache is corrupted before preparing
- * the model from cache.
- *
- * The two file descriptors may be closed by the client once the asynchronous
- * preparation has finished. The driver has to copy all the data it needs.
+ * The file descriptors must be opened with read and write permission. A file may
+ * have any size, and the corresponding file descriptor may have any offset. The
+ * driver must truncate a file to zero size before writing to that file. The file
+ * descriptors may be closed by the client once the asynchronous preparation has
+ * finished. The driver must dup a file descriptor if it wants to get access to
+ * the cache file later.
*
* The model is prepared asynchronously with respect to the caller. The
* prepareModelFromCache function must verify the inputs to the
@@ -241,13 +317,17 @@
* used with different shapes of inputs on different (possibly concurrent)
* executions.
*
- * @param modelCache A handle holding exactly one cache file descriptor for the
- * security-sensitive cache.
- * @param dataCache A handle holding exactly one cache file descriptor for the
- * constants' cache.
+ * @param modelCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the security-sensitive cache. The length of
+ * the vector must match the numModelCache returned from getNumberOfCacheFilesNeeded.
+ * The cache handles will be provided in the same order as with prepareModel_1_2.
+ * @param dataCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the constants' cache. The length of the vector
+ * must match the numDataCache returned from getNumberOfCacheFilesNeeded.
+ * The cache handles will be provided in the same order as with prepareModel_1_2.
* @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
* identifying the prepared model. It is the same token provided when saving
- * the cache files with IPreparedModel::saveToCache. Tokens should be chosen
+ * the cache files with prepareModel_1_2. Tokens should be chosen
* to have a low rate of collision for a particular application. The driver
* cannot detect a collision; a collision will result in a failed execution
* or in a successful execution that produces incorrect output values.
@@ -263,7 +343,7 @@
* unspecified error
* - INVALID_ARGUMENT if one of the input arguments is invalid
*/
- prepareModelFromCache(handle modelCache, handle dataCache,
+ prepareModelFromCache(vec<handle> modelCache, vec<handle> dataCache,
uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
IPreparedModelCallback callback)
generates (ErrorStatus status);
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index 757d5f1..5d2d80f 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -157,62 +157,4 @@
fmq_sync<FmqRequestDatum> requestChannel,
fmq_sync<FmqResultDatum> resultChannel)
generates (ErrorStatus status, IBurstContext context);
-
- /*
- * Saves the prepared model to cache files.
- *
- * saveToCache is used to save a prepared model to cache files for faster
- * model compilation time when the same model preparation is requested in
- * the future. There are exactly two cache file descriptors provided to the
- * driver: modelCache and dataCache.
- *
- * The dataCache is for caching constant data, possibly including preprocessed
- * and transformed tensor buffers. Any modification to the dataCache should
- * have no worse effect than generating bad output values at execution time.
- *
- * The modelCache is for caching security-sensitive data such as compiled
- * executable machine code in the device's native binary format. A modification
- * to the modelCache may affect the driver's execution behavior, and a malicious
- * client could make use of this to execute beyond the granted permission. Thus,
- * the driver must always check whether the modelCache is corrupted before preparing
- * the model from cache.
- *
- * The two file descriptors must point to two zero-length files with offset
- * positioned at the beginning of the file. The file descriptors may be closed
- * by the client once the method has returned.
- *
- * If the driver decides not to save the prepared model without looking at the
- * input arguments to the saveToCache function, saveToCache must return with
- * ErrorStatus::GENERAL_FAILURE. Otherwise, the saveToCache function must verify
- * the input arguments to the saveToCache function are valid, and return with
- * ErrorStatus::INVALID_ARGUMENT if not. If the inputs are valid but the driver
- * could not save the prepared model, saveToCache must return with the appropriate
- * ErrorStatus. Otherwise, it must write the cache files and return
- * ErrorStatus::NONE. Unless saveToCache returns ErrorStatus::NONE, the contents
- * of the cache files are undefined.
- *
- * @param modelCache A handle holding exactly one cache file descriptor for the
- * security-sensitive cache.
- * @param dataCache A handle holding exactly one cache file descriptor for the
- * constants' cache.
- * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
- * identifying the prepared model. The same token will be provided
- * when retrieving the prepared model from cache files with
- * IDevice::prepareModelFromCache. Tokens should be chosen to have
- * a low rate of collision for a particular application. The driver
- * cannot detect a collision; a collision will result in a failed
- * execution or in a successful execution that produces incorrect
- * output values.
- * @return status Error status of saveToCache, must be:
- * - NONE if saveToCache is performed successfully
- * - DEVICE_UNAVAILABLE if driver is offline or busy
- * - GENERAL_FAILURE if the driver could not save the
- * prepared model or if there is an unspecified error
- * - INVALID_ARGUMENT if one of the input arguments is invalid,
- * unless the driver decides not to save the prepared model
- * without looking at the input arguments
- */
- saveToCache(handle modelCache, handle dataCache,
- uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token)
- generates (ErrorStatus status);
};
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index f2e02b8..8c57796 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -30,6 +30,11 @@
* The byte size of the cache token.
*/
BYTE_SIZE_OF_CACHE_TOKEN = 32,
+
+ /**
+ * The maximum number of files for each type of cache in compilation caching.
+ */
+ MAX_NUMBER_OF_CACHE_FILES = 32,
};
enum OperandType : @1.0::OperandType {
@@ -182,6 +187,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
@@ -231,7 +240,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -257,7 +267,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -304,6 +315,7 @@
* Before API level 29, all input tensors of
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* must have the same scale and zeroPoint as the output tensor.
+ * Since API level 29, zero-sized tensors are supported.
* * n: An {@link OperandType::INT32} scalar, specifying the
* concatenation axis.
*
@@ -361,7 +373,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input.
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
@@ -408,7 +421,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input.
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
@@ -450,11 +464,10 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out]. Before API level 29,
+ * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+ * following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
*
* Available since API level 27.
*/
@@ -600,11 +613,10 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out]. Before API level 29,
+ * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
+ * following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
*
* Available since API level 27.
*/
@@ -672,7 +684,7 @@
* Supported tensor rank: up to 4
*
* Inputs:
- * * 0: A tensor.
+ * * 0: A tensor. Since API level 29, this tensor may be zero-sized.
*
* Outputs:
* * 0: A tensor with the same shape as input0.
@@ -765,7 +777,8 @@
* [batch_size, input_size], where "input_size" corresponds to the
* number of inputs to the layer, matching the second dimension of
* weights, and "batch_size" is calculated by dividing the number of
- * elements by "input_size".
+ * elements by "input_size". Since API level 29, zero batch_size is
+ * supported for this tensor.
* * 1: A 2-D tensor, specifying the weights, of shape
* [num_units, input_size], where "num_units" corresponds to the number
* of output nodes.
@@ -780,10 +793,10 @@
* invoke on the result.
*
* Outputs:
- * * 0: The output tensor, of shape [batch_size, num_units]. For output
- * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied:
- * output_scale > input_scale * filter_scale.
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before API
+ * level 29, for output tensor of {@link
+ * OperandType::TENSOR_QUANT8_ASYMM}, the following condition must be
+ * satisfied: output_scale > input_scale * filter_scale.
*
* Available since API level 27.
*/
@@ -861,6 +874,7 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
*
* Supported tensor rank: up to 4
* Tensors with rank less than 4 are only supported since API level 29.
@@ -875,6 +889,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
*
* Available since API level 27.
*/
@@ -905,7 +921,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -931,7 +948,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1021,7 +1039,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1333,7 +1352,8 @@
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -1359,7 +1379,8 @@
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1406,6 +1427,10 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -1441,7 +1466,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1465,7 +1491,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1489,7 +1516,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1541,9 +1569,12 @@
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
*
- * Inputs:
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output
@@ -1552,6 +1583,24 @@
* Set to true to specify NCHW data layout for input0 and output0.
* Available since API level 29.
*
+ * Inputs (resizing by scale, since API level 29):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
@@ -1637,7 +1686,8 @@
* Tensors with rank other than 2 or 4 are only supported since API level 29.
*
* Inputs:
- * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
+ * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since
+ * API level 29, this tensor may be zero-sized.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
* beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
@@ -1795,7 +1845,8 @@
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
@@ -1862,6 +1913,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
@@ -2095,6 +2150,10 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
* * {@link OperandType::TENSOR_FLOAT32}
@@ -2135,6 +2194,7 @@
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since API level 29, this tensor may be zero-sized.
* * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
* the permutation of the dimensions of the input tensor.
*
@@ -2231,7 +2291,8 @@
* * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
* bounding box proposals, each line with format [x1, y1, x2, y2].
* For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
- * the zeroPoint must be 0 and the scale must be 0.125.
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
* * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
* bounding box delta for each region of interest and each class. The
* bounding box deltas are organized in the following order
@@ -2240,10 +2301,12 @@
* and height, dw and dh is the log-scale relative correction factor
* for the width and height. For input0 of type
* {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
* * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
* * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
* each image in the batch, each line with format
* [image_height, image_width].
@@ -2272,113 +2335,113 @@
* Inputs:
* * 0: The input.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
* where "max_time" is the number of timesteps (sequence length),
* "batch_size" corresponds to the batching dimension, and
* "input_size" is the size of the input.
* * 1: The forward input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size], where “num_units”
- * corresponds to the number of cell units.
+ * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
* * 2: The forward input-to-forget weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 3: The forward input-to-cell weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 4: The forward input-to-output weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 5: The forward recurrent-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, output_size], where “output_size”
- * corresponds to either the number of cell units (i.e., “num_units”),
- * or the second dimension of the “projection_weights”, if defined.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+ * corresponds to either the number of cell units (i.e., fw_num_units),
+ * or the second dimension of the “fw_projection_weights”, if defined.
* * 6: The forward recurrent-to-forget weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 7: The forward recurrent-to-cell weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 8: The forward recurrent-to-output weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
* * 9: The forward cell-to-input weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 10: The forward cell-to-forget weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 11: The forward cell-to-output weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 12: The forward input gate bias. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 13: The forward forget gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 14: The forward cell gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 15: The forward output gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [fw_num_units].
* * 16: The forward projection weights. Optional.
- * A 2-D tensor of shape [output_size, num_units].
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
* * 17: The forward projection bias. Optional.
- * A 1-D tensor of shape [output_size].
+ * A 1-D tensor of shape [fw_output_size].
* * 18: The backward input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size], where “num_units”
- * corresponds to the number of cell units.
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
* * 19: The backward input-to-forget weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 20: The backward input-to-cell weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 21: The backward input-to-output weights.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 22: The backward recurrent-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, output_size], where “output_size”
- * corresponds to either the number of cell units (i.e., “num_units”),
- * or the second dimension of the “projection_weights”, if defined.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+ * corresponds to either the number of cell units (i.e., “bw_num_units”),
+ * or the second dimension of the “bw_projection_weights”, if defined.
* * 23: The backward recurrent-to-forget weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 24: The backward recurrent-to-cell weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 25: The backward recurrent-to-output weights.
- * A 2-D tensor of shape [num_units, output_size].
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
* * 26: The backward cell-to-input weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 27: The backward cell-to-forget weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 28: The backward cell-to-output weights. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 29: The backward input gate bias. Optional.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 30: The backward forget gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 31: The backward cell gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 32: The backward output gate bias.
- * A 1-D tensor of shape [num_units].
+ * A 1-D tensor of shape [bw_num_units].
* * 33: The backward projection weights. Optional.
- * A 2-D tensor of shape [output_size, num_units].
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
* * 34: The backward projection bias. Optional.
- * A 1-D tensor of shape [output_size].
+ * A 1-D tensor of shape [bw_output_size].
* * 35: The forward input activation state.
- * A 2-D tensor of shape [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, fw_output_size].
* * 36: The forward input cell state.
- * A 2-D tensor of shape [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, fw_num_units].
* * 37: The backward input activation state.
- * A 2-D tensor of shape [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, bw_output_size].
* * 38: The backward input cell state.
- * A 2-D tensor of shape [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, bw_num_units].
* * 39: The auxiliary input. Optional.
* A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
* corresponds to the batching dimension, and “input_size” is the size
* of the input.
* * 40: The forward auxiliary input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 41: The forward auxiliary input-to-forget weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 42: The forward auxiliary input-to-cell weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 43: The forward auxiliary input-to-output weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [fw_num_units, input_size].
* * 44: The backward auxiliary input-to-input weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 45: The backward auxiliary input-to-forget weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 46: The backward auxiliary input-to-cell weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 47: The backward auxiliary input-to-output weights. Optional.
- * A 2-D tensor of shape [num_units, input_size].
+ * A 2-D tensor of shape [bw_num_units, input_size].
* * 48: The activation function.
* A value indicating the activation function:
* <ul>
@@ -2410,16 +2473,46 @@
* * 52: time_major
* An {@link OperandType::BOOL} scalar specifying the shape format
* of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
*
* Outputs:
* * 0: The forward output.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
* * 1: The backward output. Unused if merge_outputs is true.
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
*
* Available since API level 29.
*/
@@ -2547,10 +2640,17 @@
/**
* Greedily selects a subset of bounding boxes in descending order of score.
*
- * This op applies hard NMS algorithm to each class. In each loop of
- * execution, the box with maximum score gets selected, and any boxes with
- * the intersection-over-union (IOU) greater than a threshold are removed
- * from the pending set.
+ * This op applies the NMS algorithm to each class. In each loop of execution,
+ * the box with maximum score gets selected and removed from the pending set.
+ * The scores of the rest of boxes are lowered according to the
+ * intersection-over-union (IOU) overlapping with the previously selected
+ * boxes and a specified NMS kernel method. Any boxes with score less
+ * than a threshold are removed from the pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
*
* Axis-aligned bounding boxes are represented by its upper-left corner
* coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
@@ -2564,25 +2664,34 @@
* Inputs:
* * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
* of each bounding box proposal. The boxes are grouped by batches in the
- * first dimension.
+ * first dimension. Zero num_rois is supported for this tensor.
* * 1: A 2-D Tensor specifying the bounding boxes of shape
* [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
* The boxes are grouped by batches in the first dimension. The sequential
* order of the boxes corresponds with input0. For input0 of type
* {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
* {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
- * scale of 0.125.
+ * scale of 0.125. Zero num_rois is supported for this tensor.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
* the same batch index are grouped together.
* * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
* with scores lower than the threshold are filtered before sending
* to the NMS algorithm.
- * * 4: An {@link OperandType::FLOAT32} scalar, specifying the IoU
- * threshold.
- * * 5: An {@link OperandType::INT32} scalar, specifying the maximum
+ * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
* number of selected bounding boxes for each image. Set to a negative
* value for unlimited number of output bounding boxes.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold in hard and linear NMS kernel. This field is ignored if
+ * gaussian kernel is selected.
+ * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+ * gaussian NMS kernel. This field is ignored if gaussian kernel is
+ * not selected.
+ * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
*
* Outputs:
* * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
@@ -2600,8 +2709,8 @@
* [num_output_rois], specifying the class of each output box. The
* sequential order of the boxes corresponds with output0.
* * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
- * [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
*
* Available since API level 29.
*/
@@ -2937,8 +3046,8 @@
* For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
* scale must be 0.125 and the zero point must be 0.
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
- * [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
*
* Available since API level 29.
*/
@@ -3122,11 +3231,7 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/
@@ -3608,7 +3713,7 @@
* Supported tensor rank: from 1
*
* Inputs:
- * * 0: A tensor.
+ * * 0: A tensor, may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0, but with
@@ -3940,10 +4045,12 @@
* the regions of interest, each line with format [x1, y1, x2, y2].
* For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
* this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
- * with zeroPoint of 0 and scale of 0.125.
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
* * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_rois], specifying the batch index of each box. Boxes with
- * the same batch index are grouped together.
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
* * 3: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 4: An {@link OperandType::INT32} scalar, specifying the output
@@ -4108,7 +4215,7 @@
* Supported tensor rank: from 1
*
* Inputs:
- * * 0: An n-D tensor to take slice from.
+ * * 0: An n-D tensor to take slice from, may be zero-sized.
* * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
* the beginning indices of the slice in each dimension.
* * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
@@ -4331,11 +4438,7 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale (for
- * filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * this condition must be true for all filter scales).
+ * [batches, out_height, out_width, depth_out].
*
* Available since API level 29.
*/
@@ -4367,9 +4470,9 @@
* Inputs:
* * 0: The input (\f$x_t\f$).
* A 3-D tensor of shape:
- * If time-major: [max_time, batch_size, output_size]
- * If batch-major: [batch_size, max_time, output_size]
- * where “max_size” is the number of timesteps (sequence length),
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where “max_time” is the number of timesteps (sequence length),
* “batch_size” corresponds to the batching dimension, and
* “input_size” is the size of the input.
* * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
@@ -4429,16 +4532,16 @@
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* * 23:Time-major if true, batch-major if false.
- * * 24:The input layer normalization weights.
+ * * 24:The input layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at input gate.
- * * 25:The forget layer normalization weights.
+ * * 25:The forget layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at forget gate.
- * * 26:The cell layer normalization weights.
+ * * 26:The cell layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at cell gate.
- * * 27:The output layer normalization weights.
+ * * 27:The output layer normalization weights. Optional.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at output gate.
*
@@ -4526,9 +4629,11 @@
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
*
- * Inputs:
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output
@@ -4536,6 +4641,24 @@
* * 3: An {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
*
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
@@ -4593,6 +4716,39 @@
};
/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
* Describes one operation of the model's graph.
*/
struct Operation {
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 365a750..5c269df 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -25,7 +25,7 @@
namespace vts {
namespace functional {
-using V1_1::Capabilities;
+using V1_0::PerformanceInfo;
// create device test
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
@@ -37,6 +37,31 @@
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
+// initialization
+TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ using OperandPerformance = Capabilities::OperandPerformance;
+ Return<void> ret = device->getCapabilities_1_2([](ErrorStatus status,
+ const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+
+ auto isPositive = [](const PerformanceInfo& perf) {
+ return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
+ };
+
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto& opPerf = capabilities.operandPerformance;
+ EXPECT_TRUE(std::all_of(
+ opPerf.begin(), opPerf.end(),
+ [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
+ EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ }));
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
// device version test
TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
Return<void> ret = device->getVersionString([](ErrorStatus status, const hidl_string& version) {
@@ -77,10 +102,15 @@
EXPECT_TRUE(ret.isOk());
}
-// isCachingSupported test
-TEST_F(NeuralnetworksHidlTest, IsCachingSupported) {
- Return<void> ret = device->isCachingSupported(
- [](ErrorStatus status, bool) { EXPECT_EQ(ErrorStatus::NONE, status); });
+// getNumberOfCacheFilesNeeded test
+TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
+ Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ EXPECT_LE(numModelCache,
+ static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
+ });
EXPECT_TRUE(ret.isOk());
}
} // namespace functional
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index 00989e5..167fc09 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -54,29 +54,39 @@
[[maybe_unused]] auto dummy_createTestModel = createTestModel_dynamic_output_shape;
[[maybe_unused]] auto dummy_get_examples = get_examples_dynamic_output_shape;
-enum class AccessMode { READ_ONLY, WRITE_ONLY };
+enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
-void createCacheHandle(const std::vector<std::string>& files, AccessMode mode,
- hidl_handle* handle) {
- std::vector<int> fds;
- for (const auto& file : files) {
- int fd;
- if (mode == AccessMode::READ_ONLY) {
- fd = open(file.c_str(), O_RDONLY);
- } else if (mode == AccessMode::WRITE_ONLY) {
- fd = open(file.c_str(), O_WRONLY | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR);
- } else {
- FAIL();
+// Creates cache handles based on provided file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
+ const std::vector<AccessMode>& mode, hidl_vec<hidl_handle>* handles) {
+ handles->resize(fileGroups.size());
+ for (uint32_t i = 0; i < fileGroups.size(); i++) {
+ std::vector<int> fds;
+ for (const auto& file : fileGroups[i]) {
+ int fd;
+ if (mode[i] == AccessMode::READ_ONLY) {
+ fd = open(file.c_str(), O_RDONLY);
+ } else if (mode[i] == AccessMode::WRITE_ONLY) {
+ fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+ } else if (mode[i] == AccessMode::READ_WRITE) {
+ fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ } else {
+ FAIL();
+ }
+ ASSERT_GE(fd, 0);
+ fds.push_back(fd);
}
- ASSERT_GE(fd, 0);
- fds.push_back(fd);
+ native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
+ ASSERT_NE(cacheNativeHandle, nullptr);
+ std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]);
+ (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true);
}
- native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
- ASSERT_NE(cacheNativeHandle, nullptr);
- for (uint32_t i = 0; i < fds.size(); i++) {
- cacheNativeHandle->data[i] = fds[i];
- }
- handle->setTo(cacheNativeHandle, /*shouldOwn=*/true);
+}
+
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, AccessMode mode,
+ hidl_vec<hidl_handle>* handles) {
+ createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles);
}
} // namespace
@@ -88,38 +98,43 @@
NeuralnetworksHidlTest::SetUp();
ASSERT_NE(device.get(), nullptr);
- // Create cache directory. The cache directory and cache files are always created to test
- // the behavior of prepareModelFromCache, even when caching is not supported.
+ // Create cache directory. The cache directory and a temporary cache file are always created
+ // to test the behavior of prepareModelFromCache, even when caching is not supported.
char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
char* cacheDir = mkdtemp(cacheDirTemp);
ASSERT_NE(cacheDir, nullptr);
mCacheDir = cacheDir;
+ mCacheDir.push_back('/');
- // Create empty cache files.
- mCache1 = mCacheDir + "/cache1";
- mCache2 = mCacheDir + "/cache2";
- mCache3 = mCacheDir + "/cache3";
- // A dummy handle, use AccessMode::WRITE_ONLY for createCacheHandle to create files.
- hidl_handle handle;
- createCacheHandle({mCache1, mCache2, mCache3}, AccessMode::WRITE_ONLY, &handle);
-
- // Check if caching is supported.
- bool isCachingSupported;
- Return<void> ret = device->isCachingSupported(
- [&isCachingSupported](ErrorStatus status, bool supported) {
+ Return<void> ret = device->getNumberOfCacheFilesNeeded(
+ [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(ErrorStatus::NONE, status);
- isCachingSupported = supported;
+ mNumModelCache = numModelCache;
+ mNumDataCache = numDataCache;
});
EXPECT_TRUE(ret.isOk());
- if (isCachingSupported) {
- mIsCachingSupported = true;
- } else {
+ mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0;
+
+ // Create empty cache files.
+ mTmpCache = mCacheDir + "tmp";
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ mModelCache.push_back({mCacheDir + "model" + std::to_string(i)});
+ }
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ mDataCache.push_back({mCacheDir + "data" + std::to_string(i)});
+ }
+ // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files.
+ hidl_vec<hidl_handle> modelHandle, dataHandle, tmpHandle;
+ createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle);
+ createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle);
+ createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle);
+
+ if (!mIsCachingSupported) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
"support compilation caching.";
std::cout << "[ ] Early termination of test because vendor service does not "
"support compilation caching."
<< std::endl;
- mIsCachingSupported = false;
}
}
@@ -127,22 +142,49 @@
// The tmp directory is only removed when the driver reports caching not supported,
// otherwise it is kept for debugging purpose.
if (!mIsCachingSupported) {
- remove(mCache1.c_str());
- remove(mCache2.c_str());
- remove(mCache3.c_str());
+ remove(mTmpCache.c_str());
rmdir(mCacheDir.c_str());
}
NeuralnetworksHidlTest::TearDown();
}
- void saveModelToCache(sp<IPreparedModel> preparedModel, const hidl_handle& cache1,
- const hidl_handle& cache2, ErrorStatus* status) {
- // Save IPreparedModel to cache.
+ void saveModelToCache(const V1_2::Model& model, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, bool* supported,
+ sp<IPreparedModel>* preparedModel = nullptr) {
+ if (preparedModel != nullptr) *preparedModel = nullptr;
+
+ // See if service can handle model.
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = device->getSupportedOperations_1_2(
+ model,
+ [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_EQ(supported.size(), model.operations.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
+ *supported = fullySupportsModel;
+ if (!fullySupportsModel) return;
+
+ // Launch prepare model.
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ ASSERT_NE(nullptr, preparedModelCallback.get());
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
- Return<ErrorStatus> saveToCacheStatus =
- preparedModel->saveToCache(cache1, cache2, cacheToken);
- ASSERT_TRUE(saveToCacheStatus.isOk());
- *status = static_cast<ErrorStatus>(saveToCacheStatus);
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, modelCache,
+ dataCache, cacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
+
+ // Retrieve prepared model.
+ preparedModelCallback->wait();
+ ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ if (preparedModel != nullptr) {
+ *preparedModel =
+ V1_2::IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+ .withDefault(nullptr);
+ }
}
bool checkEarlyTermination(ErrorStatus status) {
@@ -157,14 +199,27 @@
return false;
}
- void prepareModelFromCache(const hidl_handle& cache1, const hidl_handle& cache2,
+ bool checkEarlyTermination(bool supported) {
+ if (!supported) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return true;
+ }
+ return false;
+ }
+
+ void prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
// Launch prepare model from cache.
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
- Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModelFromCache(cache1, cache2, cacheToken, preparedModelCallback);
+ Return<ErrorStatus> prepareLaunchStatus = device->prepareModelFromCache(
+ modelCache, dataCache, cacheToken, preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
*preparedModel = nullptr;
@@ -179,49 +234,54 @@
.withDefault(nullptr);
}
+ // Absolute path to the temporary cache directory.
std::string mCacheDir;
- std::string mCache1;
- std::string mCache2;
- std::string mCache3;
+
+ // Groups of file paths for model and data cache in the tmp cache directory, initialized with
+ // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles
+ // and the inner vector is for fds held by each handle.
+ std::vector<std::vector<std::string>> mModelCache;
+ std::vector<std::vector<std::string>> mDataCache;
+
+ // A separate temporary file path in the tmp cache directory.
+ std::string mTmpCache;
+
uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
- bool mIsCachingSupported;
+ uint32_t mNumModelCache;
+ uint32_t mNumDataCache;
+ uint32_t mIsCachingSupported;
};
TEST_F(CompilationCachingTest, CacheSavingAndRetrieval) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (!mIsCachingSupported) {
- EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- } else {
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Retrieve preparedModel from cache.
{
preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (!mIsCachingSupported) {
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
} else {
ASSERT_EQ(status, ErrorStatus::NONE);
ASSERT_NE(preparedModel, nullptr);
@@ -238,41 +298,54 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (!mIsCachingSupported) {
- EXPECT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- } else {
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ uint8_t dummyBytes[] = {0, 0};
+ // Write dummy bytes to the cache files.
+ // The driver should be able to handle non-empty cache and non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes,
+ sizeof(dummyBytes)),
+ sizeof(dummyBytes));
}
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_EQ(
+ write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
+ sizeof(dummyBytes));
+ }
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Retrieve preparedModel from cache.
{
preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_GE(read(cache1.getNativeHandle()->data[0], &dummyByte, 1), 0);
- ASSERT_GE(read(cache2.getNativeHandle()->data[0], &dummyByte, 1), 0);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ // Advance the offset of each handle by one byte.
+ // The driver should be able to handle non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (!mIsCachingSupported) {
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
} else {
ASSERT_EQ(status, ErrorStatus::NONE);
ASSERT_NE(preparedModel, nullptr);
@@ -285,234 +358,512 @@
/*testDynamicOutputShape=*/false);
}
+TEST_F(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for model cache.
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for data cache.
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ Model testModel = createTestModel();
+
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
TEST_F(CompilationCachingTest, SaveToCacheInvalidNumFd) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // cache1 with invalid NumFd.
- {
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1, mCache3}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
}
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid NumFd.
- {
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2, mCache3}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
}
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}
TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
- // cache1 with invalid NumFd.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1, mCache3}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (status != ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
- ASSERT_EQ(preparedModel, nullptr);
}
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid NumFd.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2, mCache3}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
if (status != ErrorStatus::GENERAL_FAILURE) {
ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
- ASSERT_EQ(preparedModel, nullptr);
}
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}
TEST_F(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
- // cache1 with invalid access mode.
- {
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::READ_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid access mode.
- {
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::READ_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(testModel, modelCache, dataCache, &supported, &preparedModel);
+ if (checkEarlyTermination(supported)) return;
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ generated_tests::EvaluatePreparedModel(preparedModel, [](int) { return false; },
+ get_examples(),
+ testModel.relaxComputationFloat32toFloat16,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
}
}
TEST_F(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (status != ErrorStatus::GENERAL_FAILURE) {
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
- // cache1 with invalid access mode.
- {
- preparedModel = nullptr;
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::WRITE_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
- // cache2 with invalid access mode.
- {
- preparedModel = nullptr;
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::WRITE_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
}
-TEST_F(CompilationCachingTest, SaveToCacheInvalidOffset) {
- // Create test HIDL model and compile.
- Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
-
- // cache1 with invalid file descriptor offset.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-
- // cache2 with invalid file descriptor offset.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Advance offset by one byte.
- ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-}
-
-TEST_F(CompilationCachingTest, SaveToCacheInvalidFileSize) {
- // Create test HIDL model and compile.
- Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
-
- // cache1 with invalid file size.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Write one byte and seek back to the beginning.
- ASSERT_EQ(write(cache1.getNativeHandle()->data[0], &dummyByte, 1), 1);
- ASSERT_EQ(lseek(cache1.getNativeHandle()->data[0], 0, SEEK_SET), 0);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-
- // cache2 with invalid file size.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- uint8_t dummyByte = 0;
- // Write one byte and seek back to the beginning.
- ASSERT_EQ(write(cache2.getNativeHandle()->data[0], &dummyByte, 1), 1);
- ASSERT_EQ(lseek(cache2.getNativeHandle()->data[0], 0, SEEK_SET), 0);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- }
-}
-
class CompilationCachingSecurityTest : public CompilationCachingTest,
public ::testing::WithParamInterface<uint32_t> {
protected:
@@ -537,44 +888,44 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // Save the compilation to cache.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
- // Randomly flip one single bit of the cache entry.
- FILE* pFile = fopen(mCache1.c_str(), "r+");
- ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
- long int fileSize = ftell(pFile);
- ASSERT_GT(fileSize, 0);
- ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
- int readByte = fgetc(pFile);
- ASSERT_NE(readByte, EOF);
- ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
- ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
- fclose(pFile);
+ // Randomly flip one single bit of the cache entry.
+ FILE* pFile = fopen(mModelCache[i][0].c_str(), "r+");
+ ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+ long int fileSize = ftell(pFile);
+ if (fileSize == 0) {
+ fclose(pFile);
+ continue;
+ }
+ ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+ int readByte = fgetc(pFile);
+ ASSERT_NE(readByte, EOF);
+ ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
+ ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+ fclose(pFile);
- // Retrieve preparedModel from cache, expect failure.
- {
- preparedModel = nullptr;
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- ASSERT_EQ(preparedModel, nullptr);
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
}
}
@@ -583,40 +934,37 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
- // Save the compilation to cache.
- {
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
- }
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ // Save the compilation to cache.
+ {
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
+ }
- // Randomly append bytes to the cache entry.
- FILE* pFile = fopen(mCache1.c_str(), "a");
- uint32_t appendLength = getRandomInt(1, 256);
- for (uint32_t i = 0; i < appendLength; i++) {
- ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
- }
- fclose(pFile);
+ // Randomly append bytes to the cache entry.
+ FILE* pFile = fopen(mModelCache[i][0].c_str(), "a");
+ uint32_t appendLength = getRandomInt(1, 256);
+ for (uint32_t i = 0; i < appendLength; i++) {
+ ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
+ }
+ fclose(pFile);
- // Retrieve preparedModel from cache, expect failure.
- {
- preparedModel = nullptr;
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
- ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
- ASSERT_EQ(preparedModel, nullptr);
+ // Retrieve preparedModel from cache, expect failure.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
}
}
@@ -625,20 +973,15 @@
// Create test HIDL model and compile.
Model testModel = createTestModel();
- sp<IPreparedModel> preparedModel = nullptr;
- generated_tests::PrepareModel(device, testModel, &preparedModel);
- // Terminate early if the driver cannot prepare the model.
- if (preparedModel == nullptr) return;
// Save the compilation to cache.
{
- ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::WRITE_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::WRITE_ONLY, &cache2);
- saveModelToCache(preparedModel, cache1, cache2, &status);
- if (checkEarlyTermination(status)) return;
- ASSERT_EQ(status, ErrorStatus::NONE);
+ bool supported;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(testModel, modelCache, dataCache, &supported);
+ if (checkEarlyTermination(supported)) return;
}
// Randomly flip one single bit in mToken.
@@ -647,12 +990,12 @@
// Retrieve the preparedModel from cache, expect failure.
{
- preparedModel = nullptr;
+ sp<IPreparedModel> preparedModel = nullptr;
ErrorStatus status;
- hidl_handle cache1, cache2;
- createCacheHandle({mCache1}, AccessMode::READ_ONLY, &cache1);
- createCacheHandle({mCache2}, AccessMode::READ_ONLY, &cache2);
- prepareModelFromCache(cache1, cache2, &preparedModel, &status);
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
ASSERT_EQ(preparedModel, nullptr);
}
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
index c2330b5..2988211 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp
@@ -33,6 +33,7 @@
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
///////////////////////// UTILITY FUNCTIONS /////////////////////////
@@ -54,7 +55,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus =
- device->prepareModel_1_2(model, preference, preparedModelCallback);
+ device->prepareModel_1_2(model, preference, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index d411da4..b15f657 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -37,6 +37,7 @@
using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_2::implementation::PreparedModelCallback;
using ::android::hidl::memory::V1_0::IMemory;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
using test_helper::for_all;
using test_helper::MixedTyped;
using test_helper::MixedTypedExample;
@@ -66,7 +67,8 @@
sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
ASSERT_NE(nullptr, preparedModelCallback.get());
Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2(
- model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
ASSERT_TRUE(prepareLaunchStatus.isOk());
ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
diff --git a/thermal/2.0/default/Thermal.cpp b/thermal/2.0/default/Thermal.cpp
index 0ef4b63..bbbecb8 100644
--- a/thermal/2.0/default/Thermal.cpp
+++ b/thermal/2.0/default/Thermal.cpp
@@ -38,46 +38,47 @@
std::set<sp<IThermalChangedCallback>> gCallbacks;
static const Temperature_1_0 kTemp_1_0 = {
- .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(TemperatureType::CPU),
- .name = "test temperature sensor",
- .currentValue = 98.6,
- .throttlingThreshold = 58,
- .shutdownThreshold = 60.0,
- .vrThrottlingThreshold = 59.0,
+ .type = static_cast<::android::hardware::thermal::V1_0::TemperatureType>(
+ TemperatureType::SKIN),
+ .name = "test temperature sensor",
+ .currentValue = 30.8,
+ .throttlingThreshold = 48.0,
+ .shutdownThreshold = 60.0,
+ .vrThrottlingThreshold = 49.0,
};
static const Temperature_2_0 kTemp_2_0 = {
- .type = TemperatureType::SKIN,
- .name = "test temperature sensor",
- .value = 98.6,
- .throttlingStatus = ThrottlingSeverity::CRITICAL,
+ .type = TemperatureType::SKIN,
+ .name = "test temperature sensor",
+ .value = 30.8,
+ .throttlingStatus = ThrottlingSeverity::NONE,
};
static const TemperatureThreshold kTempThreshold = {
- .type = TemperatureType::SKIN,
- .name = "test temperature sensor",
- .hotThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
- .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
- .vrThrottlingThreshold = NAN,
+ .type = TemperatureType::SKIN,
+ .name = "test temperature sensor",
+ .hotThrottlingThresholds = {{NAN, NAN, NAN, 48.0, NAN, NAN, 60.0}},
+ .coldThrottlingThresholds = {{NAN, NAN, NAN, NAN, NAN, NAN, NAN}},
+ .vrThrottlingThreshold = 49.0,
};
static const CoolingDevice_1_0 kCooling_1_0 = {
- .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
- .name = "test cooling device",
- .currentValue = 100.0,
+ .type = ::android::hardware::thermal::V1_0::CoolingType::FAN_RPM,
+ .name = "test cooling device",
+ .currentValue = 100.0,
};
static const CoolingDevice_2_0 kCooling_2_0 = {
- .type = CoolingType::CPU,
- .name = "test cooling device",
- .value = 1,
+ .type = CoolingType::FAN,
+ .name = "test cooling device",
+ .value = 100,
};
static const CpuUsage kCpuUsage = {
- .name = "cpu_name",
- .active = 0,
- .total = 0,
- .isOnline = true,
+ .name = "cpu_name",
+ .active = 0,
+ .total = 0,
+ .isOnline = true,
};
// Methods from ::android::hardware::thermal::V1_0::IThermal follow.
diff --git a/vibrator/1.3/Android.bp b/vibrator/1.3/Android.bp
index 28370d6..a2ff784 100644
--- a/vibrator/1.3/Android.bp
+++ b/vibrator/1.3/Android.bp
@@ -8,6 +8,7 @@
},
srcs: [
"IVibrator.hal",
+ "types.hal",
],
interfaces: [
"android.hardware.vibrator@1.0",
diff --git a/vibrator/1.3/IVibrator.hal b/vibrator/1.3/IVibrator.hal
index 01c2801..1c870ee 100644
--- a/vibrator/1.3/IVibrator.hal
+++ b/vibrator/1.3/IVibrator.hal
@@ -16,6 +16,7 @@
package android.hardware.vibrator@1.3;
+import @1.0::EffectStrength;
import @1.0::Status;
import @1.2::IVibrator;
@@ -41,4 +42,18 @@
* not supported by the device.
*/
setExternalControl(bool enabled) generates (Status status);
+
+ /**
+ * Fire off a predefined haptic event.
+ *
+ * @param effect The type of haptic event to trigger.
+ * @param strength The intensity of haptic event to trigger.
+ * @return status Whether the effect was successfully performed or not. Must
+ * return Status::UNSUPPORTED_OPERATION if the effect is not supported.
+ * @return lengthMs The length of time the event is expected to take in
+ * milliseconds. This doesn't need to be perfectly accurate, but should be a reasonable
+ * approximation. Should be a positive, non-zero value if the returned status is Status::OK,
+ * and set to 0 otherwise.
+ */
+ perform_1_3(Effect effect, EffectStrength strength)
+ generates (Status status, uint32_t lengthMs);
};
diff --git a/vibrator/1.3/example/Vibrator.cpp b/vibrator/1.3/example/Vibrator.cpp
index bb9a057..0cb37e6 100644
--- a/vibrator/1.3/example/Vibrator.cpp
+++ b/vibrator/1.3/example/Vibrator.cpp
@@ -74,22 +74,9 @@
// Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.
-Return<void> Vibrator::perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
- uint8_t amplitude;
- uint32_t ms;
- Status status;
-
- ALOGI("Perform: Effect %s\n", effectToName(effect));
-
- amplitude = strengthToAmplitude(strength);
- setAmplitude(amplitude);
-
- ms = effectToMs(effect);
- status = activate(ms);
-
- _hidl_cb(status, ms);
-
- return Void();
+Return<void> Vibrator::perform_1_2(V1_2::Effect effect, EffectStrength strength,
+ perform_cb _hidl_cb) {
+ return perform_1_3(static_cast<V1_3::Effect>(effect), strength, _hidl_cb);
}
// Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
@@ -110,6 +97,32 @@
}
}
+Return<void> Vibrator::perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
+ uint8_t amplitude;
+ uint32_t ms;
+ Status status = Status::OK;
+
+ ALOGI("Perform: Effect %s\n", effectToName(effect).c_str());
+
+ amplitude = strengthToAmplitude(strength, &status);
+ if (status != Status::OK) {
+ _hidl_cb(status, 0);
+ return Void();
+ }
+ setAmplitude(amplitude);
+
+ ms = effectToMs(effect, &status);
+ if (status != Status::OK) {
+ _hidl_cb(status, 0);
+ return Void();
+ }
+ status = activate(ms);
+
+ _hidl_cb(status, ms);
+
+ return Void();
+}
+
// Private methods follow.
Status Vibrator::enable(bool enabled) {
@@ -173,17 +186,18 @@
static_cast<Vibrator*>(sigval.sival_ptr)->timeout();
}
-const char* Vibrator::effectToName(Effect effect) {
- return toString(effect).c_str();
+const std::string Vibrator::effectToName(Effect effect) {
+ return toString(effect);
}
-uint32_t Vibrator::effectToMs(Effect effect) {
+uint32_t Vibrator::effectToMs(Effect effect, Status* status) {
switch (effect) {
case Effect::CLICK:
return 10;
case Effect::DOUBLE_CLICK:
return 15;
case Effect::TICK:
+ case Effect::TEXTURE_TICK:
return 5;
case Effect::THUD:
return 5;
@@ -222,9 +236,11 @@
case Effect::RINGTONE_15:
return 30000;
}
+ *status = Status::UNSUPPORTED_OPERATION;
+ return 0;
}
-uint8_t Vibrator::strengthToAmplitude(EffectStrength strength) {
+uint8_t Vibrator::strengthToAmplitude(EffectStrength strength, Status* status) {
switch (strength) {
case EffectStrength::LIGHT:
return 128;
@@ -233,6 +249,8 @@
case EffectStrength::STRONG:
return 255;
}
+ *status = Status::UNSUPPORTED_OPERATION;
+ return 0;
}
} // namespace implementation
diff --git a/vibrator/1.3/example/Vibrator.h b/vibrator/1.3/example/Vibrator.h
index a931b63..64e8e1b 100644
--- a/vibrator/1.3/example/Vibrator.h
+++ b/vibrator/1.3/example/Vibrator.h
@@ -27,7 +27,6 @@
using android::hardware::vibrator::V1_0::EffectStrength;
using android::hardware::vibrator::V1_0::Status;
-using android::hardware::vibrator::V1_2::Effect;
class Vibrator : public IVibrator {
public:
@@ -46,11 +45,13 @@
perform_cb _hidl_cb) override;
// Methods from ::android::hardware::vibrator::V1_2::IVibrator follow.
- Return<void> perform_1_2(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
+ Return<void> perform_1_2(V1_2::Effect effect, EffectStrength strength,
+ perform_cb _hidl_cb) override;
// Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
Return<bool> supportsExternalControl() override;
Return<Status> setExternalControl(bool enabled) override;
+ Return<void> perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
private:
Status enable(bool enabled);
@@ -58,9 +59,9 @@
void timeout();
static void timerCallback(union sigval sigval);
- static const char* effectToName(Effect effect);
- static uint32_t effectToMs(Effect effect);
- static uint8_t strengthToAmplitude(EffectStrength strength);
+ static const std::string effectToName(Effect effect);
+ static uint32_t effectToMs(Effect effect, Status* status);
+ static uint8_t strengthToAmplitude(EffectStrength strength, Status* status);
private:
bool mEnabled{false};
diff --git a/vibrator/1.3/types.hal b/vibrator/1.3/types.hal
new file mode 100644
index 0000000..ceb62a5
--- /dev/null
+++ b/vibrator/1.3/types.hal
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator@1.3;
+
+import @1.2::Effect;
+
+enum Effect : @1.2::Effect {
+ /**
+ * A soft tick effect meant to be played as a texture.
+ *
+ * A soft, short sensation like the tick of a clock. Unlike regular effects, texture effects
+ * are expected to be played multiple times in quick succession, replicating a specific
+ * texture to the user as a form of haptic feedback.
+ */
+ TEXTURE_TICK
+};
diff --git a/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp b/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
index a67d1dc..818f9c7 100644
--- a/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
+++ b/vibrator/1.3/vts/functional/VtsHalVibratorV1_3TargetTest.cpp
@@ -24,9 +24,16 @@
#include <unistd.h>
using ::android::sp;
+using ::android::hardware::hidl_enum_range;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::vibrator::V1_0::EffectStrength;
using ::android::hardware::vibrator::V1_0::Status;
+using ::android::hardware::vibrator::V1_3::Effect;
using ::android::hardware::vibrator::V1_3::IVibrator;
+#define EXPECT_OK(ret) ASSERT_TRUE((ret).isOk())
+
// Test environment for Vibrator HIDL HAL.
class VibratorHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
public:
@@ -71,6 +78,74 @@
}
}
+static void validatePerformEffectUnsupportedOperation(Status status, uint32_t lengthMs) {
+ ASSERT_EQ(Status::UNSUPPORTED_OPERATION, status);
+ ASSERT_EQ(static_cast<uint32_t>(0), lengthMs)
+ << "Effects that return UNSUPPORTED_OPERATION must have a duration of zero";
+}
+
+static void validatePerformEffect(Status status, uint32_t lengthMs) {
+ ASSERT_TRUE(status == Status::OK || status == Status::UNSUPPORTED_OPERATION);
+ if (status == Status::OK) {
+ ASSERT_LT(static_cast<uint32_t>(0), lengthMs)
+ << "Effects that return OK must return a positive duration";
+ } else {
+ validatePerformEffectUnsupportedOperation(status, lengthMs);
+ }
+}
+
+/*
+ * Test to make sure that effects within the valid range are either supported and return OK with
+ * a valid duration, or are unsupported and return UNSUPPORTED_OPERATION with a duration of 0.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3) {
+ for (const auto& effect : hidl_enum_range<Effect>()) {
+ for (const auto& strength : hidl_enum_range<EffectStrength>()) {
+ EXPECT_OK(vibrator->perform_1_3(effect, strength, validatePerformEffect));
+ }
+ }
+}
+
+/*
+ * Test to make sure effect values above the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_AboveValidRange) {
+ Effect effect = *std::prev(hidl_enum_range<Effect>().end());
+ Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) + 1);
+ EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure effect values below the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadEffects_BelowValidRange) {
+ Effect effect = *hidl_enum_range<Effect>().begin();
+ Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) - 1);
+ EXPECT_OK(vibrator->perform_1_3(badEffect, EffectStrength::LIGHT,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values above the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_AboveValidRange) {
+ EffectStrength strength = *std::prev(hidl_enum_range<EffectStrength>().end());
+ EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) + 1);
+ EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values below the valid range are rejected.
+ */
+TEST_F(VibratorHidlTest_1_3, PerformEffect_1_3_BadStrength_BelowValidRange) {
+ EffectStrength strength = *hidl_enum_range<EffectStrength>().begin();
+ EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) - 1);
+ EXPECT_OK(vibrator->perform_1_3(Effect::THUD, badStrength,
+ validatePerformEffectUnsupportedOperation));
+}
+
int main(int argc, char** argv) {
::testing::AddGlobalTestEnvironment(VibratorHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
diff --git a/wifi/1.3/IWifiChip.hal b/wifi/1.3/IWifiChip.hal
index fc6dbac..72cee89 100644
--- a/wifi/1.3/IWifiChip.hal
+++ b/wifi/1.3/IWifiChip.hal
@@ -65,10 +65,14 @@
/**
* API to set the wifi latency mode
*
- * Latency mode determines whether or not to optimize for reducing wifi
- * latency as a tradeoff with other wifi functionality such as scanning,
- * roaming, etc. This optimization is suitable for some applications such
- * as gaming and virtual reality applications.
+ * The latency mode is a hint to the HAL to enable or disable Wi-Fi latency
+ * optimization. The optimization should be enabled if the mode is set to |LOW|
+ * and should be disabled if the mode is set to |NORMAL|.
+ * Wi-Fi latency optimization may trade-off latency against other Wi-Fi
+ * functionality such as scanning, roaming, etc. but it should not result in
+ * completely halting this functionality.
+ *
+ * The low latency mode targets applications such as gaming and virtual reality.
*/
setLatencyMode(LatencyMode mode) generates (WifiStatus status);
diff --git a/wifi/1.3/default/tests/mock_wifi_legacy_hal.h b/wifi/1.3/default/tests/mock_wifi_legacy_hal.h
index deb3a5a..65fd115 100644
--- a/wifi/1.3/default/tests/mock_wifi_legacy_hal.h
+++ b/wifi/1.3/default/tests/mock_wifi_legacy_hal.h
@@ -39,6 +39,10 @@
MOCK_METHOD2(registerRadioModeChangeCallbackHandler,
wifi_error(const std::string&,
const on_radio_mode_change_callback&));
+ MOCK_METHOD1(getFirmwareVersion, std::pair<wifi_error, std::string>(
+ const std::string& iface_name));
+ MOCK_METHOD1(getDriverVersion, std::pair<wifi_error, std::string>(
+ const std::string& iface_name));
MOCK_METHOD2(nanRegisterCallbackHandlers,
wifi_error(const std::string&, const NanCallbackHandlers&));
MOCK_METHOD2(nanDisableRequest,
diff --git a/wifi/1.3/default/wifi_legacy_hal.h b/wifi/1.3/default/wifi_legacy_hal.h
index 70a919f..4d6beb3 100644
--- a/wifi/1.3/default/wifi_legacy_hal.h
+++ b/wifi/1.3/default/wifi_legacy_hal.h
@@ -184,9 +184,9 @@
// Checks if legacy HAL has successfully started
bool isStarted();
// Wrappers for all the functions in the legacy HAL function table.
- std::pair<wifi_error, std::string> getDriverVersion(
+ virtual std::pair<wifi_error, std::string> getDriverVersion(
const std::string& iface_name);
- std::pair<wifi_error, std::string> getFirmwareVersion(
+ virtual std::pair<wifi_error, std::string> getFirmwareVersion(
const std::string& iface_name);
std::pair<wifi_error, std::vector<uint8_t>> requestDriverMemoryDump(
const std::string& iface_name);