Merge "Convert VtsHalBluetoothAudioV2_0TargetTest to be parameterized test"
diff --git a/audio/common/6.0/Android.bp b/audio/common/6.0/Android.bp
index 1a4e054..94f1cf8 100644
--- a/audio/common/6.0/Android.bp
+++ b/audio/common/6.0/Android.bp
@@ -12,6 +12,6 @@
interfaces: [
"android.hidl.safe_union@1.0",
],
- gen_java: false,
+ gen_java: true,
gen_java_constants: true,
}
diff --git a/audio/core/all-versions/default/Android.bp b/audio/core/all-versions/default/Android.bp
index 007ad85..6f18d1d 100644
--- a/audio/core/all-versions/default/Android.bp
+++ b/audio/core/all-versions/default/Android.bp
@@ -19,6 +19,7 @@
export_include_dirs: ["include"],
shared_libs: [
+ "libaudiofoundation",
"libbase",
"libcutils",
"libfmq",
diff --git a/audio/core/all-versions/default/Stream.cpp b/audio/core/all-versions/default/Stream.cpp
index 5f24a5d..74e5945 100644
--- a/audio/core/all-versions/default/Stream.cpp
+++ b/audio/core/all-versions/default/Stream.cpp
@@ -26,9 +26,8 @@
#include <android/log.h>
#include <hardware/audio.h>
#include <hardware/audio_effect.h>
+#include <media/AudioContainers.h>
#include <media/TypeConverter.h>
-#include <utils/SortedVector.h>
-#include <utils/Vector.h>
namespace android {
namespace hardware {
@@ -100,11 +99,11 @@
Result result =
getParam(AudioParameter::keyStreamSupportedSamplingRates, &halListValue, context);
hidl_vec<uint32_t> sampleRates;
- SortedVector<uint32_t> halSampleRates;
+ SampleRateSet halSampleRates;
if (result == Result::OK) {
halSampleRates =
samplingRatesFromString(halListValue.string(), AudioParameter::valueListSeparator);
- sampleRates.setToExternal(halSampleRates.editArray(), halSampleRates.size());
+ sampleRates = hidl_vec<uint32_t>(halSampleRates.begin(), halSampleRates.end());
// Legacy get_parameter does not return a status_t, thus can not advertise of failure.
// Note that this method must succeed (non empty list) if the format is supported.
if (sampleRates.size() == 0) {
@@ -126,13 +125,14 @@
String8 halListValue;
Result result = getParam(AudioParameter::keyStreamSupportedChannels, &halListValue, context);
hidl_vec<AudioChannelBitfield> channelMasks;
- SortedVector<audio_channel_mask_t> halChannelMasks;
+ ChannelMaskSet halChannelMasks;
if (result == Result::OK) {
halChannelMasks =
channelMasksFromString(halListValue.string(), AudioParameter::valueListSeparator);
channelMasks.resize(halChannelMasks.size());
- for (size_t i = 0; i < halChannelMasks.size(); ++i) {
- channelMasks[i] = AudioChannelBitfield(halChannelMasks[i]);
+ size_t i = 0;
+ for (auto channelMask : halChannelMasks) {
+ channelMasks[i++] = AudioChannelBitfield(channelMask);
}
// Legacy get_parameter does not return a status_t, thus can not advertise of failure.
// Note that this method must succeed (non empty list) if the format is supported.
@@ -168,7 +168,7 @@
String8 halListValue;
Result result = getParam(AudioParameter::keyStreamSupportedFormats, &halListValue);
hidl_vec<AudioFormat> formats;
- Vector<audio_format_t> halFormats;
+ FormatVector halFormats;
if (result == Result::OK) {
halFormats = formatsFromString(halListValue.string(), AudioParameter::valueListSeparator);
formats.resize(halFormats.size());
diff --git a/audio/core/all-versions/vts/functional/Android.bp b/audio/core/all-versions/vts/functional/Android.bp
index 3286ecb..73af7f4 100644
--- a/audio/core/all-versions/vts/functional/Android.bp
+++ b/audio/core/all-versions/vts/functional/Android.bp
@@ -24,6 +24,7 @@
"libxml2",
],
shared_libs: [
+ "libaudiofoundation",
"libfmq",
],
header_libs: [
diff --git a/biometrics/face/1.0/vts/functional/Android.bp b/biometrics/face/1.0/vts/functional/Android.bp
index fa68c4e..f2598a7 100644
--- a/biometrics/face/1.0/vts/functional/Android.bp
+++ b/biometrics/face/1.0/vts/functional/Android.bp
@@ -19,6 +19,6 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalBiometricsFaceV1_0TargetTest.cpp"],
static_libs: ["android.hardware.biometrics.face@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
index a4e95ed..7ac44a4 100644
--- a/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
+++ b/biometrics/face/1.0/vts/functional/VtsHalBiometricsFaceV1_0TargetTest.cpp
@@ -20,9 +20,10 @@
#include <android/hardware/biometrics/face/1.0/IBiometricsFaceClientCallback.h>
#include <VtsHalHidlTargetCallbackBase.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <chrono>
#include <cstdint>
@@ -124,27 +125,11 @@
}
};
-// Test environment for the BiometricsFace HAL.
-class FaceHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // Get the test environment singleton.
- static FaceHidlEnvironment* Instance() {
- static FaceHidlEnvironment* instance = new FaceHidlEnvironment;
- return instance;
- }
-
- void registerTestServices() override { registerTestService<IBiometricsFace>(); }
-
- private:
- FaceHidlEnvironment() = default;
-};
-
// Test class for the BiometricsFace HAL.
-class FaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class FaceHidlTest : public ::testing::TestWithParam<std::string> {
public:
void SetUp() override {
- mService = ::testing::VtsHalHidlTargetTestBase::getService<IBiometricsFace>(
- FaceHidlEnvironment::Instance()->getServiceName<IBiometricsFace>());
+ mService = IBiometricsFace::getService(GetParam());
ASSERT_NE(mService, nullptr);
mCallback = new FaceCallback();
mCallback->SetWaitTimeoutDefault(kTimeout);
@@ -167,7 +152,7 @@
// generateChallenge should always return a unique, cryptographically secure,
// non-zero number.
-TEST_F(FaceHidlTest, GenerateChallengeTest) {
+TEST_P(FaceHidlTest, GenerateChallengeTest) {
std::map<uint64_t, int> m;
for (int i = 0; i < kGenerateChallengeIterations; ++i) {
Return<void> ret =
@@ -182,7 +167,7 @@
}
// enroll with an invalid (all zeroes) HAT should fail.
-TEST_F(FaceHidlTest, EnrollZeroHatTest) {
+TEST_P(FaceHidlTest, EnrollZeroHatTest) {
// Filling HAT with zeros
hidl_vec<uint8_t> token(69);
for (size_t i = 0; i < 69; i++) {
@@ -200,7 +185,7 @@
}
// enroll with an invalid HAT should fail.
-TEST_F(FaceHidlTest, EnrollGarbageHatTest) {
+TEST_P(FaceHidlTest, EnrollGarbageHatTest) {
// Filling HAT with pseudorandom invalid data.
// Using default seed to make the test reproducible.
std::mt19937 gen(std::mt19937::default_seed);
@@ -221,7 +206,7 @@
}
// setFeature with an invalid (all zeros) HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureZeroHatTest) {
+TEST_P(FaceHidlTest, SetFeatureZeroHatTest) {
hidl_vec<uint8_t> token(69);
for (size_t i = 0; i < 69; i++) {
token[i] = 0;
@@ -232,7 +217,7 @@
}
// setFeature with an invalid HAT should fail.
-TEST_F(FaceHidlTest, SetFeatureGarbageHatTest) {
+TEST_P(FaceHidlTest, SetFeatureGarbageHatTest) {
// Filling HAT with pseudorandom invalid data.
// Using default seed to make the test reproducible.
std::mt19937 gen(std::mt19937::default_seed);
@@ -254,16 +239,16 @@
ASSERT_TRUE(res.isOk());
}
-TEST_F(FaceHidlTest, GetFeatureRequireAttentionTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireAttentionTest) {
assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_ATTENTION);
}
-TEST_F(FaceHidlTest, GetFeatureRequireDiversityTest) {
+TEST_P(FaceHidlTest, GetFeatureRequireDiversityTest) {
assertGetFeatureFails(mService, 0 /* faceId */, Feature::REQUIRE_DIVERSITY);
}
// revokeChallenge should always return within the timeout
-TEST_F(FaceHidlTest, RevokeChallengeTest) {
+TEST_P(FaceHidlTest, RevokeChallengeTest) {
auto start = std::chrono::system_clock::now();
Return<Status> ret = mService->revokeChallenge();
auto elapsed = std::chrono::system_clock::now() - start;
@@ -272,14 +257,14 @@
}
// The call to getAuthenticatorId should succeed.
-TEST_F(FaceHidlTest, GetAuthenticatorIdTest) {
+TEST_P(FaceHidlTest, GetAuthenticatorIdTest) {
Return<void> ret = mService->getAuthenticatorId(
[](const OptionalUint64& res) { ASSERT_EQ(Status::OK, res.status); });
ASSERT_TRUE(ret.isOk());
}
// The call to enumerate should succeed.
-TEST_F(FaceHidlTest, EnumerateTest) {
+TEST_P(FaceHidlTest, EnumerateTest) {
Return<Status> ret = mService->enumerate();
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
auto res = mCallback->WaitForCallback(kCallbackNameOnEnumerate);
@@ -288,21 +273,21 @@
}
// The call to remove should succeed for any faceId
-TEST_F(FaceHidlTest, RemoveFaceTest) {
+TEST_P(FaceHidlTest, RemoveFaceTest) {
// Remove a face
Return<Status> ret = mService->remove(kFaceId);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
}
// Remove should accept 0 to delete all faces
-TEST_F(FaceHidlTest, RemoveAllFacesTest) {
+TEST_P(FaceHidlTest, RemoveAllFacesTest) {
// Remove all faces
Return<Status> ret = mService->remove(0);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
}
// Active user should successfully set to a writable location.
-TEST_F(FaceHidlTest, SetActiveUserTest) {
+TEST_P(FaceHidlTest, SetActiveUserTest) {
// Create an active user
Return<Status> ret = mService->setActiveUser(2, kFacedataDir);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -313,7 +298,7 @@
}
// Active user should fail to set to an unwritable location.
-TEST_F(FaceHidlTest, SetActiveUserUnwritableTest) {
+TEST_P(FaceHidlTest, SetActiveUserUnwritableTest) {
// Create an active user to an unwritable location (device root dir)
Return<Status> ret = mService->setActiveUser(3, "/");
ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -324,7 +309,7 @@
}
// Active user should fail to set to a null location.
-TEST_F(FaceHidlTest, SetActiveUserNullTest) {
+TEST_P(FaceHidlTest, SetActiveUserNullTest) {
// Create an active user to a null location.
Return<Status> ret = mService->setActiveUser(4, nullptr);
ASSERT_NE(Status::OK, static_cast<Status>(ret));
@@ -336,7 +321,7 @@
// Cancel should always return CANCELED from any starting state including
// the IDLE state.
-TEST_F(FaceHidlTest, CancelTest) {
+TEST_P(FaceHidlTest, CancelTest) {
Return<Status> ret = mService->cancel();
// check that we were able to make an IPC request successfully
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -347,7 +332,7 @@
EXPECT_EQ(FaceError::CANCELED, res.args->error);
}
-TEST_F(FaceHidlTest, OnLockoutChangedTest) {
+TEST_P(FaceHidlTest, OnLockoutChangedTest) {
// Update active user and ensure onLockoutChanged was called.
Return<Status> ret = mService->setActiveUser(kUserId + 1, kFacedataDir);
ASSERT_EQ(Status::OK, static_cast<Status>(ret));
@@ -359,11 +344,7 @@
} // anonymous namespace
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(FaceHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- FaceHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, FaceHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IBiometricsFace::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/cas/1.2/Android.bp b/cas/1.2/Android.bp
index af98b2e..fbb38b0 100644
--- a/cas/1.2/Android.bp
+++ b/cas/1.2/Android.bp
@@ -7,10 +7,10 @@
enabled: true,
},
srcs: [
+ "types.hal",
"ICas.hal",
"ICasListener.hal",
"IMediaCasService.hal",
- "types.hal",
],
interfaces: [
"android.hardware.cas@1.0",
diff --git a/compatibility_matrices/Android.bp b/compatibility_matrices/Android.bp
index 799ab07..7883dd9 100644
--- a/compatibility_matrices/Android.bp
+++ b/compatibility_matrices/Android.bp
@@ -83,8 +83,8 @@
"compatibility_matrix.current.xml",
],
kernel_configs: [
- "kernel_config_current_4.9",
"kernel_config_current_4.14",
"kernel_config_current_4.19",
+ "kernel_config_current_5.4",
]
}
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index f0b5966..ed88274 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -367,10 +367,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.radio.config</name>
- <!--
- See compatibility_matrix.4.xml on versioning of radio config HAL.
- -->
- <version>1.1</version>
+ <version>1.3</version>
<interface>
<name>IRadioConfig</name>
<instance>default</instance>
diff --git a/current.txt b/current.txt
index 4bf18fc..9c554e8 100644
--- a/current.txt
+++ b/current.txt
@@ -575,10 +575,10 @@
b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardware.keymaster@4.0::IKeymasterDevice
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardware.neuralnetworks@1.0::types
-9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
+5f6d3097ba84cb63c430787123f4de1b31c11f90b531b98eae9a8623a5ae962a android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
-71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
+2d5483fbf59d5fd2de94665a6df05da5c3d09de67561d0db5e9f09e59e9aea46 android.hardware.neuralnetworks@1.2::types
a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
@@ -597,8 +597,18 @@
9e59fffceed0dd72a9799e04505db5f777bbbea1af0695ba4107ef6d967c6fda android.hardware.neuralnetworks@1.3::IDevice
4a6c3b3556da951b4def21ba579a227c022980fe4465df6cdfbe20628fa75f5a android.hardware.neuralnetworks@1.3::IPreparedModel
94e803236398bed1febb11cc21051bc42ec003700139b099d6c479e02a7ca3c3 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
-b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
+cf1d55e8c68300090747ab90b94c22e4c859b29c84ced68a317c595bb115eab2 android.hardware.neuralnetworks@1.3::types
+3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
+a64467bae843569f0d465c5be7f0c7a5b987985b55a3ef4794dd5afc68538650 android.hardware.wifi.supplicant@1.3::ISupplicant
+44445b8a03d7b9e68b2fbd954672c18a8fce9e32851b0692f4f4ab3407f86ecb android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
+619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
+c9273429fcf98d797d3bb07fdba6f1be95bf960f9255cde169fd1ca4db85f856 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
+9b0a3ab6f4f74b971ed094426d8a443e29b512ff03e1ab50c07156396cdb2483 android.hardware.wifi.supplicant@1.3::types
274fb1254a6d1a97824ec5c880eeefc0e410dc6d3a2a4c34052201169d2b7de0 android.hardware.radio@1.5::types
c8e81d912827a5d49b2ddcdc4eb4556c5d231a899a1dca879309e04210daa4a0 android.hardware.radio@1.5::IRadio
a62a93faf173b14a6175b683ebf61ffa568dc61f81e369d2dce7b1265e86cf2f android.hardware.radio@1.5::IRadioIndication
-260ce05806d753d728f844d405e832179ed7d9b65986ec18fef3d21cf7285587 android.hardware.radio@1.5::IRadioResponse
\ No newline at end of file
+260ce05806d753d728f844d405e832179ed7d9b65986ec18fef3d21cf7285587 android.hardware.radio@1.5::IRadioResponse
+55f0a15642869ec98a55ea0a5ac049d3e1a6245ff7750deb6bcb7182057eee83 android.hardware.radio.config@1.3::types
+b27ab0cd40b0b078cdcd024bfe1061c4c4c065f3519eeb9347fa359a3268a5ae android.hardware.radio.config@1.3::IRadioConfig
+742360c775313438b0f82256eac62fb5bbc76a6ae6f388573f3aa142fb2c1eea android.hardware.radio.config@1.3::IRadioConfigIndication
+7683fed9d253956071f18b152e6be657719536f98d9b534433d5e411bcde5061 android.hardware.radio.config@1.3::IRadioConfigResponse
diff --git a/drm/1.0/Android.bp b/drm/1.0/Android.bp
index a950c57..9049af2 100644
--- a/drm/1.0/Android.bp
+++ b/drm/1.0/Android.bp
@@ -17,5 +17,5 @@
interfaces: [
"android.hidl.base@1.0",
],
- gen_java: false,
+ gen_java: true,
}
diff --git a/gatekeeper/1.0/vts/functional/Android.bp b/gatekeeper/1.0/vts/functional/Android.bp
index f55e5d2..a115285 100644
--- a/gatekeeper/1.0/vts/functional/Android.bp
+++ b/gatekeeper/1.0/vts/functional/Android.bp
@@ -19,5 +19,5 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalGatekeeperV1_0TargetTest.cpp"],
static_libs: ["android.hardware.gatekeeper@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp b/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
index 715e9fc..afc737c 100644
--- a/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
+++ b/gatekeeper/1.0/vts/functional/VtsHalGatekeeperV1_0TargetTest.cpp
@@ -24,7 +24,10 @@
#include <inttypes.h>
#include <unistd.h>
+#include <gtest/gtest.h>
#include <hardware/hw_auth_token.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <android/log.h>
#include <android/hardware/gatekeeper/1.0/IGatekeeper.h>
@@ -32,9 +35,6 @@
#include <log/log.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::gatekeeper::V1_0::IGatekeeper;
@@ -78,22 +78,8 @@
return auth_token;
}
-// Test environment for Gatekeeper HIDL HAL.
-class GatekeeperHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static GatekeeperHidlEnvironment* Instance() {
- static GatekeeperHidlEnvironment* instance = new GatekeeperHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IGatekeeper>(); }
- private:
- GatekeeperHidlEnvironment() {}
-};
-
// The main test class for Gatekeeper HIDL HAL.
-class GatekeeperHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class GatekeeperHidlTest : public ::testing::TestWithParam<std::string> {
protected:
void setUid(uint32_t uid) { uid_ = uid; }
@@ -204,8 +190,7 @@
GatekeeperHidlTest() : uid_(0) {}
virtual void SetUp() override {
GatekeeperResponse rsp;
- gatekeeper_ = ::testing::VtsHalHidlTargetTestBase::getService<IGatekeeper>(
- GatekeeperHidlEnvironment::Instance()->getServiceName<IGatekeeper>());
+ gatekeeper_ = IGatekeeper::getService(GetParam());
ASSERT_NE(nullptr, gatekeeper_.get());
doDeleteAllUsers(rsp);
}
@@ -219,7 +204,7 @@
/**
* Ensure we can enroll new password
*/
-TEST_F(GatekeeperHidlTest, EnrollSuccess) {
+TEST_P(GatekeeperHidlTest, EnrollSuccess) {
hidl_vec<uint8_t> password;
GatekeeperResponse rsp;
ALOGI("Testing Enroll (expected success)");
@@ -231,7 +216,7 @@
/**
* Ensure we can not enroll empty password
*/
-TEST_F(GatekeeperHidlTest, EnrollNoPassword) {
+TEST_P(GatekeeperHidlTest, EnrollNoPassword) {
hidl_vec<uint8_t> password;
GatekeeperResponse rsp;
ALOGI("Testing Enroll (expected failure)");
@@ -242,7 +227,7 @@
/**
* Ensure we can successfully verify previously enrolled password
*/
-TEST_F(GatekeeperHidlTest, VerifySuccess) {
+TEST_P(GatekeeperHidlTest, VerifySuccess) {
GatekeeperResponse enrollRsp;
GatekeeperResponse verifyRsp;
hidl_vec<uint8_t> password;
@@ -258,7 +243,7 @@
* Ensure we can securely update password (keep the same
* secure user_id) if we prove we know old password
*/
-TEST_F(GatekeeperHidlTest, TrustedReenroll) {
+TEST_P(GatekeeperHidlTest, TrustedReenroll) {
GatekeeperResponse enrollRsp;
GatekeeperRequest reenrollReq;
GatekeeperResponse reenrollRsp;
@@ -297,7 +282,7 @@
* Ensure we can update password (and get new
* secure user_id) if we don't know old password
*/
-TEST_F(GatekeeperHidlTest, UntrustedReenroll) {
+TEST_P(GatekeeperHidlTest, UntrustedReenroll) {
GatekeeperResponse enrollRsp;
GatekeeperResponse reenrollRsp;
GatekeeperResponse verifyRsp;
@@ -327,7 +312,7 @@
/**
* Ensure we dont get successful verify with invalid data
*/
-TEST_F(GatekeeperHidlTest, VerifyNoData) {
+TEST_P(GatekeeperHidlTest, VerifyNoData) {
hidl_vec<uint8_t> password;
hidl_vec<uint8_t> passwordHandle;
GatekeeperResponse verifyRsp;
@@ -341,7 +326,7 @@
/**
* Ensure we can not verify password after we enrolled it and then deleted user
*/
-TEST_F(GatekeeperHidlTest, DeleteUserTest) {
+TEST_P(GatekeeperHidlTest, DeleteUserTest) {
hidl_vec<uint8_t> password;
GatekeeperResponse enrollRsp;
GatekeeperResponse verifyRsp;
@@ -368,7 +353,7 @@
/**
* Ensure we can not delete a user that does not exist
*/
-TEST_F(GatekeeperHidlTest, DeleteInvalidUserTest) {
+TEST_P(GatekeeperHidlTest, DeleteInvalidUserTest) {
hidl_vec<uint8_t> password;
GatekeeperResponse enrollRsp;
GatekeeperResponse verifyRsp;
@@ -400,7 +385,7 @@
* Ensure we can not verify passwords after we enrolled them and then deleted
* all users
*/
-TEST_F(GatekeeperHidlTest, DeleteAllUsersTest) {
+TEST_P(GatekeeperHidlTest, DeleteAllUsersTest) {
struct UserData {
uint32_t userId;
hidl_vec<uint8_t> password;
@@ -448,11 +433,7 @@
ALOGI("Testing deleteAllUsers done: rsp=%" PRIi32, delAllRsp.code);
}
-int main(int argc, char **argv) {
- ::testing::AddGlobalTestEnvironment(GatekeeperHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- GatekeeperHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, GatekeeperHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGatekeeper::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/graphics/composer/2.3/utils/command-buffer/include/composer-command-buffer/2.3/ComposerCommandBuffer.h b/graphics/composer/2.3/utils/command-buffer/include/composer-command-buffer/2.3/ComposerCommandBuffer.h
index 11863fa..e1a870e 100644
--- a/graphics/composer/2.3/utils/command-buffer/include/composer-command-buffer/2.3/ComposerCommandBuffer.h
+++ b/graphics/composer/2.3/utils/command-buffer/include/composer-command-buffer/2.3/ComposerCommandBuffer.h
@@ -79,6 +79,7 @@
void setLayerPerFrameMetadataBlobs(
const hidl_vec<IComposerClient::PerFrameMetadataBlob>& metadata) {
+ // in units of uint32_t's
size_t commandLength = 0;
if (metadata.size() > std::numeric_limits<uint32_t>::max()) {
@@ -86,12 +87,12 @@
return;
}
- // number of blobs
- commandLength += metadata.size();
+ // space for numElements
+ commandLength += 1;
for (auto metadataBlob : metadata) {
- commandLength += sizeof(int32_t); // key of metadata blob
- commandLength += 1; // size information of metadata blob
+ commandLength += 1; // key of metadata blob
+ commandLength += 1; // size information of metadata blob
// metadata content size
size_t metadataSize = metadataBlob.blob.size() / sizeof(uint32_t);
diff --git a/health/2.0/default/healthd_common_adapter.cpp b/health/2.0/default/healthd_common_adapter.cpp
index 8fc689d..0b5d869 100644
--- a/health/2.0/default/healthd_common_adapter.cpp
+++ b/health/2.0/default/healthd_common_adapter.cpp
@@ -49,11 +49,14 @@
static std::unique_ptr<HealthLoopAdapter> health_loop;
int healthd_register_event(int fd, void (*handler)(uint32_t), EventWakeup wakeup) {
+ if (!health_loop) return -1;
+
auto wrapped_handler = [handler](auto*, uint32_t epevents) { handler(epevents); };
return health_loop->RegisterEvent(fd, wrapped_handler, wakeup);
}
void healthd_battery_update_internal(bool charger_online) {
+ if (!health_loop) return;
health_loop->AdjustWakealarmPeriods(charger_online);
}
diff --git a/keymaster/4.0/vts/performance/Android.bp b/keymaster/4.0/vts/performance/Android.bp
new file mode 100644
index 0000000..9434bc9
--- /dev/null
+++ b/keymaster/4.0/vts/performance/Android.bp
@@ -0,0 +1,29 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_benchmark {
+ name: "keymaster_benchmark",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "Benchmark.cpp",
+ ],
+ static_libs: [
+ "android.hardware.keymaster@4.0",
+ "libkeymaster4support",
+ "libsoftkeymasterdevice",
+ "libchrome"
+ ],
+}
diff --git a/keymaster/4.0/vts/performance/Benchmark.cpp b/keymaster/4.0/vts/performance/Benchmark.cpp
new file mode 100644
index 0000000..96ef5bf
--- /dev/null
+++ b/keymaster/4.0/vts/performance/Benchmark.cpp
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "keymaster_benchmark"
+
+#include <android/hardware/keymaster/4.0/IKeymasterDevice.h>
+#include <android/hardware/keymaster/4.0/types.h>
+#include <keymaster/keymaster_configuration.h>
+#include <keymasterV4_0/authorization_set.h>
+
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <binder/IServiceManager.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <iostream>
+
+#include <log/log.h>
+#include <utils/StrongPointer.h>
+
+#include <benchmark/benchmark.h>
+#include <hidl/Status.h>
+
+#include <base/command_line.h>
+
+namespace android {
+namespace hardware {
+namespace keymaster {
+namespace V4_0 {
+namespace test {
+
+// libutils:
+using android::OK;
+using android::sp;
+using android::status_t;
+
+// libhidl:
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+
+// IKeymaster:
+using android::IServiceManager;
+using android::hardware::hidl_string;
+using android::hardware::keymaster::V4_0::AuthorizationSet;
+using android::hardware::keymaster::V4_0::AuthorizationSetBuilder;
+using android::hardware::keymaster::V4_0::BlockMode;
+using android::hardware::keymaster::V4_0::ErrorCode;
+using android::hardware::keymaster::V4_0::IKeymasterDevice;
+using android::hardware::keymaster::V4_0::KeyCharacteristics;
+using android::hardware::keymaster::V4_0::SecurityLevel;
+
+// Standard library:
+using std::cerr;
+using std::cout;
+using std::endl;
+using std::optional;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+class HidlBuf : public hidl_vec<uint8_t> {
+ typedef hidl_vec<uint8_t> super;
+
+ public:
+ HidlBuf() {}
+ HidlBuf(const super& other) : super(other) {}
+ HidlBuf(super&& other) : super(std::move(other)) {}
+ explicit HidlBuf(const std::string& other) : HidlBuf() { *this = other; }
+
+ HidlBuf& operator=(const super& other) {
+ super::operator=(other);
+ return *this;
+ }
+
+ HidlBuf& operator=(super&& other) {
+ super::operator=(std::move(other));
+ return *this;
+ }
+
+ HidlBuf& operator=(const string& other) {
+ resize(other.size());
+ std::copy(other.begin(), other.end(), begin());
+ return *this;
+ }
+
+ string to_string() const { return string(reinterpret_cast<const char*>(data()), size()); }
+};
+
+#define SMALL_MESSAGE_SIZE 64
+#define MEDIUM_MESSAGE_SIZE 1024
+#define LARGE_MESSAGE_SIZE 131072
+
+class KeymasterWrapper {
+ private:
+ sp<IKeymasterDevice> keymaster_;
+ SecurityLevel securityLevel_;
+ hidl_string name_;
+ hidl_string author_;
+ HidlBuf key_blob_;
+ KeyCharacteristics key_characteristics_;
+ ErrorCode error_;
+ string key_transform_;
+ string keymaster_name_;
+ uint32_t os_version_;
+ uint32_t os_patch_level_;
+ std::vector<string> message_cache_;
+
+ bool GenerateKey(const AuthorizationSet& authSet) {
+ return (keymaster_
+ ->generateKey(
+ authSet.hidl_data(),
+ [&](ErrorCode hidl_error, const hidl_vec<uint8_t>& hidl_key_blob,
+ const KeyCharacteristics& hidl_key_characteristics) {
+ error_ = hidl_error;
+ key_blob_ = hidl_key_blob;
+ key_characteristics_ = std::move(hidl_key_characteristics);
+ })
+ .isOk() &&
+ error_ == ErrorCode::OK);
+ }
+
+ bool GenerateKey(Algorithm algorithm, int keySize, Digest digest = Digest::NONE,
+ PaddingMode padding = PaddingMode::NONE, optional<BlockMode> blockMode = {}) {
+ AuthorizationSetBuilder authSet = AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .Authorization(TAG_PURPOSE, KeyPurpose::ENCRYPT)
+ .Authorization(TAG_PURPOSE, KeyPurpose::DECRYPT)
+ .Authorization(TAG_PURPOSE, KeyPurpose::SIGN)
+ .Authorization(TAG_PURPOSE, KeyPurpose::VERIFY)
+ .Authorization(TAG_KEY_SIZE, keySize)
+ .Authorization(TAG_ALGORITHM, algorithm)
+ .Digest(digest)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)
+ .Padding(padding);
+ if (blockMode) {
+ authSet.BlockMode(*blockMode);
+ }
+ if (algorithm == Algorithm::RSA) {
+ authSet.Authorization(TAG_RSA_PUBLIC_EXPONENT, 65537U);
+ }
+ return GenerateKey(authSet);
+ }
+
+ KeymasterWrapper(const sp<IKeymasterDevice> keymaster) {
+ os_version_ = ::keymaster::GetOsVersion();
+ os_patch_level_ = ::keymaster::GetOsPatchlevel();
+ keymaster_ = keymaster;
+ keymaster_->getHardwareInfo([&](SecurityLevel securityLevel, const hidl_string& name,
+ const hidl_string& author) {
+ securityLevel_ = securityLevel;
+ name_ = name;
+ author_ = author;
+ });
+
+ message_cache_.push_back(string(SMALL_MESSAGE_SIZE, 'x'));
+ message_cache_.push_back(string(MEDIUM_MESSAGE_SIZE, 'x'));
+ message_cache_.push_back(string(LARGE_MESSAGE_SIZE, 'x'));
+ }
+
+ public:
+ static KeymasterWrapper* newInstance(const std::string& keymaster_name) {
+ auto keymaster = IKeymasterDevice::getService(keymaster_name);
+ if (!keymaster) {
+ std::cerr << "Error: unable to find keymaster service named " << keymaster_name
+ << std::endl;
+ return nullptr;
+ }
+ return new KeymasterWrapper(keymaster);
+ }
+
+ bool GenerateKey(string transform, int keySize, bool sign = false) {
+ if (transform == key_transform_) {
+ return true;
+ } else if (key_transform_ != "") {
+ // Deleting old key first
+ if (!DeleteKey()) {
+ return false;
+ }
+ }
+ optional<Algorithm> algorithm = getAlgorithm(transform);
+ if (!algorithm) {
+ cerr << "Error: invalid algorithm " << transform << endl;
+ return false;
+ }
+ key_transform_ = transform;
+ return GenerateKey(*algorithm, keySize, getDigest(transform), getPadding(transform, sign),
+ getBlockMode(transform));
+ }
+
+ bool DeleteKey() {
+ key_blob_ = HidlBuf();
+ key_transform_ = "";
+ return keymaster_->deleteKey(key_blob_).isOk();
+ }
+
+ AuthorizationSet getOperationParams(string transform, bool sign = false) {
+ AuthorizationSetBuilder builder = AuthorizationSetBuilder()
+ .Padding(getPadding(transform, sign))
+ .Authorization(TAG_MAC_LENGTH, 128)
+ .Digest(getDigest(transform));
+ optional<BlockMode> blockMode = getBlockMode(transform);
+ if (blockMode) {
+ builder.BlockMode(*blockMode);
+ }
+ return std::move(builder);
+ }
+
+ optional<OperationHandle> EncryptBegin(AuthorizationSet& in_params,
+ AuthorizationSet* out_params = new AuthorizationSet) {
+ return Begin(KeyPurpose::ENCRYPT, in_params, out_params);
+ }
+
+ optional<OperationHandle> DecryptBegin(AuthorizationSet& in_params,
+ AuthorizationSet* out_params = new AuthorizationSet) {
+ return Begin(KeyPurpose::DECRYPT, in_params, out_params);
+ }
+
+ optional<OperationHandle> SignBegin(AuthorizationSet& in_params,
+ AuthorizationSet* out_params = new AuthorizationSet) {
+ return Begin(KeyPurpose::SIGN, in_params, out_params);
+ }
+
+ optional<OperationHandle> VerifyBegin(AuthorizationSet& in_params,
+ AuthorizationSet* out_params = new AuthorizationSet) {
+ return Begin(KeyPurpose::VERIFY, in_params, out_params);
+ }
+
+ optional<OperationHandle> Begin(KeyPurpose operation, const AuthorizationSet& in_params,
+ AuthorizationSet* out_params) {
+ OperationHandle op_handle;
+ if (!keymaster_
+ ->begin(operation, key_blob_, in_params.hidl_data(), HardwareAuthToken(),
+ [&](ErrorCode hidl_error,
+ const hidl_vec<KeyParameter>& hidl_out_params,
+ uint64_t hidl_op_handle) {
+ error_ = hidl_error;
+ out_params->push_back(AuthorizationSet(hidl_out_params));
+ op_handle = hidl_op_handle;
+ })
+ .isOk() ||
+ error_ != ErrorCode::OK) {
+ keymaster_->abort(op_handle);
+ return {};
+ }
+ return op_handle;
+ }
+
+ optional<string> ProcessMessage(const OperationHandle& op_handle, const string& message,
+ const AuthorizationSet& in_params,
+ AuthorizationSet* out_params = new AuthorizationSet,
+ const string& signature = "") {
+ static const int HIDL_BUFFER_LIMIT = 1 << 14; // 16KB
+
+ string output;
+ size_t input_consumed = 0;
+ while (message.length() - input_consumed > 0) {
+ if (!keymaster_
+ ->update(op_handle, in_params.hidl_data(),
+ HidlBuf(message.substr(input_consumed, HIDL_BUFFER_LIMIT)),
+ HardwareAuthToken(), VerificationToken(),
+ [&](ErrorCode hidl_error, uint32_t hidl_input_consumed,
+ const hidl_vec<KeyParameter>& hidl_out_params,
+ const HidlBuf& hidl_output) {
+ error_ = hidl_error;
+ out_params->push_back(AuthorizationSet(hidl_out_params));
+ output.append(hidl_output.to_string());
+ input_consumed += hidl_input_consumed;
+ })
+ .isOk() ||
+ error_ != ErrorCode::OK) {
+ keymaster_->abort(op_handle);
+ return {};
+ }
+ }
+
+ if (!keymaster_
+ ->finish(op_handle, in_params.hidl_data(),
+ HidlBuf(message.substr(input_consumed)), HidlBuf(signature),
+ HardwareAuthToken(), VerificationToken(),
+ [&](ErrorCode hidl_error,
+ const hidl_vec<KeyParameter>& hidl_out_params,
+ const HidlBuf& hidl_output) {
+ error_ = hidl_error;
+ out_params->push_back(AuthorizationSet(hidl_out_params));
+ output.append(hidl_output.to_string());
+ })
+ .isOk() ||
+ error_ != ErrorCode::OK) {
+ keymaster_->abort(op_handle);
+ return {};
+ }
+
+ return output;
+ }
+
+ int getError() { return static_cast<int>(error_); }
+
+ const string getHardwareName() { return name_; }
+
+ SecurityLevel getSecurityLevel() { return securityLevel_; }
+
+ const string& GenerateMessage(int size) {
+ for (const string& message : message_cache_) {
+ if (message.size() == size) {
+ return message;
+ }
+ }
+        // Cache the newly generated message and return a reference to the
+        // cached copy so the returned reference stays valid after this call.
+        string message = string(size, 'x');
+        message_cache_.push_back(std::move(message));
+        return message_cache_.back();
+ }
+
+ optional<BlockMode> getBlockMode(string transform) {
+ if (transform.find("/ECB") != string::npos) {
+ return BlockMode::ECB;
+ } else if (transform.find("/CBC") != string::npos) {
+ return BlockMode::CBC;
+ } else if (transform.find("/CTR") != string::npos) {
+ return BlockMode::CTR;
+ } else if (transform.find("/GCM") != string::npos) {
+ return BlockMode::GCM;
+ }
+ return {};
+ }
+
+ PaddingMode getPadding(string transform, bool sign) {
+ if (transform.find("/PKCS7") != string::npos) {
+ return PaddingMode::PKCS7;
+ } else if (transform.find("/PSS") != string::npos) {
+ return PaddingMode::RSA_PSS;
+ } else if (transform.find("/OAEP") != string::npos) {
+ return PaddingMode::RSA_OAEP;
+ } else if (transform.find("/PKCS1") != string::npos) {
+ return sign ? PaddingMode::RSA_PKCS1_1_5_SIGN : PaddingMode::RSA_PKCS1_1_5_ENCRYPT;
+ } else if (sign && transform.find("RSA") != string::npos) {
+ // RSA defaults to PKCS1 for sign
+ return PaddingMode::RSA_PKCS1_1_5_SIGN;
+ }
+ return PaddingMode::NONE;
+ }
+
+ optional<Algorithm> getAlgorithm(string transform) {
+ if (transform.find("AES") != string::npos) {
+ return Algorithm::AES;
+ } else if (transform.find("Hmac") != string::npos) {
+ return Algorithm::HMAC;
+ } else if (transform.find("DESede") != string::npos) {
+ return Algorithm::TRIPLE_DES;
+ } else if (transform.find("RSA") != string::npos) {
+ return Algorithm::RSA;
+ } else if (transform.find("EC") != string::npos) {
+ return Algorithm::EC;
+ }
+ cerr << "Can't find algorithm for " << transform << endl;
+ return {};
+ }
+
+ Digest getDigest(string transform) {
+ if (transform.find("MD5") != string::npos) {
+ return Digest::MD5;
+ } else if (transform.find("SHA1") != string::npos ||
+ transform.find("SHA-1") != string::npos) {
+ return Digest::SHA1;
+ } else if (transform.find("SHA224") != string::npos) {
+ return Digest::SHA_2_224;
+ } else if (transform.find("SHA256") != string::npos) {
+ return Digest::SHA_2_256;
+ } else if (transform.find("SHA384") != string::npos) {
+ return Digest::SHA_2_384;
+ } else if (transform.find("SHA512") != string::npos) {
+ return Digest::SHA_2_512;
+ } else if (transform.find("RSA") != string::npos &&
+ transform.find("OAEP") != string::npos) {
+ return Digest::SHA1;
+ }
+ return Digest::NONE;
+ }
+};
+
+KeymasterWrapper* keymaster;
+
+static void settings(benchmark::internal::Benchmark* benchmark) {
+ benchmark->Unit(benchmark::kMillisecond);
+}
+
+static void addDefaultLabel(benchmark::State& state) {
+ string secLevel;
+ switch (keymaster->getSecurityLevel()) {
+ case SecurityLevel::STRONGBOX:
+ secLevel = "STRONGBOX";
+ break;
+ case SecurityLevel::SOFTWARE:
+ secLevel = "SOFTWARE";
+ break;
+ case SecurityLevel::TRUSTED_ENVIRONMENT:
+ secLevel = "TEE";
+ break;
+ }
+ state.SetLabel("hardware_name:" + keymaster->getHardwareName() + " sec_level:" + secLevel);
+}
+
+// clang-format off
+#define BENCHMARK_KM(func, transform, keySize) \
+ BENCHMARK_CAPTURE(func, transform/keySize, #transform "/" #keySize, keySize)->Apply(settings);
+#define BENCHMARK_KM_MSG(func, transform, keySize, msgSize) \
+ BENCHMARK_CAPTURE(func, transform/keySize/msgSize, #transform "/" #keySize "/" #msgSize, \
+ keySize, msgSize) \
+ ->Apply(settings);
+
+#define BENCHMARK_KM_ALL_MSGS(func, transform, keySize) \
+ BENCHMARK_KM_MSG(func, transform, keySize, SMALL_MESSAGE_SIZE) \
+ BENCHMARK_KM_MSG(func, transform, keySize, MEDIUM_MESSAGE_SIZE) \
+ BENCHMARK_KM_MSG(func, transform, keySize, LARGE_MESSAGE_SIZE)
+
+#define BENCHMARK_KM_CIPHER(transform, keySize, msgSize) \
+ BENCHMARK_KM_MSG(encrypt, transform, keySize, msgSize) \
+ BENCHMARK_KM_MSG(decrypt, transform, keySize, msgSize)
+
+#define BENCHMARK_KM_CIPHER_ALL_MSGS(transform, keySize) \
+ BENCHMARK_KM_ALL_MSGS(encrypt, transform, keySize) \
+ BENCHMARK_KM_ALL_MSGS(decrypt, transform, keySize)
+
+#define BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, keySize) \
+ BENCHMARK_KM_ALL_MSGS(sign, transform, keySize) \
+ BENCHMARK_KM_ALL_MSGS(verify, transform, keySize)
+// clang-format on
+
+/*
+ * ============= KeyGen TESTS ==================
+ */
+static void keygen(benchmark::State& state, string transform, int keySize) {
+ addDefaultLabel(state);
+ for (auto _ : state) {
+ keymaster->GenerateKey(transform, keySize);
+ state.PauseTiming();
+ keymaster->DeleteKey();
+ state.ResumeTiming();
+ }
+}
+
+BENCHMARK_KM(keygen, AES, 128);
+BENCHMARK_KM(keygen, AES, 256);
+
+BENCHMARK_KM(keygen, RSA, 2048);
+BENCHMARK_KM(keygen, RSA, 3072);
+BENCHMARK_KM(keygen, RSA, 4096);
+
+BENCHMARK_KM(keygen, EC, 224);
+BENCHMARK_KM(keygen, EC, 256);
+BENCHMARK_KM(keygen, EC, 384);
+BENCHMARK_KM(keygen, EC, 521);
+
+BENCHMARK_KM(keygen, DESede, 168);
+
+BENCHMARK_KM(keygen, Hmac, 64);
+BENCHMARK_KM(keygen, Hmac, 128);
+BENCHMARK_KM(keygen, Hmac, 256);
+BENCHMARK_KM(keygen, Hmac, 512);
+BENCHMARK_KM(keygen, Hmac, 1024);
+BENCHMARK_KM(keygen, Hmac, 2048);
+BENCHMARK_KM(keygen, Hmac, 4096);
+BENCHMARK_KM(keygen, Hmac, 8192);
+
+/*
+ * ============= SIGNATURE TESTS ==================
+ */
+
+static void sign(benchmark::State& state, string transform, int keySize, int msgSize) {
+ addDefaultLabel(state);
+ if (!keymaster->GenerateKey(transform, keySize, true)) {
+ state.SkipWithError(
+ ("Key generation error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ auto params = keymaster->getOperationParams(transform, true);
+ string message = keymaster->GenerateMessage(msgSize);
+
+ for (auto _ : state) {
+ state.PauseTiming();
+ auto opHandle = keymaster->SignBegin(params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Error beginning sign, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ state.ResumeTiming();
+ if (!keymaster->ProcessMessage(*opHandle, message, params)) {
+ state.SkipWithError(("Sign error, " + std::to_string(keymaster->getError())).c_str());
+ break;
+ }
+ }
+}
+
+static void verify(benchmark::State& state, string transform, int keySize, int msgSize) {
+ addDefaultLabel(state);
+ if (!keymaster->GenerateKey(transform, keySize, true)) {
+ state.SkipWithError(
+ ("Key generation error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ AuthorizationSet out_params;
+ AuthorizationSet in_params = keymaster->getOperationParams(transform, true);
+ string message = keymaster->GenerateMessage(msgSize);
+ auto opHandle = keymaster->SignBegin(in_params, &out_params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Error beginning sign, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ optional<string> signature =
+ keymaster->ProcessMessage(*opHandle, message, in_params, &out_params);
+ if (!signature) {
+ state.SkipWithError(("Sign error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ in_params.push_back(out_params);
+ for (auto _ : state) {
+ state.PauseTiming();
+ opHandle = keymaster->VerifyBegin(in_params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Verify begin error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ state.ResumeTiming();
+ if (!keymaster->ProcessMessage(*opHandle, message, in_params, &out_params, *signature)) {
+ state.SkipWithError(("Verify error, " + std::to_string(keymaster->getError())).c_str());
+ break;
+ }
+ }
+}
+
+// clang-format off
+#define BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(transform) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 64) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 128) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 256) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 512) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 1024) \
+    BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 2048) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 4096) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 8192)
+
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA1)
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA256)
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA224)
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA256)
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA384)
+BENCHMARK_KM_SIGNATURE_ALL_HMAC_KEYS(HmacSHA512)
+
+#define BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(transform) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 224) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 256) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 384) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 521)
+
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(NONEwithECDSA);
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(SHA1withECDSA);
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(SHA224withECDSA);
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(SHA256withECDSA);
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(SHA384withECDSA);
+BENCHMARK_KM_SIGNATURE_ALL_ECDSA_KEYS(SHA512withECDSA);
+
+#define BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(transform) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 2048) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 3072) \
+ BENCHMARK_KM_SIGNATURE_ALL_MSGS(transform, 4096)
+
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(MD5withRSA);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA1withRSA);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA224withRSA);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA384withRSA);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA512withRSA);
+
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(MD5withRSA/PSS);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA1withRSA/PSS);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA224withRSA/PSS);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA384withRSA/PSS);
+BENCHMARK_KM_SIGNATURE_ALL_RSA_KEYS(SHA512withRSA/PSS);
+// clang-format on
+
+/*
+ * ============= CIPHER TESTS ==================
+ */
+
+static void encrypt(benchmark::State& state, string transform, int keySize, int msgSize) {
+ addDefaultLabel(state);
+ if (!keymaster->GenerateKey(transform, keySize)) {
+ state.SkipWithError(
+ ("Key generation error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ auto params = keymaster->getOperationParams(transform);
+ string message = keymaster->GenerateMessage(msgSize);
+
+ for (auto _ : state) {
+ state.PauseTiming();
+ auto opHandle = keymaster->EncryptBegin(params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Encryption begin error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ state.ResumeTiming();
+ if (!keymaster->ProcessMessage(*opHandle, message, params)) {
+ state.SkipWithError(
+ ("Encryption error, " + std::to_string(keymaster->getError())).c_str());
+ break;
+ }
+ }
+}
+
+static void decrypt(benchmark::State& state, string transform, int keySize, int msgSize) {
+ addDefaultLabel(state);
+ if (!keymaster->GenerateKey(transform, keySize)) {
+ state.SkipWithError(
+ ("Key generation error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ AuthorizationSet out_params;
+ AuthorizationSet in_params = keymaster->getOperationParams(transform);
+ string message = keymaster->GenerateMessage(msgSize);
+ auto opHandle = keymaster->EncryptBegin(in_params, &out_params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Encryption begin error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ auto encryptedMessage = keymaster->ProcessMessage(*opHandle, message, in_params, &out_params);
+ if (!encryptedMessage) {
+ state.SkipWithError(("Encryption error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ in_params.push_back(out_params);
+ for (auto _ : state) {
+ state.PauseTiming();
+ opHandle = keymaster->DecryptBegin(in_params);
+ if (!opHandle) {
+ state.SkipWithError(
+ ("Decryption begin error, " + std::to_string(keymaster->getError())).c_str());
+ return;
+ }
+ state.ResumeTiming();
+ if (!keymaster->ProcessMessage(*opHandle, *encryptedMessage, in_params)) {
+ state.SkipWithError(
+ ("Decryption error, " + std::to_string(keymaster->getError())).c_str());
+ break;
+ }
+ }
+}
+
+// clang-format off
+// AES
+#define BENCHMARK_KM_CIPHER_ALL_AES_KEYS(transform) \
+ BENCHMARK_KM_CIPHER_ALL_MSGS(transform, 128) \
+ BENCHMARK_KM_CIPHER_ALL_MSGS(transform, 256)
+
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/CBC/NoPadding);
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/CBC/PKCS7Padding);
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/CTR/NoPadding);
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/ECB/NoPadding);
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/ECB/PKCS7Padding);
+BENCHMARK_KM_CIPHER_ALL_AES_KEYS(AES/GCM/NoPadding);
+
+// Triple DES
+BENCHMARK_KM_CIPHER_ALL_MSGS(DESede/CBC/NoPadding, 168);
+BENCHMARK_KM_CIPHER_ALL_MSGS(DESede/CBC/PKCS7Padding, 168);
+BENCHMARK_KM_CIPHER_ALL_MSGS(DESede/ECB/NoPadding, 168);
+BENCHMARK_KM_CIPHER_ALL_MSGS(DESede/ECB/PKCS7Padding, 168);
+
+#define BENCHMARK_KM_CIPHER_ALL_RSA_KEYS(transform, msgSize) \
+ BENCHMARK_KM_CIPHER(transform, 2048, msgSize) \
+ BENCHMARK_KM_CIPHER(transform, 3072, msgSize) \
+ BENCHMARK_KM_CIPHER(transform, 4096, msgSize)
+
+BENCHMARK_KM_CIPHER_ALL_RSA_KEYS(RSA/ECB/NoPadding, SMALL_MESSAGE_SIZE);
+BENCHMARK_KM_CIPHER_ALL_RSA_KEYS(RSA/ECB/PKCS1Padding, SMALL_MESSAGE_SIZE);
+BENCHMARK_KM_CIPHER_ALL_RSA_KEYS(RSA/ECB/OAEPPadding, SMALL_MESSAGE_SIZE);
+// clang-format on
+
+} // namespace test
+} // namespace V4_0
+} // namespace keymaster
+} // namespace hardware
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::benchmark::Initialize(&argc, argv);
+ base::CommandLine::Init(argc, argv);
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+ auto service_name = command_line->GetSwitchValueASCII("service_name");
+ if (service_name.empty()) {
+ service_name = "default";
+ }
+ android::hardware::keymaster::V4_0::test::keymaster =
+ android::hardware::keymaster::V4_0::test::KeymasterWrapper::newInstance(service_name);
+ if (!android::hardware::keymaster::V4_0::test::keymaster) {
+ return 1;
+ }
+ ::benchmark::RunSpecifiedBenchmarks();
+}
\ No newline at end of file
diff --git a/keymaster/4.0/vts/performance/README b/keymaster/4.0/vts/performance/README
new file mode 100644
index 0000000..57d984a
--- /dev/null
+++ b/keymaster/4.0/vts/performance/README
@@ -0,0 +1,19 @@
+# Keymaster Benchmark
+
+The Keymaster Benchmark is a standalone tool for measuring the performance of keymaster implementations.
+
+## Building
+
+Build:
+`m keymaster_benchmark`
+
+Transfer to device/emulator:
+`adb sync data`
+
+The benchmark executable will be located at `data/benchmarktest/keymaster_benchmark/keymaster_benchmark` on the device.
+
+## Usage
+
+The Keymaster Benchmark is built on the [Google microbenchmark library](https://github.com/google/benchmark).
+All of the command-line arguments provided by the microbenchmark library are valid, such as `--benchmark_filter=<regex>` or `--benchmark_out_format={json|console|csv}`.
+In addition to the arguments provided by microbenchmark, `--service_name=<service_name>` is provided to allow specification of the keymaster service name; for example, specify `--service_name=strongbox` to benchmark the StrongBox implementation.
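+
+As an illustration (the path follows the `adb sync data` layout described above, and the filter shown is only an example), a run restricted to the AES key-generation benchmarks against the default keymaster service might look like:
+
+```
+adb shell /data/benchmarktest/keymaster_benchmark/keymaster_benchmark \
+    --service_name=default \
+    --benchmark_filter='keygen/AES.*'
+```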
diff --git a/keymaster/4.1/Android.bp b/keymaster/4.1/Android.bp
index eaa7e41..3b505d8 100644
--- a/keymaster/4.1/Android.bp
+++ b/keymaster/4.1/Android.bp
@@ -12,6 +12,7 @@
"IOperation.hal",
],
interfaces: [
+ "android.hardware.keymaster@3.0",
"android.hardware.keymaster@4.0",
"android.hidl.base@1.0",
],
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 3d78fb6..da7ba78 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -125,7 +125,7 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
- * the scale and zeroPoint must be same as input0.
+ * the scale and zeroPoint must be the same as input0.
*/
MEAN = 31,
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index 837ced5..b111d96 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -1954,7 +1954,7 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
- * the scale and zeroPoint must be same as input0.
+ * the scale and zeroPoint must be the same as input0.
*/
MEAN = @1.1::OperationType:MEAN,
@@ -2448,15 +2448,17 @@
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
- * this scalar must be of type {@link OperandType::FLOAT16}.
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@@ -4124,7 +4126,6 @@
* * 0: A tensor of the same type and shape as input1 and input2.
* For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
* the scale and zeroPoint can be different from inputs' scale and zeroPoint.
- *
*/
SELECT = 84,
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index aacb385..c1bf494 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -58,8 +58,20 @@
using V1_1::ExecutionPreference;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+struct TestConfig {
+ Executor executor;
+ MeasureTiming measureTiming;
+ OutputType outputType;
+};
+
+} // namespace
+
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@@ -194,31 +206,31 @@
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
-enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- Executor executor, MeasureTiming measure, OutputType outputType) {
+ const TestConfig& testConfig) {
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
- if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT &&
+ !isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
- if (outputType == OutputType::INSUFFICIENT) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
- switch (executor) {
+ switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@@ -234,8 +246,8 @@
SCOPED_TRACE("synchronous");
// execute
- Return<ErrorStatus> executionReturnStatus =
- ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@@ -258,14 +270,14 @@
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
- controller->compute(request, measure, keys);
+ controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
- if (outputType != OutputType::FULLY_SPECIFIED &&
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
@@ -274,7 +286,7 @@
<< std::endl;
GTEST_SKIP();
}
- if (measure == MeasureTiming::NO) {
+ if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@@ -283,7 +295,7 @@
}
}
- switch (outputType) {
+ switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
@@ -321,44 +333,29 @@
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
bool testDynamicOutputShape) {
+ std::initializer_list<OutputType> outputTypesList;
+ std::initializer_list<MeasureTiming> measureTimingList;
+ std::initializer_list<Executor> executorList;
+
if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
+ outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
} else {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
+ outputTypesList = {OutputType::FULLY_SPECIFIED};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ }
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig = {.executor = executor,
+ .measureTiming = measureTiming,
+ .outputType = outputType};
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
+ }
}
}
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
index 86ab287..84c4813 100644
--- a/neuralnetworks/1.3/types.hal
+++ b/neuralnetworks/1.3/types.hal
@@ -73,6 +73,4544 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+ /**
+ * Adds two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the sum of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its
+ * way forward.
+ *
+ * Example:
+ *
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of corresponding input dimension is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The sum, a tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ ADD = @1.2::OperationType:ADD,
+
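(Editor's illustration, not part of this change: the ADD broadcasting rule above — align trailing dimensions, and treat a size of 1, or 0 since HAL 1.2, as compatible with any size — sketched as a standalone C++ helper. The function name is hypothetical.)

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Computes the broadcast output shape for two operand shapes, walking from
    // the trailing dimension forward as described in the ADD documentation.
    std::vector<uint32_t> broadcastShape(const std::vector<uint32_t>& a,
                                         const std::vector<uint32_t>& b) {
        std::vector<uint32_t> out(std::max(a.size(), b.size()));
        for (size_t i = 0; i < out.size(); ++i) {
            // Missing leading dimensions count as 1.
            const uint32_t da = i < a.size() ? a[a.size() - 1 - i] : 1;
            const uint32_t db = i < b.size() ? b[b.size() - 1 - i] : 1;
            // A zero dimension propagates to the output (zero-sized tensors, HAL 1.2+).
            out[out.size() - 1 - i] = (da == 0 || db == 0) ? 0 : std::max(da, db);
        }
        return out;
    }

    int main() {
        // Mirrors the example above: {4,1,2} and {5,4,3,1} broadcast to {5,4,3,2}.
        for (uint32_t d : broadcastShape({4, 1, 2}, {5, 4, 3, 1})) std::cout << d << ' ';
        std::cout << '\n';
    }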
+ /**
+ * Performs a 2-D average pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj}(
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * ) / sum(1)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ AVERAGE_POOL_2D = @1.2::OperationType:AVERAGE_POOL_2D,
+
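(Editor's illustration, not part of this change: a minimal float/NHWC sketch of the averaging formula above for a single output position. It assumes explicit strides, clips the window at the input edge, and ignores padding, NCHW layout and quantization; all names are hypothetical.)

    #include <cstddef>
    #include <vector>

    // Averages the input values covered by one pooling window, i.e. the
    // sum_{di,dj}(input[...]) / sum(1) expression from the documentation.
    float averagePoolAt(const std::vector<float>& input, size_t height, size_t width,
                        size_t channels, size_t b, size_t outY, size_t outX, size_t c,
                        size_t strideH, size_t strideW, size_t filterH, size_t filterW) {
        float sum = 0.0f;
        size_t count = 0;
        for (size_t di = 0; di < filterH; ++di) {
            for (size_t dj = 0; dj < filterW; ++dj) {
                const size_t y = outY * strideH + di;
                const size_t x = outX * strideW + dj;
                if (y >= height || x >= width) continue;  // clip at the input edge
                sum += input[((b * height + y) * width + x) * channels + c];
                ++count;
            }
        }
        return count ? sum / count : 0.0f;  // the fused activation (input 9/6) would follow
    }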
+ /**
+ * Concatenates the input tensors along the given dimension.
+ *
+ * The input tensors must have identical {@link OperandType} and the same
+ * dimensions except the dimension along the concatenation axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the input section)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0 ~ n-1: The list of n input tensors, of shape
+ * [D0, D1, ..., Daxis(i), ..., Dm].
+ * Before HAL version 1.2, all input tensors of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * must have the same scale and zeroPoint as the output tensor.
+ * Input tensors of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+ * are allowed to have different scale and zeroPoint.
+ * Since HAL version 1.2, zero-sized tensors are supported.
+ * * n: An {@link OperandType::INT32} scalar, specifying the
+ * concatenation axis.
+ *
+ * Outputs:
+ * * 0: The output, a tensor of the same {@link OperandType} as the input
+ * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
+ * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint values can be different from
+ * input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint values can be different from input tensors.
+ */
+ CONCATENATION = @1.2::OperationType:CONCATENATION,
+
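(Editor's illustration, not part of this change: the output-shape rule above — all dimensions equal except the concatenation axis, whose sizes are summed — as a standalone sketch; names are hypothetical.)

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> concatShape(const std::vector<std::vector<uint32_t>>& inputs,
                                      size_t axis) {
        std::vector<uint32_t> out = inputs.front();  // every other dimension must match this
        out[axis] = 0;
        for (const auto& shape : inputs) {
            for (size_t d = 0; d < shape.size(); ++d) {
                if (d != axis) assert(shape[d] == out[d]);  // non-axis dimensions must agree
            }
            out[axis] += shape[axis];  // output axis size is sum(Daxis(i))
        }
        return out;
    }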
+ /**
+ * Performs a 2-D convolution operation.
+ *
+ * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
+ * batch of images, applying the filter to each window of each image of the
+ * appropriate size.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * sum_{di, dj, k} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[channel, di, dj, k]
+ * ) + bias[channel]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 12 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 11 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 9 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 8 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied: output_scale > input_scale * filter_scale
+ */
+ CONV_2D = @1.2::OperationType:CONV_2D,
+
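(Editor's illustration, not part of this change: one output element of the convolution sum above, for float NHWC input and a [depth_out, filter_height, filter_width, depth_in] filter with explicit strides; padding, dilation and quantization are left out and all names are hypothetical.)

    #include <cstddef>
    #include <vector>

    float convAt(const std::vector<float>& input, size_t height, size_t width, size_t depthIn,
                 const std::vector<float>& filter, size_t filterH, size_t filterW,
                 const std::vector<float>& bias, size_t b, size_t i, size_t j, size_t channel,
                 size_t strideH, size_t strideW) {
        float acc = bias[channel];
        for (size_t di = 0; di < filterH; ++di) {
            for (size_t dj = 0; dj < filterW; ++dj) {
                for (size_t k = 0; k < depthIn; ++k) {
                    const size_t y = i * strideH + di;
                    const size_t x = j * strideW + dj;
                    acc += input[((b * height + y) * width + x) * depthIn + k] *
                           filter[((channel * filterH + di) * filterW + dj) * depthIn + k];
                }
            }
        }
        return acc;  // the fused activation (input 9 / input 6) would be applied here
    }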
+ /**
+ * Performs a depthwise 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [1, filter_height, filter_width, depth_out]
+ * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
+ * applies a different filter to each input channel (expanding from 1
+ * channel to channel_multiplier channels for each), then concatenates the
+ * results together.
+ *
+ * The output has depth_out = depth_in * depth_multiplier channels.
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, k * channel_multiplier + q] =
+ * sum_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ * filter[1, di, dj, k * channel_multiplier + q]
+ * ) + bias[k * channel_multiplier + q]
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Available since HAL version 1.2:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 3.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 13 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 12 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
+ * specifying the filter.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise
+ * multiplier.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on width dimension. If this input is set,
+ * input 10 (dilation factor for height) must be specified as well.
+ * Available since HAL version 1.2.
+ * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
+ * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
+ * cells between each filter element on height dimension. If this input is set,
+ * input 9 (dilation factor for width) must be specified as well.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input_scale * filter_scale
+ */
+ DEPTHWISE_CONV_2D = @1.2::OperationType:DEPTHWISE_CONV_2D,
+
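(Editor's illustration, not part of this change: one output element of the depthwise sum above, with output channel k * multiplier + q and a [1, filter_height, filter_width, depth_out] filter; float data, explicit strides, no padding or dilation, hypothetical names.)

    #include <cstddef>
    #include <vector>

    float depthwiseConvAt(const std::vector<float>& input, size_t height, size_t width,
                          size_t depthIn, const std::vector<float>& filter, size_t filterH,
                          size_t filterW, size_t depthOut, const std::vector<float>& bias,
                          size_t b, size_t i, size_t j, size_t k, size_t q, size_t multiplier,
                          size_t strideH, size_t strideW) {
        const size_t outChannel = k * multiplier + q;  // depth_out = depth_in * multiplier
        float acc = bias[outChannel];
        for (size_t di = 0; di < filterH; ++di) {
            for (size_t dj = 0; dj < filterW; ++dj) {
                const size_t y = i * strideH + di;
                const size_t x = j * strideW + dj;
                acc += input[((b * height + y) * width + x) * depthIn + k] *
                       filter[(di * filterW + dj) * depthOut + outChannel];
            }
        }
        return acc;
    }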
+ /**
+ * Rearranges data from depth into blocks of spatial data.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the depth dimension are moved in spatial blocks to the height
+ * and width dimensions. The value block_size indicates the input block size
+ * and how the data is moved.
+ *
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
+ *
+ * The width of the output tensor is input_width * block_size, whereas the
+ * height is input_height * block_size. The depth of the input tensor must
+ * be divisible by block_size * block_size.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size * block_size must be a divisor
+ * of the input depth.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batch, height*block_size,
+ * width*block_size, depth/(block_size*block_size)].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ DEPTH_TO_SPACE = @1.2::OperationType:DEPTH_TO_SPACE,
+
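(Editor's illustration, not part of this change: the shape transformation above as a standalone NHWC sketch. The exact ordering of the depth blocks is not spelled out in the comment; the common depth-major ordering is assumed here, and all names are hypothetical.)

    #include <cstddef>
    #include <vector>

    std::vector<float> depthToSpace(const std::vector<float>& in, size_t batches, size_t height,
                                    size_t width, size_t depth, size_t blockSize) {
        const size_t outDepth = depth / (blockSize * blockSize);
        const size_t outH = height * blockSize;
        const size_t outW = width * blockSize;
        std::vector<float> out(batches * outH * outW * outDepth);
        for (size_t b = 0; b < batches; ++b)
            for (size_t h = 0; h < height; ++h)
                for (size_t w = 0; w < width; ++w)
                    for (size_t d = 0; d < depth; ++d) {
                        // Split the input channel into (blockRow, blockCol, outChannel).
                        const size_t c = d % outDepth;
                        const size_t blockCol = (d / outDepth) % blockSize;
                        const size_t blockRow = d / (outDepth * blockSize);
                        const size_t oh = h * blockSize + blockRow;
                        const size_t ow = w * blockSize + blockCol;
                        out[((b * outH + oh) * outW + ow) * outDepth + c] =
                                in[((b * height + h) * width + w) * depth + d];
                    }
        return out;
    }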
+ /**
+ * Dequantizes the input tensor.
+ *
+ * The formula is:
+ *
+ * output = (input - zeroPoint) * scale.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
+ *
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}.
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ DEQUANTIZE = @1.2::OperationType:DEQUANTIZE,
+
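(Editor's illustration, not part of this change: the formula output = (input - zeroPoint) * scale applied to a TENSOR_QUANT8_ASYMM buffer; names are hypothetical.)

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<float> dequantize(const std::vector<uint8_t>& input, float scale,
                                  int32_t zeroPoint) {
        std::vector<float> output(input.size());
        for (size_t i = 0; i < input.size(); ++i) {
            output[i] = (static_cast<int32_t>(input[i]) - zeroPoint) * scale;
        }
        return output;
    }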
+ /**
+ * Looks up sub-tensors in the input tensor.
+ *
+ * This operator takes for input a tensor of values (Values) and
+ * a one-dimensional tensor of selection indices (Lookups).
+ * The output tensor is the concatenation of sub-tensors of Values as
+ * selected by Lookups.
+ *
+ * Think of Values as being sliced along its first dimension:
+ * The entries in Lookups select which slices are concatenated together
+ * to create the output tensor.
+ *
+ * For example, if Values has shape of [40, 200, 300] and
+ * Lookups has shape of [3], all three values found in Lookups are
+ * expected to be between 0 and 39. The resulting tensor must
+ * have shape of [3, 200, 300].
+ *
+ * If a value in Lookups is out of bounds, the operation must fail
+ * and an error must be reported.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The values are indices into the first dimension of Values.
+ * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
+ * extracted.
+ *
+ * Output:
+ * * 0: A n-D tensor with the same rank and shape as the Values
+ * tensor, except for the first dimension which has the same size
+ * as Lookups' only dimension.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input1.
+ */
+ EMBEDDING_LOOKUP = @1.2::OperationType:EMBEDDING_LOOKUP,
+
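(Editor's illustration, not part of this change: selecting and concatenating first-dimension slices of a row-major float values tensor, failing on out-of-bounds indices as required above; names are hypothetical.)

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    std::vector<float> embeddingLookup(const std::vector<int32_t>& lookups,
                                       const std::vector<float>& values,
                                       size_t firstDim /* Values.Dim[0] */) {
        const size_t sliceSize = values.size() / firstDim;  // elements in one slice
        std::vector<float> output;
        output.reserve(lookups.size() * sliceSize);
        for (int32_t index : lookups) {
            if (index < 0 || static_cast<size_t>(index) >= firstDim) {
                throw std::out_of_range("lookup index out of bounds");  // operation must fail
            }
            output.insert(output.end(), values.begin() + index * sliceSize,
                          values.begin() + (index + 1) * sliceSize);
        }
        return output;
    }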
+ /**
+ * Computes element-wise floor() on the input tensor.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor, of the same {@link OperandType} and dimensions as
+ * the input tensor.
+ */
+ FLOOR = @1.2::OperationType:FLOOR,
+
+ /**
+ * Denotes a fully (densely) connected layer, which connects all elements
+ * in the input tensor with each element in the output tensor.
+ *
+ * This layer implements the operation:
+ *
+ * outputs = activation(inputs * weights’ + bias)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor of at least rank 2, specifying the input. If rank is
+ * greater than 2, then it gets flattened to a 2-D Tensor. The
+ * (flattened) 2-D Tensor is reshaped (if necessary) to
+ * [batch_size, input_size], where "input_size" corresponds to the
+ * number of inputs to the layer, matching the second dimension of
+ * weights, and "batch_size" is calculated by dividing the number of
+ * elements by "input_size".
+ * Since HAL version 1.2, zero batch_size is supported for this tensor.
+ * * 1: A 2-D tensor, specifying the weights, of shape
+ * [num_units, input_size], where "num_units" corresponds to the number
+ * of output nodes.
+ * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
+ * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
+ * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
+ * * 3: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
+ */
+ FULLY_CONNECTED = @1.2::OperationType:FULLY_CONNECTED,
+
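(Editor's illustration, not part of this change: outputs = activation(inputs * weights' + bias) for float data, with RELU standing in for the fused activation selected by input 3; names are hypothetical.)

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<float> fullyConnected(const std::vector<float>& input, size_t batchSize,
                                      size_t inputSize, const std::vector<float>& weights,
                                      size_t numUnits, const std::vector<float>& bias) {
        std::vector<float> output(batchSize * numUnits);
        for (size_t b = 0; b < batchSize; ++b) {
            for (size_t u = 0; u < numUnits; ++u) {
                float acc = bias[u];
                for (size_t i = 0; i < inputSize; ++i) {
                    acc += input[b * inputSize + i] * weights[u * inputSize + i];
                }
                output[b * numUnits + u] = std::max(acc, 0.0f);  // example fused RELU
            }
        }
        return output;
    }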
+ /**
+ * Looks up sub-tensors in the input tensor using a key-value map.
+ *
+ * This operator takes for input a tensor of values (Values),
+ * a one-dimensional tensor of selection values (Lookups) and
+ * a one-dimensional tensor that maps these values to Values
+ * indexes. The output tensor is the concatenation of sub-tensors of
+ * Values as selected by Lookups via Keys.
+ *
+ * Think of Values as being sliced along its outer-most dimension.
+ * The output is a concatenation of selected slices, with one slice
+ * for each entry of Lookups. The slice selected is the one at the
+ * same index as the Maps entry that matches the value in Lookups.
+ *
+ * For a hit, the corresponding sub-tensor of Values is included
+ * in the Output tensor. For a miss, the corresponding sub-tensor in
+ * Output must have zero values.
+ *
+ * For example, if Values has shape of [40, 200, 300],
+ * Keys should have a shape of [40]. If Lookups tensor has shape
+ * of [3], three slices are being concatenated, so the resulting tensor
+ * must have the shape of [3, 200, 300]. If the first entry in Lookups
+ * has the value 123456, that value must be located in Keys tensor.
+ * If the sixth entry of Keys contains 123456, the sixth slice of Values
+ * must be selected. If no entry in Keys has 123456, a slice of zeroes
+ * must be concatenated.
+ *
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
+ * Inputs:
+ * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
+ * shape [ k ].
+ * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [ n ]; Keys and Values pair represent a map, i.e., the ith element
+ * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
+ * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
+ * ascending order.
+ * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
+ * must be n.
+ *
+ * Outputs:
+ * * 0: Output. A tensor with shape [ k …].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input2.
+ * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
+ * hits (True) or not (False).
+ * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
+ * and scale 1.0f.
+ * A non-zero byte represents True, a hit. A zero indicates otherwise.
+ */
+ HASHTABLE_LOOKUP = @1.2::OperationType:HASHTABLE_LOOKUP,
+
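(Editor's illustration, not part of this change: the key-to-slice lookup above, where a miss leaves a zero-filled slice and a 0 in the hits tensor. Float values and a linear scan are assumed, although the sorted Keys tensor would also permit a binary search; names are hypothetical.)

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void hashtableLookup(const std::vector<int32_t>& lookups, const std::vector<int32_t>& keys,
                         const std::vector<float>& values, size_t sliceSize,
                         std::vector<float>* output, std::vector<uint8_t>* hits) {
        output->assign(lookups.size() * sliceSize, 0.0f);  // misses stay zero
        hits->assign(lookups.size(), 0);
        for (size_t i = 0; i < lookups.size(); ++i) {
            for (size_t k = 0; k < keys.size(); ++k) {
                if (keys[k] != lookups[i]) continue;
                std::copy(values.begin() + k * sliceSize, values.begin() + (k + 1) * sliceSize,
                          output->begin() + i * sliceSize);
                (*hits)[i] = 1;
                break;
            }
        }
    }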
+ /**
+ * Applies L2 normalization along the depth dimension.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[batch, row, col, channel] =
+ * input[batch, row, col, channel] /
+ * sqrt(sum_{c} pow(input[batch, row, col, c], 2))
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along dimension dim.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ */
+ L2_NORMALIZATION = @1.2::OperationType:L2_NORMALIZATION,
+
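(Editor's illustration, not part of this change: the per-position normalization above applied to the channel vector starting at a given offset of a flat NHWC float buffer; names are hypothetical and there is no guard for an all-zero vector.)

    #include <cmath>
    #include <cstddef>
    #include <vector>

    void l2NormalizeAt(std::vector<float>& data, size_t offset, size_t channels) {
        float sumSq = 0.0f;
        for (size_t c = 0; c < channels; ++c) {
            sumSq += data[offset + c] * data[offset + c];
        }
        const float inv = 1.0f / std::sqrt(sumSq);
        for (size_t c = 0; c < channels; ++c) {
            data[offset + c] *= inv;  // input / sqrt(sum of squares along the channel dim)
        }
    }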
+ /**
+ * Performs a 2-D L2 pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, c] =
+ * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
+ * sum(1))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ */
+ L2_POOL_2D = @1.2::OperationType:L2_POOL_2D,
+
+ /**
+ * Applies Local Response Normalization along the depth dimension.
+ *
+ * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
+ * last dimension), and each vector is normalized independently. Within a
+ * given vector, each component is divided by the weighted, squared sum of
+ * inputs within depth_radius.
+ *
+ * The output is calculated using this formula:
+ *
+ * sqr_sum[a, b, c, d] = sum(
+ * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
+ * output = input / pow((bias + alpha * sqr_sum), beta)
+ *
+ * For input tensor with rank less than 4, independently normalizes each
+ * 1-D slice along specified dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the radius of
+ * the normalization window.
+ * * 2: A scalar, specifying the bias, must not be zero.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 3: A scalar, specifying the scale factor, alpha.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * alpha value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * alpha value must be of {@link OperandType::FLOAT32}.
+ * * 4: A scalar, specifying the exponent, beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 5: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension normalization would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOCAL_RESPONSE_NORMALIZATION = @1.2::OperationType:LOCAL_RESPONSE_NORMALIZATION,
+
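(Editor's illustration, not part of this change: the formula output = input / pow(bias + alpha * sqr_sum, beta) applied along one 1-D depth slice, with the window clipped at the slice boundaries; names are hypothetical.)

    #include <algorithm>
    #include <cmath>
    #include <vector>

    std::vector<float> localResponseNormalization(const std::vector<float>& in, int radius,
                                                  float bias, float alpha, float beta) {
        const int depth = static_cast<int>(in.size());
        std::vector<float> out(in.size());
        for (int d = 0; d < depth; ++d) {
            float sqrSum = 0.0f;
            for (int k = std::max(0, d - radius); k <= std::min(depth - 1, d + radius); ++k) {
                sqrSum += in[k] * in[k];
            }
            out[d] = in[d] / std::pow(bias + alpha * sqrSum, beta);
        }
        return out;
    }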
+ /**
+ * Computes sigmoid activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = 1 / (1 + exp(-input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the scale must be 1.f / 256 and the zeroPoint must be -128.
+ */
+ LOGISTIC = @1.2::OperationType:LOGISTIC,
+
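(Editor's illustration, not part of this change: the element-wise sigmoid above for float data; the fixed output scale and zeroPoint requirements only apply to the quantized types.)

    #include <cmath>
    #include <vector>

    void logistic(std::vector<float>& tensor) {
        for (float& v : tensor) {
            v = 1.0f / (1.0f + std::exp(-v));  // output = 1 / (1 + exp(-input))
        }
    }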
+ /**
+ * Projects an input to a bit vector via locality sensitive hashing.
+ *
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported input tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: Hash functions. Dim.size == 2, DataType: Float.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of projected output bits generated by each
+ * hash function.
+ * If the projection type is Sparse:
+ * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
+ *
+ * * 1: Input. Dim.size >= 1, no restriction on DataType.
+ * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * * 3: Type:
+ * Sparse:
+ * Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
+ * Computed bit vector is considered to be sparse.
+ * Each output element is an int32 made up of multiple bits
+ * computed from hash functions.
+ *
+ * NOTE: To avoid collisions across hash functions, an offset value
+ * of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
+ * where k is the index of the hash function.
+ *
+ * Value LSHProjectionType_SPARSE_DEPRECATED(=1).
+ * Legacy behavior that does not include the offset value.
+ *
+ * Dense:
+ * Value LSHProjectionType_DENSE(=2).
+ * Computed bit vector is considered to be dense. Each output
+ * element represents a bit and can take the value of either
+ * 0 or 1.
+ *
+ * Outputs:
+ * * 0: If the projection type is Sparse:
+ * Output.Dim == { Tensor[0].Dim[0] }
+ * A tensor of int32 that represents hash signatures.
+ *
+ * If the projection type is Dense:
+ * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+ * A flattened tensor that represents projected bit vectors.
+ * The offset value for sparse projections was added in HAL version 1.2.
+ */
+ LSH_PROJECTION = @1.2::OperationType:LSH_PROJECTION,
+
+ /**
+ * Performs a single time step in a Long Short-Term Memory (LSTM) layer
+ *
+ * The LSTM operation is described by the following equations.
+ *
+ * \f{eqnarray*}{
+ * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
+ * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
+ * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
+ * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
+ * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
+ * & & \\
+ * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
+ * & if\ there\ is\ a\ projection; \\
+ * h_t =& & \\
+ * & o_t \odot g(C_t) & otherwise. \\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually
+ * \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the
+ * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * Hadamard product</a> that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * Since HAL version 1.2 LSTM supports layer normalization.
+ * In case layer normalization is used, the inputs to internal activation
+ * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
+ * following an approach from section 3.1 from
+ * https://arxiv.org/pdf/1607.06450.pdf
+ *
+ * The operation has the following independently optional inputs:
+ * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
+ * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
+ * have values or neither of them have values (i.e., all set to null). If
+ * they have values, the peephole optimization is used.
+ * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
+ * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
+ * or none of them have values. If they have no values, coupling of input
+ * and forget gates (CIFG) is used, in which case the input gate
+ * (\f$i_t\f$) is calculated using the following equation instead.
+ * \f{eqnarray*}{
+ * i_t = 1 - f_t
+ * \f}
+ * In case peephole optimization is used and CIFG is not used
+ * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
+ * cell-to-input weights must have no value.
+ * * The projection weights (\f$W_{proj}\f$) is required only for the
+ * recurrent projection layer, and should otherwise have no value.
+ * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
+ * value if the recurrent projection layer exists, and should otherwise
+ * have no value.
+ * * (HAL version 1.2 or later) The four layer normalization weights either all have
+ * values or none of them have values. Additionally, if CIFG is used,
+ * input layer normalization weights tensor is omitted and the other layer
+ * normalization weights either all have values or none of them have
+ * values. Layer normalization is used when the values of all the layer
+ * normalization weights are present.
+ *
+ * References:
+ *
+ * The default non-peephole non-CIFG implementation is based on:
+ * http://www.bioinf.jku.at/publications/older/2604.pdf
+ * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
+ * Computation, 9(8):1735-1780, 1997.
+ *
+ * The peephole implementation and projection layer is based on:
+ * https://research.google.com/pubs/archive/43905.pdf
+ * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
+ * recurrent neural network architectures for large scale acoustic
+ * modeling." INTERSPEECH, 2014.
+ * (However, the concept of peephole optimization was introduced in work
+ * prior to this paper.)
+ *
+ * The coupling of input and forget gate (CIFG) is based on:
+ * http://arxiv.org/pdf/1503.04069.pdf
+ * Greff et al. "LSTM: A Search Space Odyssey"
+ *
+ * The layer normalization is based on:
+ * https://arxiv.org/pdf/1607.06450.pdf
+ * Jimmy Ba et al. "Layer Normalization"
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
+ * tensors have type {@link OperandType::TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * Since HAL version 1.2 there are additional inputs to this op:
+ * * 23:The input layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 24:The forget layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 25:The cell layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 26:The output layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The scratch buffer.
+ * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
+ * [batch_size, num_units * 4] without CIFG.
+ * * 1: The output state (out) (\f$h_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 2: The cell state (out) (\f$C_t\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 3: The output (\f$o_t\f$).
+ * A 2-D tensor of shape [batch_size, output_size]. This is effectively
+ * the same as the current “output state (out)” value.
+ */
+ LSTM = @1.2::OperationType:LSTM,
+
+ /**
+ * Performs a 2-D max pooling operation.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, channel] =
+ * max_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * )
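+ *
+ * For example, with a 2x2 filter, stride 2, and VALID padding, an input
+ * of shape [1, 4, 4, 1] produces an output of shape [1, 2, 2, 1], where
+ * each output element is the maximum over the corresponding
+ * non-overlapping 2x2 input window.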
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the filter
+ * width.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the filter
+ * height.
+ * * 6: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 7: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MAX_POOL_2D = @1.2::OperationType:MAX_POOL_2D,
+
+ /**
+ * Multiplies two tensors, element-wise.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the product of both input tensors, optionally
+ * modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the resulting output is the maximum size along each dimension
+ * of the input operands. It starts with the trailing dimensions, and works
+ * its way forward.
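+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}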
+ *
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: The product, a tensor of the same {@link OperandType} as input0.
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the following condition must be satisfied:
+ * output_scale > input1_scale * input2_scale.
+ */
+ MUL = @1.2::OperationType:MUL,
+
+ /**
+ * Computes rectified linear activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = max(0, input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU = @1.2::OperationType:RELU,
+
+ /**
+ * Computes rectified linear 1 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(1.f, max(-1.f, input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU1 = @1.2::OperationType:RELU1,
+
+ /**
+ * Computes rectified linear 6 activation on the input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = min(6, max(0, input))
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RELU6 = @1.2::OperationType:RELU6,
+
+ /**
+ * Reshapes a tensor.
+ *
+ * Given tensor, this operation returns a tensor that has the same values as
+ * tensor, but with a newly specified shape.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the tensor to be reshaped.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the
+ * shape of the output tensor. The number of elements implied by shape
+ * must be the same as the number of elements in the input tensor.
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component
+ * of shape can be -1.
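+ *
+ * For example, reshaping an input of shape [3, 2] with a shape input
+ * of [-1] produces an output of shape [6].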
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESHAPE = @1.2::OperationType:RESHAPE,
+
+ /**
+ * Resizes images to a given size using bilinear interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not
+ * the same as the input aspect ratio. The corner pixels of the output
+ * may not be the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Inputs (resizing by scale, since HAL version 1.2):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESIZE_BILINEAR = @1.2::OperationType:RESIZE_BILINEAR,
+
+ /**
+ * A basic recurrent neural network layer.
+ *
+ * This layer implements the operation:
+ * outputs = state = activation(inputs * input_weights +
+ * state * recurrent_weights + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
+ * * 3: bias.
+ * A 1-D tensor of shape [num_units].
+ * * 4: hidden state (in).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 5: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: hidden state (out).
+ * A 2-D tensor of shape [batch_size, num_units].
+ *
+ * * 1: output.
+ * A 2-D tensor of shape [batch_size, num_units]. This is effectively
+ * the same as the current state value.
+ */
+ RNN = @1.2::OperationType:RNN,
+
+ /**
+ * Computes the softmax activation on the input tensor element-wise, per
+ * batch, by normalizing the input vector so the maximum coefficient is
+ * zero.
+ *
+ * The output is calculated using this formula:
+ *
+ * output[batch, i] =
+ * exp((input[batch, i] - max(input[batch, :])) * beta) /
+ * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
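+ *
+ * For example, with beta = 1, an input row of [1, 2, 3] produces an
+ * output row of approximately [0.090, 0.245, 0.665].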
+ *
+ * For input tensor with rank other than 2, the activation will be applied
+ * independently on each 1-D slice along specified dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4.
+ * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 2-D or 4-D tensor, specifying the input tensor.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
+ * {@link OperandType::FLOAT32}.
+ * If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
+ * scalar must be of {@link OperandType::FLOAT16}.
+ * * 2: An optional {@link OperandType::INT32} scalar, default to -1,
+ * specifying the dimension the activation would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ */
+ SOFTMAX = @1.2::OperationType:SOFTMAX,
+
+ /**
+ * Rearranges blocks of spatial data into depth.
+ *
+ * More specifically, this op outputs a copy of the input tensor where
+ * values from the height and width dimensions are moved to the depth
+ * dimension. The value block_size indicates the input block size and how
+ * the data is moved.
+ *
+ * Chunks of data of size block_size * block_size from depth are rearranged
+ * into non-overlapping blocks of size block_size x block_size.
+ *
+ * The depth of the output tensor is input_depth * block_size * block_size.
+ * The input tensor's height and width must be divisible by block_size.
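+ *
+ * For example, with block_size 2, an input of shape [1, 4, 4, 1] is
+ * rearranged into an output of shape [1, 2, 2, 4].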
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the block_size.
+ * block_size must be >=1 and block_size must be a divisor of both the
+ * input height and width.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape [batches, height/block_size,
+ * width/block_size, depth_in*block_size*block_size].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPACE_TO_DEPTH = @1.2::OperationType:SPACE_TO_DEPTH,
+
+ /**
+ * SVDF op is a kind of stateful layer derived from the notion that a
+ * densely connected layer that's processing a sequence of input frames can
+ * be approximated by using a singular value decomposition of each of its
+ * nodes. The implementation is based on:
+ *
+ * https://research.google.com/pubs/archive/43813.pdf
+ *
+ * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
+ * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
+ * INTERSPEECH, 2015.
+ *
+ * It processes the incoming input using a 2-stage filtering mechanism:
+ * * stage 1 performs filtering on the "features" dimension, whose outputs
+ * get pushed into a memory of fixed-size memory_size.
+ * * stage 2 performs filtering on the "time" dimension of the memory_size
+ * memoized outputs of stage 1.
+ *
+ * Specifically, for rank 1, this layer implements the operation:
+ *
+ * memory = push(conv1d(inputs, weights_feature, feature_dim,
+ * "PADDING_VALID"));
+ * outputs = activation(memory * weights_time + bias);
+ *
+ * Where:
+ * * “weights_feature” is a weights matrix that processes the inputs (by
+ * convolving the input with every “feature filter”), and whose outputs
+ * get pushed, stacked in order, into the fixed-size “memory” (the oldest
+ * entry gets dropped);
+ * * “weights_time” is a weights matrix that processes the “memory” (by a
+ * batched matrix multiplication on the num_units);
+ * * “bias” is an optional bias vector (added to each output vector in the
+ * batch); and
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Each rank adds a dimension to the weights matrices by means of stacking
+ * the filters.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * All input tensors must be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 1: weights_feature.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
+ * * 2: weights_time.
+ * A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+ * corresponds to the fixed-size of the memory.
+ * * 3: bias.
+ * An optional 1-D tensor of shape [num_units].
+ * * 4: state (in).
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
+ * * 5: rank.
+ * The rank of the SVD approximation.
+ * * 6: fused_activation_function.
+ * An optional {@link FusedActivationFunc} value indicating the
+ * activation function. If “NONE” is specified then it results in a
+ * linear activation.
+ *
+ * Outputs:
+ * * 0: state (out).
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, (memory_size - 1) * num_units * rank].
+ * * 1: output.
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
+ * [batch_size, num_units].
+ */
+ SVDF = @1.2::OperationType:SVDF,
+
+ /**
+ * Computes hyperbolic tangent of input tensor element-wise.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = tanh(input)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+ * the scale must be 1.f / 128 and the zeroPoint must be 0.
+ */
+ TANH = @1.2::OperationType:TANH,
+
+ /**
+ * BatchToSpace for N-dimensional tensors.
+ *
+ * This operation reshapes the batch dimension (dimension 0) into M + 1
+ * dimensions of shape block_shape + [batch], interleaves these blocks back
+ * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
+ * result with the same rank as the input.
+ *
+ * This is the reverse of SpaceToBatch.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be reshaped
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
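+ *
+ * For example, with block sizes [2, 2], an input of shape [4, 1, 1, 1]
+ * is rearranged into an output of shape [1, 2, 2, 1].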
+ */
+ BATCH_TO_SPACE_ND = @1.2::OperationType:BATCH_TO_SPACE_ND,
+
+ /**
+ * Element-wise division of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of dividing the first input tensor
+ * by the second, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ DIV = @1.2::OperationType:DIV,
+
+ /**
+ * Computes the mean of elements across dimensions of a tensor.
+ *
+ * Reduces the input tensor along the given dimensions to reduce. Unless
+ * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
+ * in axis. If keep_dims is true, the reduced dimensions are retained with
+ * length 1.
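+ *
+ * For example, reducing an input of shape [2, 3, 4] along axis 1
+ * produces an output of shape [2, 4], or [2, 1, 4] if keep_dims is
+ * positive.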
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Must be in the range
+ * [-rank(input_tensor), rank(input_tensor)).
+ *
+ * NOTE: When the operation was introduced, the documentation
+ * incorrectly stated that if dimensions were empty, the operation
+ * would reduce across all dimensions. This behavior was never
+ * implemented.
+ *
+ * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ MEAN = @1.2::OperationType:MEAN,
+
+ /**
+ * Pads a tensor.
+ *
+ * This operation pads a tensor according to the specified paddings.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
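+ *
+ * For example, padding a [2, 3] tensor with paddings [[1, 1], [0, 2]]
+ * produces an output of shape [4, 5].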
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ PAD = @1.2::OperationType:PAD,
+
+ /**
+ * SpaceToBatch for N-Dimensional tensors.
+ *
+ * This operation divides "spatial" dimensions [1, ..., M] of the input into
+ * a grid of blocks of shape block_shape, and interleaves these blocks with
+ * the "batch" dimension (0) such that in the output, the spatial dimensions
+ * [1, ..., M] correspond to the position within the grid, and the batch
+ * dimension combines both the position within a spatial block and the
+ * original batch position. Prior to division into blocks, the spatial
+ * dimensions of the input are optionally zero padded according to paddings.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the input.
+ * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
+ * sizes for each spatial dimension of the input tensor. All values
+ * must be >= 1.
+ * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. All values must be
+ * >= 0. The shape of the tensor must be {M, 2}, where M is the number
+ * of spatial dimensions.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after the
+ * end of dimension i.
+ * * 3: An optional {@link OperandType::BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since HAL version 1.2.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
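+ *
+ * For example, with block sizes [2, 2] and zero paddings, an input of
+ * shape [1, 2, 2, 1] is rearranged into an output of shape [4, 1, 1, 1].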
+ *
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
+ */
+ SPACE_TO_BATCH_ND = @1.2::OperationType:SPACE_TO_BATCH_ND,
+
+ /**
+ * Removes dimensions of size 1 from the shape of a tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of the same
+ * {@link OperandType} with all dimensions of size 1 removed. If you don't
+ * want to remove all size 1 dimensions, you can remove specific size 1
+ * dimensions by specifying the axes (input1).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, the tensor to be squeezed.
+ * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * dimensions to squeeze. If specified only squeezes the dimensions
+ * listed. Otherwise, squeezes all dimensions. The dimension index
+ * starts at 0. An error must be reported if squeezing a dimension that
+ * is not 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. Contains the
+ * same data as input, but has one or more dimensions of size 1
+ * removed.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
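+ *
+ * For example, squeezing an input of shape [1, 3, 1, 2] with no axes
+ * specified produces an output of shape [3, 2], while squeezing with
+ * axes [0] produces an output of shape [3, 1, 2].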
+ */
+ SQUEEZE = @1.2::OperationType:SQUEEZE,
+
+ /**
+ * Extracts a strided slice of a tensor.
+ *
+ * Roughly speaking, this op extracts a slice of size (end - begin) / stride
+ * from the given input tensor. Starting at the location specified by
+ * begin, the slice continues by adding stride to the index until all
+ * dimensions are not less than end. Note that a stride can be negative,
+ * which causes a reverse slice.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be sliced.
+ * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * starts of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0).
+ * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * ends of the dimensions of the input tensor to be sliced. The length
+ * must be of rank(input0).
+ * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
+ * strides of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0). The entries must be non-zero.
+ * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
+ * of begin_mask is set, begin[i] is ignored and the fullest possible
+ * range in that dimension is used instead.
+ * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
+ * end_mask is set, end[i] is ignored and the fullest possible range in
+ * that dimension is used instead.
+ * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
+ * ith bit of shrink_axis_mask is set, the ith dimension specification
+ * shrinks the dimensionality by 1, taking on the value at index
+ * begin[i]. In this case, the ith specification must define a
+ * slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
+ * where k is the number of bits set in shrink_axis_mask.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
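+ *
+ * For example, slicing a 1-D input of [1, 2, 3, 4, 5] with begin = [1],
+ * end = [4], strides = [2], and all masks set to 0 produces [2, 4].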
+ */
+ STRIDED_SLICE = @1.2::OperationType:STRIDED_SLICE,
+
+ /**
+ * Element-wise subtraction of two tensors.
+ *
+ * Takes two input tensors of identical {@link OperandType} and compatible
+ * dimensions. The output is the result of subtracting the second input
+ * tensor from the first one, optionally modified by an activation function.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input1.dimension = {4, 1, 2}
+ * input2.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the first input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0.
+ * * 2: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SUB = @1.2::OperationType:SUB,
+
+ /**
+ * Transposes the input tensor, permuting the dimensions according to the
+ * perm tensor.
+ *
+ * The returned tensor's dimension i corresponds to the input dimension
+ * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
+ * rank of the input tensor. Hence by default, this operation performs a
+ * regular matrix transpose on 2-D input Tensors.
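+ *
+ * For example, transposing an input of shape [2, 3] with perm = [1, 0]
+ * (or with perm omitted) produces an output of shape [3, 2].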
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since HAL version 1.2, this tensor may be zero-sized.
+ * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
+ * the permutation of the dimensions of the input tensor.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TRANSPOSE = @1.2::OperationType:TRANSPOSE,
+
+ /**
+ * Computes the absolute value of a tensor, element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ ABS = @1.2::OperationType:ABS,
+
+ /**
+ * Returns the index of the largest element along an axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
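+ *
+ * For example, for an input of [[1, 5, 2], [7, 3, 0]] and axis = 1,
+ * the output is [1, 0].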
+ */
+ // There is no underscore in ARG_MAX to avoid name conflict with
+ // the macro defined in libc/kernel/uapi/linux/limits.h.
+ ARGMAX = @1.2::OperationType:ARGMAX,
+
+ /**
+ * Returns the index of the smallest element along an axis.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
+ */
+ ARGMIN = @1.2::OperationType:ARGMIN, // See ARGMAX for naming discussion.
+
+ /**
+ * Transform axis-aligned bounding box proposals using bounding box deltas.
+ *
+ * Given the positions of bounding box proposals and the corresponding
+ * bounding box deltas for each class, return the refined bounding box
+ * regions. The resulting bounding boxes are clipped against the edges of
+ * the image.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT16_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
+ * bounding box proposals, each line with format [x1, y1, x2, y2].
+ * For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
+ * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
+ * bounding box delta for each region of interest and each class. The
+ * bounding box deltas are organized in the following order
+ * [dx, dy, dw, dh], where dx and dy are the relative correction factors
+ * for the center position of the bounding box with respect to the width
+ * and height, and dw and dh are the log-scale relative correction
+ * factors for the width and height. For input0 of type
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be
+ * of {@link OperandType::TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
+ * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
+ * each image in the batch, each line with format
+ * [image_height, image_width].
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+ * [num_rois, num_classes * 4], specifying the coordinates of each
+ * output bounding box for each class, with format [x1, y1, x2, y2].
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ */
+ AXIS_ALIGNED_BBOX_TRANSFORM = @1.2::OperationType:AXIS_ALIGNED_BBOX_TRANSFORM,
+
+ /**
+ * Performs a forward LSTM on the input followed by a backward LSTM.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where "max_time" is the number of timesteps (sequence length),
+ * "batch_size" corresponds to the batching dimension, and
+ * "input_size" is the size of the input.
+ * * 1: The forward input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
+ * * 2: The forward input-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 3: The forward input-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 4: The forward input-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 5: The forward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+ * corresponds to either the number of cell units (i.e., fw_num_units),
+ * or the second dimension of the “fw_projection_weights”, if defined.
+ * * 6: The forward recurrent-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 7: The forward recurrent-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 8: The forward recurrent-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 9: The forward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 10: The forward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 11: The forward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 12: The forward input gate bias. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 13: The forward forget gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 14: The forward cell gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 15: The forward output gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 16: The forward projection weights. Optional.
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
+ * * 17: The forward projection bias. Optional.
+ * A 1-D tensor of shape [fw_output_size].
+ * * 18: The backward input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
+ * * 19: The backward input-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 20: The backward input-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 21: The backward input-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 22: The backward recurrent-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+ * corresponds to either the number of cell units (i.e., “bw_num_units”),
+ * or the second dimension of the “bw_projection_weights”, if defined.
+ * * 23: The backward recurrent-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 24: The backward recurrent-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 25: The backward recurrent-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 26: The backward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 27: The backward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 28: The backward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 29: The backward input gate bias. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 30: The backward forget gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 31: The backward cell gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 32: The backward output gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 33: The backward projection weights. Optional.
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
+ * * 34: The backward projection bias. Optional.
+ * A 1-D tensor of shape [bw_output_size].
+ * * 35: The forward input activation state.
+ * A 2-D tensor of shape [batch_size, fw_output_size].
+ * * 36: The forward input cell state.
+ * A 2-D tensor of shape [batch_size, fw_num_units].
+ * * 37: The backward input activation state.
+ * A 2-D tensor of shape [batch_size, bw_output_size].
+ * * 38: The backward input cell state.
+ * A 2-D tensor of shape [batch_size, bw_num_units].
+ * * 39: The auxiliary input. Optional.
+ * A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 40: The forward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 41: The forward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 42: The forward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 43: The forward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 44: The backward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 45: The backward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 46: The backward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 47: The backward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 48: The activation function.
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 49: The clipping threshold for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
+ * * 50: The clipping threshold for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
+ * this scalar must be of the type {@link OperandType::FLOAT32},
+ * otherwise if all the input tensors have the type
+ * {@link OperandType::TENSOR_FLOAT16}, this scalar must be
+ * of type {@link OperandType::FLOAT16}.
+ * * 51: merge_outputs
+ * An {@link OperandType::BOOL} scalar specifying if the outputs
+ * from forward and backward cells should be merged.
+ * * 52: time_major
+ * An {@link OperandType::BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The forward output.
+ * A 3-D tensor of shape:
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
+ * * 1: The backward output. Unused if merge_outputs is true.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
+ */
+ BIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_LSTM,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs in forward and backward directions.
+ *
+ * This Op unrolls the input along the sequence dimension, and implements
+ * the following operation for each element in the sequence s =
+ * 1...sequence_length:
+ * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
+ * fw_state * fw_recurrent_weights’ + fw_bias)
+ *
+ * And for each element in sequence t = sequence_length : 1
+ * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
+ * bw_state * bw_recurrent_weights’ + bw_bias)
+ *
+ * Where:
+ * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
+ * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
+ * current “state” which itself is the output from the previous time step
+ * computation;
+ * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
+ * batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * The op also supports an auxiliary input. Regular cell feeds one input
+ * into the two RNN cells in the following way:
+ *
+ * INPUT (INPUT_REVERSED)
+ * | |
+ * ---------------------
+ * | FW_RNN BW_RNN |
+ * ---------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * An op with an auxiliary input takes two inputs and feeds them into the
+ * RNN cells in the following way:
+ *
+ * AUX_INPUT (AUX_INPUT_REVERSED)
+ * | |
+ * INPUT | (INPUT_R'D.)|
+ * | | | |
+ * -----------------------
+ * | \ / \ / |
+ * | FW_RNN BW_RNN |
+ * -----------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * Stacking this op on top of itself allows both forward and backward
+ * outputs from the previous cell to be connected to the next cell's
+ * inputs.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to true, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: fwWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 2: fwRecurrentWeights.
+ * A 2-D tensor of shape [fwNumUnits, fwNumUnits].
+ * * 3: fwBias.
+ * A 1-D tensor of shape [fwNumUnits].
+ * * 4: fwHiddenState.
+ * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: bwWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 6: bwRecurrentWeights.
+ * A 2-D tensor of shape [bwNumUnits, bwNumUnits].
+ * * 7: bwBias.
+ * A 1-D tensor of shape [bwNumUnits].
+ * * 8: bwHiddenState
+ * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 9: auxInput.
+ * A 3-D tensor. The shape is the same as of the input 0.
+ * * 10:fwAuxWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 11:bwAuxWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 12:fusedActivationFunction.
+ * A {@link FusedActivationFunc} value indicating the activation function. If
+ * “NONE” is specified then it results in a linear activation.
+ * * 13:timeMajor
+ * An {@link OperandType::BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 14:mergeOutputs
+ * An {@link OperandType::BOOL} scalar specifying if the outputs
+ * from forward and backward cells are separate (if set to false) or
+ * concatenated (if set to true).
+ * Outputs:
+ * * 0: fwOutput.
+ * A 3-D tensor. The first two dimensions of the shape are defined by
+ * the input 6 (timeMajor) and the third dimension is defined by the
+ * input 14 (mergeOutputs). If timeMajor is set to true, then the first
+ * two dimensions are [maxTime, batchSize], otherwise they are set to
+ * [batchSize, maxTime]. If mergeOutputs is set to true, then the third
+ * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
+ * to fwNumUnits.
+ * * 1: bwOutput.
+ * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
+ * this tensor is not produced. The shape is defined by the input 6
+ * (timeMajor). If it is set to true, then the shape is set to
+ * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
+ * [batchSize, maxTime, bwNumUnits].
+ */
+ BIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:BIDIRECTIONAL_SEQUENCE_RNN,
+
+ /**
+ * Greedily selects a subset of bounding boxes in descending order of score.
+ *
+ * This op applies the NMS algorithm to each class. In each loop of execution,
+ * the box with maximum score gets selected and removed from the pending set.
+ * The scores of the rest of boxes are lowered according to the
+ * intersection-over-union (IOU) overlapping with the previously selected
+ * boxes and a specified NMS kernel method. Any boxes with score less
+ * than a threshold are removed from the pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
+ *
+ * Axis-aligned bounding boxes are represented by their upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+ * of each bounding box proposal. The boxes are grouped by batches in the
+ * first dimension. Zero num_rois is supported for this tensor.
+ * * 1: A 2-D Tensor specifying the bounding boxes of shape
+ * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+ * The boxes are grouped by batches in the first dimension. The sequential
+ * order of the boxes corresponds with input0. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
+ * scale of 0.125. Zero num_rois is supported for this tensor.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
+ * with scores lower than the threshold are filtered before sending
+ * to the NMS algorithm.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of selected bounding boxes for each image. Set to a negative
+ * value for unlimited number of output bounding boxes.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold in hard and linear NMS kernel. This field is ignored if
+ * gaussian kernel is selected.
+ * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
+ * gaussian NMS kernel. This field is ignored if gaussian kernel is
+ * not selected.
+ * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
+ *
+ * Outputs:
+ * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
+ * [num_output_rois], specifying the score of each output box. The boxes
+ * are grouped by batches, but the sequential order in each batch is not
+ * guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the scale and zero point must be the same as input0.
+ * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
+ * [num_output_rois, 4], specifying the coordinates of each
+ * output bounding box with the same format as input1. The sequential
+ * order of the boxes corresponds with output0. For type of
+ * {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
+ * 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the class of each output box. The
+ * sequential order of the boxes corresponds with output0.
+ * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ BOX_WITH_NMS_LIMIT = @1.2::OperationType:BOX_WITH_NMS_LIMIT,
+
+ /**
+ * Casts a tensor to a new type.
+ *
+ * This operation ignores the scale and zeroPoint of quantized tensors,
+ * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
+ * as a tensor of uint8 values.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ */
+ CAST = @1.2::OperationType:CAST,
+
+ /**
+ * Shuffle the channels of the input tensor.
+ *
+ * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
+ * divides the channel dimension into num_groups groups, and reorganizes the
+ * channels by grouping channels with the same index in each group.
+ *
+ * Along the channel dimension, the output is calculated using this formula:
+ *
+ * output_channel[k * num_groups + g] = input_channel[g * group_size + k]
+ *
+ * where group_size = num_channels / num_groups
+ *
+ * The number of channels must be divisible by num_groups.
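+ *
+ * For example, with 6 channels and num_groups = 2 (so group_size = 3), the
+ * output channels are taken from input channels [0, 3, 1, 4, 2, 5].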
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be shuffled.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the dimension
+ * channel shuffle would be performed on. Negative index is used to
+ * specify axis from the end (e.g. -1 for the last axis). Must be in
+ * the range [-n, n).
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ CHANNEL_SHUFFLE = @1.2::OperationType:CHANNEL_SHUFFLE,
+
+ /**
+ * Apply postprocessing steps to bounding box detections.
+ *
+ * Bounding box detections are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
+ * the score of each anchor with each class. Class 0 for each
+ * [batches, num_anchors, 0] is background and will be ignored.
+ * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
+ * the first four values in length_box_encoding specifying the bounding
+ * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
+ * where dy and dx are the linear-scale relative correction factors for the
+ * center position of the bounding box with respect to the width and height,
+ * and dh and dw are the log-scale relative correction factors for the width and
+ * height. All the entries in length_box_encoding beyond the first four
+ * values are ignored in this operation.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
+ * ctr_x are the center position of the box, and h and w are the height
+ * and the width.
+ * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dy in bounding box deltas.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dx in bounding box deltas.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dh in bounding box deltas.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling
+ * factor for dw in bounding box deltas.
+ * * 7: An {@link OperandType::BOOL} scalar, set to true to use the regular
+ * multi-class NMS algorithm that does NMS separately for each class,
+ * set to false for a faster algorithm that only does a single NMS
+ * using the highest class score.
+ * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying
+ * the maximum number of boxes for the output. Boxes with the lowest
+ * scores are discarded to meet the limit.
+ * * 9: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to false, specifying the maximum number of classes per detection.
+ * * 10: An {@link OperandType::INT32} scalar, only used when input7 is
+ * set to true, specifying the maximum number of detections when
+ * applying NMS algorithm for each single class.
+ * * 11: A scalar, score_threshold. Boxes with scores lower than the
+ * threshold are filtered before sending to the NMS algorithm. The
+ * scalar must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
+ * must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 13: An {@link OperandType::BOOL} scalar, set to true to include
+ * background class in the list of label map for the output, set
+ * to false to not include the background. When the background
+ * class is included, it has label 0 and the output classes start
+ * at 1 in the label map, otherwise, the output classes start at 0.
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape
+ * [batches, max_num_detections], specifying the score of each output
+ * detection.
+ * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
+ * coordinates of each output bounding box, with format
+ * [y1, x1, y2, x2].
+ * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [batches, max_num_detections], specifying the class label for each
+ * output detection.
+ * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
+ * specifying the number of valid output detections for each batch.
+ */
+ DETECTION_POSTPROCESSING = @1.2::OperationType:DETECTION_POSTPROCESSING,
+
+ /**
+ * For input tensors x and y, computes x == y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
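+ *
+ * For example, comparing an input of shape [2, 2] with values
+ * [[1, 2], [3, 4]] against an input of shape [2] with values [1, 4]
+ * broadcasts the second input across the rows and produces
+ * [[true, false], [false, true]].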
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ EQUAL = @1.2::OperationType:EQUAL,
+
+ /**
+ * Computes exponential of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ EXP = @1.2::OperationType:EXP,
+
+ /**
+ * Inserts a dimension of 1 into a tensor's shape.
+ *
+ * Given a tensor input, this operation inserts a dimension of 1 at the
+ * given dimension index of input's shape. The dimension index starts at
+ * zero; if you specify a negative dimension index, it is counted backward
+ * from the end.
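+ *
+ * For example, expanding a tensor of shape [2, 3] at dimension index 1
+ * produces a tensor of shape [2, 1, 3], while expanding at index -1
+ * produces a tensor of shape [2, 3, 1].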
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: An {@link OperandType::INT32} scalar specifying the dimension
+ * index to expand. Must be in the range [-(n + 1), (n + 1)).
+ *
+ * Outputs:
+ * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
+ * input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ EXPAND_DIMS = @1.2::OperationType:EXPAND_DIMS,
+
+ /**
+ * Gathers values along an axis.
+ *
+ * Produces an output tensor with shape
+ * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
+ * where:
+ * # Vector indices (output is rank(input0)).
+ * output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+ *
+ * # Higher rank indices (output is rank(input0) + rank(indices) - 1).
+ * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
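+ *
+ * For example, gathering from a [3, 2] tensor with rows r0, r1, r2 along
+ * axis 0 with indices = [2, 0] produces a [2, 2] tensor whose rows are
+ * r2 and r0.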
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor from which to gather values.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis.
+ * Negative index is used to specify axis from the end
+ * (e.g. -1 for the last axis). Must be in the range [-n, n).
+ * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
+ * The values must be in the bounds of the corresponding dimensions
+ * of input0.
+ *
+ * Outputs:
+ * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ GATHER = @1.2::OperationType:GATHER,
+
+ /**
+ * Generate axis-aligned bounding box proposals.
+ *
+ * Bounding box proposals are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+ * An axis-aligned bounding box is represented by its upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor specifying the score of each anchor at each
+ * location. With "NHWC" data layout, the tensor shape is
+ * [batches, height, width, num_anchors]. With "NCHW" data layout,
+ * the tensor shape is [batches, num_anchors, height, width].
+ * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
+ * layout, the tensor shape is [batches, height, width, num_anchors * 4].
+ * With "NCHW" data layout, the tensor shape is
+ * [batches, num_anchors * 4, height, width]. The box deltas are encoded
+ * in the order of [dx, dy, dw, dh], where dx and dy are the linear-scale
+ * relative correction factors for the center position of the bounding box
+ * with respect to the width and height, and dw and dh are the log-scale
+ * relative correction factors for the width and height. The last
+ * dimension is the channel dimension.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
+ * each image in the batch, with format [image_height, image_width].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, this
+ * tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with
+ * scale of 0.125.
+ * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes before going into the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for unlimited number.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the maximum
+ * number of boxes returning from the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for unlimited number.
+ * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU
+ * threshold for hard NMS.
+ * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with
+ * height or width lower than the absolute threshold are filtered out.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and input1. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, of shape
+ * [num_output_rois], specifying the score of each output box.
+ * The boxes are grouped by batches, but the sequential order in
+ * each batch is not guaranteed. For type of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scale and zero
+ * point must be the same as input0.
+ * * 1: A tensor of the same {@link OperandType} as input3, of shape
+ * [num_output_rois, 4], specifying the coordinates of each output
+ * bounding box for each class, with format [x1, y1, x2, y2].
+ * The sequential order of the boxes corresponds with output0.
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ */
+ GENERATE_PROPOSALS = @1.2::OperationType:GENERATE_PROPOSALS,
+
+ /**
+ * For input tensors x and y, computes x > y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ GREATER = @1.2::OperationType:GREATER,
+
+ /**
+ * For input tensors x and y, computes x >= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ GREATER_EQUAL = @1.2::OperationType:GREATER_EQUAL,
+
+ /**
+ * Performs a grouped 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
+ * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
+ * applies a group of different filters to each input channel group, then
+ * concatenates the results together.
+ *
+ * Specifically, the input channels are divided into num_groups groups, each with
+ * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
+ * filters are also divided into num_groups groups, i.e. depth_out is divisible
+ * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
+ * input channel group, and the results are concatenated together.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, g * channel_multiplier + q] =
+ * sum_{di, dj, dk} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj,
+ * g * depth_group + dk] *
+ * filter[g * channel_multiplier + q, di, dj, dk]
+ * ) + bias[channel]
+ *
+ * where channel_multiplier = depth_out / num_groups
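+ *
+ * For example, with depth_in = 6, num_groups = 2 and depth_out = 4 (so
+ * depth_group = 3 and channel_multiplier = 2), filters 0-1 are applied to
+ * input channels 0-2 to produce output channels 0-1, and filters 2-3 are
+ * applied to input channels 3-5 to produce output channels 2-3.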
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (channelDim at
+ * {@link SymmPerChannelQuantParams}) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 10: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the number of
+ * groups.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ GROUPED_CONV_2D = @1.2::OperationType:GROUPED_CONV_2D,
+
+ /**
+ * Localize the maximum keypoints from heatmaps.
+ *
+ * This operation approximates the accurate maximum keypoint scores and
+ * indices after bicubic upscaling by using Taylor expansion up to the
+ * quadratic term.
+ *
+ * The bounding box is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor of shape
+ * [num_boxes, heatmap_size, heatmap_size, num_keypoints],
+ * specifying the heatmaps, the height and width of heatmaps should
+ * be the same, and must be greater than or equal to 2.
+ * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
+ * each with format [x1, y1, x2, y2]. For input0 of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should
+ * be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint
+ * of 0 and scale of 0.125.
+ * * 2: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0, with shape
+ * [num_boxes, num_keypoints], specifying score of the keypoints.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 1: A tensor of the same {@link OperandType} as input1, with shape
+ * [num_boxes, num_keypoints, 2], specifying the location of
+ * the keypoints, the second dimension is organized as
+ * [keypoint_x, keypoint_y].
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ */
+ HEATMAP_MAX_KEYPOINT = @1.2::OperationType:HEATMAP_MAX_KEYPOINT,
+
+ /**
+ * Applies instance normalization to the input tensor.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, h, w, c] =
+ * (input[b, h, w, c] - mean[b, c]) * gamma /
+ * sqrt(var[b, c] + epsilon) + beta
+ *
+ * Where the mean and variance are computed across the spatial dimensions:
+ *
+ * mean[b, c] =
+ * sum_{h, w}(input[b, h, w, c]) / sum(1)
+ *
+ * var[b, c] =
+ * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: A scalar, specifying gamma, the scale applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 2: A scalar, specifying beta, the offset applied to the normalized
+ * tensor. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 3: A scalar, specifying epsilon, the small value added to variance to
+ * avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
+ * * 4: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
+ */
+ INSTANCE_NORMALIZATION = @1.2::OperationType:INSTANCE_NORMALIZATION,
+
+ /**
+ * For input tensors x and y, computes x < y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS = @1.2::OperationType:LESS,
+
+ /**
+ * For input tensors x and y, computes x <= y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LESS_EQUAL = @1.2::OperationType:LESS_EQUAL,
+
+ /**
+ * Computes natural logarithm of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOG = @1.2::OperationType:LOG,
+
+ /**
+ * Returns the truth value of x AND y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_AND = @1.2::OperationType:LOGICAL_AND,
+
+ /**
+ * Computes the truth value of NOT x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ LOGICAL_NOT = @1.2::OperationType:LOGICAL_NOT,
+
+ /**
+ * Returns the truth value of x OR y element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ LOGICAL_OR = @1.2::OperationType:LOGICAL_OR,
+
+ /**
+ * Computes the log softmax activations given logits.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
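+ *
+ * For example, with logits = [1, 2], beta = 1 and axis = 0, the reduced
+ * term is log(exp(1) + exp(2)), approximately 2.313, so the output is
+ * approximately [-1.313, -0.313].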
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor specifying the input logits.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
+ * value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 2: An {@link OperandType::INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: The output tensor of the same {@link OperandType} and shape as
+ * input0.
+ */
+ LOG_SOFTMAX = @1.2::OperationType:LOG_SOFTMAX,
+
+ /**
+ * Returns the element-wise maximum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MAXIMUM = @1.2::OperationType:MAXIMUM,
+
+ /**
+ * Returns the element-wise minimum of two tensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and compatible dimensions
+ * with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ MINIMUM = @1.2::OperationType:MINIMUM,
+
+ /**
+ * Computes numerical negative value element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ NEG = @1.2::OperationType:NEG,
+
+ /**
+ * For input tensors x and y, computes x != y elementwise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandType} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
+ */
+ NOT_EQUAL = @1.2::OperationType:NOT_EQUAL,
+
+ /**
+ * Pads a tensor with the given constant value according to the specified
+ * paddings.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after
+ * the end of dimension i.
+ * * 2: A scalar specifying the value to use for padding input0.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * pad value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * pad value must be of {@link OperandType::FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the pad value must be of {@link OperandType::INT32}. The
+ * scale and zeroPoint are assumed to be the same as in input0.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
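+ *
+ * For example, padding an input of shape [2, 3] with paddings
+ * [[1, 1], [0, 2]] produces an output of shape [4, 5].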
+ */
+ PAD_V2 = @1.2::OperationType:PAD_V2,
+
+ /**
+ * Computes the power of one value to another.
+ *
+ * Given a tensor base and a tensor exponent, this operation computes
+ * base^exponent elementwise.
+ *
+ * This operation supports broadcasting. The size of the output is the
+ * maximum size along each dimension of the input operands. It starts with
+ * the trailing dimensions, and works its way forward.
+ *
+ * For example:
+ * base.dimension = {4, 1, 2}
+ * exponent.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor specifying the base.
+ * * 1: A tensor specifying the exponent.
+ *
+ * Outputs:
+ * * 0: An output tensor.
+ */
+ POW = @1.2::OperationType:POW,
+
+ /**
+ * Parametric Rectified Linear Unit.
+ *
+ * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
+ * is a learned array with the same {@link OperandType} and compatible
+ * dimensions as input x.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input.dimension = {4, 1, 2}
+ * alpha.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A tensor of the same {@link OperandType}, and compatible dimensions
+ * as input0, specifying the alpha.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ PRELU = @1.2::OperationType:PRELU,
+
+ /**
+ * Quantizes the input tensor.
+ *
+ * The formula is:
+ *
+ * output = max(0, min(255, round(input / scale) + zeroPoint))
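+ *
+ * For example, with scale = 0.5 and zeroPoint = 10, an input value of 2.3
+ * quantizes to round(2.3 / 0.5) + 10 = 15, and an input value of -10.0
+ * quantizes to round(-10.0 / 0.5) + 10 = -10, which is clamped to 0.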
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0, but with
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ */
+ QUANTIZE = @1.2::OperationType:QUANTIZE,
+
+ /**
+ * A version of quantized LSTM, using 16 bit quantization for internal
+ * state.
+ *
+ * There is no projection layer, so cell state size is equal to the output
+ * size.
+ *
+ * Inputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, inputSize] specifying the input to the LSTM
+ * cell. Tensor is quantized with a fixed quantization range of
+ * [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+ * * 1: The input-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-input part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 2: The input-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-forget part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 3: The input-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-cell part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 4: The input-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-output part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 5: The recurrent-to-input weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-input part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 6: The recurrent-to-forget weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-forget
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 7: The recurrent-to-cell weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-cell part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 8: The recurrent-to-output weights.
+ * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-output
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 9: The input gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 10: The forget gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 11: The cell bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 12: The output gate bias.
+ * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] specifying the cell state from the
+ * previous time step of the LSTM cell. It is quantized using a
+ * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+ * 32768, zeroPoint = 0).
+ * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, outputSize] specifying the output of the LSTM
+ * cell from previous time-step. Tensor is quantized with a fixed
+ * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+ * 128).
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] which contains a cell state from
+ * the current time step. Tensor is quantized using a quantization
+ * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+ * 0).
+ * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, outputSize] which contains the output value.
+ * Tensor is quantized with a fixed quantization range of [-1, 127/128]
+ * (scale = 1/128, zeroPoint = 128).
+ */
+ QUANTIZED_16BIT_LSTM = @1.2::OperationType:QUANTIZED_16BIT_LSTM,
+
+ /**
+ * Draws samples from a multinomial distribution.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 2-D tensor with shape [batches, classes], specifying the
+ * unnormalized log-probabilities for all classes.
+ * * 1: A scalar {@link OperandType::INT32}, specifying the number of
+ * independent samples to draw for each row slice.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2],
+ * specifying seeds used to initialize the random distribution.
+ *
+ * Outputs:
+ * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
+ * [batches, samples], containing the drawn samples.
+ */
+ RANDOM_MULTINOMIAL = @1.2::OperationType:RANDOM_MULTINOMIAL,
+
+ /**
+ * Reduces a tensor by computing the "logical and" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
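+ *
+ * For example, reducing a tensor of shape [2, 3, 4] over dimension 1
+ * produces a tensor of shape [2, 1, 4] when keep_dims is true, and a
+ * tensor of shape [2, 4] when keep_dims is false.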
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ALL = @1.2::OperationType:REDUCE_ALL,
+
+ /**
+ * Reduces a tensor by computing the "logical or" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_ANY = @1.2::OperationType:REDUCE_ANY,
+
+ /**
+ * Reduces a tensor by computing the maximum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MAX = @1.2::OperationType:REDUCE_MAX,
+
+ /**
+ * Reduces a tensor by computing the minimum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+ * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ REDUCE_MIN = @1.2::OperationType:REDUCE_MIN,
+
+ /**
+ * Reduces a tensor by multiplying elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_PROD = @1.2::OperationType:REDUCE_PROD,
+
+ /**
+ * Reduces a tensor by summing elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0.
+ */
+ REDUCE_SUM = @1.2::OperationType:REDUCE_SUM,
+
+ /**
+ * Select and scale the feature map of each region of interest to a unified
+ * output size by average pooling sampling points from bilinear interpolation.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A spatial scaling factor is applied to map into feature map coordinate.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * No rounding is applied in this operation. The sampling points are
+ * uniformly distributed in the pooling bin and their values are calculated
+ * by bilinear interpolation.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
+ * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in height dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_height/out_height).
+ * * 8: An {@link OperandType::INT32} scalar, specifying the number of
+ * sampling points in width dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_width/out_width).
+ * * 9: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ */
+ ROI_ALIGN = @1.2::OperationType:ROI_ALIGN,
+
+ /**
+ * Select and scale the feature map of each region of interest to a unified
+ * output size by max-pooling.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A spatial scaling factor is applied to map into feature map coordinates.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Rounding is applied in this operation to ensure integer boundary for
+ * regions of interest and pooling bins.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125.
+ * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandType} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ ROI_POOLING = @1.2::OperationType:ROI_POOLING,
+
+ /**
+ * Computes reciprocal of square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ RSQRT = @1.2::OperationType:RSQRT,
+
+ /**
+ * Using a tensor of booleans c and input tensors x and y, select values
+ * elementwise from both input tensors:
+ *
+ * O[i] = c[i] ? x[i] : y[i].
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a
+ * mask that chooses, based on the value at each element, whether the
+ * corresponding element in the output should be taken from input1 (if
+ * true) or input2 (if false).
+ * * 1: An input tensor of the same shape as input0.
+ * * 2: An input tensor of the same shape and type as input1.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint can be different from the input1 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same type and shape as input1 and input2.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ SELECT = @1.2::OperationType:SELECT,
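
A minimal sketch of the selection rule above, assuming same-sized flat buffers; `selectElementwise` is a hypothetical helper, not driver code:

    #include <cstddef>
    #include <vector>

    // out[i] = c[i] ? x[i] : y[i], applied element by element.
    std::vector<float> selectElementwise(const std::vector<bool>& c,
                                         const std::vector<float>& x,
                                         const std::vector<float>& y) {
        std::vector<float> out(c.size());
        for (size_t i = 0; i < c.size(); ++i) {
            out[i] = c[i] ? x[i] : y[i];
        }
        return out;
    }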
+
+ /**
+ * Computes sin of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SIN = @1.2::OperationType:SIN,
+
+ /**
+ * Extracts a slice of specified size from the input tensor starting at a
+ * specified location.
+ *
+ * The starting location is specified as a 1-D tensor containing offsets
+ * for each dimension. The size is specified as a 1-D tensor containing
+ * either the size of the slice along the corresponding dimension or -1. In
+ * the latter case, all the remaining elements in that dimension are included
+ * in the slice.
+ *
+ * The sum of the begin offset and the size of a slice must not exceed the
+ * size of the corresponding dimension.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to take a slice from; may be zero-sized.
+ * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the beginning indices of the slice in each dimension.
+ * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying
+ * the size of the slice in each dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input containing the slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * its scale and zeroPoint must be the same as the input0 scale and zeroPoint.
+ */
+ SLICE = @1.2::OperationType:SLICE,
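
A small sketch of how the -1 convention and the bounds rule above combine; `resolveSliceSizes` is a hypothetical helper, not part of the spec:

    #include <cstdint>
    #include <vector>

    // Resolve a SLICE size vector: -1 means "all remaining elements in that
    // dimension". A valid model also satisfies begin[d] + size[d] <= inputDims[d].
    std::vector<int32_t> resolveSliceSizes(const std::vector<int32_t>& inputDims,
                                           const std::vector<int32_t>& begin,
                                           std::vector<int32_t> size) {
        for (size_t d = 0; d < inputDims.size(); ++d) {
            if (size[d] == -1) size[d] = inputDims[d] - begin[d];
        }
        return size;
    }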
+
+ /**
+ * Splits a tensor along a given axis into num_splits subtensors.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to split.
+ * * 1: An {@link OperandType::INT32} scalar specifying the axis along
+ * which to split.
+ * * 2: An {@link OperandType::INT32} scalar indicating the number of
+ * splits along given axis. Must evenly divide axis size.
+ *
+ * Outputs:
+ * * 0 ~ (num_splits - 1): Resulting subtensors.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ SPLIT = @1.2::OperationType:SPLIT,
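
The shape rule implied above, as a short sketch (hypothetical helper): each output keeps the input shape except along the split axis, which must divide evenly.

    #include <cstdint>
    #include <vector>

    // Shape of each SPLIT output; assumes inputDims[axis] % numSplits == 0.
    std::vector<int32_t> splitOutputShape(std::vector<int32_t> inputDims,
                                          int32_t axis, int32_t numSplits) {
        inputDims[axis] /= numSplits;
        return inputDims;
    }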
+
+ /**
+ * Computes square root of x element-wise.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ */
+ SQRT = @1.2::OperationType:SQRT,
+
+ /**
+ * Constructs a tensor by tiling a given tensor.
+ *
+ * This operation creates a new tensor by replicating `input` `multiples`
+ * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
+ * elements, and the values of `input` are replicated `multiples[i]` times
+ * along the i-th dimension.
+ * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}.
+ * The length of multiples must be n.
+ *
+ * Outputs:
+ * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ TILE = @1.2::OperationType:TILE,
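
The output-shape rule above in sketch form (hypothetical helper); for instance, a [2, 3] input tiled by [1, 2] yields a [2, 6] output:

    #include <cstdint>
    #include <vector>

    // output.dims(i) = input.dims(i) * multiples[i]
    std::vector<int32_t> tileOutputShape(const std::vector<int32_t>& inputDims,
                                         const std::vector<int32_t>& multiples) {
        std::vector<int32_t> out(inputDims.size());
        for (size_t i = 0; i < inputDims.size(); ++i) {
            out[i] = inputDims[i] * multiples[i];
        }
        return out;
    }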
+
+ /**
+ * Finds values and indices of the k largest entries for the last dimension.
+ *
+ * Resulting values in each dimension are sorted in descending order. If
+ * two values are equal, the one with the larger index appears first.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of
+ * top elements to look for along the last dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input, containing the k
+ * largest elements along each last dimensional slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
+ * containing the indices of values within the last dimension of input.
+ */
+ TOPK_V2 = @1.2::OperationType:TOPK_V2,
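
A sketch of the ordering rule above for one 1-D slice, including the tie-break on indices; `topKIndices` is a hypothetical helper and assumes 0 < k <= values.size():

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Indices of the k largest values, sorted by descending value; when two
    // values are equal, the larger index comes first.
    std::vector<int32_t> topKIndices(const std::vector<float>& values, int32_t k) {
        std::vector<int32_t> idx(values.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
            if (values[a] != values[b]) return values[a] > values[b];
            return a > b;  // tie-break: larger index first
        });
        idx.resize(k);
        return idx;
    }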
+
+ /**
+ * Performs the transpose of 2-D convolution operation.
+ *
+ * This operation is sometimes called "deconvolution" after Deconvolutional
+ * Networks, but is actually the transpose (gradient) of
+ * {@link OperationType::CONV_2D} rather than an actual deconvolution.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * Supported tensor {@link OperandType} configurations:
+ * * 16 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output
+ * tensor shape.
+ * * 4: An {@link OperandType::INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
+ * * 5: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link OperandType::INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link OperandType::INT32} scalar, and has to be one of the
+ * {@link FusedActivationFunc} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link OperandType::BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ */
+ TRANSPOSE_CONV_2D = @1.2::OperationType:TRANSPOSE_CONV_2D,
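
For the explicit-padding form, the output spatial size follows the usual transposed-convolution relation; the sketch below states that common formulation as an assumption, not text quoted from the spec:

    #include <cstdint>

    // One spatial dimension of the TRANSPOSE_CONV_2D output with explicit padding.
    int32_t transposeConvOutputSize(int32_t inputSize, int32_t filterSize,
                                    int32_t stride, int32_t paddingBegin,
                                    int32_t paddingEnd) {
        return (inputSize - 1) * stride + filterSize - paddingBegin - paddingEnd;
    }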
+
+ /**
+ * A recurrent neural network specified by an LSTM cell.
+ *
+ * Performs (fully) dynamic unrolling of input.
+ *
+ * This Op unrolls the input along the time dimension, and implements the
+ * following operation for each element in the sequence
+ * s = 1...sequence_length:
+ * outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
+ *
+ * Where LSTMOp is the LSTM op as in {@link OperationType::LSTM},
+ * the "projection" is an optional projection layer from state and output
+ * and the “activation” is the function passed as the
+ * “fused_activation_function” argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where “max_time” is the number of timesteps (sequence length),
+ * “batch_size” corresponds to the batching dimension, and
+ * “input_size” is the size of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ * <ul>
+ * <li>0: None;
+ * <li>1: Relu;
+ * <li>3: Relu6;
+ * <li>4: Tanh;
+ * <li>6: Sigmoid.
+ * </ul>
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * * 23:Time-major if true, batch-major if false.
+ * * 24:The input layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 25:The forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 26:The cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 27:The output layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The output (\f$o_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, output_size]
+ * If batch-major: [batch_size, max_time, output_size]
+ */
+ UNIDIRECTIONAL_SEQUENCE_LSTM = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_LSTM,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs.
+ *
+ * This layer unrolls the input along the sequence dimension, and implements
+ * the following operation
+ * for each element in the sequence s = 1...sequence_length:
+ * outputs[s] = state = activation(inputs[s] * input_weights’ + state *
+ * recurrent_weights’ + bias)
+ *
+ * Where:
+ * * “input_weights” is a weight matrix that multiplies the inputs;
+ * * “recurrent_weights” is a weight matrix that multiplies the current
+ * “state” which itself is the output from the previous time step
+ * computation;
+ * * “bias” is a bias vector (added to each output vector in the batch);
+ * * “activation” is the function passed as the “fused_activation_function”
+ * argument (if not “NONE”).
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: weights.
+ * A 2-D tensor of shape [numUnits, inputSize].
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [numUnits, numUnits].
+ * * 3: bias.
+ * A 1-D tensor of shape [numUnits].
+ * * 4: hidden state
+ * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: fusedActivationFunction.
+ * A {@link FusedActivationFunc} value indicating the activation function. If
+ * “NONE” is specified then it results in a linear activation.
+ * * 6: timeMajor
+ * An {@link OperandType::INT32} scalar specifying the shape format
+ * of input and output tensors. Must be set to either 0 or 1.
+ * Outputs:
+ * * 0: output.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the output has a shape [maxTime, batchSize,
+ * numUnits], otherwise the output has a shape [batchSize, maxTime,
+ * numUnits].
+ */
+ UNIDIRECTIONAL_SEQUENCE_RNN = @1.2::OperationType:UNIDIRECTIONAL_SEQUENCE_RNN,
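
One time step of the recurrence above, as a minimal single-batch sketch; `rnnStep` and the plain function-pointer activation are illustrative assumptions, not the HAL API:

    #include <cstddef>
    #include <vector>

    // state = activation(input * W' + state * R' + bias) for one batch element.
    std::vector<float> rnnStep(const std::vector<float>& input,           // [inputSize]
                               const std::vector<std::vector<float>>& W,  // [numUnits][inputSize]
                               const std::vector<std::vector<float>>& R,  // [numUnits][numUnits]
                               const std::vector<float>& bias,            // [numUnits]
                               const std::vector<float>& state,           // [numUnits]
                               float (*activation)(float)) {
        std::vector<float> next(bias);
        for (size_t u = 0; u < next.size(); ++u) {
            for (size_t i = 0; i < input.size(); ++i) next[u] += W[u][i] * input[i];
            for (size_t j = 0; j < state.size(); ++j) next[u] += R[u][j] * state[j];
            next[u] = activation(next[u]);
        }
        return next;
    }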
+
+ /**
+ * Resizes images to a given size using nearest neighbor interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not the
+ * same as the input aspect ratio. The corner pixels of the output may not be
+ * the same as the corner pixels of the input.
+ *
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16}
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: An {@link OperandType::INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link OperandType::INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link OperandType::FLOAT16} if input0 is
+ * of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} otherwise.
+ * * 3: An {@link OperandType::BOOL} scalar, defaulting to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ */
+ RESIZE_NEAREST_NEIGHBOR = @1.2::OperationType:RESIZE_NEAREST_NEIGHBOR,
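
The resize-by-scale shape rule above, as a one-line sketch (hypothetical helper):

    #include <cmath>
    #include <cstdint>

    // new_width = floor(width * width_scale); the same relation gives the height.
    int32_t scaledDimension(int32_t dimension, float scale) {
        return static_cast<int32_t>(std::floor(dimension * scale));
    }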
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+ FUNDAMENTAL_MAX = 94,
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
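
A sketch of how a client might use BASE_MAX to distinguish extension operation types; the 16/16 prefix split mirrors the extension encoding described for Model::extensionNameToPrefix, and the helper names are assumptions:

    #include <cstdint>

    constexpr uint32_t kBaseMax = 0xFFFF;  // OperationTypeRange::BASE_MAX

    // Values above BASE_MAX are extension operation types.
    bool isExtensionOperationType(uint32_t type) {
        return type > kBaseMax;
    }

    // High 16 bits select the extension prefix (see Model::extensionNameToPrefix);
    // low 16 bits select the operation within that extension.
    uint16_t extensionPrefix(uint32_t type) {
        return static_cast<uint16_t>(type >> 16);
    }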
+
/**
* The capabilities of a driver.
@@ -109,6 +4647,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -233,28 +4797,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
index d41cfd2..e06f5d6 100644
--- a/neuralnetworks/1.3/types.t
+++ b/neuralnetworks/1.3/types.t
@@ -44,6 +44,47 @@
BASE_MAX = 0xFFFF,
};
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+%insert Operation_1.0
+
+%insert Operation_1.1
+
+%insert Operation_1.2
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.2::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operation_1.3_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
/**
* The capabilities of a driver.
@@ -80,6 +121,32 @@
};
/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
* Describes one operand of the model's graph.
*/
struct Operand {
@@ -204,28 +271,6 @@
};
/**
- * Describes one operation of the model's graph.
- */
-struct Operation {
- /**
- * The operation type.
- */
- OperationType type;
-
- /**
- * Describes the table that contains the indexes of the inputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> inputs;
-
- /**
- * Describes the table that contains the indexes of the outputs of the
- * operation. The offset is the index in the operandIndexes table.
- */
- vec<uint32_t> outputs;
-};
-
-/**
* A Neural Network Model.
*
* This includes not only the execution graph, but also constant data such as
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
index d8a7534..60992d5 100644
--- a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -456,8 +456,7 @@
}
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
@@ -519,8 +518,7 @@
}
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
@@ -541,8 +539,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -566,8 +563,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -590,8 +586,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -615,8 +610,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -727,8 +721,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -752,8 +745,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -776,8 +768,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -801,8 +792,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -914,8 +904,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -937,8 +926,7 @@
saveModelToCache(model, modelCache, dataCache, &preparedModel);
ASSERT_NE(preparedModel, nullptr);
// Execute and verify results.
- EvaluatePreparedModel(preparedModel, testModel,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModel, /*testKind=*/TestKind::GENERAL);
// Check if prepareModelFromCache fails.
preparedModel = nullptr;
ErrorStatus status;
@@ -1082,8 +1070,7 @@
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- EvaluatePreparedModel(preparedModel, testModelAdd,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}
@@ -1144,8 +1131,7 @@
ASSERT_EQ(preparedModel, nullptr);
} else {
ASSERT_NE(preparedModel, nullptr);
- EvaluatePreparedModel(preparedModel, testModelAdd,
- /*testDynamicOutputShape=*/false);
+ EvaluatePreparedModel(preparedModel, testModelAdd, /*testKind=*/TestKind::GENERAL);
}
}
}
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 325d641..3e947f5 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -63,15 +63,41 @@
using V1_1::ExecutionPreference;
using V1_2::Constant;
using V1_2::MeasureTiming;
-using V1_2::OperationType;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
using V1_2::implementation::ExecutionCallback;
using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+namespace {
+
+enum class Executor { ASYNC, SYNC, BURST };
+
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+struct TestConfig {
+ Executor executor;
+ MeasureTiming measureTiming;
+ OutputType outputType;
+ // `reportSkipping` indicates whether a test should print an info message if
+ // it is skipped. It defaults to true and is set to false in quantization
+ // coupling tests to suppress skipping an individual test.
+ bool reportSkipping;
+ TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ reportSkipping(true) {}
+ TestConfig(Executor executor, MeasureTiming measureTiming, OutputType outputType,
+ bool reportSkipping)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ reportSkipping(reportSkipping) {}
+};
+
+} // namespace
+
Model createModel(const TestModel& testModel) {
// Model operands.
hidl_vec<Operand> operands(testModel.operands.size());
@@ -206,31 +232,34 @@
return android::nn::ExecutionBurstController::create(preparedModel,
std::chrono::microseconds{0});
}
-enum class Executor { ASYNC, SYNC, BURST };
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- Executor executor, MeasureTiming measure, OutputType outputType) {
+ const TestConfig& testConfig, bool* skipped = nullptr) {
+ if (skipped != nullptr) {
+ *skipped = false;
+ }
// If output0 does not have size larger than one byte, we can not test with insufficient buffer.
- if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT &&
+ !isOutputSizeGreaterThanOne(testModel, 0)) {
return;
}
Request request = createRequest(testModel);
- if (outputType == OutputType::INSUFFICIENT) {
+ if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
ErrorStatus executionStatus;
hidl_vec<OutputShape> outputShapes;
Timing timing;
- switch (executor) {
+ switch (testConfig.executor) {
case Executor::ASYNC: {
SCOPED_TRACE("asynchronous");
// launch execution
sp<ExecutionCallback> executionCallback = new ExecutionCallback();
- Return<ErrorStatus> executionLaunchStatus =
- ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ Return<ErrorStatus> executionLaunchStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, executionCallback);
ASSERT_TRUE(executionLaunchStatus.isOk());
EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
@@ -246,8 +275,8 @@
SCOPED_TRACE("synchronous");
// execute
- Return<ErrorStatus> executionReturnStatus =
- ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ Return<ErrorStatus> executionReturnStatus = ExecutePreparedModel(
+ preparedModel, request, testConfig.measureTiming, &outputShapes, &timing);
ASSERT_TRUE(executionReturnStatus.isOk());
executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
@@ -270,15 +299,21 @@
// execute burst
int n;
std::tie(n, outputShapes, timing, std::ignore) =
- controller->compute(request, measure, keys);
+ controller->compute(request, testConfig.measureTiming, keys);
executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
}
- if (outputType != OutputType::FULLY_SPECIFIED &&
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ if (skipped != nullptr) {
+ *skipped = true;
+ }
+ if (!testConfig.reportSkipping) {
+ return;
+ }
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
"execute model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
@@ -286,7 +321,7 @@
<< std::endl;
GTEST_SKIP();
}
- if (measure == MeasureTiming::NO) {
+ if (testConfig.measureTiming == MeasureTiming::NO) {
EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
} else {
@@ -295,7 +330,7 @@
}
}
- switch (outputType) {
+ switch (testConfig.outputType) {
case OutputType::FULLY_SPECIFIED:
// If the model output operands are fully specified, outputShapes must be either
// either empty, or have the same number of elements as the number of outputs.
@@ -332,59 +367,117 @@
}
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- bool testDynamicOutputShape) {
- if (testDynamicOutputShape) {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::UNSPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::INSUFFICIENT);
- } else {
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
- EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
- OutputType::FULLY_SPECIFIED);
+ TestKind testKind) {
+ std::initializer_list<OutputType> outputTypesList;
+ std::initializer_list<MeasureTiming> measureTimingList;
+ std::initializer_list<Executor> executorList;
+
+ switch (testKind) {
+ case TestKind::GENERAL: {
+ outputTypesList = {OutputType::FULLY_SPECIFIED};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ } break;
+ case TestKind::DYNAMIC_SHAPE: {
+ outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
+ measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
+ executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ } break;
+ case TestKind::QUANTIZATION_COUPLING: {
+ LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
+ return;
+ } break;
+ }
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig(executor, measureTiming, outputType);
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
+ }
}
}
-void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+void EvaluatePreparedCoupledModels(const sp<IPreparedModel>& preparedModel,
+ const TestModel& testModel,
+ const sp<IPreparedModel>& preparedCoupledModel,
+ const TestModel& coupledModel) {
+ std::initializer_list<OutputType> outputTypesList = {OutputType::FULLY_SPECIFIED};
+ std::initializer_list<MeasureTiming> measureTimingList = {MeasureTiming::NO,
+ MeasureTiming::YES};
+ std::initializer_list<Executor> executorList = {Executor::ASYNC, Executor::SYNC,
+ Executor::BURST};
+
+ for (const OutputType outputType : outputTypesList) {
+ for (const MeasureTiming measureTiming : measureTimingList) {
+ for (const Executor executor : executorList) {
+ const TestConfig testConfig(executor, measureTiming, outputType,
+ /*reportSkipping=*/false);
+ bool baseSkipped = false;
+ EvaluatePreparedModel(preparedModel, testModel, testConfig, &baseSkipped);
+ bool coupledSkipped = false;
+ EvaluatePreparedModel(preparedCoupledModel, coupledModel, testConfig,
+ &coupledSkipped);
+ ASSERT_EQ(baseSkipped, coupledSkipped);
+ if (baseSkipped) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service "
+ "cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ }
+ }
+ }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) {
Model model = createModel(testModel);
- if (testDynamicOutputShape) {
+ if (testKind == TestKind::DYNAMIC_SHAPE) {
makeOutputDimensionsUnspecified(&model);
}
sp<IPreparedModel> preparedModel;
- createPreparedModel(device, model, &preparedModel);
- if (preparedModel == nullptr) return;
-
- EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+ switch (testKind) {
+ case TestKind::GENERAL: {
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+ EvaluatePreparedModel(preparedModel, testModel, TestKind::GENERAL);
+ } break;
+ case TestKind::DYNAMIC_SHAPE: {
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+ EvaluatePreparedModel(preparedModel, testModel, TestKind::DYNAMIC_SHAPE);
+ } break;
+ case TestKind::QUANTIZATION_COUPLING: {
+ ASSERT_TRUE(testModel.hasQuant8AsymmOperands());
+ createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ false);
+ TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
+ sp<IPreparedModel> preparedCoupledModel;
+ createPreparedModel(device, createModel(signedQuantizedModel), &preparedCoupledModel,
+ /*reportSkipping*/ false);
+ // If we couldn't prepare a model with unsigned quantization, we must
+ // fail to prepare a model with signed quantization as well.
+ if (preparedModel == nullptr) {
+ ASSERT_EQ(preparedCoupledModel, nullptr);
+ // If we failed to prepare both of the models, we can safely skip
+ // the test.
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout
+ << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ ASSERT_NE(preparedCoupledModel, nullptr);
+ EvaluatePreparedCoupledModels(preparedModel, testModel, preparedCoupledModel,
+ signedQuantizedModel);
+ } break;
+ }
}
void GeneratedTestBase::SetUp() {
@@ -407,12 +500,19 @@
// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTest {};
+// Tag for the quantization coupling tests
+class DISABLED_QuantizationCouplingTest : public GeneratedTest {};
+
TEST_P(GeneratedTest, Test) {
- Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::GENERAL);
}
TEST_P(DynamicOutputShapeTest, Test) {
- Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::DYNAMIC_SHAPE);
+}
+
+TEST_P(DISABLED_QuantizationCouplingTest, Test) {
+ Execute(kDevice, kTestModel, /*testKind=*/TestKind::QUANTIZATION_COUPLING);
}
INSTANTIATE_GENERATED_TEST(GeneratedTest,
@@ -421,4 +521,8 @@
INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
[](const TestModel& testModel) { return !testModel.expectFailure; });
+INSTANTIATE_GENERATED_TEST(DISABLED_QuantizationCouplingTest, [](const TestModel& testModel) {
+ return testModel.hasQuant8AsymmOperands() && testModel.operations.size() == 1;
+});
+
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 45cff5b..ad6323f 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -57,8 +57,19 @@
void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
+enum class TestKind {
+ // Runs a test model and compares the results to golden data
+ GENERAL,
+ // Same as GENERAL but sets dimensions for the output tensors to zeros
+ DYNAMIC_SHAPE,
+ // Tests if quantized model with TENSOR_QUANT8_ASYMM produces the same result
+ // (OK/SKIPPED/FAILED) as the model with all such tensors converted to
+ // TENSOR_QUANT8_ASYMM_SIGNED.
+ QUANTIZATION_COUPLING
+};
+
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
- const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+ const test_helper::TestModel& testModel, TestKind testKind);
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
index 7361078..a7569e6 100644
--- a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -25,8 +25,6 @@
#define CHECK_TEST_ENUM(EnumType, enumValue) \
static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
-using V1_2::OperationType;
-
CHECK_TEST_ENUM(OperandType, FLOAT32);
CHECK_TEST_ENUM(OperandType, INT32);
CHECK_TEST_ENUM(OperandType, UINT32);
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
index 1ff02dc..242e12e 100644
--- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -27,7 +27,6 @@
using V1_0::ErrorStatus;
using V1_0::OperandLifeTime;
using V1_1::ExecutionPreference;
-using V1_2::OperationType;
using V1_2::OperationTypeRange;
using V1_2::SymmPerChannelQuantParams;
using HidlToken =
@@ -345,7 +344,17 @@
return true;
}
} break;
- case OperationType::QUANTIZE:
+ case OperationType::QUANTIZE: {
+ if (operand == operation.inputs[0] &&
+ (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+ return true;
+ }
+ if (operand == operation.outputs[0] &&
+ (type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)) {
+ return true;
+ }
+ } break;
case OperationType::RANDOM_MULTINOMIAL: {
if (operand == operation.inputs[0] &&
(type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 625913d..92d8fa7 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -37,7 +37,7 @@
// internal helper function
void createPreparedModel(const sp<IDevice>& device, const Model& model,
- sp<IPreparedModel>* preparedModel) {
+ sp<IPreparedModel>* preparedModel, bool reportSkipping) {
ASSERT_NE(nullptr, preparedModel);
*preparedModel = nullptr;
@@ -74,6 +74,9 @@
// can continue.
if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
ASSERT_EQ(nullptr, preparedModel->get());
+ if (!reportSkipping) {
+ return;
+ }
LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
"model that it does not support.";
std::cout << "[ ] Early termination of test because vendor service cannot "
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 8cb42d4..4e51052 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -47,7 +47,7 @@
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
- sp<IPreparedModel>* preparedModel);
+ sp<IPreparedModel>* preparedModel, bool reportSkipping = true);
// Utility function to get PreparedModel from callback and downcast to V1_2.
sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
diff --git a/radio/1.0/vts/functional/Android.bp b/radio/1.0/vts/functional/Android.bp
index 9dec2f2..2351d90 100644
--- a/radio/1.0/vts/functional/Android.bp
+++ b/radio/1.0/vts/functional/Android.bp
@@ -33,7 +33,8 @@
static_libs: [
"android.hardware.radio@1.0",
],
- test_suites: ["general-tests"],
+ test_config: "vts_hal_radio_target_test.xml",
+ test_suites: ["general-tests", "vts-core"],
}
cc_test {
@@ -47,7 +48,8 @@
static_libs: [
"android.hardware.radio@1.0",
],
- test_suites: ["general-tests"],
+ test_config: "vts_hal_sap_target_test.xml",
+ test_suites: ["general-tests", "vts-core"],
}
cc_library_static {
diff --git a/radio/1.0/vts/functional/VtsHalRadioV1_0TargetTest.cpp b/radio/1.0/vts/functional/VtsHalRadioV1_0TargetTest.cpp
index d53c062..9d61b52 100644
--- a/radio/1.0/vts/functional/VtsHalRadioV1_0TargetTest.cpp
+++ b/radio/1.0/vts/functional/VtsHalRadioV1_0TargetTest.cpp
@@ -14,12 +14,18 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <radio_hidl_hal_utils_v1_0.h>
+INSTANTIATE_TEST_SUITE_P(PerInstance, RadioHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_0::IRadio::descriptor)),
+ android::hardware::PrintInstanceNameToString);
+
int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(RadioHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
- RadioHidlEnvironment::Instance()->init(&argc, argv);
// setup seed for rand function
int seedSrand = time(NULL);
diff --git a/radio/1.0/vts/functional/VtsHalSapV1_0TargetTest.cpp b/radio/1.0/vts/functional/VtsHalSapV1_0TargetTest.cpp
index 859e6fb..b80b971 100644
--- a/radio/1.0/vts/functional/VtsHalSapV1_0TargetTest.cpp
+++ b/radio/1.0/vts/functional/VtsHalSapV1_0TargetTest.cpp
@@ -14,12 +14,18 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <sap_hidl_hal_utils.h>
+INSTANTIATE_TEST_SUITE_P(PerInstance, SapHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_0::ISap::descriptor)),
+ android::hardware::PrintInstanceNameToString);
+
int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(SapHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
- SapHidlEnvironment::Instance()->init(&argc, argv);
// setup seed for rand function
int seedSrand = time(NULL);
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_cell_broadcast.cpp b/radio/1.0/vts/functional/radio_hidl_hal_cell_broadcast.cpp
index 2c1eb60..125ea0c 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_cell_broadcast.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_cell_broadcast.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.setGsmBroadcastConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, setGsmBroadcastConfig) {
+TEST_P(RadioHidlTest, setGsmBroadcastConfig) {
serial = GetRandomSerialNumber();
// Create GsmBroadcastSmsConfigInfo #1
@@ -84,7 +84,7 @@
/*
* Test IRadio.getGsmBroadcastConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, getGsmBroadcastConfig) {
+TEST_P(RadioHidlTest, getGsmBroadcastConfig) {
serial = GetRandomSerialNumber();
radio->getGsmBroadcastConfig(serial);
@@ -104,7 +104,7 @@
/*
* Test IRadio.setCdmaBroadcastConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, setCdmaBroadcastConfig) {
+TEST_P(RadioHidlTest, setCdmaBroadcastConfig) {
serial = GetRandomSerialNumber();
CdmaBroadcastSmsConfigInfo cbSmsConfig;
@@ -131,7 +131,7 @@
/*
* Test IRadio.getCdmaBroadcastConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, getCdmaBroadcastConfig) {
+TEST_P(RadioHidlTest, getCdmaBroadcastConfig) {
serial = GetRandomSerialNumber();
radio->getCdmaBroadcastConfig(serial);
@@ -149,7 +149,7 @@
/*
* Test IRadio.setCdmaBroadcastActivation() for the response returned.
*/
-TEST_F(RadioHidlTest, setCdmaBroadcastActivation) {
+TEST_P(RadioHidlTest, setCdmaBroadcastActivation) {
serial = GetRandomSerialNumber();
bool activate = false;
@@ -169,7 +169,7 @@
/*
* Test IRadio.setGsmBroadcastActivation() for the response returned.
*/
-TEST_F(RadioHidlTest, setGsmBroadcastActivation) {
+TEST_P(RadioHidlTest, setGsmBroadcastActivation) {
serial = GetRandomSerialNumber();
bool activate = false;
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_data.cpp b/radio/1.0/vts/functional/radio_hidl_hal_data.cpp
index eaef3ed..d937d74 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_data.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_data.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.getDataRegistrationState() for the response returned.
*/
-TEST_F(RadioHidlTest, getDataRegistrationState) {
+TEST_P(RadioHidlTest, getDataRegistrationState) {
serial = GetRandomSerialNumber();
radio->getDataRegistrationState(serial);
@@ -99,7 +99,7 @@
/*
* Test IRadio.setupDataCall() for the response returned.
*/
-TEST_F(RadioHidlTest, setupDataCall) {
+TEST_P(RadioHidlTest, setupDataCall) {
serial = GetRandomSerialNumber();
RadioTechnology radioTechnology = RadioTechnology::LTE;
@@ -147,7 +147,7 @@
/*
* Test IRadio.deactivateDataCall() for the response returned.
*/
-TEST_F(RadioHidlTest, deactivateDataCall) {
+TEST_P(RadioHidlTest, deactivateDataCall) {
serial = GetRandomSerialNumber();
int cid = 1;
bool reasonRadioShutDown = false;
@@ -169,7 +169,7 @@
/*
* Test IRadio.getDataCallList() for the response returned.
*/
-TEST_F(RadioHidlTest, getDataCallList) {
+TEST_P(RadioHidlTest, getDataCallList) {
serial = GetRandomSerialNumber();
radio->getDataCallList(serial);
@@ -188,7 +188,7 @@
/*
* Test IRadio.setInitialAttachApn() for the response returned.
*/
-TEST_F(RadioHidlTest, setInitialAttachApn) {
+TEST_P(RadioHidlTest, setInitialAttachApn) {
serial = GetRandomSerialNumber();
DataProfileInfo dataProfileInfo;
@@ -231,7 +231,7 @@
/*
* Test IRadio.setDataAllowed() for the response returned.
*/
-TEST_F(RadioHidlTest, setDataAllowed) {
+TEST_P(RadioHidlTest, setDataAllowed) {
serial = GetRandomSerialNumber();
bool allow = true;
@@ -249,7 +249,7 @@
/*
* Test IRadio.setDataProfile() for the response returned.
*/
-TEST_F(RadioHidlTest, setDataProfile) {
+TEST_P(RadioHidlTest, setDataProfile) {
serial = GetRandomSerialNumber();
// Create a dataProfileInfo
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_icc.cpp b/radio/1.0/vts/functional/radio_hidl_hal_icc.cpp
index 2670d96..60cb2fe 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_icc.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_icc.cpp
@@ -19,7 +19,7 @@
/*
* Test IRadio.getIccCardStatus() for the response returned.
*/
-TEST_F(RadioHidlTest, getIccCardStatus) {
+TEST_P(RadioHidlTest, getIccCardStatus) {
EXPECT_LE(cardStatus.applications.size(), (unsigned int)RadioConst::CARD_MAX_APPS);
EXPECT_LT(cardStatus.gsmUmtsSubscriptionAppIndex, (int)RadioConst::CARD_MAX_APPS);
EXPECT_LT(cardStatus.cdmaSubscriptionAppIndex, (int)RadioConst::CARD_MAX_APPS);
@@ -29,7 +29,7 @@
/*
* Test IRadio.supplyIccPinForApp() for the response returned
*/
-TEST_F(RadioHidlTest, supplyIccPinForApp) {
+TEST_P(RadioHidlTest, supplyIccPinForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -54,7 +54,7 @@
/*
* Test IRadio.supplyIccPukForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, supplyIccPukForApp) {
+TEST_P(RadioHidlTest, supplyIccPukForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -78,7 +78,7 @@
/*
* Test IRadio.supplyIccPin2ForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, supplyIccPin2ForApp) {
+TEST_P(RadioHidlTest, supplyIccPin2ForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -104,7 +104,7 @@
/*
* Test IRadio.supplyIccPuk2ForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, supplyIccPuk2ForApp) {
+TEST_P(RadioHidlTest, supplyIccPuk2ForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -128,7 +128,7 @@
/*
* Test IRadio.changeIccPinForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, changeIccPinForApp) {
+TEST_P(RadioHidlTest, changeIccPinForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -153,7 +153,7 @@
/*
* Test IRadio.changeIccPin2ForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, changeIccPin2ForApp) {
+TEST_P(RadioHidlTest, changeIccPin2ForApp) {
serial = GetRandomSerialNumber();
// Pass wrong password and check PASSWORD_INCORRECT returned for 3GPP and
@@ -179,7 +179,7 @@
/*
* Test IRadio.getImsiForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, getImsiForApp) {
+TEST_P(RadioHidlTest, getImsiForApp) {
serial = GetRandomSerialNumber();
// Check success returned while getting imsi for 3GPP and 3GPP2 apps only
@@ -208,7 +208,7 @@
/*
* Test IRadio.iccIOForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, iccIOForApp) {
+TEST_P(RadioHidlTest, iccIOForApp) {
serial = GetRandomSerialNumber();
for (int i = 0; i < (int)cardStatus.applications.size(); i++) {
@@ -233,7 +233,7 @@
/*
* Test IRadio.iccTransmitApduBasicChannel() for the response returned.
*/
-TEST_F(RadioHidlTest, iccTransmitApduBasicChannel) {
+TEST_P(RadioHidlTest, iccTransmitApduBasicChannel) {
serial = GetRandomSerialNumber();
SimApdu msg;
memset(&msg, 0, sizeof(msg));
@@ -250,7 +250,7 @@
/*
* Test IRadio.iccOpenLogicalChannel() for the response returned.
*/
-TEST_F(RadioHidlTest, iccOpenLogicalChannel) {
+TEST_P(RadioHidlTest, iccOpenLogicalChannel) {
serial = GetRandomSerialNumber();
int p2 = 0x04;
// Specified in ISO 7816-4 clause 7.1.1 0x04 means that FCP template is requested.
@@ -265,7 +265,7 @@
/*
* Test IRadio.iccCloseLogicalChannel() for the response returned.
*/
-TEST_F(RadioHidlTest, iccCloseLogicalChannel) {
+TEST_P(RadioHidlTest, iccCloseLogicalChannel) {
serial = GetRandomSerialNumber();
// Try closing invalid channel and check INVALID_ARGUMENTS returned as error
radio->iccCloseLogicalChannel(serial, 0);
@@ -279,7 +279,7 @@
/*
* Test IRadio.iccTransmitApduLogicalChannel() for the response returned.
*/
-TEST_F(RadioHidlTest, iccTransmitApduLogicalChannel) {
+TEST_P(RadioHidlTest, iccTransmitApduLogicalChannel) {
serial = GetRandomSerialNumber();
SimApdu msg;
memset(&msg, 0, sizeof(msg));
@@ -296,7 +296,7 @@
/*
* Test IRadio.requestIccSimAuthentication() for the response returned.
*/
-TEST_F(RadioHidlTest, requestIccSimAuthentication) {
+TEST_P(RadioHidlTest, requestIccSimAuthentication) {
serial = GetRandomSerialNumber();
// Pass wrong challenge string and check RadioError::INVALID_ARGUMENTS
@@ -315,7 +315,7 @@
/*
* Test IRadio.supplyNetworkDepersonalization() for the response returned.
*/
-TEST_F(RadioHidlTest, supplyNetworkDepersonalization) {
+TEST_P(RadioHidlTest, supplyNetworkDepersonalization) {
serial = GetRandomSerialNumber();
radio->supplyNetworkDepersonalization(serial, hidl_string("test"));
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_ims.cpp b/radio/1.0/vts/functional/radio_hidl_hal_ims.cpp
index 4331c06..e253dcf 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_ims.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_ims.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.getClir() for the response returned.
*/
-TEST_F(RadioHidlTest, getClir) {
+TEST_P(RadioHidlTest, getClir) {
serial = GetRandomSerialNumber();
radio->getClir(serial);
@@ -39,7 +39,7 @@
/*
* Test IRadio.setClir() for the response returned.
*/
-TEST_F(RadioHidlTest, setClir) {
+TEST_P(RadioHidlTest, setClir) {
serial = GetRandomSerialNumber();
int32_t status = 1;
@@ -57,7 +57,7 @@
/*
* Test IRadio.getFacilityLockForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, getFacilityLockForApp) {
+TEST_P(RadioHidlTest, getFacilityLockForApp) {
serial = GetRandomSerialNumber();
std::string facility = "";
std::string password = "";
@@ -80,7 +80,7 @@
/*
* Test IRadio.setFacilityLockForApp() for the response returned.
*/
-TEST_F(RadioHidlTest, setFacilityLockForApp) {
+TEST_P(RadioHidlTest, setFacilityLockForApp) {
serial = GetRandomSerialNumber();
std::string facility = "";
bool lockState = false;
@@ -104,7 +104,7 @@
/*
* Test IRadio.setBarringPassword() for the response returned.
*/
-TEST_F(RadioHidlTest, setBarringPassword) {
+TEST_P(RadioHidlTest, setBarringPassword) {
serial = GetRandomSerialNumber();
std::string facility = "";
std::string oldPassword = "";
@@ -127,7 +127,7 @@
/*
* Test IRadio.getClip() for the response returned.
*/
-TEST_F(RadioHidlTest, getClip) {
+TEST_P(RadioHidlTest, getClip) {
serial = GetRandomSerialNumber();
radio->getClip(serial);
@@ -145,7 +145,7 @@
/*
* Test IRadio.setSuppServiceNotifications() for the response returned.
*/
-TEST_F(RadioHidlTest, setSuppServiceNotifications) {
+TEST_P(RadioHidlTest, setSuppServiceNotifications) {
serial = GetRandomSerialNumber();
bool enable = false;
@@ -164,7 +164,7 @@
/*
* Test IRadio.requestIsimAuthentication() for the response returned.
*/
-TEST_F(RadioHidlTest, requestIsimAuthentication) {
+TEST_P(RadioHidlTest, requestIsimAuthentication) {
serial = GetRandomSerialNumber();
std::string challenge = "";
@@ -186,7 +186,7 @@
/*
* Test IRadio.getImsRegistrationState() for the response returned.
*/
-TEST_F(RadioHidlTest, getImsRegistrationState) {
+TEST_P(RadioHidlTest, getImsRegistrationState) {
serial = GetRandomSerialNumber();
radio->getImsRegistrationState(serial);
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_misc.cpp b/radio/1.0/vts/functional/radio_hidl_hal_misc.cpp
index 3499762..d04cb36 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_misc.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_misc.cpp
@@ -19,7 +19,7 @@
/*
* Test IRadio.getSignalStrength() for the response returned.
*/
-TEST_F(RadioHidlTest, getSignalStrength) {
+TEST_P(RadioHidlTest, getSignalStrength) {
serial = GetRandomSerialNumber();
radio->getSignalStrength(serial);
@@ -35,7 +35,7 @@
/*
* Test IRadio.getVoiceRegistrationState() for the response returned.
*/
-TEST_F(RadioHidlTest, getVoiceRegistrationState) {
+TEST_P(RadioHidlTest, getVoiceRegistrationState) {
serial = GetRandomSerialNumber();
radio->getVoiceRegistrationState(serial);
@@ -51,7 +51,7 @@
/*
* Test IRadio.getOperator() for the response returned.
*/
-TEST_F(RadioHidlTest, getOperator) {
+TEST_P(RadioHidlTest, getOperator) {
serial = GetRandomSerialNumber();
radio->getOperator(serial);
@@ -67,7 +67,7 @@
/*
* Test IRadio.setRadioPower() for the response returned.
*/
-TEST_F(RadioHidlTest, setRadioPower) {
+TEST_P(RadioHidlTest, setRadioPower) {
serial = GetRandomSerialNumber();
radio->setRadioPower(serial, 1);
@@ -83,7 +83,7 @@
/*
* Test IRadio.getNetworkSelectionMode() for the response returned.
*/
-TEST_F(RadioHidlTest, getNetworkSelectionMode) {
+TEST_P(RadioHidlTest, getNetworkSelectionMode) {
serial = GetRandomSerialNumber();
radio->getNetworkSelectionMode(serial);
@@ -99,7 +99,7 @@
/*
* Test IRadio.setNetworkSelectionModeAutomatic() for the response returned.
*/
-TEST_F(RadioHidlTest, setNetworkSelectionModeAutomatic) {
+TEST_P(RadioHidlTest, setNetworkSelectionModeAutomatic) {
serial = GetRandomSerialNumber();
radio->setNetworkSelectionModeAutomatic(serial);
@@ -118,7 +118,7 @@
/*
* Test IRadio.setNetworkSelectionModeManual() for the response returned.
*/
-TEST_F(RadioHidlTest, setNetworkSelectionModeManual) {
+TEST_P(RadioHidlTest, setNetworkSelectionModeManual) {
serial = GetRandomSerialNumber();
radio->setNetworkSelectionModeManual(serial, "123456");
@@ -137,7 +137,7 @@
/*
* Test IRadio.getAvailableNetworks() for the response returned.
*/
-TEST_F(RadioHidlTest, getAvailableNetworks) {
+TEST_P(RadioHidlTest, getAvailableNetworks) {
serial = GetRandomSerialNumber();
radio->getAvailableNetworks(serial);
@@ -158,7 +158,7 @@
/*
* Test IRadio.getBasebandVersion() for the response returned.
*/
-TEST_F(RadioHidlTest, getBasebandVersion) {
+TEST_P(RadioHidlTest, getBasebandVersion) {
serial = GetRandomSerialNumber();
radio->getBasebandVersion(serial);
@@ -174,7 +174,7 @@
/*
* Test IRadio.setBandMode() for the response returned.
*/
-TEST_F(RadioHidlTest, setBandMode) {
+TEST_P(RadioHidlTest, setBandMode) {
serial = GetRandomSerialNumber();
radio->setBandMode(serial, RadioBandMode::BAND_MODE_USA);
@@ -191,7 +191,7 @@
/*
* Test IRadio.getAvailableBandModes() for the response returned.
*/
-TEST_F(RadioHidlTest, getAvailableBandModes) {
+TEST_P(RadioHidlTest, getAvailableBandModes) {
serial = GetRandomSerialNumber();
radio->getAvailableBandModes(serial);
@@ -207,7 +207,7 @@
/*
* Test IRadio.setPreferredNetworkType() for the response returned.
*/
-TEST_F(RadioHidlTest, setPreferredNetworkType) {
+TEST_P(RadioHidlTest, setPreferredNetworkType) {
serial = GetRandomSerialNumber();
radio->setPreferredNetworkType(serial, PreferredNetworkType::GSM_ONLY);
@@ -224,7 +224,7 @@
/*
* Test IRadio.getPreferredNetworkType() for the response returned.
*/
-TEST_F(RadioHidlTest, getPreferredNetworkType) {
+TEST_P(RadioHidlTest, getPreferredNetworkType) {
serial = GetRandomSerialNumber();
radio->getPreferredNetworkType(serial);
@@ -240,7 +240,7 @@
/*
* Test IRadio.getNeighboringCids() for the response returned.
*/
-TEST_F(RadioHidlTest, getNeighboringCids) {
+TEST_P(RadioHidlTest, getNeighboringCids) {
serial = GetRandomSerialNumber();
radio->getNeighboringCids(serial);
@@ -258,7 +258,7 @@
/*
* Test IRadio.setLocationUpdates() for the response returned.
*/
-TEST_F(RadioHidlTest, setLocationUpdates) {
+TEST_P(RadioHidlTest, setLocationUpdates) {
serial = GetRandomSerialNumber();
radio->setLocationUpdates(serial, true);
@@ -275,7 +275,7 @@
/*
* Test IRadio.setCdmaRoamingPreference() for the response returned.
*/
-TEST_F(RadioHidlTest, setCdmaRoamingPreference) {
+TEST_P(RadioHidlTest, setCdmaRoamingPreference) {
serial = GetRandomSerialNumber();
radio->setCdmaRoamingPreference(serial, CdmaRoamingType::HOME_NETWORK);
@@ -293,7 +293,7 @@
/*
* Test IRadio.getCdmaRoamingPreference() for the response returned.
*/
-TEST_F(RadioHidlTest, getCdmaRoamingPreference) {
+TEST_P(RadioHidlTest, getCdmaRoamingPreference) {
serial = GetRandomSerialNumber();
radio->getCdmaRoamingPreference(serial);
@@ -312,7 +312,7 @@
/*
* Test IRadio.getTTYMode() for the response returned.
*/
-TEST_F(RadioHidlTest, getTTYMode) {
+TEST_P(RadioHidlTest, getTTYMode) {
serial = GetRandomSerialNumber();
radio->getTTYMode(serial);
@@ -328,7 +328,7 @@
/*
* Test IRadio.setTTYMode() for the response returned.
*/
-TEST_F(RadioHidlTest, setTTYMode) {
+TEST_P(RadioHidlTest, setTTYMode) {
serial = GetRandomSerialNumber();
radio->setTTYMode(serial, TtyMode::OFF);
@@ -344,7 +344,7 @@
/*
* Test IRadio.setPreferredVoicePrivacy() for the response returned.
*/
-TEST_F(RadioHidlTest, setPreferredVoicePrivacy) {
+TEST_P(RadioHidlTest, setPreferredVoicePrivacy) {
serial = GetRandomSerialNumber();
radio->setPreferredVoicePrivacy(serial, true);
@@ -361,7 +361,7 @@
/*
* Test IRadio.getPreferredVoicePrivacy() for the response returned.
*/
-TEST_F(RadioHidlTest, getPreferredVoicePrivacy) {
+TEST_P(RadioHidlTest, getPreferredVoicePrivacy) {
serial = GetRandomSerialNumber();
radio->getPreferredVoicePrivacy(serial);
@@ -378,7 +378,7 @@
/*
* Test IRadio.getCDMASubscription() for the response returned.
*/
-TEST_F(RadioHidlTest, getCDMASubscription) {
+TEST_P(RadioHidlTest, getCDMASubscription) {
serial = GetRandomSerialNumber();
radio->getCDMASubscription(serial);
@@ -396,7 +396,7 @@
/*
* Test IRadio.getDeviceIdentity() for the response returned.
*/
-TEST_F(RadioHidlTest, getDeviceIdentity) {
+TEST_P(RadioHidlTest, getDeviceIdentity) {
serial = GetRandomSerialNumber();
radio->getDeviceIdentity(serial);
@@ -413,7 +413,7 @@
/*
* Test IRadio.exitEmergencyCallbackMode() for the response returned.
*/
-TEST_F(RadioHidlTest, exitEmergencyCallbackMode) {
+TEST_P(RadioHidlTest, exitEmergencyCallbackMode) {
serial = GetRandomSerialNumber();
radio->exitEmergencyCallbackMode(serial);
@@ -431,7 +431,7 @@
/*
* Test IRadio.getCdmaSubscriptionSource() for the response returned.
*/
-TEST_F(RadioHidlTest, getCdmaSubscriptionSource) {
+TEST_P(RadioHidlTest, getCdmaSubscriptionSource) {
serial = GetRandomSerialNumber();
radio->getCdmaSubscriptionSource(serial);
@@ -449,7 +449,7 @@
/*
* Test IRadio.setCdmaSubscriptionSource() for the response returned.
*/
-TEST_F(RadioHidlTest, setCdmaSubscriptionSource) {
+TEST_P(RadioHidlTest, setCdmaSubscriptionSource) {
serial = GetRandomSerialNumber();
radio->setCdmaSubscriptionSource(serial, CdmaSubscriptionSource::RUIM_SIM);
@@ -468,7 +468,7 @@
/*
* Test IRadio.getVoiceRadioTechnology() for the response returned.
*/
-TEST_F(RadioHidlTest, getVoiceRadioTechnology) {
+TEST_P(RadioHidlTest, getVoiceRadioTechnology) {
serial = GetRandomSerialNumber();
radio->getVoiceRadioTechnology(serial);
@@ -484,7 +484,7 @@
/*
* Test IRadio.getCellInfoList() for the response returned.
*/
-TEST_F(RadioHidlTest, getCellInfoList) {
+TEST_P(RadioHidlTest, getCellInfoList) {
serial = GetRandomSerialNumber();
radio->getCellInfoList(serial);
@@ -502,7 +502,7 @@
/*
* Test IRadio.setCellInfoListRate() for the response returned.
*/
-TEST_F(RadioHidlTest, setCellInfoListRate) {
+TEST_P(RadioHidlTest, setCellInfoListRate) {
serial = GetRandomSerialNumber();
// TODO(sanketpadawe): RIL crashes with value of rate = 10
@@ -520,7 +520,7 @@
/*
* Test IRadio.nvReadItem() for the response returned.
*/
-TEST_F(RadioHidlTest, nvReadItem) {
+TEST_P(RadioHidlTest, nvReadItem) {
serial = GetRandomSerialNumber();
radio->nvReadItem(serial, NvItem::LTE_BAND_ENABLE_25);
@@ -537,7 +537,7 @@
/*
* Test IRadio.nvWriteItem() for the response returned.
*/
-TEST_F(RadioHidlTest, nvWriteItem) {
+TEST_P(RadioHidlTest, nvWriteItem) {
serial = GetRandomSerialNumber();
NvWriteItem item;
memset(&item, 0, sizeof(item));
@@ -557,7 +557,7 @@
/*
* Test IRadio.nvWriteCdmaPrl() for the response returned.
*/
-TEST_F(RadioHidlTest, nvWriteCdmaPrl) {
+TEST_P(RadioHidlTest, nvWriteCdmaPrl) {
serial = GetRandomSerialNumber();
std::vector<uint8_t> prl = {1, 2, 3, 4, 5};
@@ -575,7 +575,7 @@
/*
* Test IRadio.nvResetConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, nvResetConfig) {
+TEST_P(RadioHidlTest, nvResetConfig) {
serial = GetRandomSerialNumber();
radio->nvResetConfig(serial, ResetNvType::ERASE);
@@ -592,7 +592,7 @@
/*
* Test IRadio.setUiccSubscription() for the response returned.
*/
-TEST_F(RadioHidlTest, setUiccSubscription) {
+TEST_P(RadioHidlTest, setUiccSubscription) {
serial = GetRandomSerialNumber();
SelectUiccSub item;
memset(&item, 0, sizeof(item));
@@ -614,7 +614,7 @@
/*
* Test IRadio.getHardwareConfig() for the response returned.
*/
-TEST_F(RadioHidlTest, getHardwareConfig) {
+TEST_P(RadioHidlTest, getHardwareConfig) {
serial = GetRandomSerialNumber();
radio->getHardwareConfig(serial);
@@ -631,7 +631,7 @@
/*
* Test IRadio.requestShutdown() for the response returned.
*/
-TEST_F(RadioHidlTest, requestShutdown) {
+TEST_P(RadioHidlTest, requestShutdown) {
serial = GetRandomSerialNumber();
radio->requestShutdown(serial);
@@ -648,7 +648,7 @@
/*
* Test IRadio.getRadioCapability() for the response returned.
*/
-TEST_F(RadioHidlTest, getRadioCapability) {
+TEST_P(RadioHidlTest, getRadioCapability) {
serial = GetRandomSerialNumber();
radio->getRadioCapability(serial);
@@ -664,7 +664,7 @@
/*
* Test IRadio.setRadioCapability() for the response returned.
*/
-TEST_F(RadioHidlTest, setRadioCapability) {
+TEST_P(RadioHidlTest, setRadioCapability) {
serial = GetRandomSerialNumber();
RadioCapability rc;
memset(&rc, 0, sizeof(rc));
@@ -685,7 +685,7 @@
/*
* Test IRadio.startLceService() for the response returned.
*/
-TEST_F(RadioHidlTest, startLceService) {
+TEST_P(RadioHidlTest, startLceService) {
serial = GetRandomSerialNumber();
radio->startLceService(serial, 5, true);
@@ -704,7 +704,7 @@
/*
* Test IRadio.stopLceService() for the response returned.
*/
-TEST_F(RadioHidlTest, stopLceService) {
+TEST_P(RadioHidlTest, stopLceService) {
serial = GetRandomSerialNumber();
radio->stopLceService(serial);
@@ -722,7 +722,7 @@
/*
* Test IRadio.pullLceData() for the response returned.
*/
-TEST_F(RadioHidlTest, pullLceData) {
+TEST_P(RadioHidlTest, pullLceData) {
serial = GetRandomSerialNumber();
radio->pullLceData(serial);
@@ -741,7 +741,7 @@
/*
* Test IRadio.getModemActivityInfo() for the response returned.
*/
-TEST_F(RadioHidlTest, getModemActivityInfo) {
+TEST_P(RadioHidlTest, getModemActivityInfo) {
serial = GetRandomSerialNumber();
radio->getModemActivityInfo(serial);
@@ -758,7 +758,7 @@
/*
* Test IRadio.setAllowedCarriers() for the response returned.
*/
-TEST_F(RadioHidlTest, setAllowedCarriers) {
+TEST_P(RadioHidlTest, setAllowedCarriers) {
serial = GetRandomSerialNumber();
CarrierRestrictions carriers;
memset(&carriers, 0, sizeof(carriers));
@@ -835,7 +835,7 @@
/*
* Test IRadio.getAllowedCarriers() for the response returned.
*/
-TEST_F(RadioHidlTest, getAllowedCarriers) {
+TEST_P(RadioHidlTest, getAllowedCarriers) {
serial = GetRandomSerialNumber();
radio->getAllowedCarriers(serial);
@@ -852,7 +852,7 @@
/*
* Test IRadio.sendDeviceState() for the response returned.
*/
-TEST_F(RadioHidlTest, sendDeviceState) {
+TEST_P(RadioHidlTest, sendDeviceState) {
serial = GetRandomSerialNumber();
radio->sendDeviceState(serial, DeviceStateType::POWER_SAVE_MODE, true);
@@ -871,7 +871,7 @@
/*
* Test IRadio.setIndicationFilter() for the response returned.
*/
-TEST_F(RadioHidlTest, setIndicationFilter) {
+TEST_P(RadioHidlTest, setIndicationFilter) {
serial = GetRandomSerialNumber();
radio->setIndicationFilter(serial, 1);
@@ -890,7 +890,7 @@
/*
* Test IRadio.setSimCardPower() for the response returned.
*/
-TEST_F(RadioHidlTest, setSimCardPower) {
+TEST_P(RadioHidlTest, setSimCardPower) {
serial = GetRandomSerialNumber();
radio->setSimCardPower(serial, true);
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_sms.cpp b/radio/1.0/vts/functional/radio_hidl_hal_sms.cpp
index 9e41429..58c3bbd 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_sms.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_sms.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.sendSms() for the response returned.
*/
-TEST_F(RadioHidlTest, sendSms) {
+TEST_P(RadioHidlTest, sendSms) {
serial = GetRandomSerialNumber();
GsmSmsMessage msg;
msg.smscPdu = "";
@@ -45,7 +45,7 @@
/*
* Test IRadio.sendSMSExpectMore() for the response returned.
*/
-TEST_F(RadioHidlTest, sendSMSExpectMore) {
+TEST_P(RadioHidlTest, sendSMSExpectMore) {
serial = GetRandomSerialNumber();
GsmSmsMessage msg;
msg.smscPdu = "";
@@ -71,7 +71,7 @@
/*
* Test IRadio.acknowledgeLastIncomingGsmSms() for the response returned.
*/
-TEST_F(RadioHidlTest, acknowledgeLastIncomingGsmSms) {
+TEST_P(RadioHidlTest, acknowledgeLastIncomingGsmSms) {
serial = GetRandomSerialNumber();
bool success = true;
@@ -92,7 +92,7 @@
/*
* Test IRadio.acknowledgeIncomingGsmSmsWithPdu() for the response returned.
*/
-TEST_F(RadioHidlTest, acknowledgeIncomingGsmSmsWithPdu) {
+TEST_P(RadioHidlTest, acknowledgeIncomingGsmSmsWithPdu) {
serial = GetRandomSerialNumber();
bool success = true;
std::string ackPdu = "";
@@ -111,7 +111,7 @@
/*
* Test IRadio.sendCdmaSms() for the response returned.
*/
-TEST_F(RadioHidlTest, sendCdmaSms) {
+TEST_P(RadioHidlTest, sendCdmaSms) {
serial = GetRandomSerialNumber();
// Create a CdmaSmsAddress
@@ -155,7 +155,7 @@
/*
* Test IRadio.acknowledgeLastIncomingCdmaSms() for the response returned.
*/
-TEST_F(RadioHidlTest, acknowledgeLastIncomingCdmaSms) {
+TEST_P(RadioHidlTest, acknowledgeLastIncomingCdmaSms) {
serial = GetRandomSerialNumber();
// Create a CdmaSmsAck
@@ -179,7 +179,7 @@
/*
* Test IRadio.sendImsSms() for the response returned.
*/
-TEST_F(RadioHidlTest, sendImsSms) {
+TEST_P(RadioHidlTest, sendImsSms) {
serial = GetRandomSerialNumber();
// Create a CdmaSmsAddress
@@ -229,7 +229,7 @@
/*
* Test IRadio.getSmscAddress() for the response returned.
*/
-TEST_F(RadioHidlTest, getSmscAddress) {
+TEST_P(RadioHidlTest, getSmscAddress) {
serial = GetRandomSerialNumber();
radio->getSmscAddress(serial);
@@ -249,7 +249,7 @@
/*
* Test IRadio.setSmscAddress() for the response returned.
*/
-TEST_F(RadioHidlTest, setSmscAddress) {
+TEST_P(RadioHidlTest, setSmscAddress) {
serial = GetRandomSerialNumber();
hidl_string address = hidl_string("smscAddress");
@@ -270,7 +270,7 @@
/*
* Test IRadio.writeSmsToSim() for the response returned.
*/
-TEST_F(RadioHidlTest, writeSmsToSim) {
+TEST_P(RadioHidlTest, writeSmsToSim) {
serial = GetRandomSerialNumber();
SmsWriteArgs smsWriteArgs;
smsWriteArgs.status = SmsWriteArgsStatus::REC_UNREAD;
@@ -296,7 +296,7 @@
/*
* Test IRadio.deleteSmsOnSim() for the response returned.
*/
-TEST_F(RadioHidlTest, deleteSmsOnSim) {
+TEST_P(RadioHidlTest, deleteSmsOnSim) {
serial = GetRandomSerialNumber();
int index = 1;
@@ -319,7 +319,7 @@
/*
* Test IRadio.writeSmsToRuim() for the response returned.
*/
-TEST_F(RadioHidlTest, writeSmsToRuim) {
+TEST_P(RadioHidlTest, writeSmsToRuim) {
serial = GetRandomSerialNumber();
// Create a CdmaSmsAddress
@@ -370,7 +370,7 @@
/*
* Test IRadio.deleteSmsOnRuim() for the response returned.
*/
-TEST_F(RadioHidlTest, deleteSmsOnRuim) {
+TEST_P(RadioHidlTest, deleteSmsOnRuim) {
serial = GetRandomSerialNumber();
int index = 1;
@@ -421,7 +421,7 @@
/*
* Test IRadio.reportSmsMemoryStatus() for the response returned.
*/
-TEST_F(RadioHidlTest, reportSmsMemoryStatus) {
+TEST_P(RadioHidlTest, reportSmsMemoryStatus) {
serial = GetRandomSerialNumber();
bool available = true;
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_stk.cpp b/radio/1.0/vts/functional/radio_hidl_hal_stk.cpp
index a3b5029..1170111 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_stk.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_stk.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.sendEnvelope() for the response returned.
*/
-TEST_F(RadioHidlTest, sendEnvelope) {
+TEST_P(RadioHidlTest, sendEnvelope) {
serial = GetRandomSerialNumber();
// Test with sending empty string
@@ -44,7 +44,7 @@
/*
* Test IRadio.sendTerminalResponseToSim() for the response returned.
*/
-TEST_F(RadioHidlTest, sendTerminalResponseToSim) {
+TEST_P(RadioHidlTest, sendTerminalResponseToSim) {
serial = GetRandomSerialNumber();
// Test with sending empty string
@@ -67,7 +67,7 @@
/*
* Test IRadio.handleStkCallSetupRequestFromSim() for the response returned.
*/
-TEST_F(RadioHidlTest, handleStkCallSetupRequestFromSim) {
+TEST_P(RadioHidlTest, handleStkCallSetupRequestFromSim) {
serial = GetRandomSerialNumber();
bool accept = false;
@@ -88,7 +88,7 @@
/*
* Test IRadio.reportStkServiceIsRunning() for the response returned.
*/
-TEST_F(RadioHidlTest, reportStkServiceIsRunning) {
+TEST_P(RadioHidlTest, reportStkServiceIsRunning) {
serial = GetRandomSerialNumber();
radio->reportStkServiceIsRunning(serial);
@@ -107,7 +107,7 @@
* Test IRadio.sendEnvelopeWithStatus() for the response returned with empty
* string.
*/
-TEST_F(RadioHidlTest, sendEnvelopeWithStatus) {
+TEST_P(RadioHidlTest, sendEnvelopeWithStatus) {
serial = GetRandomSerialNumber();
// Test with sending empty string
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_test.cpp b/radio/1.0/vts/functional/radio_hidl_hal_test.cpp
index 96719d6..3c833c0 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_test.cpp
@@ -17,13 +17,10 @@
#include <radio_hidl_hal_utils_v1_0.h>
void RadioHidlTest::SetUp() {
- radio = ::testing::VtsHalHidlTargetTestBase::getService<IRadio>(
- RadioHidlEnvironment::Instance()->getServiceName<IRadio>(hidl_string(RADIO_SERVICE_NAME)));
+ radio = IRadio::getService(GetParam());
if (radio == NULL) {
sleep(60);
- radio = ::testing::VtsHalHidlTargetTestBase::getService<IRadio>(
- RadioHidlEnvironment::Instance()->getServiceName<IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio = IRadio::getService(GetParam());
}
ASSERT_NE(nullptr, radio.get());
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_utils_v1_0.h b/radio/1.0/vts/functional/radio_hidl_hal_utils_v1_0.h
index 23bc434..8a551f7 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_utils_v1_0.h
+++ b/radio/1.0/vts/functional/radio_hidl_hal_utils_v1_0.h
@@ -16,8 +16,6 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -26,6 +24,7 @@
#include <android/hardware/radio/1.0/IRadioIndication.h>
#include <android/hardware/radio/1.0/IRadioResponse.h>
#include <android/hardware/radio/1.0/types.h>
+#include <gtest/gtest.h>
#include "vts_test_util.h"
@@ -515,23 +514,9 @@
const ::android::hardware::hidl_string& reason);
};
-// Test environment for Radio HIDL HAL.
-class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioHidlEnvironment* Instance() {
- static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override { registerTestService<IRadio>(); }
-
- private:
- RadioHidlEnvironment() {}
-};
-
// The main test class for Radio HIDL.
-class RadioHidlTest : public ::testing::VtsHalHidlTargetTestBase {
- protected:
+class RadioHidlTest : public ::testing::TestWithParam<std::string> {
+ protected:
std::mutex mtx;
std::condition_variable cv;
int count;
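
The fixture change above is the other half of the conversion: RadioHidlTest now derives from ::testing::TestWithParam<std::string>, and SetUp() resolves the service from GetParam() instead of asking the removed RadioHidlEnvironment for a service name; the single sleep(60)-and-retry in SetUp() is kept verbatim. As a generic illustration only (this helper does not exist in the change; it merely restates that retry logic under an assumed name):

#include <unistd.h>
#include <string>
#include <utils/StrongPointer.h>

// Hypothetical helper equivalent to the SetUp() retry: try once, wait, try again.
template <typename Interface>
android::sp<Interface> getServiceWithRetry(const std::string& instance,
                                           unsigned delaySeconds = 60) {
    android::sp<Interface> service = Interface::getService(instance);
    if (service == nullptr) {
        sleep(delaySeconds);  // give the HAL a chance to (re)register
        service = Interface::getService(instance);
    }
    return service;
}

Keeping the retry inline per fixture, as the change does, means no shared helper header is needed across the 1.0/1.1/1.2 test directories.
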
diff --git a/radio/1.0/vts/functional/radio_hidl_hal_voice.cpp b/radio/1.0/vts/functional/radio_hidl_hal_voice.cpp
index 1fce470..a192a33 100644
--- a/radio/1.0/vts/functional/radio_hidl_hal_voice.cpp
+++ b/radio/1.0/vts/functional/radio_hidl_hal_voice.cpp
@@ -19,7 +19,7 @@
/*
* Test IRadio.getCurrentCalls() for the response returned.
*/
-TEST_F(RadioHidlTest, getCurrentCalls) {
+TEST_P(RadioHidlTest, getCurrentCalls) {
serial = GetRandomSerialNumber();
radio->getCurrentCalls(serial);
@@ -35,7 +35,7 @@
/*
* Test IRadio.dial() for the response returned.
*/
-TEST_F(RadioHidlTest, dial) {
+TEST_P(RadioHidlTest, dial) {
serial = GetRandomSerialNumber();
Dial dialInfo;
@@ -62,7 +62,7 @@
/*
* Test IRadio.hangup() for the response returned.
*/
-TEST_F(RadioHidlTest, hangup) {
+TEST_P(RadioHidlTest, hangup) {
serial = GetRandomSerialNumber();
radio->hangup(serial, 1);
@@ -81,7 +81,7 @@
/*
* Test IRadio.hangupWaitingOrBackground() for the response returned.
*/
-TEST_F(RadioHidlTest, hangupWaitingOrBackground) {
+TEST_P(RadioHidlTest, hangupWaitingOrBackground) {
serial = GetRandomSerialNumber();
radio->hangupWaitingOrBackground(serial);
@@ -99,7 +99,7 @@
/*
* Test IRadio.hangupForegroundResumeBackground() for the response returned.
*/
-TEST_F(RadioHidlTest, hangupForegroundResumeBackground) {
+TEST_P(RadioHidlTest, hangupForegroundResumeBackground) {
serial = GetRandomSerialNumber();
radio->hangupForegroundResumeBackground(serial);
@@ -117,7 +117,7 @@
/*
* Test IRadio.switchWaitingOrHoldingAndActive() for the response returned.
*/
-TEST_F(RadioHidlTest, switchWaitingOrHoldingAndActive) {
+TEST_P(RadioHidlTest, switchWaitingOrHoldingAndActive) {
serial = GetRandomSerialNumber();
radio->switchWaitingOrHoldingAndActive(serial);
@@ -135,7 +135,7 @@
/*
* Test IRadio.conference() for the response returned.
*/
-TEST_F(RadioHidlTest, conference) {
+TEST_P(RadioHidlTest, conference) {
serial = GetRandomSerialNumber();
radio->conference(serial);
@@ -153,7 +153,7 @@
/*
* Test IRadio.rejectCall() for the response returned.
*/
-TEST_F(RadioHidlTest, rejectCall) {
+TEST_P(RadioHidlTest, rejectCall) {
serial = GetRandomSerialNumber();
radio->rejectCall(serial);
@@ -171,7 +171,7 @@
/*
* Test IRadio.getLastCallFailCause() for the response returned.
*/
-TEST_F(RadioHidlTest, getLastCallFailCause) {
+TEST_P(RadioHidlTest, getLastCallFailCause) {
serial = GetRandomSerialNumber();
radio->getLastCallFailCause(serial);
@@ -188,7 +188,7 @@
/*
* Test IRadio.sendUssd() for the response returned.
*/
-TEST_F(RadioHidlTest, sendUssd) {
+TEST_P(RadioHidlTest, sendUssd) {
serial = GetRandomSerialNumber();
radio->sendUssd(serial, hidl_string("test"));
EXPECT_EQ(std::cv_status::no_timeout, wait());
@@ -206,7 +206,7 @@
/*
* Test IRadio.cancelPendingUssd() for the response returned.
*/
-TEST_F(RadioHidlTest, cancelPendingUssd) {
+TEST_P(RadioHidlTest, cancelPendingUssd) {
serial = GetRandomSerialNumber();
radio->cancelPendingUssd(serial);
@@ -225,7 +225,7 @@
/*
* Test IRadio.getCallForwardStatus() for the response returned.
*/
-TEST_F(RadioHidlTest, getCallForwardStatus) {
+TEST_P(RadioHidlTest, getCallForwardStatus) {
serial = GetRandomSerialNumber();
CallForwardInfo callInfo;
memset(&callInfo, 0, sizeof(callInfo));
@@ -247,7 +247,7 @@
/*
* Test IRadio.setCallForward() for the response returned.
*/
-TEST_F(RadioHidlTest, setCallForward) {
+TEST_P(RadioHidlTest, setCallForward) {
serial = GetRandomSerialNumber();
CallForwardInfo callInfo;
memset(&callInfo, 0, sizeof(callInfo));
@@ -269,7 +269,7 @@
/*
* Test IRadio.getCallWaiting() for the response returned.
*/
-TEST_F(RadioHidlTest, getCallWaiting) {
+TEST_P(RadioHidlTest, getCallWaiting) {
serial = GetRandomSerialNumber();
radio->getCallWaiting(serial, 1);
@@ -288,7 +288,7 @@
/*
* Test IRadio.setCallWaiting() for the response returned.
*/
-TEST_F(RadioHidlTest, setCallWaiting) {
+TEST_P(RadioHidlTest, setCallWaiting) {
serial = GetRandomSerialNumber();
radio->setCallWaiting(serial, true, 1);
@@ -307,7 +307,7 @@
/*
* Test IRadio.acceptCall() for the response returned.
*/
-TEST_F(RadioHidlTest, acceptCall) {
+TEST_P(RadioHidlTest, acceptCall) {
serial = GetRandomSerialNumber();
radio->acceptCall(serial);
@@ -325,7 +325,7 @@
/*
* Test IRadio.separateConnection() for the response returned.
*/
-TEST_F(RadioHidlTest, separateConnection) {
+TEST_P(RadioHidlTest, separateConnection) {
serial = GetRandomSerialNumber();
radio->separateConnection(serial, 1);
@@ -344,7 +344,7 @@
/*
* Test IRadio.explicitCallTransfer() for the response returned.
*/
-TEST_F(RadioHidlTest, explicitCallTransfer) {
+TEST_P(RadioHidlTest, explicitCallTransfer) {
serial = GetRandomSerialNumber();
radio->explicitCallTransfer(serial);
@@ -362,7 +362,7 @@
/*
* Test IRadio.sendCDMAFeatureCode() for the response returned.
*/
-TEST_F(RadioHidlTest, sendCDMAFeatureCode) {
+TEST_P(RadioHidlTest, sendCDMAFeatureCode) {
serial = GetRandomSerialNumber();
radio->sendCDMAFeatureCode(serial, hidl_string());
@@ -382,7 +382,7 @@
/*
* Test IRadio.sendDtmf() for the response returned.
*/
-TEST_F(RadioHidlTest, sendDtmf) {
+TEST_P(RadioHidlTest, sendDtmf) {
serial = GetRandomSerialNumber();
radio->sendDtmf(serial, "1");
@@ -402,7 +402,7 @@
/*
* Test IRadio.startDtmf() for the response returned.
*/
-TEST_F(RadioHidlTest, startDtmf) {
+TEST_P(RadioHidlTest, startDtmf) {
serial = GetRandomSerialNumber();
radio->startDtmf(serial, "1");
@@ -422,7 +422,7 @@
/*
* Test IRadio.stopDtmf() for the response returned.
*/
-TEST_F(RadioHidlTest, stopDtmf) {
+TEST_P(RadioHidlTest, stopDtmf) {
serial = GetRandomSerialNumber();
radio->stopDtmf(serial);
@@ -441,7 +441,7 @@
/*
* Test IRadio.setMute() for the response returned.
*/
-TEST_F(RadioHidlTest, setMute) {
+TEST_P(RadioHidlTest, setMute) {
serial = GetRandomSerialNumber();
radio->setMute(serial, true);
@@ -459,7 +459,7 @@
/*
* Test IRadio.getMute() for the response returned.
*/
-TEST_F(RadioHidlTest, getMute) {
+TEST_P(RadioHidlTest, getMute) {
serial = GetRandomSerialNumber();
radio->getMute(serial);
@@ -475,7 +475,7 @@
/*
* Test IRadio.sendBurstDtmf() for the response returned.
*/
-TEST_F(RadioHidlTest, sendBurstDtmf) {
+TEST_P(RadioHidlTest, sendBurstDtmf) {
serial = GetRandomSerialNumber();
radio->sendBurstDtmf(serial, "1", 0, 0);
diff --git a/radio/1.0/vts/functional/sap_hidl_hal_api.cpp b/radio/1.0/vts/functional/sap_hidl_hal_api.cpp
index 1d79ff6..6bd2c88 100644
--- a/radio/1.0/vts/functional/sap_hidl_hal_api.cpp
+++ b/radio/1.0/vts/functional/sap_hidl_hal_api.cpp
@@ -19,7 +19,7 @@
/*
* Test ISap.connectReq() for the response returned.
*/
-TEST_F(SapHidlTest, connectReq) {
+TEST_P(SapHidlTest, connectReq) {
token = GetRandomSerialNumber();
int32_t maxMsgSize = 100;
@@ -35,7 +35,7 @@
/*
* Test IRadio.disconnectReq() for the response returned
*/
-TEST_F(SapHidlTest, disconnectReq) {
+TEST_P(SapHidlTest, disconnectReq) {
token = GetRandomSerialNumber();
sap->disconnectReq(token);
@@ -46,7 +46,7 @@
/*
* Test IRadio.apduReq() for the response returned.
*/
-TEST_F(SapHidlTest, apduReq) {
+TEST_P(SapHidlTest, apduReq) {
token = GetRandomSerialNumber();
SapApduType sapApduType = SapApduType::APDU;
android::hardware::hidl_vec<uint8_t> command = {};
@@ -64,7 +64,7 @@
/*
* Test IRadio.transferAtrReq() for the response returned.
*/
-TEST_F(SapHidlTest, transferAtrReq) {
+TEST_P(SapHidlTest, transferAtrReq) {
token = GetRandomSerialNumber();
sap->transferAtrReq(token);
@@ -80,7 +80,7 @@
/*
* Test IRadio.powerReq() for the response returned.
*/
-TEST_F(SapHidlTest, powerReq) {
+TEST_P(SapHidlTest, powerReq) {
token = GetRandomSerialNumber();
bool state = true;
@@ -97,7 +97,7 @@
/*
* Test IRadio.resetSimReq() for the response returned.
*/
-TEST_F(SapHidlTest, resetSimReq) {
+TEST_P(SapHidlTest, resetSimReq) {
token = GetRandomSerialNumber();
sap->resetSimReq(token);
@@ -113,7 +113,7 @@
/*
* Test IRadio.transferCardReaderStatusReq() for the response returned.
*/
-TEST_F(SapHidlTest, transferCardReaderStatusReq) {
+TEST_P(SapHidlTest, transferCardReaderStatusReq) {
token = GetRandomSerialNumber();
sap->transferCardReaderStatusReq(token);
@@ -127,7 +127,7 @@
/*
* Test IRadio.setTransferProtocolReq() for the response returned.
*/
-TEST_F(SapHidlTest, setTransferProtocolReq) {
+TEST_P(SapHidlTest, setTransferProtocolReq) {
token = GetRandomSerialNumber();
SapTransferProtocol sapTransferProtocol = SapTransferProtocol::T0;
diff --git a/radio/1.0/vts/functional/sap_hidl_hal_test.cpp b/radio/1.0/vts/functional/sap_hidl_hal_test.cpp
index 65b344b..fe10587 100644
--- a/radio/1.0/vts/functional/sap_hidl_hal_test.cpp
+++ b/radio/1.0/vts/functional/sap_hidl_hal_test.cpp
@@ -17,8 +17,7 @@
#include <sap_hidl_hal_utils.h>
void SapHidlTest::SetUp() {
- sap = ::testing::VtsHalHidlTargetTestBase::getService<ISap>(
- SapHidlEnvironment::Instance()->getServiceName<ISap>(hidl_string(SAP_SERVICE_NAME)));
+ sap = ISap::getService(GetParam());
ASSERT_NE(sap, nullptr);
sapCb = new SapCallback(*this);
diff --git a/radio/1.0/vts/functional/sap_hidl_hal_utils.h b/radio/1.0/vts/functional/sap_hidl_hal_utils.h
index f432932..2fc9ae3 100644
--- a/radio/1.0/vts/functional/sap_hidl_hal_utils.h
+++ b/radio/1.0/vts/functional/sap_hidl_hal_utils.h
@@ -16,8 +16,6 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -25,6 +23,7 @@
#include <android/hardware/radio/1.0/ISap.h>
#include <android/hardware/radio/1.0/ISapCallback.h>
#include <android/hardware/radio/1.0/types.h>
+#include <gtest/gtest.h>
#include "vts_test_util.h"
@@ -80,23 +79,9 @@
Return<void> transferProtocolResponse(int32_t token, SapResultCode resultCode);
};
-// Test environment for Sap HIDL HAL.
-class SapHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static SapHidlEnvironment* Instance() {
- static SapHidlEnvironment* instance = new SapHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override { registerTestService<ISap>(); }
-
- private:
- SapHidlEnvironment() {}
-};
-
// The main test class for Sap HIDL.
-class SapHidlTest : public ::testing::VtsHalHidlTargetTestBase {
- private:
+class SapHidlTest : public ::testing::TestWithParam<std::string> {
+ private:
std::mutex mtx;
std::condition_variable cv;
int count;
diff --git a/radio/1.0/vts/functional/vts_hal_radio_target_test.xml b/radio/1.0/vts/functional/vts_hal_radio_target_test.xml
new file mode 100644
index 0000000..5e4a1cd
--- /dev/null
+++ b/radio/1.0/vts/functional/vts_hal_radio_target_test.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalRadioV1_0TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalRadioV1_0TargetTest->/data/local/tmp/VtsHalRadioV1_0TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalRadioV1_0TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.0/vts/functional/vts_hal_sap_target_test.xml b/radio/1.0/vts/functional/vts_hal_sap_target_test.xml
new file mode 100644
index 0000000..457d700
--- /dev/null
+++ b/radio/1.0/vts/functional/vts_hal_sap_target_test.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalSapV1_0TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalSapV1_0TargetTest->/data/local/tmp/VtsHalSapV1_0TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalSapV1_0TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.0/vts/functional/vts_test_util.h b/radio/1.0/vts/functional/vts_test_util.h
index 826f0de..05b47c9 100644
--- a/radio/1.0/vts/functional/vts_test_util.h
+++ b/radio/1.0/vts/functional/vts_test_util.h
@@ -16,9 +16,8 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-
#include <android/hardware/radio/1.0/types.h>
+#include <gtest/gtest.h>
using ::android::hardware::radio::V1_0::RadioError;
using ::android::hardware::radio::V1_0::SapResultCode;
diff --git a/radio/1.1/vts/functional/Android.bp b/radio/1.1/vts/functional/Android.bp
index 5695c6b..58aa67e 100644
--- a/radio/1.1/vts/functional/Android.bp
+++ b/radio/1.1/vts/functional/Android.bp
@@ -30,5 +30,5 @@
header_libs: [
"radio.util.header@1.0",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/radio/1.1/vts/functional/AndroidTest.xml b/radio/1.1/vts/functional/AndroidTest.xml
new file mode 100644
index 0000000..5badadd
--- /dev/null
+++ b/radio/1.1/vts/functional/AndroidTest.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalRadioV1_1TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalRadioV1_1TargetTest->/data/local/tmp/VtsHalRadioV1_1TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalRadioV1_1TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.1/vts/functional/VtsHalRadioV1_1TargetTest.cpp b/radio/1.1/vts/functional/VtsHalRadioV1_1TargetTest.cpp
index 83564ee..98dbf62 100644
--- a/radio/1.1/vts/functional/VtsHalRadioV1_1TargetTest.cpp
+++ b/radio/1.1/vts/functional/VtsHalRadioV1_1TargetTest.cpp
@@ -14,13 +14,12 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <radio_hidl_hal_utils_v1_1.h>
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(RadioHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- RadioHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(PerInstance, RadioHidlTest_v1_1,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_1::IRadio::descriptor)),
+ android::hardware::PrintInstanceNameToString);
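
Unlike the 1.0 tests, the 1.1 and 1.2 target-test files drop main() entirely: once the environment registration is gone there is nothing radio-specific left in it, and the default GTest entry point pulled in by the VTS test defaults presumably covers the rest. For reference, the removed boilerplate reduces to the stock form below (sketch, not part of the change):

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

The 1.0 files keep an explicit main() because they also seed rand() there.
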
diff --git a/radio/1.1/vts/functional/radio_hidl_hal_api.cpp b/radio/1.1/vts/functional/radio_hidl_hal_api.cpp
index 33347c5..75abbbf 100644
--- a/radio/1.1/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.1/vts/functional/radio_hidl_hal_api.cpp
@@ -20,7 +20,7 @@
/*
* Test IRadio.setSimCardPower() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, setSimCardPower_1_1) {
+TEST_P(RadioHidlTest_v1_1, setSimCardPower_1_1) {
/* Record the sim card state for the testing environment */
CardState cardStateForTest = cardStatus.cardState;
@@ -81,7 +81,7 @@
/*
* Test IRadio.startNetworkScan() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, startNetworkScan) {
+TEST_P(RadioHidlTest_v1_1, startNetworkScan) {
serial = GetRandomSerialNumber();
NetworkScanRequest request;
@@ -115,7 +115,7 @@
/*
* Test IRadio.startNetworkScan() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, startNetworkScan_InvalidArgument) {
+TEST_P(RadioHidlTest_v1_1, startNetworkScan_InvalidArgument) {
serial = GetRandomSerialNumber();
NetworkScanRequest request;
@@ -139,7 +139,7 @@
/*
* Test IRadio.stopNetworkScan() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, stopNetworkScan) {
+TEST_P(RadioHidlTest_v1_1, stopNetworkScan) {
serial = GetRandomSerialNumber();
radio_v1_1->stopNetworkScan(serial);
@@ -158,7 +158,7 @@
/*
* Test IRadio.setCarrierInfoForImsiEncryption() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, setCarrierInfoForImsiEncryption) {
+TEST_P(RadioHidlTest_v1_1, setCarrierInfoForImsiEncryption) {
serial = GetRandomSerialNumber();
ImsiEncryptionInfo imsiInfo;
imsiInfo.mcc = "310";
@@ -181,7 +181,7 @@
/*
* Test IRadio.startKeepalive() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, startKeepalive) {
+TEST_P(RadioHidlTest_v1_1, startKeepalive) {
std::vector<KeepaliveRequest> requests = {
{
// Invalid IPv4 source address
@@ -279,7 +279,7 @@
/*
* Test IRadio.stopKeepalive() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_1, stopKeepalive) {
+TEST_P(RadioHidlTest_v1_1, stopKeepalive) {
serial = GetRandomSerialNumber();
radio_v1_1->stopKeepalive(serial, 0xBAD);
diff --git a/radio/1.1/vts/functional/radio_hidl_hal_test.cpp b/radio/1.1/vts/functional/radio_hidl_hal_test.cpp
index 2f657b4..020168e 100644
--- a/radio/1.1/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.1/vts/functional/radio_hidl_hal_test.cpp
@@ -17,18 +17,10 @@
#include <radio_hidl_hal_utils_v1_1.h>
void RadioHidlTest_v1_1::SetUp() {
- radio_v1_1 =
- ::testing::VtsHalHidlTargetTestBase::getService<::android::hardware::radio::V1_1::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_1::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_1 = ::android::hardware::radio::V1_1::IRadio::getService(GetParam());
if (radio_v1_1 == NULL) {
sleep(60);
- radio_v1_1 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_1::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_1::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_1 = ::android::hardware::radio::V1_1::IRadio::getService(GetParam());
}
ASSERT_NE(nullptr, radio_v1_1.get());
diff --git a/radio/1.1/vts/functional/radio_hidl_hal_utils_v1_1.h b/radio/1.1/vts/functional/radio_hidl_hal_utils_v1_1.h
index 925f4fc..b81ee13 100644
--- a/radio/1.1/vts/functional/radio_hidl_hal_utils_v1_1.h
+++ b/radio/1.1/vts/functional/radio_hidl_hal_utils_v1_1.h
@@ -16,8 +16,7 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <log/log.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -26,6 +25,7 @@
#include <android/hardware/radio/1.1/IRadioIndication.h>
#include <android/hardware/radio/1.1/IRadioResponse.h>
#include <android/hardware/radio/1.1/types.h>
+#include <gtest/gtest.h>
#include "vts_test_util.h"
@@ -535,25 +535,9 @@
const ::android::hardware::hidl_string& reason);
};
-// Test environment for Radio HIDL HAL.
-class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioHidlEnvironment* Instance() {
- static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override {
- registerTestService<::android::hardware::radio::V1_1::IRadio>();
- }
-
- private:
- RadioHidlEnvironment() {}
-};
-
// The main test class for Radio HIDL.
-class RadioHidlTest_v1_1 : public ::testing::VtsHalHidlTargetTestBase {
- protected:
+class RadioHidlTest_v1_1 : public ::testing::TestWithParam<std::string> {
+ protected:
std::mutex mtx;
std::condition_variable cv;
int count;
diff --git a/radio/1.2/vts/functional/Android.bp b/radio/1.2/vts/functional/Android.bp
index c5838a8..f7189a8 100644
--- a/radio/1.2/vts/functional/Android.bp
+++ b/radio/1.2/vts/functional/Android.bp
@@ -34,5 +34,5 @@
"android.hardware.radio.config@1.1",
],
header_libs: ["radio.util.header@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/radio/1.2/vts/functional/AndroidTest.xml b/radio/1.2/vts/functional/AndroidTest.xml
new file mode 100644
index 0000000..5d92248
--- /dev/null
+++ b/radio/1.2/vts/functional/AndroidTest.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalRadioV1_2TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalRadioV1_2TargetTest->/data/local/tmp/VtsHalRadioV1_2TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalRadioV1_2TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.2/vts/functional/VtsHalRadioV1_2TargetTest.cpp b/radio/1.2/vts/functional/VtsHalRadioV1_2TargetTest.cpp
index c1a2f3d..400e394 100644
--- a/radio/1.2/vts/functional/VtsHalRadioV1_2TargetTest.cpp
+++ b/radio/1.2/vts/functional/VtsHalRadioV1_2TargetTest.cpp
@@ -14,13 +14,12 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <radio_hidl_hal_utils_v1_2.h>
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(RadioHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- RadioHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(PerInstance, RadioHidlTest_v1_2,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_2::IRadio::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index a98f22a..7464307 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -31,7 +31,7 @@
/*
* Test IRadio.startNetworkScan() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan) {
serial = GetRandomSerialNumber();
if (radioConfig != NULL && DDS_LOGICAL_SLOT_INDEX != logicalSlotId) {
@@ -82,7 +82,7 @@
/*
* Test IRadio.startNetworkScan() with invalid specifier.
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidArgument) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidArgument) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {.type = ScanType::ONE_SHOT,
@@ -109,7 +109,7 @@
/*
* Test IRadio.startNetworkScan() with invalid interval (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval1) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -141,7 +141,7 @@
/*
* Test IRadio.startNetworkScan() with invalid interval (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidInterval2) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -173,7 +173,7 @@
/*
* Test IRadio.startNetworkScan() with invalid max search time (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime1) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -205,7 +205,7 @@
/*
* Test IRadio.startNetworkScan() with invalid max search time (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidMaxSearchTime2) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -237,7 +237,7 @@
/*
* Test IRadio.startNetworkScan() with invalid periodicity (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity1) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -269,7 +269,7 @@
/*
* Test IRadio.startNetworkScan() with invalid periodicity (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_InvalidPeriodicity2) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -301,7 +301,7 @@
/*
* Test IRadio.startNetworkScan() with valid periodicity
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_GoodRequest1) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -335,7 +335,7 @@
/*
* Test IRadio.startNetworkScan() with valid periodicity and plmns
*/
-TEST_F(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
+TEST_P(RadioHidlTest_v1_2, startNetworkScan_GoodRequest2) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {
@@ -370,7 +370,7 @@
/*
* Test IRadio.setIndicationFilter_1_2()
*/
-TEST_F(RadioHidlTest_v1_2, setIndicationFilter_1_2) {
+TEST_P(RadioHidlTest_v1_2, setIndicationFilter_1_2) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setIndicationFilter_1_2(
@@ -388,7 +388,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() with invalid hysteresisDb
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_invalidHysteresisDb) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_invalidHysteresisDb) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -408,7 +408,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() with empty parameters
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_EmptyParams) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_EmptyParams) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -426,7 +426,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() for GERAN
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Geran) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Geran) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -445,7 +445,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() for UTRAN
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Utran) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Utran) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -464,7 +464,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() for EUTRAN
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Eutran) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Eutran) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -483,7 +483,7 @@
/*
* Test IRadio.setSignalStrengthReportingCriteria() for CDMA2000
*/
-TEST_F(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Cdma2000) {
+TEST_P(RadioHidlTest_v1_2, setSignalStrengthReportingCriteria_Cdma2000) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setSignalStrengthReportingCriteria(
@@ -502,7 +502,7 @@
/*
* Test IRadio.setLinkCapacityReportingCriteria() invalid hysteresisDlKbps
*/
-TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisDlKbps) {
+TEST_P(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisDlKbps) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
@@ -527,7 +527,7 @@
/*
* Test IRadio.setLinkCapacityReportingCriteria() invalid hysteresisUlKbps
*/
-TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisUlKbps) {
+TEST_P(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_invalidHysteresisUlKbps) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
@@ -552,7 +552,7 @@
/*
* Test IRadio.setLinkCapacityReportingCriteria() empty params
*/
-TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_emptyParams) {
+TEST_P(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_emptyParams) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
@@ -573,7 +573,7 @@
/*
* Test IRadio.setLinkCapacityReportingCriteria() GERAN
*/
-TEST_F(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_Geran) {
+TEST_P(RadioHidlTest_v1_2, setLinkCapacityReportingCriteria_Geran) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->setLinkCapacityReportingCriteria(
@@ -595,7 +595,7 @@
/*
* Test IRadio.setupDataCall_1_2() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, setupDataCall_1_2) {
+TEST_P(RadioHidlTest_v1_2, setupDataCall_1_2) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::AccessNetwork accessNetwork =
@@ -655,7 +655,7 @@
/*
* Test IRadio.deactivateDataCall_1_2() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, deactivateDataCall_1_2) {
+TEST_P(RadioHidlTest_v1_2, deactivateDataCall_1_2) {
serial = GetRandomSerialNumber();
int cid = 1;
::android::hardware::radio::V1_2::DataRequestReason reason =
@@ -686,7 +686,7 @@
/*
* Test IRadio.getCellInfoList() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, getCellInfoList_1_2) {
+TEST_P(RadioHidlTest_v1_2, getCellInfoList_1_2) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->getCellInfoList(serial);
@@ -704,7 +704,7 @@
/*
* Test IRadio.getVoiceRegistrationState() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, getVoiceRegistrationState) {
+TEST_P(RadioHidlTest_v1_2, getVoiceRegistrationState) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->getVoiceRegistrationState(serial);
@@ -722,7 +722,7 @@
/*
* Test IRadio.getDataRegistrationState() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, getDataRegistrationState) {
+TEST_P(RadioHidlTest_v1_2, getDataRegistrationState) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->getDataRegistrationState(serial);
@@ -797,7 +797,7 @@
/*
* Test IRadio.getAvailableBandModes() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_2, getAvailableBandModes) {
+TEST_P(RadioHidlTest_v1_2, getAvailableBandModes) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_2->getAvailableBandModes(serial);
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
index 21caddb..4845c7f 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_test.cpp
@@ -17,18 +17,10 @@
#include <radio_hidl_hal_utils_v1_2.h>
void RadioHidlTest_v1_2::SetUp() {
- radio_v1_2 =
- ::testing::VtsHalHidlTargetTestBase::getService<::android::hardware::radio::V1_2::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_2 = ::android::hardware::radio::V1_2::IRadio::getService(GetParam());
if (radio_v1_2 == NULL) {
sleep(60);
- radio_v1_2 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_2::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_2::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_2 = ::android::hardware::radio::V1_2::IRadio::getService(GetParam());
}
ASSERT_NE(nullptr, radio_v1_2.get());
@@ -51,8 +43,7 @@
/* Enforce Vts Testing with Sim Status Present only. */
EXPECT_EQ(CardState::PRESENT, cardStatus.base.cardState);
- radioConfig = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::config::V1_1::IRadioConfig>();
+ radioConfig = ::android::hardware::radio::config::V1_1::IRadioConfig::getService();
/* Enforce VTS testing with RadioConfig for network scan exemption. */
// Some devices can only perform network scan on the logical modem currently used for packet
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
index 2db1cac..479340c 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
+++ b/radio/1.2/vts/functional/radio_hidl_hal_utils_v1_2.h
@@ -16,8 +16,7 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <log/log.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -30,6 +29,7 @@
#include <android/hardware/radio/1.2/IRadioIndication.h>
#include <android/hardware/radio/1.2/IRadioResponse.h>
#include <android/hardware/radio/1.2/types.h>
+#include <gtest/gtest.h>
#include "vts_test_util.h"
@@ -631,25 +631,9 @@
const ::android::hardware::hidl_string& reason);
};
-// Test environment for Radio HIDL HAL.
-class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioHidlEnvironment* Instance() {
- static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override {
- registerTestService<::android::hardware::radio::V1_2::IRadio>();
- }
-
- private:
- RadioHidlEnvironment() {}
-};
-
// The main test class for Radio HIDL.
-class RadioHidlTest_v1_2 : public ::testing::VtsHalHidlTargetTestBase {
- protected:
+class RadioHidlTest_v1_2 : public ::testing::TestWithParam<std::string> {
+ protected:
std::mutex mtx_;
std::condition_variable cv_;
int count_;
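
The mtx_/cv_/count_ members kept by these fixtures back a request/response handshake: the test issues a request with a fresh serial, then blocks until the IRadioResponse callback signals completion. A hedged, self-contained sketch of that handshake (the class name, helper names, and timeout value are assumptions for illustration; the fixtures' real notify()/wait() bodies are outside the lines touched here):

#include <chrono>
#include <condition_variable>
#include <mutex>

class ResponseWaiter {
  public:
    // Called on the IRadioResponse callback thread once the modem answers.
    void notify() {
        std::lock_guard<std::mutex> lock(mtx_);
        ++count_;
        cv_.notify_one();
    }

    // Called from the test body after issuing a request; returns false on timeout.
    bool wait() {
        constexpr std::chrono::seconds kResponseTimeout{75};  // assumed value
        std::unique_lock<std::mutex> lock(mtx_);
        if (!cv_.wait_for(lock, kResponseTimeout, [this] { return count_ > 0; })) {
            return false;
        }
        --count_;
        return true;
    }

  private:
    std::mutex mtx_;
    std::condition_variable cv_;
    int count_ = 0;
};
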
diff --git a/radio/1.3/vts/functional/Android.bp b/radio/1.3/vts/functional/Android.bp
index 67aff6e..2301732 100644
--- a/radio/1.3/vts/functional/Android.bp
+++ b/radio/1.3/vts/functional/Android.bp
@@ -32,5 +32,5 @@
"android.hardware.radio@1.0",
],
header_libs: ["radio.util.header@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/radio/1.3/vts/functional/AndroidTest.xml b/radio/1.3/vts/functional/AndroidTest.xml
new file mode 100644
index 0000000..c910047
--- /dev/null
+++ b/radio/1.3/vts/functional/AndroidTest.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalRadioV1_3TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalRadioV1_3TargetTest->/data/local/tmp/VtsHalRadioV1_3TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalRadioV1_3TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.3/vts/functional/VtsHalRadioV1_3TargetTest.cpp b/radio/1.3/vts/functional/VtsHalRadioV1_3TargetTest.cpp
index 7d2623e..2622bbc 100644
--- a/radio/1.3/vts/functional/VtsHalRadioV1_3TargetTest.cpp
+++ b/radio/1.3/vts/functional/VtsHalRadioV1_3TargetTest.cpp
@@ -14,13 +14,12 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <radio_hidl_hal_utils_v1_3.h>
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(RadioHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- RadioHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(PerInstance, RadioHidlTest_v1_3,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_3::IRadio::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
index 813dd13..4e48141 100644
--- a/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.3/vts/functional/radio_hidl_hal_api.cpp
@@ -22,7 +22,7 @@
/*
* Test IRadio.enableModem() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_3, enableModem) {
+TEST_P(RadioHidlTest_v1_3, enableModem) {
serial = GetRandomSerialNumber();
bool responseToggle = radioRsp_v1_3->enableModemResponseToggle;
@@ -61,7 +61,7 @@
/*
* Test IRadio.getModemStackStatus() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_3, getModemStackStatus) {
+TEST_P(RadioHidlTest_v1_3, getModemStackStatus) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_3->getModemStackStatus(serial);
@@ -81,7 +81,7 @@
*
* This test is excluded from manifest, due to non-implementation in Q. Tracked by b/130254624.
*/
-TEST_F(RadioHidlTest_v1_3, setSystemSelectionChannels) {
+TEST_P(RadioHidlTest_v1_3, setSystemSelectionChannels) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
diff --git a/radio/1.3/vts/functional/radio_hidl_hal_test.cpp b/radio/1.3/vts/functional/radio_hidl_hal_test.cpp
index a876b1a..4581350 100644
--- a/radio/1.3/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.3/vts/functional/radio_hidl_hal_test.cpp
@@ -17,18 +17,10 @@
#include <radio_hidl_hal_utils_v1_3.h>
void RadioHidlTest_v1_3::SetUp() {
- radio_v1_3 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_3::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_3::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_3 = ::android::hardware::radio::V1_3::IRadio::getService(GetParam());
if (radio_v1_3 == NULL) {
sleep(60);
- radio_v1_3 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_3::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_3::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_3 = ::android::hardware::radio::V1_3::IRadio::getService(GetParam());
}
ASSERT_NE(nullptr, radio_v1_3.get());
diff --git a/radio/1.3/vts/functional/radio_hidl_hal_utils_v1_3.h b/radio/1.3/vts/functional/radio_hidl_hal_utils_v1_3.h
index 1d03a99..893eac5 100644
--- a/radio/1.3/vts/functional/radio_hidl_hal_utils_v1_3.h
+++ b/radio/1.3/vts/functional/radio_hidl_hal_utils_v1_3.h
@@ -16,8 +16,7 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <log/log.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -609,25 +608,9 @@
const ::android::hardware::hidl_string& reason);
};
-// Test environment for Radio HIDL HAL.
-class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioHidlEnvironment* Instance() {
- static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override {
- registerTestService<::android::hardware::radio::V1_3::IRadio>();
- }
-
- private:
- RadioHidlEnvironment() {}
-};
-
// The main test class for Radio HIDL.
-class RadioHidlTest_v1_3 : public ::testing::VtsHalHidlTargetTestBase {
- protected:
+class RadioHidlTest_v1_3 : public ::testing::TestWithParam<std::string> {
+ protected:
std::mutex mtx_;
std::condition_variable cv_;
int count_;
diff --git a/radio/1.4/vts/functional/Android.bp b/radio/1.4/vts/functional/Android.bp
index 6827132..8284404 100644
--- a/radio/1.4/vts/functional/Android.bp
+++ b/radio/1.4/vts/functional/Android.bp
@@ -35,5 +35,5 @@
"android.hardware.radio.config@1.1",
],
header_libs: ["radio.util.header@1.0"],
- test_suites: ["general-tests"]
+ test_suites: ["general-tests", "vts-core"]
}
diff --git a/radio/1.4/vts/functional/AndroidTest.xml b/radio/1.4/vts/functional/AndroidTest.xml
new file mode 100644
index 0000000..c910047
--- /dev/null
+++ b/radio/1.4/vts/functional/AndroidTest.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs VtsHalRadioV1_4TargetTest.">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-native" />
+
+ <target_preparer class="com.android.tradefed.targetprep.MultiSimPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push" value="VtsHalRadioV1_3TargetTest->/data/local/tmp/VtsHalRadioV1_3TargetTest" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="VtsHalRadioV1_3TargetTest" />
+ </test>
+</configuration>
diff --git a/radio/1.4/vts/functional/VtsHalRadioV1_4TargetTest.cpp b/radio/1.4/vts/functional/VtsHalRadioV1_4TargetTest.cpp
index d6330e6..23ec011 100644
--- a/radio/1.4/vts/functional/VtsHalRadioV1_4TargetTest.cpp
+++ b/radio/1.4/vts/functional/VtsHalRadioV1_4TargetTest.cpp
@@ -14,13 +14,12 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <radio_hidl_hal_utils_v1_4.h>
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(RadioHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- RadioHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
\ No newline at end of file
+INSTANTIATE_TEST_SUITE_P(PerInstance, RadioHidlTest_v1_4,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::V1_4::IRadio::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/radio/1.4/vts/functional/radio_hidl_hal_api.cpp b/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
index a4953d7..95136bb 100644
--- a/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
@@ -21,7 +21,7 @@
/*
* Test IRadio.emergencyDial() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, emergencyDial) {
+TEST_P(RadioHidlTest_v1_4, emergencyDial) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_0::Dial dialInfo;
@@ -52,7 +52,7 @@
/*
* Test IRadio.emergencyDial() with specified service and its response returned.
*/
-TEST_F(RadioHidlTest_v1_4, emergencyDial_withServices) {
+TEST_P(RadioHidlTest_v1_4, emergencyDial_withServices) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_0::Dial dialInfo;
@@ -84,7 +84,7 @@
/*
* Test IRadio.emergencyDial() with known emergency call routing and its response returned.
*/
-TEST_F(RadioHidlTest_v1_4, emergencyDial_withEmergencyRouting) {
+TEST_P(RadioHidlTest_v1_4, emergencyDial_withEmergencyRouting) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_0::Dial dialInfo;
@@ -116,7 +116,7 @@
/*
* Test IRadio.getPreferredNetworkTypeBitmap() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, getPreferredNetworkTypeBitmap) {
+TEST_P(RadioHidlTest_v1_4, getPreferredNetworkTypeBitmap) {
serial = GetRandomSerialNumber();
Return<void> res = radio_v1_4->getPreferredNetworkTypeBitmap(serial);
@@ -130,7 +130,7 @@
EXPECT_EQ(RadioError::NONE, radioRsp_v1_4->rspInfo.error);
}
-TEST_F(RadioHidlTest_v1_4, setPreferredNetworkTypeBitmap) {
+TEST_P(RadioHidlTest_v1_4, setPreferredNetworkTypeBitmap) {
serial = GetRandomSerialNumber();
::android::hardware::hidl_bitfield<::android::hardware::radio::V1_4::RadioAccessFamily>
network_type_bitmap{};
@@ -175,7 +175,7 @@
* REQUEST_NOT_SUPPORTED will be disallowed for all tests. Modems that have "GSM" RAT scan need to
* support scanning requests combined with some parameters.
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -218,7 +218,7 @@
/*
* Test IRadio.startNetworkScan() with invalid specifier.
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidArgument) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidArgument) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_2::NetworkScanRequest request = {.type = ScanType::ONE_SHOT,
@@ -246,7 +246,7 @@
/*
* Test IRadio.startNetworkScan() with invalid interval (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidInterval1) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidInterval1) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -283,7 +283,7 @@
/*
* Test IRadio.startNetworkScan() with invalid interval (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidInterval2) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidInterval2) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -319,7 +319,7 @@
/*
* Test IRadio.startNetworkScan() with invalid max search time (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidMaxSearchTime1) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidMaxSearchTime1) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -355,7 +355,7 @@
/*
* Test IRadio.startNetworkScan() with invalid max search time (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidMaxSearchTime2) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidMaxSearchTime2) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -391,7 +391,7 @@
/*
* Test IRadio.startNetworkScan() with invalid periodicity (lower boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidPeriodicity1) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidPeriodicity1) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -427,7 +427,7 @@
/*
* Test IRadio.startNetworkScan() with invalid periodicity (upper boundary).
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_InvalidPeriodicity2) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_InvalidPeriodicity2) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -463,7 +463,7 @@
/*
* Test IRadio.startNetworkScan() with valid periodicity
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_GoodRequest1) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_GoodRequest1) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -502,7 +502,7 @@
/*
* Test IRadio.startNetworkScan() with valid periodicity and plmns
*/
-TEST_F(RadioHidlTest_v1_4, startNetworkScan_GoodRequest2) {
+TEST_P(RadioHidlTest_v1_4, startNetworkScan_GoodRequest2) {
serial = GetRandomSerialNumber();
RadioAccessSpecifier specifier = {.radioAccessNetwork = RadioAccessNetworks::GERAN,
@@ -543,7 +543,7 @@
/*
* Test IRadio.getSignalStrength_1_4() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, getSignalStrength_1_4) {
+TEST_P(RadioHidlTest_v1_4, getSignalStrength_1_4) {
serial = GetRandomSerialNumber();
radio_v1_4->getSignalStrength_1_4(serial);
@@ -562,7 +562,7 @@
/*
* Test IRadio.setupDataCall_1_4() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, setupDataCall_1_4) {
+TEST_P(RadioHidlTest_v1_4, setupDataCall_1_4) {
serial = GetRandomSerialNumber();
::android::hardware::radio::V1_4::AccessNetwork accessNetwork =
@@ -617,7 +617,7 @@
/*
* Test IRadio.getAllowedCarriers_1_4() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, getAllowedCarriers_1_4) {
+TEST_P(RadioHidlTest_v1_4, getAllowedCarriers_1_4) {
serial = GetRandomSerialNumber();
radio_v1_4->getAllowedCarriers_1_4(serial);
@@ -632,7 +632,7 @@
/**
* Test IRadio.setAllowedCarriers_1_4() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, setAllowedCarriers_1_4) {
+TEST_P(RadioHidlTest_v1_4, setAllowedCarriers_1_4) {
serial = GetRandomSerialNumber();
CarrierRestrictionsWithPriority carrierRestrictions;
memset(&carrierRestrictions, 0, sizeof(carrierRestrictions));
@@ -727,7 +727,7 @@
}
}
-TEST_F(RadioHidlTest_v1_4, setDataProfile_1_4) {
+TEST_P(RadioHidlTest_v1_4, setDataProfile_1_4) {
serial = GetRandomSerialNumber();
// Create a dataProfileInfo
@@ -770,7 +770,7 @@
}
}
-TEST_F(RadioHidlTest_v1_4, setInitialAttachApn_1_4) {
+TEST_P(RadioHidlTest_v1_4, setInitialAttachApn_1_4) {
serial = GetRandomSerialNumber();
// Create a dataProfileInfo
@@ -812,7 +812,7 @@
/*
* Test IRadio.getDataRegistrationStateResponse_1_4() for the response returned.
*/
-TEST_F(RadioHidlTest_v1_4, getDataRegistrationState_1_4) {
+TEST_P(RadioHidlTest_v1_4, getDataRegistrationState_1_4) {
int rat;
serial = GetRandomSerialNumber();
diff --git a/radio/1.4/vts/functional/radio_hidl_hal_test.cpp b/radio/1.4/vts/functional/radio_hidl_hal_test.cpp
index f27749b..15a0b24 100644
--- a/radio/1.4/vts/functional/radio_hidl_hal_test.cpp
+++ b/radio/1.4/vts/functional/radio_hidl_hal_test.cpp
@@ -17,18 +17,11 @@
#include <radio_hidl_hal_utils_v1_4.h>
void RadioHidlTest_v1_4::SetUp() {
- radio_v1_4 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_4::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_4::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_4 = ::android::hardware::radio::V1_4::IRadio::getService(GetParam());
+
if (radio_v1_4 == NULL) {
sleep(60);
- radio_v1_4 = ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::V1_4::IRadio>(
- RadioHidlEnvironment::Instance()
- ->getServiceName<::android::hardware::radio::V1_4::IRadio>(
- hidl_string(RADIO_SERVICE_NAME)));
+ radio_v1_4 = ::android::hardware::radio::V1_4::IRadio::getService(GetParam());
}
ASSERT_NE(nullptr, radio_v1_4.get());
@@ -48,8 +41,7 @@
EXPECT_EQ(RadioError::NONE, radioRsp_v1_4->rspInfo.error);
sp<::android::hardware::radio::config::V1_1::IRadioConfig> radioConfig =
- ::testing::VtsHalHidlTargetTestBase::getService<
- ::android::hardware::radio::config::V1_1::IRadioConfig>();
+ ::android::hardware::radio::config::V1_1::IRadioConfig::getService();
/* Enforce VTS testing only when RadioConfig exists. */
ASSERT_NE(nullptr, radioConfig.get());
diff --git a/radio/1.4/vts/functional/radio_hidl_hal_utils_v1_4.h b/radio/1.4/vts/functional/radio_hidl_hal_utils_v1_4.h
index b07f9c3..31b7e13 100644
--- a/radio/1.4/vts/functional/radio_hidl_hal_utils_v1_4.h
+++ b/radio/1.4/vts/functional/radio_hidl_hal_utils_v1_4.h
@@ -16,8 +16,7 @@
#include <android-base/logging.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <log/log.h>
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -28,6 +27,7 @@
#include <android/hardware/radio/1.4/IRadioIndication.h>
#include <android/hardware/radio/1.4/IRadioResponse.h>
#include <android/hardware/radio/1.4/types.h>
+#include <gtest/gtest.h>
#include "vts_test_util.h"
@@ -705,25 +705,9 @@
const ::android::hardware::hidl_string& reason);
};
-// Test environment for Radio HIDL HAL.
-class RadioHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static RadioHidlEnvironment* Instance() {
- static RadioHidlEnvironment* instance = new RadioHidlEnvironment;
- return instance;
- }
- virtual void registerTestServices() override {
- registerTestService<::android::hardware::radio::V1_4::IRadio>();
- }
-
- private:
- RadioHidlEnvironment() {}
-};
-
// The main test class for Radio HIDL.
-class RadioHidlTest_v1_4 : public ::testing::VtsHalHidlTargetTestBase {
- protected:
+class RadioHidlTest_v1_4 : public ::testing::TestWithParam<std::string> {
+ protected:
std::mutex mtx_;
std::condition_variable cv_;
int count_;
diff --git a/radio/1.5/Android.bp b/radio/1.5/Android.bp
index de9ec6e..06a2a6e 100644
--- a/radio/1.5/Android.bp
+++ b/radio/1.5/Android.bp
@@ -19,6 +19,7 @@
"android.hardware.radio@1.3",
"android.hardware.radio@1.4",
"android.hidl.base@1.0",
+ "android.hidl.safe_union@1.0",
],
gen_java: true,
}
diff --git a/radio/config/1.3/Android.bp b/radio/config/1.3/Android.bp
new file mode 100644
index 0000000..88de666
--- /dev/null
+++ b/radio/config/1.3/Android.bp
@@ -0,0 +1,23 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.radio.config@1.3",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IRadioConfig.hal",
+ "IRadioConfigIndication.hal",
+ "IRadioConfigResponse.hal",
+ ],
+ interfaces: [
+ "android.hardware.radio.config@1.0",
+ "android.hardware.radio.config@1.1",
+ "android.hardware.radio.config@1.2",
+ "android.hardware.radio@1.0",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/radio/config/1.3/IRadioConfig.hal b/radio/config/1.3/IRadioConfig.hal
new file mode 100644
index 0000000..a0ce6e0
--- /dev/null
+++ b/radio/config/1.3/IRadioConfig.hal
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.1::IRadioConfig;
+
+/**
+ * This interface is used by telephony and telecom to talk to cellular radio for the purpose of
+ * radio configuration, and it is not associated with any specific modem or slot.
+ * All functions take at least one parameter:
+ * serial: the serial number of the request. Serial numbers must only be memorized for the
+ * duration of a method call. If clients provide colliding serials (including passing the same
+ * serial to different methods), multiple responses (one for each method call) must still be served.
+ */
+interface IRadioConfig extends @1.1::IRadioConfig {
+
+};
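
Since @1.3::IRadioConfig adds no methods of its own, clients mainly need version discovery. An illustrative client-side sketch (not part of this change) that fetches the default instance via the @1.1 base and upgrades to @1.3 only when the running service implements it:

#include <android/hardware/radio/config/1.1/IRadioConfig.h>
#include <android/hardware/radio/config/1.3/IRadioConfig.h>

using ::android::sp;
namespace config = ::android::hardware::radio::config;

sp<config::V1_1::IRadioConfig> getRadioConfig() {
    // getService() with no argument looks up the "default" instance.
    sp<config::V1_1::IRadioConfig> base = config::V1_1::IRadioConfig::getService();
    if (base == nullptr) return nullptr;
    // castFrom() yields nullptr unless the running service actually implements @1.3.
    sp<config::V1_3::IRadioConfig> v1_3 =
            config::V1_3::IRadioConfig::castFrom(base).withDefault(nullptr);
    if (v1_3 != nullptr) return v1_3;
    return base;
}
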
diff --git a/radio/config/1.3/IRadioConfigIndication.hal b/radio/config/1.3/IRadioConfigIndication.hal
new file mode 100644
index 0000000..9ef496c
--- /dev/null
+++ b/radio/config/1.3/IRadioConfigIndication.hal
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.2::IRadioConfigIndication;
+
+/**
+ * Interface declaring unsolicited radio config indications.
+ */
+interface IRadioConfigIndication extends @1.2::IRadioConfigIndication {
+
+};
diff --git a/radio/config/1.3/IRadioConfigResponse.hal b/radio/config/1.3/IRadioConfigResponse.hal
new file mode 100644
index 0000000..9c4c971
--- /dev/null
+++ b/radio/config/1.3/IRadioConfigResponse.hal
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
+
+import @1.2::IRadioConfigResponse;
+
+/**
+ * Interface declaring response functions to solicited radio config requests.
+ */
+interface IRadioConfigResponse extends @1.2::IRadioConfigResponse {
+
+};
diff --git a/radio/config/1.3/default/Android.bp b/radio/config/1.3/default/Android.bp
new file mode 100644
index 0000000..163c5c5
--- /dev/null
+++ b/radio/config/1.3/default/Android.bp
@@ -0,0 +1,28 @@
+cc_binary {
+ name: "android.hardware.radio.config@1.3-service",
+ init_rc: ["android.hardware.radio.config@1.3-service.rc"],
+ relative_install_path: "hw",
+ vintf_fragments: ["radio-config-default.xml"],
+ vendor: true,
+ srcs: [
+ "RadioConfig.cpp",
+ "RadioConfigIndication.cpp",
+ "RadioConfigResponse.cpp",
+ "service.cpp",
+ ],
+ shared_libs: [
+ "libhidlbase",
+ "liblog",
+ "libutils",
+ "android.hardware.radio.config@1.0",
+ "android.hardware.radio.config@1.1",
+ "android.hardware.radio.config@1.2",
+ "android.hardware.radio.config@1.3",
+ "android.hardware.radio@1.0",
+ "android.hardware.radio@1.1",
+ "android.hardware.radio@1.2",
+ "android.hardware.radio@1.3",
+ "android.hardware.radio@1.4",
+ "android.hardware.radio@1.5",
+ ],
+}
diff --git a/radio/config/1.3/default/RadioConfig.cpp b/radio/config/1.3/default/RadioConfig.cpp
new file mode 100644
index 0000000..c28119c
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfig.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfig.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfig follow.
+Return<void> RadioConfig::setResponseFunctions(
+ const sp<V1_0::IRadioConfigResponse>& radioConfigResponse,
+ const sp<V1_0::IRadioConfigIndication>& radioConfigIndication) {
+ mRadioConfigResponse = radioConfigResponse;
+ mRadioConfigIndication = radioConfigIndication;
+
+ mRadioConfigResponseV1_3 =
+ V1_3::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+ mRadioConfigIndicationV1_3 =
+ V1_3::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+ if (mRadioConfigResponseV1_3 == nullptr || mRadioConfigIndicationV1_3 == nullptr) {
+ mRadioConfigResponseV1_3 = nullptr;
+ mRadioConfigIndicationV1_3 = nullptr;
+ }
+
+ mRadioConfigResponseV1_2 =
+ V1_2::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+ mRadioConfigIndicationV1_2 =
+ V1_2::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+ if (mRadioConfigResponseV1_2 == nullptr || mRadioConfigIndicationV1_2 == nullptr) {
+ mRadioConfigResponseV1_2 = nullptr;
+ mRadioConfigIndicationV1_2 = nullptr;
+ }
+
+ mRadioConfigResponseV1_1 =
+ V1_1::IRadioConfigResponse::castFrom(mRadioConfigResponse).withDefault(nullptr);
+ mRadioConfigIndicationV1_1 =
+ V1_1::IRadioConfigIndication::castFrom(mRadioConfigIndication).withDefault(nullptr);
+ if (mRadioConfigResponseV1_1 == nullptr || mRadioConfigIndicationV1_1 == nullptr) {
+ mRadioConfigResponseV1_1 = nullptr;
+ mRadioConfigIndicationV1_1 = nullptr;
+ }
+
+ return Void();
+}
+
+Return<void> RadioConfig::getSimSlotsStatus(int32_t /* serial */) {
+ hidl_vec<V1_0::SimSlotStatus> slotStatus;
+ RadioResponseInfo info;
+ mRadioConfigResponse->getSimSlotsStatusResponse(info, slotStatus);
+ return Void();
+}
+
+Return<void> RadioConfig::setSimSlotsMapping(int32_t /* serial */,
+ const hidl_vec<uint32_t>& /* slotMap */) {
+ RadioResponseInfo info;
+ mRadioConfigResponse->setSimSlotsMappingResponse(info);
+ return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_1::IRadioConfig follow.
+Return<void> RadioConfig::getPhoneCapability(int32_t /* serial */) {
+ V1_1::PhoneCapability phoneCapability;
+ RadioResponseInfo info;
+ mRadioConfigResponseV1_1->getPhoneCapabilityResponse(info, phoneCapability);
+ return Void();
+}
+
+Return<void> RadioConfig::setPreferredDataModem(int32_t /* serial */, uint8_t /* modemId */) {
+ RadioResponseInfo info;
+ mRadioConfigResponseV1_1->setPreferredDataModemResponse(info);
+ return Void();
+}
+
+Return<void> RadioConfig::setModemsConfig(int32_t /* serial */,
+ const V1_1::ModemsConfig& /* modemsConfig */) {
+ RadioResponseInfo info;
+ mRadioConfigResponseV1_1->setModemsConfigResponse(info);
+ return Void();
+}
+
+Return<void> RadioConfig::getModemsConfig(int32_t /* serial */) {
+ V1_1::ModemsConfig modemsConfig;
+ RadioResponseInfo info;
+ mRadioConfigResponseV1_1->getModemsConfigResponse(info, modemsConfig);
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
diff --git a/radio/config/1.3/default/RadioConfig.h b/radio/config/1.3/default/RadioConfig.h
new file mode 100644
index 0000000..00585e6
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfig.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::config;
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfig : public V1_3::IRadioConfig {
+ sp<V1_0::IRadioConfigResponse> mRadioConfigResponse;
+ sp<V1_0::IRadioConfigIndication> mRadioConfigIndication;
+ sp<V1_1::IRadioConfigResponse> mRadioConfigResponseV1_1;
+ sp<V1_1::IRadioConfigIndication> mRadioConfigIndicationV1_1;
+ sp<V1_2::IRadioConfigResponse> mRadioConfigResponseV1_2;
+ sp<V1_2::IRadioConfigIndication> mRadioConfigIndicationV1_2;
+ sp<V1_3::IRadioConfigResponse> mRadioConfigResponseV1_3;
+ sp<V1_3::IRadioConfigIndication> mRadioConfigIndicationV1_3;
+
+ // Methods from ::android::hardware::radio::config::V1_0::IRadioConfig follow.
+ Return<void> setResponseFunctions(
+ const sp<V1_0::IRadioConfigResponse>& radioConfigResponse,
+ const sp<V1_0::IRadioConfigIndication>& radioConfigIndication);
+ Return<void> getSimSlotsStatus(int32_t serial);
+ Return<void> setSimSlotsMapping(int32_t serial, const hidl_vec<uint32_t>& slotMap);
+
+ // Methods from ::android::hardware::radio::config::V1_1::IRadioConfig follow.
+ Return<void> getPhoneCapability(int32_t serial);
+ Return<void> setPreferredDataModem(int32_t serial, uint8_t modemId);
+ Return<void> setModemsConfig(int32_t serial, const V1_1::ModemsConfig& modemsConfig);
+ Return<void> getModemsConfig(int32_t serial);
+};
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIG_H
diff --git a/radio/config/1.3/default/RadioConfigIndication.cpp b/radio/config/1.3/default/RadioConfigIndication.cpp
new file mode 100644
index 0000000..eb77a48
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigIndication.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfigIndication.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config::V1_0;
+using namespace ::android::hardware::radio::config::V1_2;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfigIndication follow.
+Return<void> RadioConfigIndication::simSlotsStatusChanged(
+ RadioIndicationType /* type */, const hidl_vec<V1_0::SimSlotStatus>& /* slotStatus */) {
+ // TODO implement
+ return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_2::IRadioConfigIndication follow.
+Return<void> RadioConfigIndication::simSlotsStatusChanged_1_2(
+ RadioIndicationType /* type */, const hidl_vec<V1_2::SimSlotStatus>& /* slotStatus */) {
+ // TODO implement
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
diff --git a/radio/config/1.3/default/RadioConfigIndication.h b/radio/config/1.3/default/RadioConfigIndication.h
new file mode 100644
index 0000000..3697492
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigIndication.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config;
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfigIndication : public IRadioConfigIndication {
+ // Methods from ::android::hardware::radio::config::V1_0::IRadioConfigIndication follow.
+ Return<void> simSlotsStatusChanged(RadioIndicationType type,
+ const hidl_vec<V1_0::SimSlotStatus>& slotStatus) override;
+
+ // Methods from ::android::hardware::radio::config::V1_2::IRadioConfigIndication follow.
+ Return<void> simSlotsStatusChanged_1_2(
+ RadioIndicationType type, const hidl_vec<V1_2::SimSlotStatus>& slotStatus) override;
+};
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGINDICATION_H
diff --git a/radio/config/1.3/default/RadioConfigResponse.cpp b/radio/config/1.3/default/RadioConfigResponse.cpp
new file mode 100644
index 0000000..48e81da
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigResponse.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RadioConfigResponse.h"
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using namespace ::android::hardware::radio::V1_0;
+using namespace ::android::hardware::radio::config::V1_0;
+using namespace ::android::hardware::radio::config::V1_1;
+using namespace ::android::hardware::radio::config::V1_2;
+
+// Methods from ::android::hardware::radio::config::V1_0::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse(
+ const RadioResponseInfo& /* info */,
+ const hidl_vec<V1_0::SimSlotStatus>& /* slotStatus */) {
+ // TODO implement
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setSimSlotsMappingResponse(const RadioResponseInfo& /* info */) {
+ // TODO implement
+ return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_1::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getPhoneCapabilityResponse(
+ const RadioResponseInfo& /* info */, const V1_1::PhoneCapability& /* phoneCapability */) {
+ // TODO implement
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setPreferredDataModemResponse(
+ const RadioResponseInfo& /* info */) {
+ // TODO implement
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setModemsConfigResponse(const RadioResponseInfo& /* info */) {
+ // TODO implement
+ return Void();
+}
+
+Return<void> RadioConfigResponse::getModemsConfigResponse(
+ const RadioResponseInfo& /* info */, const V1_1::ModemsConfig& /* modemsConfig */) {
+ // TODO implement
+ return Void();
+}
+
+// Methods from ::android::hardware::radio::config::V1_2::IRadioConfigResponse follow.
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse_1_2(
+ const RadioResponseInfo& /* info */,
+ const hidl_vec<V1_2::SimSlotStatus>& /* slotStatus */) {
+ // TODO implement
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
diff --git a/radio/config/1.3/default/RadioConfigResponse.h b/radio/config/1.3/default/RadioConfigResponse.h
new file mode 100644
index 0000000..0f0033f
--- /dev/null
+++ b/radio/config/1.3/default/RadioConfigResponse.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
+#define ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
+
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+namespace android {
+namespace hardware {
+namespace radio {
+namespace config {
+namespace V1_3 {
+namespace implementation {
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+struct RadioConfigResponse : public IRadioConfigResponse {
+ // Methods from ::android::hardware::radio::config::V1_0::IRadioConfigResponse follow.
+ Return<void> getSimSlotsStatusResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+ const hidl_vec<::android::hardware::radio::config::V1_0::SimSlotStatus>& slotStatus)
+ override;
+ Return<void> setSimSlotsMappingResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+
+ // Methods from ::android::hardware::radio::config::V1_1::IRadioConfigResponse follow.
+ Return<void> getPhoneCapabilityResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+ const ::android::hardware::radio::config::V1_1::PhoneCapability& phoneCapability)
+ override;
+ Return<void> setPreferredDataModemResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+ Return<void> setModemsConfigResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info) override;
+ Return<void> getModemsConfigResponse(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+ const ::android::hardware::radio::config::V1_1::ModemsConfig& modemsConfig) override;
+
+ // Methods from ::android::hardware::radio::config::V1_2::IRadioConfigResponse follow.
+ Return<void> getSimSlotsStatusResponse_1_2(
+ const ::android::hardware::radio::V1_0::RadioResponseInfo& info,
+ const hidl_vec<::android::hardware::radio::config::V1_2::SimSlotStatus>& slotStatus)
+ override;
+
+ // Methods from ::android::hidl::base::V1_0::IBase follow.
+};
+
+} // namespace implementation
+} // namespace V1_3
+} // namespace config
+} // namespace radio
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_CONFIG_V1_3_RADIOCONFIGRESPONSE_H
diff --git a/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc b/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc
new file mode 100644
index 0000000..6df9b52
--- /dev/null
+++ b/radio/config/1.3/default/android.hardware.radio.config@1.3-service.rc
@@ -0,0 +1,7 @@
+service vendor.radio-config-hal-1-3 /vendor/bin/hw/android.hardware.radio.config@1.3-service
+ interface android.hardware.radio.config@1.0::IRadioConfig default
+ interface android.hardware.radio.config@1.1::IRadioConfig default
+ interface android.hardware.radio.config@1.3::IRadioConfig default
+ class hal
+ user system
+ group system
diff --git a/radio/config/1.3/default/radio-config-default.xml b/radio/config/1.3/default/radio-config-default.xml
new file mode 100644
index 0000000..72f363e
--- /dev/null
+++ b/radio/config/1.3/default/radio-config-default.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/*
+** Copyright 2019, The Android Open Source Project.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+-->
+<manifest version="1.0" type="device">
+ <hal format="hidl">
+ <name>android.hardware.radio.config</name>
+ <transport>hwbinder</transport>
+ <version>1.3</version>
+ <interface>
+ <name>IRadioConfig</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+</manifest>
diff --git a/radio/config/1.3/default/service.cpp b/radio/config/1.3/default/service.cpp
new file mode 100644
index 0000000..b1e6736
--- /dev/null
+++ b/radio/config/1.3/default/service.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.radio.config@1.3-service"
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <hidl/HidlTransportSupport.h>
+
+#include "RadioConfig.h"
+
+using android::OK;
+using android::sp;
+using android::status_t;
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using android::hardware::radio::config::V1_3::IRadioConfig;
+using android::hardware::radio::config::V1_3::implementation::RadioConfig;
+
+int main() {
+ configureRpcThreadpool(1, true);
+ sp<IRadioConfig> radioConfig = new RadioConfig;
+ const status_t status = radioConfig->registerAsService();
+ ALOGW_IF(status != OK, "Could not register IRadioConfig 1.3");
+ ALOGD("Default service is ready.");
+
+ joinRpcThreadpool();
+ return 1;
+}
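
For completeness, an illustrative sketch (function name and serial value are examples only) of wiring a response/indication pair into the default implementation registered above. Because RadioConfigResponse and RadioConfigIndication implement the @1.3 callback interfaces, the castFrom() cascade in setResponseFunctions() can populate the version-specific callback pointers:

#include "RadioConfig.h"
#include "RadioConfigIndication.h"
#include "RadioConfigResponse.h"

using ::android::sp;
using ::android::hardware::radio::config::V1_3::implementation::RadioConfig;
using ::android::hardware::radio::config::V1_3::implementation::RadioConfigIndication;
using ::android::hardware::radio::config::V1_3::implementation::RadioConfigResponse;

void wireUpForLocalTesting() {
    sp<RadioConfig> config = new RadioConfig();
    config->setResponseFunctions(new RadioConfigResponse(), new RadioConfigIndication());
    // Invokes the stored @1.1 response callback (a TODO stub in this default implementation).
    config->getModemsConfig(/* serial */ 1);
}
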
diff --git a/radio/config/1.3/types.hal b/radio/config/1.3/types.hal
new file mode 100644
index 0000000..866002a
--- /dev/null
+++ b/radio/config/1.3/types.hal
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.radio.config@1.3;
diff --git a/radio/config/1.3/vts/functional/Android.bp b/radio/config/1.3/vts/functional/Android.bp
new file mode 100644
index 0000000..6b28faf
--- /dev/null
+++ b/radio/config/1.3/vts/functional/Android.bp
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+ name: "VtsHalRadioConfigV1_3TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "radio_config_hidl_hal_api.cpp",
+ "radio_config_hidl_hal_test.cpp",
+ "radio_config_response.cpp",
+ "VtsHalRadioConfigV1_3TargetTest.cpp",
+ ],
+ static_libs: [
+ "RadioVtsTestUtilBase",
+ "android.hardware.radio.config@1.0",
+ "android.hardware.radio.config@1.1",
+ "android.hardware.radio.config@1.2",
+ "android.hardware.radio.config@1.3",
+ ],
+ header_libs: ["radio.util.header@1.0"],
+ test_suites: ["general-tests", "vts-core"],
+}
diff --git a/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp b/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp
new file mode 100644
index 0000000..3bacacf
--- /dev/null
+++ b/radio/config/1.3/vts/functional/VtsHalRadioConfigV1_3TargetTest.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, RadioConfigHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(
+ ::android::hardware::radio::config::V1_3::IRadioConfig::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp b/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp
new file mode 100644
index 0000000..07e9ede
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_api.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+#define ASSERT_OK(ret) ASSERT_TRUE(ret.isOk())
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp b/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp
new file mode 100644
index 0000000..dbb4bf4
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_test.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+void RadioConfigHidlTest::SetUp() {
+ radioConfig = ::android::hardware::radio::config::V1_3::IRadioConfig::getService(GetParam());
+ ASSERT_NE(nullptr, radioConfig.get());
+
+ radioConfigRsp = new (std::nothrow) RadioConfigResponse(*this);
+ ASSERT_NE(nullptr, radioConfigRsp.get());
+
+ count_ = 0;
+
+ radioConfig->setResponseFunctions(radioConfigRsp, nullptr);
+}
+
+/*
+ * Notify that the response message is received.
+ */
+void RadioConfigHidlTest::notify(int receivedSerial) {
+ std::unique_lock<std::mutex> lock(mtx_);
+ if (serial == receivedSerial) {
+ count_++;
+ cv_.notify_one();
+ }
+}
+
+/*
+ * Wait till the response message is notified or till TIMEOUT_PERIOD.
+ */
+std::cv_status RadioConfigHidlTest::wait() {
+ std::unique_lock<std::mutex> lock(mtx_);
+
+ std::cv_status status = std::cv_status::no_timeout;
+ auto now = std::chrono::system_clock::now();
+ while (count_ == 0) {
+ status = cv_.wait_until(lock, now + std::chrono::seconds(TIMEOUT_PERIOD));
+ if (status == std::cv_status::timeout) {
+ return status;
+ }
+ }
+ count_--;
+ return status;
+}
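For reference, the SetUp()/notify()/wait() helpers above are the synchronization scaffolding the radio-config VTS cases are built on. Below is a minimal, illustrative sketch (not part of this change) of how a test case would typically drive them; it assumes the GetRandomSerialNumber() helper from the shared radio VTS utilities and uses getPhoneCapability(), inherited from config@1.1, as the example request.

// Hypothetical test case, shown only to illustrate the request/notify/wait flow.
TEST_P(RadioConfigHidlTest, getPhoneCapability_sketch) {
    serial = GetRandomSerialNumber();  // assumed helper from vts_test_util.h
    Return<void> res = radioConfig->getPhoneCapability(serial);
    ASSERT_TRUE(res.isOk());
    // wait() blocks until RadioConfigResponse calls notify(serial) or TIMEOUT_PERIOD elapses.
    EXPECT_EQ(std::cv_status::no_timeout, wait());
    EXPECT_EQ(serial, radioConfigRsp->rspInfo.serial);
    EXPECT_EQ(RadioResponseType::SOLICITED, radioConfigRsp->rspInfo.type);
}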
diff --git a/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h b/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h
new file mode 100644
index 0000000..9b78c04
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_hidl_hal_utils.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+
+#include <android/hardware/radio/config/1.3/IRadioConfig.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigIndication.h>
+#include <android/hardware/radio/config/1.3/IRadioConfigResponse.h>
+#include <android/hardware/radio/config/1.3/types.h>
+
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+#include "vts_test_util.h"
+
+using namespace ::android::hardware::radio::config::V1_1;
+using namespace ::android::hardware::radio::config::V1_2;
+using namespace ::android::hardware::radio::config::V1_3;
+
+using ::android::sp;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+using ::android::hardware::radio::V1_0::RadioResponseInfo;
+using ::android::hardware::radio::V1_0::RadioResponseType;
+
+#define TIMEOUT_PERIOD 75
+
+class RadioConfigHidlTest;
+
+/* Callback class for radio config response */
+class RadioConfigResponse : public ::android::hardware::radio::config::V1_3::IRadioConfigResponse {
+ protected:
+ RadioConfigHidlTest& parent;
+
+ public:
+ RadioResponseInfo rspInfo;
+ PhoneCapability phoneCap;
+
+ RadioConfigResponse(RadioConfigHidlTest& parent);
+ virtual ~RadioConfigResponse() = default;
+
+ /* 1.0 Api */
+ Return<void> getSimSlotsStatusResponse(
+ const RadioResponseInfo& info,
+ const hidl_vec<::android::hardware::radio::config::V1_0::SimSlotStatus>& slotStatus);
+
+ Return<void> setSimSlotsMappingResponse(const RadioResponseInfo& info);
+
+ /* 1.1 Api */
+ Return<void> getPhoneCapabilityResponse(const RadioResponseInfo& info,
+ const PhoneCapability& phoneCapability);
+
+ Return<void> setPreferredDataModemResponse(const RadioResponseInfo& info);
+
+ Return<void> getModemsConfigResponse(const RadioResponseInfo& info,
+ const ModemsConfig& mConfig);
+
+ Return<void> setModemsConfigResponse(const RadioResponseInfo& info);
+
+ /* 1.2 Api */
+ Return<void> getSimSlotsStatusResponse_1_2(const RadioResponseInfo& info,
+ const hidl_vec<SimSlotStatus>& slotStatus);
+};
+
+/* Callback class for radio config indication */
+class RadioConfigIndication
+ : public ::android::hardware::radio::config::V1_3::IRadioConfigIndication {
+ protected:
+ RadioConfigHidlTest& parent;
+
+ public:
+ RadioConfigIndication(RadioConfigHidlTest& parent);
+ virtual ~RadioConfigIndication() = default;
+
+ /* 1.2 Api */
+ Return<void> simSlotsStatusChanged_1_2(
+ ::android::hardware::radio::V1_0::RadioIndicationType type,
+ const hidl_vec<SimSlotStatus>& slotStatus);
+};
+
+// The main test class for Radio config HIDL.
+class RadioConfigHidlTest : public ::testing::TestWithParam<std::string> {
+ protected:
+ std::mutex mtx_;
+ std::condition_variable cv_;
+ int count_;
+
+ public:
+ virtual void SetUp() override;
+
+ /* Used as a mechanism to inform the test about data/event callback */
+ void notify(int receivedSerial);
+
+ /* Test code calls this function to wait for response */
+ std::cv_status wait();
+
+ void updateSimCardStatus();
+
+ /* Serial number for radio request */
+ int serial;
+
+ /* radio config service handle */
+ sp<::android::hardware::radio::config::V1_3::IRadioConfig> radioConfig;
+
+ /* radio config response handle */
+ sp<RadioConfigResponse> radioConfigRsp;
+};
diff --git a/radio/config/1.3/vts/functional/radio_config_response.cpp b/radio/config/1.3/vts/functional/radio_config_response.cpp
new file mode 100644
index 0000000..1ca960e
--- /dev/null
+++ b/radio/config/1.3/vts/functional/radio_config_response.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <radio_config_hidl_hal_utils.h>
+
+using ::android::hardware::radio::V1_0::RadioResponseInfo;
+
+// SimSlotStatus slotStatus;
+
+RadioConfigResponse::RadioConfigResponse(RadioConfigHidlTest& parent) : parent(parent) {}
+
+/* 1.0 Apis */
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse(
+ const RadioResponseInfo& /* info */,
+ const ::android::hardware::hidl_vec<
+ ::android::hardware::radio::config::V1_0::SimSlotStatus>& /* slotStatus */) {
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setSimSlotsMappingResponse(const RadioResponseInfo& /* info */) {
+ return Void();
+}
+
+/* 1.1 Apis */
+Return<void> RadioConfigResponse::getPhoneCapabilityResponse(
+ const RadioResponseInfo& info, const PhoneCapability& phoneCapability) {
+ rspInfo = info;
+ phoneCap = phoneCapability;
+ parent.notify(info.serial);
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setPreferredDataModemResponse(
+ const RadioResponseInfo& /* info */) {
+ return Void();
+}
+
+Return<void> RadioConfigResponse::getModemsConfigResponse(const RadioResponseInfo& /* info */,
+ const ModemsConfig& /* mConfig */) {
+ return Void();
+}
+
+Return<void> RadioConfigResponse::setModemsConfigResponse(const RadioResponseInfo& /* info */) {
+ return Void();
+}
+
+/* 1.2 Apis */
+Return<void> RadioConfigResponse::getSimSlotsStatusResponse_1_2(
+ const RadioResponseInfo& /* info */,
+ const ::android::hardware::hidl_vec<SimSlotStatus>& /* slotStatus */) {
+ return Void();
+}
\ No newline at end of file
diff --git a/sensors/2.0/multihal/Android.bp b/sensors/2.0/multihal/Android.bp
index c13eaf2..811c455 100644
--- a/sensors/2.0/multihal/Android.bp
+++ b/sensors/2.0/multihal/Android.bp
@@ -43,10 +43,10 @@
srcs: [
"service.cpp",
"HalProxy.cpp",
- "ScopedWakelock.cpp",
],
init_rc: ["android.hardware.sensors@2.0-service-multihal.rc"],
vintf_fragments: ["android.hardware.sensors@2.0-multihal.xml"],
+ shared_libs: ["android.hardware.sensors@2.0-ScopedWakelock"],
}
cc_library_headers {
@@ -55,19 +55,40 @@
export_include_dirs: ["include"],
}
+cc_library_shared {
+ name: "android.hardware.sensors@2.0-ScopedWakelock",
+ defaults: [
+ "hidl_defaults",
+ "android.hardware.sensors@2.0-multihal-defaults",
+ ],
+ srcs: [
+ "ScopedWakelock.cpp",
+ ],
+ vendor_available: true,
+ export_header_lib_headers: [
+ "android.hardware.sensors@2.0-multihal.header"
+ ]
+}
+
// The below targets should only be used for testing.
cc_test_library {
name: "android.hardware.sensors@2.0-HalProxy",
- defaults: ["android.hardware.sensors@2.0-multihal-defaults"],
+ defaults: [
+ "hidl_defaults",
+ "android.hardware.sensors@2.0-multihal-defaults",
+ ],
vendor_available: true,
srcs: [
"HalProxy.cpp",
- "ScopedWakelock.cpp",
],
export_header_lib_headers: [
"android.hardware.sensors@2.0-multihal.header",
],
+ export_shared_lib_headers: [
+ "android.hardware.sensors@2.0-ScopedWakelock",
+ ],
shared_libs: [
"libutils",
+ "android.hardware.sensors@2.0-ScopedWakelock",
],
}
diff --git a/sensors/2.0/multihal/tests/Android.bp b/sensors/2.0/multihal/tests/Android.bp
index e7f9499..1637312 100644
--- a/sensors/2.0/multihal/tests/Android.bp
+++ b/sensors/2.0/multihal/tests/Android.bp
@@ -25,6 +25,7 @@
shared_libs: [
"android.hardware.sensors@1.0",
"android.hardware.sensors@2.0",
+ "android.hardware.sensors@2.0-ScopedWakelock",
"libcutils",
"libfmq",
"libhardware",
@@ -83,6 +84,7 @@
shared_libs: [
"android.hardware.sensors@1.0",
"android.hardware.sensors@2.0",
+ "android.hardware.sensors@2.0-ScopedWakelock",
"libbase",
"libcutils",
"libfmq",
diff --git a/soundtrigger/2.1/Android.bp b/soundtrigger/2.1/Android.bp
index 68e425b..30173cb 100644
--- a/soundtrigger/2.1/Android.bp
+++ b/soundtrigger/2.1/Android.bp
@@ -15,5 +15,5 @@
"android.hardware.soundtrigger@2.0",
"android.hidl.base@1.0",
],
- gen_java: false,
+ gen_java: true,
}
diff --git a/soundtrigger/2.2/Android.bp b/soundtrigger/2.2/Android.bp
index 43898c7..7556aa4 100644
--- a/soundtrigger/2.2/Android.bp
+++ b/soundtrigger/2.2/Android.bp
@@ -15,5 +15,5 @@
"android.hardware.soundtrigger@2.1",
"android.hidl.base@1.0",
],
- gen_java: false,
+ gen_java: true,
}
diff --git a/tests/extension/vibrator/aidl/client/test-cpp-client.cpp b/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
index f6f5537..015a345 100644
--- a/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
+++ b/tests/extension/vibrator/aidl/client/test-cpp-client.cpp
@@ -20,9 +20,9 @@
#include <binder/IServiceManager.h>
#include <gtest/gtest.h>
+using android::checked_interface_cast;
using android::IBinder;
using android::IInterface;
-using android::interface_cast;
using android::OK;
using android::sp;
using android::waitForVintfService;
@@ -44,7 +44,7 @@
// getting the extension
sp<IBinder> ext;
ASSERT_EQ(OK, IInterface::asBinder(vib)->getExtension(&ext));
- sp<ICustomVibrator> cvib = interface_cast<ICustomVibrator>(ext);
+ sp<ICustomVibrator> cvib = checked_interface_cast<ICustomVibrator>(ext);
ASSERT_NE(nullptr, cvib.get());
// calling extension method
diff --git a/tests/memory/1.0/Android.bp b/tests/memory/1.0/Android.bp
index 29f6be7..6612e31 100644
--- a/tests/memory/1.0/Android.bp
+++ b/tests/memory/1.0/Android.bp
@@ -11,5 +11,5 @@
"android.hidl.memory.block@1.0",
"android.hidl.memory.token@1.0",
],
- gen_java: false,
+ gen_java: true,
}
diff --git a/tests/memory/2.0/.hidl_for_test b/tests/memory/2.0/.hidl_for_test
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/memory/2.0/.hidl_for_test
diff --git a/tests/memory/2.0/Android.bp b/tests/memory/2.0/Android.bp
index 5166652..d24bd21 100644
--- a/tests/memory/2.0/Android.bp
+++ b/tests/memory/2.0/Android.bp
@@ -1,9 +1,11 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
hidl_interface {
name: "android.hardware.tests.memory@2.0",
root: "android.hardware",
srcs: [
- "IMemoryInterface.hal",
"types.hal",
+ "IMemoryInterface.hal",
],
interfaces: [
"android.hidl.base@1.0",
diff --git a/usb/1.0/vts/functional/Android.bp b/usb/1.0/vts/functional/Android.bp
index 683ee17..1a3b56b 100644
--- a/usb/1.0/vts/functional/Android.bp
+++ b/usb/1.0/vts/functional/Android.bp
@@ -19,5 +19,5 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalUsbV1_0TargetTest.cpp"],
static_libs: ["android.hardware.usb@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/usb/1.0/vts/functional/VtsHalUsbV1_0TargetTest.cpp b/usb/1.0/vts/functional/VtsHalUsbV1_0TargetTest.cpp
index ee7ef1b..bba75c8 100644
--- a/usb/1.0/vts/functional/VtsHalUsbV1_0TargetTest.cpp
+++ b/usb/1.0/vts/functional/VtsHalUsbV1_0TargetTest.cpp
@@ -20,9 +20,10 @@
#include <android/hardware/usb/1.0/IUsb.h>
#include <android/hardware/usb/1.0/IUsbCallback.h>
#include <android/hardware/usb/1.0/types.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <log/log.h>
#include <stdlib.h>
#include <chrono>
@@ -49,20 +50,8 @@
using ::android::hardware::Void;
using ::android::sp;
-// Test environment for Usb HIDL HAL.
-class UsbHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static UsbHidlEnvironment* Instance() {
- static UsbHidlEnvironment* instance = new UsbHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IUsb>(); }
-};
-
// The main test class for the USB hidl HAL
-class UsbHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class UsbHidlTest : public testing::TestWithParam<std::string> {
public:
// Callback class for the USB HIDL hal.
// Usb Hal will call this object upon role switch or port query.
@@ -109,8 +98,7 @@
virtual void SetUp() override {
ALOGI("Setup");
- usb = ::testing::VtsHalHidlTargetTestBase::getService<IUsb>(
- UsbHidlEnvironment::Instance()->getServiceName<IUsb>());
+ usb = IUsb::getService(GetParam());
ASSERT_NE(usb, nullptr);
usb_cb_2 = new UsbCallback(*this, 2);
@@ -182,7 +170,7 @@
* Callback oject is created and registered.
* Check to see if the hidl transaction succeeded.
*/
-TEST_F(UsbHidlTest, setCallback) {
+TEST_P(UsbHidlTest, setCallback) {
usb_cb_1 = new UsbCallback(*this, 1);
ASSERT_NE(usb_cb_1, nullptr);
Return<void> ret = usb->setCallback(usb_cb_1);
@@ -193,7 +181,7 @@
* Check to see if querying type-c
* port status succeeds.
*/
-TEST_F(UsbHidlTest, queryPortStatus) {
+TEST_P(UsbHidlTest, queryPortStatus) {
Return<void> ret = usb->queryPortStatus();
ASSERT_TRUE(ret.isOk());
EXPECT_EQ(std::cv_status::no_timeout, wait());
@@ -206,7 +194,7 @@
* This test case tried to switch the port with empty
* name which is expected to fail.
*/
-TEST_F(UsbHidlTest, switchEmptyPort) {
+TEST_P(UsbHidlTest, switchEmptyPort) {
struct PortRole role;
role.type = PortRoleType::DATA_ROLE;
@@ -218,52 +206,6 @@
}
/*
- * Test switching the mode of usb port.
- * Test case queries the usb ports present in device.
- * If there is atleast one usb port, a mode switch
- * to DFP is attempted for the port.
- * The callback parametes are checked to see if the mode
- * switch was successfull. Upon success, Status::SUCCESS
- * is expected to be returned.
- */
-TEST_F(UsbHidlTest, switchModetoDFP) {
- struct PortRole role;
- role.type = PortRoleType::MODE;
- role.role = static_cast<uint32_t>(PortMode::DFP);
-
- Return<void> ret = usb->queryPortStatus();
- ASSERT_TRUE(ret.isOk());
- EXPECT_EQ(std::cv_status::no_timeout, wait());
- EXPECT_EQ(2, usb_last_cookie);
-
- if (!usb_last_port_status.portName.empty()) {
- hidl_string portBeingSwitched = usb_last_port_status.portName;
- ALOGI("mode portname:%s", portBeingSwitched.c_str());
- usb_role_switch_done = false;
- Return<void> ret = usb->switchRole(portBeingSwitched.c_str(), role);
- ASSERT_TRUE(ret.isOk());
-
- std::cv_status waitStatus = wait();
- while (waitStatus == std::cv_status::no_timeout &&
- usb_role_switch_done == false)
- waitStatus = wait();
-
- EXPECT_EQ(std::cv_status::no_timeout, waitStatus);
- EXPECT_EQ(2, usb_last_cookie);
-
- EXPECT_EQ(static_cast<uint32_t>(PortRoleType::MODE),
- static_cast<uint32_t>(usb_last_port_role.type));
- if (usb_last_status == Status::SUCCESS) {
- EXPECT_EQ(static_cast<uint32_t>(PortMode::DFP),
- static_cast<uint32_t>(usb_last_port_role.role));
- } else {
- EXPECT_NE(static_cast<uint32_t>(PortMode::UFP),
- static_cast<uint32_t>(usb_last_port_role.role));
- }
- }
-}
-
-/*
* Test switching the power role of usb port.
* Test case queries the usb ports present in device.
* If there is atleast one usb port, a power role switch
@@ -273,7 +215,7 @@
* is expected to be returned.
*/
-TEST_F(UsbHidlTest, switchPowerRole) {
+TEST_P(UsbHidlTest, switchPowerRole) {
struct PortRole role;
role.type = PortRoleType::POWER_ROLE;
role.role = static_cast<uint32_t>(PortPowerRole::SOURCE);
@@ -319,7 +261,7 @@
* switch was successfull. Upon success, Status::SUCCESS
* is expected to be returned.
*/
-TEST_F(UsbHidlTest, switchDataRole) {
+TEST_P(UsbHidlTest, switchDataRole) {
struct PortRole role;
role.type = PortRoleType::DATA_ROLE;
role.role = static_cast<uint32_t>(PortDataRole::HOST);
@@ -356,11 +298,7 @@
}
}
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(UsbHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- UsbHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, UsbHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IUsb::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/vibrator/aidl/Android.bp b/vibrator/aidl/Android.bp
index cd5439f..1eec1da 100644
--- a/vibrator/aidl/Android.bp
+++ b/vibrator/aidl/Android.bp
@@ -5,5 +5,14 @@
"android/hardware/vibrator/*.aidl",
],
stability: "vintf",
+ backend: {
+ java: {
+ platform_apis: true,
+ },
+ ndk: {
+ vndk: {
+ enabled: true,
+ },
+ },
+ },
}
-
diff --git a/vibrator/aidl/android/hardware/vibrator/CompositeEffect.aidl b/vibrator/aidl/android/hardware/vibrator/CompositeEffect.aidl
new file mode 100644
index 0000000..84556b5
--- /dev/null
+++ b/vibrator/aidl/android/hardware/vibrator/CompositeEffect.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator;
+
+import android.hardware.vibrator.CompositePrimitive;
+
+@VintfStability
+parcelable CompositeEffect {
+ /* Period of silence preceding primitive. */
+ int delayMs;
+ CompositePrimitive primitive;
+ /* 0.0 (exclusive) - 1.0 (inclusive) */
+ float scale;
+}
diff --git a/vibrator/aidl/android/hardware/vibrator/CompositePrimitive.aidl b/vibrator/aidl/android/hardware/vibrator/CompositePrimitive.aidl
new file mode 100644
index 0000000..2a9d0be
--- /dev/null
+++ b/vibrator/aidl/android/hardware/vibrator/CompositePrimitive.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator;
+
+@VintfStability
+@Backing(type="int")
+enum CompositePrimitive {
+ NOOP,
+ CLICK,
+ THUD,
+ SPIN,
+ QUICK_RISE,
+ SLOW_RISE,
+ QUICK_FALL,
+}
diff --git a/vibrator/aidl/android/hardware/vibrator/IVibrator.aidl b/vibrator/aidl/android/hardware/vibrator/IVibrator.aidl
index 8c4fd05..ebf5faa 100644
--- a/vibrator/aidl/android/hardware/vibrator/IVibrator.aidl
+++ b/vibrator/aidl/android/hardware/vibrator/IVibrator.aidl
@@ -19,6 +19,8 @@
import android.hardware.vibrator.IVibratorCallback;
import android.hardware.vibrator.Effect;
import android.hardware.vibrator.EffectStrength;
+import android.hardware.vibrator.CompositeEffect;
+import android.hardware.vibrator.CompositePrimitive;
@VintfStability
interface IVibrator {
@@ -42,6 +44,10 @@
* Whether setAmplitude is supported (when external control is enabled)
*/
const int CAP_EXTERNAL_AMPLITUDE_CONTROL = 1 << 4;
+ /**
+ * Whether compose is supported.
+ */
+ const int CAP_COMPOSE_EFFECTS = 1 << 5;
/**
* Determine capabilities of the vibrator HAL (CAP_* mask)
@@ -107,11 +113,10 @@
* CAP_EXTERNAL_AMPLITUDE_CONTROL.
*
* @param amplitude The unitless force setting. Note that this number must
- * be between 1 and 255, inclusive. If the motor does not
- * have exactly 255 steps, it must do it's best to map it
- * onto the number of steps it does have.
+ * be between 0.0 (exclusive) and 1.0 (inclusive). It must
+ * do its best to map it onto the number of steps it does have.
*/
- void setAmplitude(in int amplitude);
+ void setAmplitude(in float amplitude);
/**
* Enables/disables control override of vibrator to audio.
@@ -128,4 +133,35 @@
* @param enabled Whether external control should be enabled or disabled.
*/
void setExternalControl(in boolean enabled);
+
+ /**
+ * Retrieve composition delay limit.
+ *
+ * Support is reflected in getCapabilities (CAP_COMPOSE_EFFECTS).
+ *
+ * @return Maximum delay for a single CompositeEffect[] entry.
+ */
+ int getCompositionDelayMax();
+
+ /**
+ * Retrieve composition size limit.
+ *
+ * Support is reflected in getCapabilities (CAP_COMPOSE_EFFECTS).
+ *
+ * @return Maximum number of entries in CompositeEffect[].
+ */
+ int getCompositionSizeMax();
+
+ /**
+ * Fire off a string of effect primitives, combined to perform richer effects.
+ *
+ * Support is reflected in getCapabilities (CAP_COMPOSE_EFFECTS).
+ *
+ * Doing this operation while the vibrator is already on is undefined behavior. Clients should
+ * explicitly call off.
+ *
+ * @param composite Array of composition parameters.
+ */
+ void compose(in CompositeEffect[] composite, in IVibratorCallback callback);
+
}
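For orientation, here is a minimal client-side sketch (illustrative only, not part of this change) of driving the new compose() API through the NDK backend while staying within the advertised limits; the function name, the already-connected IVibrator handle, and the null callback are assumptions made for brevity.

// Hypothetical NDK-backend caller for IVibrator::compose().
#include <aidl/android/hardware/vibrator/IVibrator.h>

#include <memory>
#include <vector>

using aidl::android::hardware::vibrator::CompositeEffect;
using aidl::android::hardware::vibrator::CompositePrimitive;
using aidl::android::hardware::vibrator::IVibrator;

void composeClickThenThud(const std::shared_ptr<IVibrator>& vibrator) {
    int32_t capabilities = 0;
    vibrator->getCapabilities(&capabilities);
    if (!(capabilities & IVibrator::CAP_COMPOSE_EFFECTS)) return;

    int32_t maxSize = 0;
    int32_t maxDelayMs = 0;
    vibrator->getCompositionSizeMax(&maxSize);
    vibrator->getCompositionDelayMax(&maxDelayMs);

    CompositeEffect click;
    click.delayMs = 0;
    click.primitive = CompositePrimitive::CLICK;
    click.scale = 1.0f;

    CompositeEffect thud;
    thud.delayMs = maxDelayMs < 100 ? maxDelayMs : 100;  // respect the advertised delay limit
    thud.primitive = CompositePrimitive::THUD;
    thud.scale = 0.5f;

    std::vector<CompositeEffect> composite = {click, thud};
    if (static_cast<int32_t>(composite.size()) <= maxSize) {
        vibrator->compose(composite, nullptr /* callback */);
    }
}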
diff --git a/vibrator/aidl/default/Vibrator.cpp b/vibrator/aidl/default/Vibrator.cpp
index 09cd234..befdeab 100644
--- a/vibrator/aidl/default/Vibrator.cpp
+++ b/vibrator/aidl/default/Vibrator.cpp
@@ -24,11 +24,14 @@
namespace hardware {
namespace vibrator {
+static constexpr int32_t kComposeDelayMaxMs = 1000;
+static constexpr int32_t kComposeSizeMax = 256;
+
ndk::ScopedAStatus Vibrator::getCapabilities(int32_t* _aidl_return) {
LOG(INFO) << "Vibrator reporting capabilities";
*_aidl_return = IVibrator::CAP_ON_CALLBACK | IVibrator::CAP_PERFORM_CALLBACK |
IVibrator::CAP_AMPLITUDE_CONTROL | IVibrator::CAP_EXTERNAL_CONTROL |
- IVibrator::CAP_EXTERNAL_AMPLITUDE_CONTROL;
+ IVibrator::CAP_EXTERNAL_AMPLITUDE_CONTROL | IVibrator::CAP_COMPOSE_EFFECTS;
return ndk::ScopedAStatus::ok();
}
@@ -45,7 +48,9 @@
LOG(INFO) << "Starting on on another thread";
usleep(timeoutMs * 1000);
LOG(INFO) << "Notifying on complete";
- callback->onComplete();
+ if (!callback->onComplete().isOk()) {
+ LOG(ERROR) << "Failed to call onComplete";
+ }
}).detach();
}
return ndk::ScopedAStatus::ok();
@@ -84,9 +89,9 @@
return ndk::ScopedAStatus::ok();
}
-ndk::ScopedAStatus Vibrator::setAmplitude(int32_t amplitude) {
+ndk::ScopedAStatus Vibrator::setAmplitude(float amplitude) {
LOG(INFO) << "Vibrator set amplitude: " << amplitude;
- if (amplitude <= 0 || amplitude > 255) {
+ if (amplitude <= 0.0f || amplitude > 1.0f) {
return ndk::ScopedAStatus(AStatus_fromExceptionCode(EX_ILLEGAL_ARGUMENT));
}
return ndk::ScopedAStatus::ok();
@@ -97,6 +102,55 @@
return ndk::ScopedAStatus::ok();
}
+ndk::ScopedAStatus Vibrator::getCompositionDelayMax(int32_t* maxDelayMs) {
+ *maxDelayMs = kComposeDelayMaxMs;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Vibrator::getCompositionSizeMax(int32_t* maxSize) {
+ *maxSize = kComposeSizeMax;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus Vibrator::compose(const std::vector<CompositeEffect>& composite,
+ const std::shared_ptr<IVibratorCallback>& callback) {
+ if (composite.size() > kComposeSizeMax) {
+ return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
+ }
+
+ for (auto& e : composite) {
+ if (e.delayMs > kComposeDelayMaxMs) {
+ return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
+ }
+ if (e.scale <= 0.0f || e.scale > 1.0f) {
+ return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
+ }
+ if (e.primitive < CompositePrimitive::NOOP ||
+ e.primitive > CompositePrimitive::QUICK_FALL) {
+ return ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION);
+ }
+ }
+
+ std::thread([=] {
+ LOG(INFO) << "Starting compose on another thread";
+
+ for (auto& e : composite) {
+ if (e.delayMs) {
+ usleep(e.delayMs * 1000);
+ }
+ LOG(INFO) << "triggering primitive " << static_cast<int>(e.primitive) << " @ scale "
+ << e.scale;
+ }
+
+ if (callback != nullptr) {
+ LOG(INFO) << "Notifying perform complete";
+ callback->onComplete();
+ }
+ }).detach();
+
+ return ndk::ScopedAStatus::ok();
+}
+
} // namespace vibrator
} // namespace hardware
} // namespace android
diff --git a/vibrator/aidl/default/include/vibrator-impl/Vibrator.h b/vibrator/aidl/default/include/vibrator-impl/Vibrator.h
index 14e7292..817ec80 100644
--- a/vibrator/aidl/default/include/vibrator-impl/Vibrator.h
+++ b/vibrator/aidl/default/include/vibrator-impl/Vibrator.h
@@ -32,8 +32,12 @@
const std::shared_ptr<IVibratorCallback>& callback,
int32_t* _aidl_return) override;
ndk::ScopedAStatus getSupportedEffects(std::vector<Effect>* _aidl_return) override;
- ndk::ScopedAStatus setAmplitude(int32_t amplitude) override;
+ ndk::ScopedAStatus setAmplitude(float amplitude) override;
ndk::ScopedAStatus setExternalControl(bool enabled) override;
+ ndk::ScopedAStatus getCompositionDelayMax(int32_t* maxDelayMs) override;
+ ndk::ScopedAStatus getCompositionSizeMax(int32_t* maxSize) override;
+ ndk::ScopedAStatus compose(const std::vector<CompositeEffect>& composite,
+ const std::shared_ptr<IVibratorCallback>& callback) override;
};
} // namespace vibrator
diff --git a/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp b/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
index b6aa9e2..5c6120b 100644
--- a/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
+++ b/vibrator/aidl/vts/VtsHalVibratorTargetTest.cpp
@@ -28,6 +28,8 @@
using android::String16;
using android::binder::Status;
using android::hardware::vibrator::BnVibratorCallback;
+using android::hardware::vibrator::CompositeEffect;
+using android::hardware::vibrator::CompositePrimitive;
using android::hardware::vibrator::Effect;
using android::hardware::vibrator::EffectStrength;
using android::hardware::vibrator::IVibrator;
@@ -55,6 +57,20 @@
static_cast<EffectStrength>(static_cast<int8_t>(kEffectStrengths.back()) + 1),
};
+// TODO(b/143992652): autogenerate
+const std::vector<CompositePrimitive> kCompositePrimitives = {
+ CompositePrimitive::NOOP, CompositePrimitive::CLICK,
+ CompositePrimitive::THUD, CompositePrimitive::SPIN,
+ CompositePrimitive::QUICK_RISE, CompositePrimitive::SLOW_RISE,
+ CompositePrimitive::QUICK_FALL,
+};
+
+// TODO(b/143992652): autogenerate
+const std::vector<CompositePrimitive> kInvalidPrimitives = {
+ static_cast<CompositePrimitive>(static_cast<int32_t>(kCompositePrimitives.front()) - 1),
+ static_cast<CompositePrimitive>(static_cast<int32_t>(kCompositePrimitives.back()) + 1),
+};
+
class CompletionCallback : public BnVibratorCallback {
public:
CompletionCallback(const std::function<void()>& callback) : mCallback(callback) {}
@@ -201,11 +217,11 @@
TEST_P(VibratorAidl, ChangeVibrationAmplitude) {
if (capabilities & IVibrator::CAP_AMPLITUDE_CONTROL) {
- EXPECT_TRUE(vibrator->setAmplitude(1).isOk());
+ EXPECT_EQ(Status::EX_NONE, vibrator->setAmplitude(0.1f).exceptionCode());
EXPECT_TRUE(vibrator->on(2000, nullptr /*callback*/).isOk());
- EXPECT_TRUE(vibrator->setAmplitude(128).isOk());
+ EXPECT_EQ(Status::EX_NONE, vibrator->setAmplitude(0.5f).exceptionCode());
sleep(1);
- EXPECT_TRUE(vibrator->setAmplitude(255).isOk());
+ EXPECT_EQ(Status::EX_NONE, vibrator->setAmplitude(1.0f).exceptionCode());
sleep(1);
}
}
@@ -214,7 +230,7 @@
if (capabilities & IVibrator::CAP_AMPLITUDE_CONTROL) {
EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT, vibrator->setAmplitude(-1).exceptionCode());
EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT, vibrator->setAmplitude(0).exceptionCode());
- EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT, vibrator->setAmplitude(256).exceptionCode());
+ EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT, vibrator->setAmplitude(1.1).exceptionCode());
}
}
@@ -240,7 +256,7 @@
if (capabilities & IVibrator::CAP_EXTERNAL_CONTROL) {
EXPECT_TRUE(vibrator->setExternalControl(true).isOk());
- Status amplitudeStatus = vibrator->setAmplitude(128);
+ Status amplitudeStatus = vibrator->setAmplitude(0.5);
if (supportsExternalAmplitudeControl) {
EXPECT_TRUE(amplitudeStatus.isOk());
} else {
@@ -259,6 +275,102 @@
}
}
+TEST_P(VibratorAidl, ComposeValidPrimitives) {
+ if (capabilities & IVibrator::CAP_COMPOSE_EFFECTS) {
+ int32_t maxDelay, maxSize;
+
+ EXPECT_EQ(Status::EX_NONE, vibrator->getCompositionDelayMax(&maxDelay).exceptionCode());
+ EXPECT_EQ(Status::EX_NONE, vibrator->getCompositionSizeMax(&maxSize).exceptionCode());
+
+ std::vector<CompositeEffect> composite;
+
+ for (auto primitive : kCompositePrimitives) {
+ CompositeEffect effect;
+
+ effect.delayMs = std::rand() % (maxDelay + 1);
+ effect.primitive = primitive;
+ effect.scale = static_cast<float>(std::rand()) / RAND_MAX ?: 1.0f;
+ composite.emplace_back(effect);
+
+ if (composite.size() == maxSize) {
+ EXPECT_EQ(Status::EX_NONE, vibrator->compose(composite, nullptr).exceptionCode());
+ composite.clear();
+ vibrator->off();
+ }
+ }
+
+ if (composite.size() != 0) {
+ EXPECT_EQ(Status::EX_NONE, vibrator->compose(composite, nullptr).exceptionCode());
+ vibrator->off();
+ }
+ }
+}
+
+TEST_P(VibratorAidl, ComposeUnsupportedPrimitives) {
+ if (capabilities & IVibrator::CAP_COMPOSE_EFFECTS) {
+ for (auto primitive : kInvalidPrimitives) {
+ std::vector<CompositeEffect> composite(1);
+
+ for (auto& effect : composite) {
+ effect.delayMs = 0;
+ effect.primitive = primitive;
+ effect.scale = 1.0f;
+ }
+ EXPECT_EQ(Status::EX_UNSUPPORTED_OPERATION,
+ vibrator->compose(composite, nullptr).exceptionCode());
+ vibrator->off();
+ }
+ }
+}
+
+TEST_P(VibratorAidl, ComposeDelayBoundary) {
+ if (capabilities & IVibrator::CAP_COMPOSE_EFFECTS) {
+ int32_t maxDelay;
+
+ EXPECT_EQ(Status::EX_NONE, vibrator->getCompositionDelayMax(&maxDelay).exceptionCode());
+
+ std::vector<CompositeEffect> composite(1);
+ CompositeEffect effect;
+
+ effect.delayMs = 1;
+ effect.primitive = CompositePrimitive::CLICK;
+ effect.scale = 1.0f;
+
+ std::fill(composite.begin(), composite.end(), effect);
+ EXPECT_EQ(Status::EX_NONE, vibrator->compose(composite, nullptr).exceptionCode());
+
+ effect.delayMs = maxDelay + 1;
+
+ std::fill(composite.begin(), composite.end(), effect);
+ EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT,
+ vibrator->compose(composite, nullptr).exceptionCode());
+ vibrator->off();
+ }
+}
+
+TEST_P(VibratorAidl, ComposeSizeBoundary) {
+ if (capabilities & IVibrator::CAP_COMPOSE_EFFECTS) {
+ int32_t maxSize;
+
+ EXPECT_EQ(Status::EX_NONE, vibrator->getCompositionSizeMax(&maxSize).exceptionCode());
+
+ std::vector<CompositeEffect> composite(maxSize);
+ CompositeEffect effect;
+
+ effect.delayMs = 1;
+ effect.primitive = CompositePrimitive::CLICK;
+ effect.scale = 1.0f;
+
+ std::fill(composite.begin(), composite.end(), effect);
+ EXPECT_EQ(Status::EX_NONE, vibrator->compose(composite, nullptr).exceptionCode());
+
+ composite.emplace_back(effect);
+ EXPECT_EQ(Status::EX_ILLEGAL_ARGUMENT,
+ vibrator->compose(composite, nullptr).exceptionCode());
+ vibrator->off();
+ }
+}
+
INSTANTIATE_TEST_SUITE_P(Vibrator, VibratorAidl,
testing::ValuesIn(android::getAidlHalInstanceNames(IVibrator::descriptor)),
android::PrintInstanceNameToString);